mirror of https://github.com/sysown/proxysql
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
1323 lines · 40 KiB
#!/bin/bash
#
# test_mcp_fts.sh - Comprehensive test script for MCP FTS (Full Text Search) tools
#
# This script tests all 6 FTS tools via the MCP /mcp/query endpoint:
#   - fts_index_table  : Create and populate an FTS index for a MySQL table
#   - fts_search       : Search indexed data using FTS5 with BM25 ranking
#   - fts_list_indexes : List all FTS indexes with metadata
#   - fts_delete_index : Remove an FTS index
#   - fts_reindex      : Refresh an index with fresh data (full rebuild)
#   - fts_rebuild_all  : Rebuild ALL FTS indexes with fresh data
#
# Usage:
#   ./test_mcp_fts.sh [options]
#
# Options:
#   -v, --verbose         Show verbose output (curl requests/responses)
#   -q, --quiet           Suppress progress messages
#   --skip-cleanup        Don't delete test data/indexes after testing
#   --test-schema SCHEMA  Schema to use for testing (default: test_fts)
#   --test-table TABLE    Table to use for testing (default: test_documents)
#   -h, --help            Show help
#
# Environment Variables:
#   MCP_HOST        MCP server host (default: 127.0.0.1)
#   MCP_PORT        MCP server port (default: 6071)
#   MYSQL_HOST      MySQL backend host (default: 127.0.0.1)
#   MYSQL_PORT      MySQL backend port (default: 6033)
#   MYSQL_USER      MySQL user (default: root)
#   MYSQL_PASSWORD  MySQL password (default: root)
#
# Prerequisites:
#   - ProxySQL with MCP module enabled
#   - MySQL backend accessible
#   - curl, jq (optional but recommended)
#
set -e

# ============================================================================
# CONFIGURATION
# ============================================================================

# MCP Server Configuration
MCP_HOST="${MCP_HOST:-127.0.0.1}"
MCP_PORT="${MCP_PORT:-6071}"
MCP_ENDPOINT="http://${MCP_HOST}:${MCP_PORT}/mcp/query"

# MySQL Backend Configuration (for setup/teardown)
MYSQL_HOST="${MYSQL_HOST:-127.0.0.1}"
MYSQL_PORT="${MYSQL_PORT:-6033}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_PASSWORD="${MYSQL_PASSWORD:-root}"

# Test Configuration
TEST_SCHEMA="${TEST_SCHEMA:-test_fts}"
TEST_TABLE="${TEST_TABLE:-test_documents}"

# Test Data
# NOTE: keyed-element syntax (["1"]=...) requires an associative array.
# Without 'declare -A' bash evaluates the quoted keys as arithmetic indices,
# which only works by accident because the keys happen to be numeric.
declare -A TEST_DOCUMENTS=(
    ["1"]="Customer John Smith reported urgent issue with order #12345. Status: pending. Priority: high."
    ["2"]="Machine learning model training completed successfully. Accuracy: 95%. Dataset size: 1M records."
    ["3"]="Database migration from MySQL to PostgreSQL failed due to foreign key constraints. Error code: FK001."
    ["4"]="Urgent: Payment gateway timeout during Black Friday sale. Transactions affected: 1500."
    ["5"]="AI-powered recommendation engine shows 40% improvement in click-through rates after optimization."
    ["6"]="Security alert: Multiple failed login attempts detected from IP 192.168.1.100. Account locked."
    ["7"]="Quarterly financial report shows revenue increase of 25% compared to previous year."
    ["8"]="Customer feedback: Excellent product quality but delivery was delayed by 3 days."
    ["9"]="System crash occurred at 2:30 AM UTC. Root cause: Out of memory error in cache service."
    ["10"]="New feature request: Add dark mode support for mobile applications. Priority: medium."
)

# Test Options
VERBOSE=false
QUIET=false
SKIP_CLEANUP=false

# Colors (ANSI escape sequences; expanded by echo -e / printf %b)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
MAGENTA='\033[0;35m'
NC='\033[0m'

# Statistics
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
SKIPPED_TESTS=0

# Test results storage (parallel arrays indexed by test order)
declare -a TEST_RESULTS
declare -a TEST_NAMES
# ============================================================================
# LOGGING FUNCTIONS
# ============================================================================

# Print an informational message unless quiet mode is enabled.
log_info() {
    [ "${QUIET}" = "false" ] && echo -e "${GREEN}[INFO]${NC} $1"
    return 0
}
# Print a warning message; shown even in quiet mode.
log_warn() {
    printf '%b\n' "${YELLOW}[WARN]${NC} $1"
}
# Print an error message; shown even in quiet mode.
log_error() {
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}
# Print a debug message only when verbose mode is active.
log_verbose() {
    if [ "${VERBOSE}" != "true" ]; then
        return 0
    fi
    echo -e "${BLUE}[DEBUG]${NC} $1"
}
# Announce a test step unless quiet mode is enabled.
log_test() {
    case "${QUIET}" in
        false) echo -e "${CYAN}[TEST]${NC} $1" ;;
    esac
}
# Print a banner (blank line + framed title) separating major sections.
log_section() {
    local bar="========================================"
    echo ""
    echo -e "${MAGENTA}${bar}${NC}"
    echo -e "${MAGENTA}$1${NC}"
    echo -e "${MAGENTA}${bar}${NC}"
}
# Escape single quotes in SQL strings (prevent SQL injection).
# Uses printf rather than echo so values beginning with '-n' or containing
# backslash sequences are passed through verbatim instead of being
# interpreted by echo.
escape_sql() {
    printf '%s\n' "$1" | sed "s/'/''/g"
}
# ============================================================================
# MCP REQUEST FUNCTIONS
# ============================================================================

# Execute MCP request.
#
# POSTs a JSON-RPC payload to ${MCP_ENDPOINT} and prints the response body
# on stdout. The HTTP status code (appended by the curl -w format) is used
# only for verbose logging. Always returns 0 so callers can inspect the
# body without tripping set -e.
#
# Arguments:
#   $1 - JSON payload to POST
mcp_request() {
    local payload="$1"

    local response
    response=$(curl -s --connect-timeout 5 --max-time 30 -w "\n%{http_code}" -X POST "${MCP_ENDPOINT}" \
        -H "Content-Type: application/json" \
        -d "${payload}" 2>/dev/null)

    # Split body and trailing status-code line with parameter expansion
    # instead of 'head -n -1', which is GNU-only and fails on BSD/macOS.
    local body="${response%$'\n'*}"
    local code="${response##*$'\n'}"

    if [ "${VERBOSE}" = "true" ]; then
        echo "Request: ${payload}" >&2
        echo "Response (${code}): ${body}" >&2
    fi

    echo "${body}"
    return 0
}
# Check if the MCP server answers a JSON-RPC ping; returns non-zero when
# the server is unreachable or the reply lacks a "result" member.
check_mcp_server() {
    log_test "Checking MCP server accessibility..."

    local reply
    reply=$(mcp_request '{"jsonrpc":"2.0","method":"ping","id":1}')

    if ! echo "${reply}" | grep -q "result"; then
        log_error "MCP server is not accessible"
        log_error "Response: ${reply}"
        return 1
    fi

    log_info "MCP server is accessible at ${MCP_ENDPOINT}"
    return 0
}
# Execute an FTS tool through the MCP tools/call method.
#
# Arguments:
#   $1 - tool name (e.g. fts_search)
#   $2 - JSON object holding the tool arguments
fts_tool_call() {
    local tool="$1"
    local args="$2"

    # Assemble the JSON-RPC envelope with printf instead of a heredoc.
    local payload
    payload=$(printf '{
  "jsonrpc": "2.0",
  "method": "tools/call",
  "params": {
    "name": "%s",
    "arguments": %s
  },
  "id": 1
}' "${tool}" "${args}")

    mcp_request "${payload}"
}
# Extract a field value from a JSON document.
#
# Arguments:
#   $1 - JSON text
#   $2 - jq-style path (e.g. ".total_matches" or ".indexes | length")
#
# With jq installed, the path is evaluated verbatim. Without jq, a
# best-effort grep/sed fallback handles simple string fields only; the
# leading '.' of the jq path is stripped so the key name matches the raw
# JSON text (the original fallback grepped for the dotted path and could
# never match anything).
extract_json_field() {
    local response="$1"
    local field="$2"

    if command -v jq >/dev/null 2>&1; then
        echo "${response}" | jq -r "${field}" 2>/dev/null || echo ""
    else
        # Fallback to grep/sed for basic "key": "value" JSON parsing
        local key="${field#.}"
        echo "${response}" | grep -o "\"${key}\"[[:space:]]*:[[:space:]]*\"[^\"]*\"" | sed 's/.*: "\(.*\)"/\1/' || echo ""
    fi
}
# Check JSON boolean field.
#
# Arguments:
#   $1 - raw MCP response
#   $2 - jq-style field path (e.g. ".success")
#   $3 - expected value: "true" or "false"
# Returns 0 when the field, read from the unwrapped inner result, matches.
check_json_bool() {
    local response="$1"
    local field="$2"
    local expected="$3"

    # Extract inner result from double-nested structure
    local inner_result
    inner_result=$(extract_inner_result "${response}")

    if command -v jq >/dev/null 2>&1; then
        local actual
        actual=$(echo "${inner_result}" | jq -r "${field}" 2>/dev/null)
        [ "${actual}" = "${expected}" ]
    else
        # Fallback: grep for a literal true/false. Strip the leading '.' of
        # the jq path so the key matches the raw JSON text (grepping for the
        # dotted path, as the original did, could never match).
        local key="${field#.}"
        if [ "${expected}" = "true" ]; then
            echo "${inner_result}" | grep -q "\"${key}\"[[:space:]]*:[[:space:]]*true"
        else
            echo "${inner_result}" | grep -q "\"${key}\"[[:space:]]*:[[:space:]]*false"
        fi
    fi
}
# Unwrap the MCP response envelope (handles double-nesting): prefer
# .result.content[0].text, then .result.result, then .result; when jq is
# unavailable the input is echoed back untouched.
extract_inner_result() {
    local response="$1"

    if ! command -v jq >/dev/null 2>&1; then
        echo "${response}"
        return 0
    fi

    local text
    text=$(echo "${response}" | jq -r '.result.content[0].text // empty' 2>/dev/null)
    if [ -n "${text}" ] && [ "${text}" != "null" ]; then
        echo "${text}"
        return 0
    fi

    echo "${response}" | jq -r '.result.result // .result' 2>/dev/null || echo "${response}"
}
# Unwrap the MCP envelope, then pull a single field out of the inner result.
# Arguments: $1 - raw MCP response, $2 - jq-style field path.
extract_inner_field() {
    local unwrapped
    unwrapped=$(extract_inner_result "$1")
    extract_json_field "${unwrapped}" "$2"
}
# ============================================================================
# MYSQL HELPER FUNCTIONS
# ============================================================================

# Run a SQL statement ($1) against the MySQL backend; stderr is suppressed.
# The password is passed via MYSQL_PWD to keep it off the command line.
mysql_exec() {
    MYSQL_PWD="${MYSQL_PASSWORD}" mysql \
        -h "${MYSQL_HOST}" -P "${MYSQL_PORT}" -u "${MYSQL_USER}" \
        -e "$1" 2>/dev/null
}
# Probe the MySQL backend with a trivial query; returns non-zero on failure.
mysql_check_connection() {
    log_test "Checking MySQL connection..."

    if ! mysql_exec "SELECT 1" >/dev/null 2>&1; then
        log_error "Cannot connect to MySQL backend"
        log_error "Host: ${MYSQL_HOST}:${MYSQL_PORT}, User: ${MYSQL_USER}"
        return 1
    fi

    log_info "MySQL connection successful"
    return 0
}
# Create the test schema/table, wipe any previous rows, and insert the ten
# TEST_DOCUMENTS fixtures, deriving category/priority from keywords in each
# document's text.
setup_test_schema() {
    log_info "Setting up test schema and table..."

    # Create schema
    mysql_exec "CREATE SCHEMA IF NOT EXISTS ${TEST_SCHEMA};" 2>/dev/null || true

    # Create test table
    mysql_exec "CREATE TABLE IF NOT EXISTS ${TEST_SCHEMA}.${TEST_TABLE} (
        id INT PRIMARY KEY AUTO_INCREMENT,
        title VARCHAR(200),
        content TEXT,
        category VARCHAR(50),
        priority VARCHAR(20),
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );" 2>/dev/null || true

    # Start from an empty table with a predictable auto-increment
    mysql_exec "DELETE FROM ${TEST_SCHEMA}.${TEST_TABLE};" 2>/dev/null || true
    mysql_exec "ALTER TABLE ${TEST_SCHEMA}.${TEST_TABLE} AUTO_INCREMENT = 1;" 2>/dev/null || true

    # Insert test data
    local key
    for key in "${!TEST_DOCUMENTS[@]}"; do
        local body="${TEST_DOCUMENTS[$key]}"
        local heading="Document ${key}"

        # Classify by the first matching keyword group; order matters
        # ("urgent" wins over "customer", etc.).
        local cat="general"
        local prio="normal"
        if echo "${body}" | grep -Eiq "urgent"; then
            cat="support"
            prio="high"
        elif echo "${body}" | grep -Eiq "error|failed|crash"; then
            cat="errors"
            prio="high"
        elif echo "${body}" | grep -Eiq "customer"; then
            cat="support"
        elif echo "${body}" | grep -Eiq "security"; then
            cat="security"
            prio="high"
        elif echo "${body}" | grep -Eiq "report|financial"; then
            cat="reports"
        fi

        mysql_exec "INSERT INTO ${TEST_SCHEMA}.${TEST_TABLE} (title, content, category, priority) \
            VALUES ('$(escape_sql "${heading}")', '$(escape_sql "${body}")', '$(escape_sql "${cat}")', '$(escape_sql "${prio}")');" 2>/dev/null || true
    done

    log_info "Test data setup complete (10 documents inserted)"
}
# Remove test artifacts: the FTS index, the test tables, and the schema.
# Honors --skip-cleanup by returning early.
teardown_test_schema() {
    if [ "${SKIP_CLEANUP}" = "true" ]; then
        log_info "Skipping cleanup (--skip-cleanup specified)"
        return 0
    fi

    log_info "Cleaning up test schema..."

    # Drop FTS index if exists
    fts_tool_call "fts_delete_index" "{\"schema\": \"${TEST_SCHEMA}\", \"table\": \"${TEST_TABLE}\"}" >/dev/null

    # Drop test table(s) and schema, best-effort
    local stmt
    for stmt in \
        "DROP TABLE IF EXISTS ${TEST_SCHEMA}.${TEST_SCHEMA}__${TEST_TABLE};" \
        "DROP TABLE IF EXISTS ${TEST_SCHEMA}.${TEST_TABLE};" \
        "DROP SCHEMA IF EXISTS ${TEST_SCHEMA};"; do
        mysql_exec "${stmt}" 2>/dev/null || true
    done

    log_info "Cleanup complete"
}
# ============================================================================
# TEST FUNCTIONS
# ============================================================================

# Run a single named test function, recording pass/fail statistics.
#
# Arguments:
#   $1 - human-readable test name
#   $2 - function to invoke; its exit status decides PASS/FAIL
# Globals: updates TOTAL_TESTS, PASSED_TESTS, FAILED_TESTS, TEST_NAMES,
#          TEST_RESULTS.
# Always returns 0 so one failing test does not abort the suite under set -e.
run_test() {
    local name="$1"
    local fn="$2"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))
    TEST_NAMES+=("${name}")

    log_test "${name}"

    local output
    local verdict="FAIL"
    if output=$(${fn} 2>&1); then
        verdict="PASS"
    fi

    if [ "${verdict}" = "PASS" ]; then
        PASSED_TESTS=$((PASSED_TESTS + 1))
        log_info " ✓ ${name}"
    else
        FAILED_TESTS=$((FAILED_TESTS + 1))
        log_error " ✗ ${name}"
        if [ "${VERBOSE}" = "true" ]; then
            echo " Output: ${output}"
        fi
    fi

    TEST_RESULTS+=("${verdict}")

    return 0
}
# ============================================================================
# FTS TOOL TESTS
# ============================================================================

# Test 1: fts_list_indexes (initially empty)
test_fts_list_indexes_initial() {
    local reply
    reply=$(fts_tool_call "fts_list_indexes" "{}")

    # The call itself must report success
    if ! check_json_bool "${reply}" ".success" "true"; then
        log_error "fts_list_indexes failed: ${reply}"
        return 1
    fi

    # The indexes array should exist (expected empty on a fresh server)
    local count
    count=$(extract_inner_field "${reply}" ".indexes | length")
    log_verbose "Initial index count: ${count}"

    log_info " Initial indexes listed successfully"
    return 0
}
# Test 2: fts_index_table
# Creates the FTS index over all four text columns and verifies that at
# least the 10 fixture rows were indexed.
test_fts_index_table() {
    local response
    response=$(fts_tool_call "fts_index_table" \
        "{\"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"columns\": [\"title\", \"content\", \"category\", \"priority\"], \
          \"primary_key\": \"id\"}")

    # Check for success
    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "fts_index_table failed: ${response}"
        return 1
    fi

    # Verify row count. Default to 0 so a missing/empty field fails the
    # numeric comparison cleanly instead of raising a shell error.
    local row_count
    row_count=$(extract_inner_field "${response}" ".row_count")
    if [ "${row_count:-0}" -lt 10 ]; then
        log_error "Expected at least 10 rows indexed, got: ${row_count}"
        return 1
    fi

    log_info " Index created with ${row_count} rows"
    return 0
}
# Test 3: fts_list_indexes (after index creation)
# The listing must contain at least one entry, including the index just
# created for TEST_SCHEMA.TEST_TABLE.
test_fts_list_indexes_after_creation() {
    local reply
    reply=$(fts_tool_call "fts_list_indexes" "{}")

    if ! check_json_bool "${reply}" ".success" "true"; then
        log_error "fts_list_indexes failed: ${reply}"
        return 1
    fi

    # At least one index must be registered
    local count
    count=$(extract_inner_field "${reply}" ".indexes | length")
    if [ "${count}" -lt 1 ]; then
        log_error "Expected at least 1 index, got: ${count}"
        return 1
    fi

    # Scan the listing for the schema/table pair created by this test run
    local found=false
    local idx
    for ((idx = 0; idx < count; idx++)); do
        local entry_schema entry_table
        entry_schema=$(extract_inner_field "${reply}" ".indexes[$idx].schema")
        entry_table=$(extract_inner_field "${reply}" ".indexes[$idx].table")
        if [ "${entry_schema}" = "${TEST_SCHEMA}" ] && [ "${entry_table}" = "${TEST_TABLE}" ]; then
            found=true
            break
        fi
    done

    if [ "${found}" != "true" ]; then
        log_error "test_documents index not found in index list"
        return 1
    fi

    log_info " test_documents index found in index list"
    return 0
}
# Test 4: fts_search (simple query)
test_fts_search_simple() {
    local query="urgent"
    local response
    response=$(fts_tool_call "fts_search" \
        "{\"query\": \"${query}\", \
          \"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"limit\": 10}")

    # Check for success
    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "fts_search failed: ${response}"
        return 1
    fi

    # Check results. The payload lives inside the MCP envelope, so read it
    # with extract_inner_field — extract_json_field on the raw response
    # cannot see these fields (other tests already unwrap the envelope).
    local total_matches
    local result_count
    total_matches=$(extract_inner_field "${response}" ".total_matches")
    result_count=$(extract_inner_field "${response}" ".results | length")

    if [ "${total_matches:-0}" -lt 1 ]; then
        log_error "Expected at least 1 match for '${query}', got: ${total_matches}"
        return 1
    fi

    log_info " Search '${query}': ${total_matches} total matches, ${result_count} returned"
    return 0
}
# Test 5: fts_search (phrase query)
test_fts_search_phrase() {
    local query="payment gateway"
    local response
    response=$(fts_tool_call "fts_search" \
        "{\"query\": \"${query}\", \
          \"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"limit\": 10}")

    # Check for success
    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "fts_search failed: ${response}"
        return 1
    fi

    # Read total_matches from inside the MCP envelope (consistent with the
    # unwrapping done by check_json_bool / the other search tests).
    local total_matches
    total_matches=$(extract_inner_field "${response}" ".total_matches")

    if [ "${total_matches:-0}" -lt 1 ]; then
        log_error "Expected at least 1 match for '${query}', got: ${total_matches}"
        return 1
    fi

    log_info " Phrase search '${query}': ${total_matches} matches"
    return 0
}
# Test 6: fts_search (cross-table - no schema filter)
test_fts_search_cross_table() {
    local query="customer"
    local response
    response=$(fts_tool_call "fts_search" \
        "{\"query\": \"${query}\", \
          \"limit\": 10}")

    # Check for success
    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "fts_search failed: ${response}"
        return 1
    fi

    # Read total_matches from inside the MCP envelope (consistent with the
    # unwrapping done by check_json_bool / the other search tests).
    local total_matches
    total_matches=$(extract_inner_field "${response}" ".total_matches")

    if [ "${total_matches:-0}" -lt 1 ]; then
        log_error "Expected at least 1 match for '${query}', got: ${total_matches}"
        return 1
    fi

    log_info " Cross-table search '${query}': ${total_matches} matches"
    return 0
}
# Test 7: fts_search (BM25 ranking test)
# Only reports the match count; ranking order itself is not asserted here.
test_fts_search_bm25() {
    local query="error issue"
    local response
    response=$(fts_tool_call "fts_search" \
        "{\"query\": \"${query}\", \
          \"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"limit\": 5}")

    # Check for success
    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "fts_search failed: ${response}"
        return 1
    fi

    # Read the match count from inside the MCP envelope (consistent with
    # the other search tests).
    local total_matches
    total_matches=$(extract_inner_field "${response}" ".total_matches")

    log_info " BM25 ranking test for '${query}': ${total_matches} matches"
    return 0
}
# Test 8: fts_search (pagination)
# Fetches two consecutive pages of the same query and requires both calls
# to report success.
test_fts_search_pagination() {
    local query="customer"
    local page_size=3
    local start=0

    # First page
    local first_page
    first_page=$(fts_tool_call "fts_search" \
        "{\"query\": \"${query}\", \
          \"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"limit\": ${page_size}, \
          \"offset\": ${start}}")

    # Second page
    local second_page
    second_page=$(fts_tool_call "fts_search" \
        "{\"query\": \"${query}\", \
          \"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"limit\": ${page_size}, \
          \"offset\": $((page_size + start))}")

    # Both pages must succeed
    if ! check_json_bool "${first_page}" ".success" "true" || \
       ! check_json_bool "${second_page}" ".success" "true"; then
        log_error "fts_search pagination failed"
        return 1
    fi

    log_info " Pagination test passed"
    return 0
}
# Test 9: fts_search (empty query should fail)
test_fts_search_empty_query() {
    local reply
    reply=$(fts_tool_call "fts_search" "{\"query\": \"\"}")

    # An empty query must be rejected by the server
    if check_json_bool "${reply}" ".success" "true"; then
        log_error "Empty query should fail but succeeded"
        return 1
    fi

    log_info " Empty query correctly rejected"
    return 0
}
# Test 10: fts_reindex (refresh existing index)
test_fts_reindex() {
    # First, add a new document to MySQL
    mysql_exec "INSERT INTO ${TEST_SCHEMA}.${TEST_TABLE} (title, content, category, priority) \
        VALUES ('New Document', 'This is a new urgent document for testing reindex', 'support', 'high');" 2>/dev/null || true

    # Reindex
    local response
    response=$(fts_tool_call "fts_reindex" "{\"schema\": \"${TEST_SCHEMA}\", \"table\": \"${TEST_TABLE}\"}")

    # Check for success
    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "fts_reindex failed: ${response}"
        return 1
    fi

    # Verify updated row count (11 = 10 fixtures + the row just added).
    # Use extract_inner_field so the value is read from inside the MCP
    # envelope, matching how the other row_count checks unwrap responses.
    local row_count
    row_count=$(extract_inner_field "${response}" ".row_count")
    if [ "${row_count:-0}" -lt 11 ]; then
        log_error "Expected at least 11 rows after reindex, got: ${row_count}"
        return 1
    fi

    log_info " Reindex successful with ${row_count} rows"
    return 0
}
# Test 11: fts_delete_index
test_fts_delete_index() {
    local response
    response=$(fts_tool_call "fts_delete_index" "{\"schema\": \"${TEST_SCHEMA}\", \"table\": \"${TEST_TABLE}\"}")

    # Check for success
    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "fts_delete_index failed: ${response}"
        return 1
    fi

    # Verify the index is gone by counting remaining entries for our
    # schema/table pair in the listing (read from inside the MCP envelope).
    local list_response
    list_response=$(fts_tool_call "fts_list_indexes" "{}")

    # Wrap the select in [...] so jq counts the matching entries; piping a
    # bare matched object into 'length' (as before) yields its key count,
    # not the number of matches.
    local our_index_count
    our_index_count=$(extract_inner_field "${list_response}" \
        "[.indexes[] | select(.schema==\"${TEST_SCHEMA}\" and .table==\"${TEST_TABLE}\")] | length")

    if [ "${our_index_count}" != "0" ] && [ "${our_index_count}" != "" ]; then
        log_error "Index still exists after deletion"
        return 1
    fi

    log_info " Index deleted successfully"
    return 0
}
# Test 12: fts_search after deletion (should fail gracefully)
test_fts_search_after_deletion() {
    local reply
    reply=$(fts_tool_call "fts_search" \
        "{\"query\": \"urgent\", \
          \"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\"}")

    # With the index gone the search should simply report zero matches
    local hits
    hits=$(extract_inner_field "${reply}" ".total_matches")

    if [ "${hits}" != "0" ]; then
        log_error "Expected 0 matches after index deletion, got: ${hits}"
        return 1
    fi

    log_info " Search after deletion returns 0 matches (expected)"
    return 0
}
# Test 13: fts_rebuild_all (no indexes)
test_fts_rebuild_all_empty() {
    local reply
    reply=$(fts_tool_call "fts_rebuild_all" "{}")

    # Rebuilding with nothing registered must still succeed
    if ! check_json_bool "${reply}" ".success" "true"; then
        log_error "fts_rebuild_all failed: ${reply}"
        return 1
    fi

    log_info " fts_rebuild_all with no indexes succeeded"
    return 0
}
# Test 14: fts_index_table with WHERE clause
test_fts_index_table_with_where() {
    # First, create the index without WHERE clause
    fts_tool_call "fts_index_table" \
        "{\"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"columns\": [\"title\", \"content\"], \
          \"primary_key\": \"id\"}" >/dev/null

    # Delete it
    fts_tool_call "fts_delete_index" "{\"schema\": \"${TEST_SCHEMA}\", \"table\": \"${TEST_TABLE}\"}" >/dev/null

    # Now create with WHERE clause
    local response
    response=$(fts_tool_call "fts_index_table" \
        "{\"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"columns\": [\"title\", \"content\", \"priority\"], \
          \"primary_key\": \"id\", \
          \"where_clause\": \"priority = 'high'\"}")

    # Check for success
    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "fts_index_table with WHERE clause failed: ${response}"
        return 1
    fi

    # Verify row count (should be less than the full table). Read it from
    # inside the MCP envelope with extract_inner_field — the raw response
    # does not expose this field — and default to 0 so a missing value
    # fails the comparison cleanly.
    local row_count
    row_count=$(extract_inner_field "${response}" ".row_count")

    if [ "${row_count:-0}" -lt 1 ]; then
        log_error "Expected at least 1 row with WHERE clause, got: ${row_count}"
        return 1
    fi

    log_info " Index with WHERE clause created: ${row_count} high-priority rows"
    return 0
}
# Test 15: Multiple indexes
# Creates a second table + index and verifies that listing and cross-table
# search see both indexes.
test_fts_multiple_indexes() {
    # Create a second table
    mysql_exec "CREATE TABLE IF NOT EXISTS ${TEST_SCHEMA}.logs (
        id INT PRIMARY KEY AUTO_INCREMENT,
        message TEXT,
        level VARCHAR(20),
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );" 2>/dev/null || true

    mysql_exec "INSERT IGNORE INTO ${TEST_SCHEMA}.logs (message, level) VALUES \
        ('Error in module A', 'error'), \
        ('Warning in module B', 'warning'), \
        ('Info message', 'info');" 2>/dev/null || true

    # Drop any logs index left over from previous runs
    fts_tool_call "fts_delete_index" "{\"schema\": \"${TEST_SCHEMA}\", \"table\": \"logs\"}" >/dev/null 2>&1

    # Create index for logs table
    local reply
    reply=$(fts_tool_call "fts_index_table" \
        "{\"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"logs\", \
          \"columns\": [\"message\", \"level\"], \
          \"primary_key\": \"id\"}")

    if ! check_json_bool "${reply}" ".success" "true"; then
        log_error "Failed to create second index: ${reply}"
        return 1
    fi

    # Both the documents index and the logs index should now be listed
    local listing
    listing=$(fts_tool_call "fts_list_indexes" "{}")
    local n_indexes
    n_indexes=$(extract_inner_field "${listing}" ".indexes | length")

    if [ "${n_indexes}" -lt 2 ]; then
        log_error "Expected at least 2 indexes, got: ${n_indexes}"
        return 1
    fi

    log_info " Multiple indexes: ${n_indexes} indexes exist"

    # Search across all tables
    local search_reply
    search_reply=$(fts_tool_call "fts_search" "{\"query\": \"error\", \"limit\": 10}")
    local hits
    hits=$(extract_inner_field "${search_reply}" ".total_matches")

    log_info " Cross-table search 'error': ${hits} matches across all indexes"

    return 0
}
# Test 16: fts_rebuild_all (with indexes)
test_fts_rebuild_all_with_indexes() {
    local response
    response=$(fts_tool_call "fts_rebuild_all" "{}")

    # Check for success
    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "fts_rebuild_all failed: ${response}"
        return 1
    fi

    # Read rebuilt_count from inside the MCP envelope (the raw response does
    # not expose it), defaulting to 0 so a missing value fails cleanly.
    local rebuilt_count
    rebuilt_count=$(extract_inner_field "${response}" ".rebuilt_count")

    if [ "${rebuilt_count:-0}" -lt 1 ]; then
        log_error "Expected at least 1 rebuilt index, got: ${rebuilt_count}"
        return 1
    fi

    log_info " Rebuilt ${rebuilt_count} indexes"
    return 0
}
# Test 17: Index already exists error handling
test_fts_index_already_exists() {
    local reply
    reply=$(fts_tool_call "fts_index_table" \
        "{\"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"columns\": [\"title\", \"content\"], \
          \"primary_key\": \"id\"}")

    # A duplicate index must be rejected...
    if check_json_bool "${reply}" ".success" "true"; then
        log_error "Creating duplicate index should fail but succeeded"
        return 1
    fi

    # ...and the error text should say why
    local err
    err=$(extract_inner_field "${reply}" ".error")
    if ! echo "${err}" | grep -iq "already exists"; then
        log_error "Expected 'already exists' error, got: ${err}"
        return 1
    fi

    log_info " Duplicate index correctly rejected"
    return 0
}
# Test 18: Delete non-existent index
test_fts_delete_nonexistent_index() {
    local target="{\"schema\": \"${TEST_SCHEMA}\", \"table\": \"${TEST_TABLE}\"}"

    # First delete the index
    fts_tool_call "fts_delete_index" "${target}" >/dev/null

    # Deleting it a second time must fail gracefully
    local reply
    reply=$(fts_tool_call "fts_delete_index" "${target}")

    if check_json_bool "${reply}" ".success" "true"; then
        log_error "Deleting non-existent index should fail but succeeded"
        return 1
    fi

    log_info " Non-existent index deletion correctly failed"
    return 0
}
# Test 19: Complex search with special characters
test_fts_search_special_chars() {
    # Create a document with special characters
    mysql_exec "INSERT INTO ${TEST_SCHEMA}.${TEST_TABLE} (title, content, category, priority) \
        VALUES ('Special Chars', 'Test with @ # $ % ^ & * ( ) - _ = + [ ] { } | \\ : ; \" \" < > ? / ~', 'test', 'normal');" 2>/dev/null || true

    # Reindex so the new row becomes searchable
    fts_tool_call "fts_reindex" "{\"schema\": \"${TEST_SCHEMA}\", \"table\": \"${TEST_TABLE}\"}" >/dev/null

    # Search for "special"
    local response
    response=$(fts_tool_call "fts_search" \
        "{\"query\": \"special\", \
          \"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"limit\": 10}")

    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "Search with special chars failed: ${response}"
        return 1
    fi

    # Read total_matches from inside the MCP envelope (consistent with the
    # other search tests; the raw response does not expose this field).
    local total_matches
    total_matches=$(extract_inner_field "${response}" ".total_matches")

    log_info " Special characters search: ${total_matches} matches"
    return 0
}
# Test 20: Verify FTS5 features (snippet highlighting)
test_fts_snippet_highlighting() {
    local response
    response=$(fts_tool_call "fts_search" \
        "{\"query\": \"urgent\", \
          \"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}\", \
          \"limit\": 3}")

    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "fts_search for snippet test failed"
        return 1
    fi

    # Unwrap the MCP envelope before looking for snippets, consistent with
    # how the other tests read result fields.
    local inner
    inner=$(extract_inner_result "${response}")

    # Count "mark" occurrences in the first snippet. NOTE: the original
    # appended '|| echo "0"' to 'grep -c', which emits "0\n0" when nothing
    # matches (grep -c already prints 0 itself) and then breaks the numeric
    # comparison below. grep -c's non-zero exit on zero matches is harmless
    # here; normalise an empty value to 0 instead.
    local has_snippet
    if command -v jq >/dev/null 2>&1; then
        has_snippet=$(echo "${inner}" | jq -r '.results[0].snippet // empty' | grep -c "mark") || true
    else
        has_snippet=$(echo "${inner}" | grep -o "mark" | wc -l)
    fi
    has_snippet="${has_snippet:-0}"

    if [ "${has_snippet}" -lt 1 ]; then
        log_warn "No snippet highlighting found (may be expected if no matches)"
    else
        log_info " Snippet highlighting present: <mark> tags found"
    fi

    return 0
}
|
# Test 21: Test custom FTS database path configuration
#
# Verifies that mcp-fts_path changes made through the admin interface are
# applied by LOAD MCP VARIABLES TO RUNTIME (regression test for the fix in
# Admin_FlushVariables.cpp). Returns 0 unless a step that must work fails;
# steps that legitimately vary by deployment (admin access, restart-required
# path changes) only emit warnings.
test_fts_custom_database_path() {
    log_test "Testing custom FTS database path configuration..."

    # Per-process temp path so concurrent runs cannot collide.
    local custom_path="/tmp/test_fts_$$.db"

    # Remove old test file if exists
    rm -f "${custom_path}"

    # Verify we can query the current FTS path setting
    local current_path
    current_path=$(MYSQL_PWD="${MYSQL_PASSWORD}" mysql -h "${MYSQL_HOST}" -P "${MYSQL_PORT}" -u "${MYSQL_USER}" \
        -e "SELECT @@mcp-fts_path" -s -N 2>/dev/null | tr -d '\r')

    if [ -z "${current_path}" ]; then
        log_warn "Could not query current FTS path - admin interface may not be available"
        current_path="mcp_fts.db" # Default value
    fi

    log_verbose "Current FTS database path: ${current_path}"

    # Test 1: Verify we can set a custom path via admin interface.
    # NOTE: the status is checked on the assignment itself ("if ! var=$(cmd)").
    # A separate "[ $? -ne 0 ]" test after the assignment would never run under
    # "set -e": the failing command substitution aborts the script first.
    log_verbose "Setting custom FTS path to: ${custom_path}"
    local set_result
    if ! set_result=$(MYSQL_PWD="${MYSQL_PASSWORD}" mysql -h "${MYSQL_HOST}" -P "${MYSQL_PORT}" -u "${MYSQL_USER}" \
        -e "SET mcp-fts_path = '${custom_path}'" 2>&1); then
        log_warn "Could not set mcp-fts_path via admin interface (this may be expected if admin access is limited)"
        log_warn "Error: ${set_result}"
        log_info "  FTS system is working with current configuration"
        log_info "  Note: Custom path configuration requires admin interface access"
        return 0 # Not a failure - FTS still works, just can't test admin config
    fi

    # Verify the value was set
    local new_path
    new_path=$(MYSQL_PWD="${MYSQL_PASSWORD}" mysql -h "${MYSQL_HOST}" -P "${MYSQL_PORT}" -u "${MYSQL_USER}" \
        -e "SELECT @@mcp-fts_path" -s -N 2>/dev/null | tr -d '\r')

    if [ "${new_path}" != "${custom_path}" ]; then
        log_error "Failed to set mcp-fts_path. Expected '${custom_path}', got '${new_path}'"
        return 1
    fi

    # Test 2: Load configuration to runtime - this is where the bug was
    log_verbose "Loading MCP variables to runtime..."
    local load_result
    if ! load_result=$(MYSQL_PWD="${MYSQL_PASSWORD}" mysql -h "${MYSQL_HOST}" -P "${MYSQL_PORT}" -u "${MYSQL_USER}" \
        -e "LOAD MCP VARIABLES TO RUNTIME" 2>&1); then
        log_error "LOAD MCP VARIABLES TO RUNTIME failed: ${load_result}"
        return 1
    fi

    # Give the system a moment to reinitialize
    sleep 2

    # Test 3: Create a test index with the new path
    log_verbose "Creating FTS index to test new database path..."
    local response
    response=$(fts_tool_call "fts_index_table" \
        "{\"schema\": \"${TEST_SCHEMA}\", \
          \"table\": \"${TEST_TABLE}_path_test\", \
          \"columns\": [\"title\", \"content\"], \
          \"primary_key\": \"id\"}")

    if [ "${VERBOSE}" = "true" ]; then
        echo "Index creation response: ${response}" >&2
    fi

    # Verify success
    if ! check_json_bool "${response}" ".success" "true"; then
        log_error "Index creation failed with new path: ${response}"
        # This might not be an error - the path change may require full MCP restart
        log_warn "FTS index creation may require MCP server restart for path changes"
    fi

    # Test 4: Verify the database file was created at the custom path
    if [ -f "${custom_path}" ]; then
        log_info "  ✓ FTS database file created at custom path: ${custom_path}"
        log_info "  ✓ Configuration reload mechanism is working correctly"
    else
        log_warn "  ⚠ FTS database file not found at ${custom_path}"
        log_info "  Note: FTS path changes may require full ProxySQL restart in some configurations"
        # This is not a failure - different configurations handle path changes differently
    fi

    # Test 5: Verify search functionality still works
    log_verbose "Testing search functionality with new configuration..."
    local search_response
    search_response=$(fts_tool_call "fts_search" \
        "{\"query\": \"test\", \
          \"limit\": 1}")

    if [ "${VERBOSE}" = "true" ]; then
        echo "Search response: ${search_response}" >&2
    fi

    if check_json_bool "${search_response}" ".success" "true"; then
        log_info "  ✓ FTS search functionality working after configuration reload"
    else
        log_warn "  ⚠ Search may have issues: ${search_response}"
    fi

    # Test 6: Restore original path (best-effort; failures here must not mask
    # an otherwise successful test, hence the deliberate 2>/dev/null).
    log_verbose "Restoring original FTS path: ${current_path}"
    MYSQL_PWD="${MYSQL_PASSWORD}" mysql -h "${MYSQL_HOST}" -P "${MYSQL_PORT}" -u "${MYSQL_USER}" \
        -e "SET mcp-fts_path = '${current_path}'" 2>/dev/null
    MYSQL_PWD="${MYSQL_PASSWORD}" mysql -h "${MYSQL_HOST}" -P "${MYSQL_PORT}" -u "${MYSQL_USER}" \
        -e "LOAD MCP VARIABLES TO RUNTIME" 2>/dev/null

    log_info "  FTS custom path configuration test completed"

    # Cleanup
    log_verbose "Cleaning up test index and database file..."
    fts_tool_call "fts_delete_index" "{\"schema\": \"${TEST_SCHEMA}\", \"table\": \"${TEST_TABLE}_path_test\"}" >/dev/null 2>&1
    rm -f "${custom_path}"

    return 0
}
|
|
|
|
# ============================================================================
|
|
# TEST SUITE DEFINITION
|
|
# ============================================================================
|
|
|
|
# Ordered list of test functions run by main(). Order is significant:
# the index created by test_fts_index_table is exercised by the search
# tests, then refreshed and deleted; rebuild/error-path tests follow.
declare -a TEST_SUITE=(
    # Baseline state and core index lifecycle
    "test_fts_list_indexes_initial"
    "test_fts_index_table"
    "test_fts_list_indexes_after_creation"
    # Search behaviors against the index created above
    "test_fts_search_simple"
    "test_fts_search_phrase"
    "test_fts_search_cross_table"
    "test_fts_search_bm25"
    "test_fts_search_pagination"
    "test_fts_search_empty_query"
    # Maintenance operations and post-deletion behavior
    "test_fts_reindex"
    "test_fts_delete_index"
    "test_fts_search_after_deletion"
    "test_fts_rebuild_all_empty"
    # Advanced creation options and multi-index scenarios
    "test_fts_index_table_with_where"
    "test_fts_multiple_indexes"
    "test_fts_rebuild_all_with_indexes"
    # Error handling and edge cases
    "test_fts_index_already_exists"
    "test_fts_delete_nonexistent_index"
    "test_fts_search_special_chars"
    "test_fts_snippet_highlighting"
    # Admin-interface configuration reload (regression test)
    "test_fts_custom_database_path"
)
|
|
|
|
# ============================================================================
|
|
# RESULTS REPORTING
|
|
# ============================================================================
|
|
|
|
# Print the final pass/fail/skip counts, list any failed tests, and return
# 0 when no test failed (skipped tests do not count as failures).
# Globals read: TOTAL_TESTS, PASSED_TESTS, FAILED_TESTS, SKIPPED_TESTS,
#               TEST_NAMES[], TEST_RESULTS[], GREEN, RED, NC
print_summary() {
    echo ""
    echo "========================================"
    echo "Test Summary"
    echo "========================================"
    echo "Total tests: ${TOTAL_TESTS}"
    echo -e "Passed: ${GREEN}${PASSED_TESTS}${NC}"
    echo -e "Failed: ${RED}${FAILED_TESTS}${NC}"
    echo "Skipped: ${SKIPPED_TESTS}"
    echo ""

    if [ "${FAILED_TESTS}" -gt 0 ]; then
        echo "Failed tests:"
        for i in "${!TEST_NAMES[@]}"; do
            if [ "${TEST_RESULTS[$i]}" = "FAIL" ]; then
                echo "  - ${TEST_NAMES[$i]}"
            fi
        done
        echo ""
    fi

    # Success means "no failures". The previous PASSED == TOTAL comparison
    # wrongly reported "Some tests failed!" (and returned 1) whenever any
    # test was merely skipped.
    if [ "${FAILED_TESTS}" -eq 0 ]; then
        echo -e "${GREEN}All tests passed!${NC}"
        return 0
    else
        echo -e "${RED}Some tests failed!${NC}"
        return 1
    fi
}
|
|
|
|
# Print the run-configuration banner shown before the suite starts.
# Globals read: MCP_ENDPOINT, TEST_SCHEMA, TEST_TABLE, MYSQL_HOST,
#               MYSQL_PORT, VERBOSE, SKIP_CLEANUP
print_test_info() {
    # A single here-doc keeps the banner layout visible as one piece of text.
    cat <<EOF

========================================
MCP FTS Test Suite
========================================
MCP Endpoint: ${MCP_ENDPOINT}
Test Schema: ${TEST_SCHEMA}
Test Table: ${TEST_TABLE}
MySQL Backend: ${MYSQL_HOST}:${MYSQL_PORT}

Test Configuration:
 - Verbose: ${VERBOSE}
 - Skip Cleanup: ${SKIP_CLEANUP}

EOF
}
|
|
|
|
# ============================================================================
|
|
# PARSE ARGUMENTS
|
|
# ============================================================================
|
|
|
|
# Parse command-line options into the global configuration variables
# (VERBOSE, QUIET, SKIP_CLEANUP, TEST_SCHEMA, TEST_TABLE).
# Exits 0 after printing help, 1 on unknown options or missing option values.
parse_args() {
    while [[ $# -gt 0 ]]; do
        case $1 in
            -v|--verbose)
                VERBOSE=true
                shift
                ;;
            -q|--quiet)
                QUIET=true
                shift
                ;;
            --skip-cleanup)
                SKIP_CLEANUP=true
                shift
                ;;
            --test-schema)
                # Fail loudly when the value is missing. Without this check,
                # "shift 2" with only one argument left fails: under "set -e"
                # the script aborts with no message, and without it the loop
                # would never advance past $1.
                if [[ -z "${2:-}" ]]; then
                    echo "Option $1 requires an argument" >&2
                    exit 1
                fi
                TEST_SCHEMA="$2"
                shift 2
                ;;
            --test-table)
                if [[ -z "${2:-}" ]]; then
                    echo "Option $1 requires an argument" >&2
                    exit 1
                fi
                TEST_TABLE="$2"
                shift 2
                ;;
            -h|--help)
                cat <<EOF
Usage: $0 [options]

Comprehensive test suite for MCP FTS (Full Text Search) tools.

Options:
  -v, --verbose         Show verbose output (curl requests/responses)
  -q, --quiet           Suppress progress messages
  --skip-cleanup        Don't delete test data/indexes after testing
  --test-schema SCHEMA  Schema to use for testing (default: test_fts)
  --test-table TABLE    Table to use for testing (default: test_documents)
  -h, --help            Show this help

Environment Variables:
  MCP_HOST              MCP server host (default: 127.0.0.1)
  MCP_PORT              MCP server port (default: 6071)
  MYSQL_HOST            MySQL backend host (default: 127.0.0.1)
  MYSQL_PORT            MySQL backend port (default: 6033)
  MYSQL_USER            MySQL user (default: root)
  MYSQL_PASSWORD        MySQL password (default: root)

Tests Included:
  1. fts_list_indexes (initial state)
  2. fts_index_table (create index)
  3. fts_list_indexes (after creation)
  4. fts_search (simple query)
  5. fts_search (phrase query)
  6. fts_search (cross-table)
  7. fts_search (BM25 ranking)
  8. fts_search (pagination)
  9. fts_search (empty query validation)
  10. fts_reindex (refresh index)
  11. fts_delete_index
  12. fts_search (after deletion)
  13. fts_rebuild_all (empty)
  14. fts_index_table (with WHERE clause)
  15. Multiple indexes
  16. fts_rebuild_all (with indexes)
  17. Duplicate index error handling
  18. Non-existent index deletion
  19. Special characters handling
  20. Snippet highlighting
  21. Custom database path configuration

Examples:
  # Run all tests
  $0

  # Run with verbose output
  $0 -v

  # Skip cleanup to inspect test data
  $0 --skip-cleanup

  # Use custom test schema
  $0 --test-schema my_fts_tests

EOF
                exit 0
                ;;
            *)
                echo "Unknown option: $1"
                echo "Use --help for usage information"
                exit 1
                ;;
        esac
    done
}
|
|
|
|
# ============================================================================
|
|
# MAIN EXECUTION
|
|
# ============================================================================
|
|
|
|
# Entry point: parse flags, verify connectivity, run every test in
# TEST_SUITE, tear down the fixtures, and exit 0 only when no test failed.
main() {
    parse_args "$@"

    print_test_info

    # Abort early unless both the MCP endpoint and the MySQL backend respond.
    log_section "Pre-flight Checks"

    if ! check_mcp_server; then
        log_error "MCP server check failed. Exiting."
        exit 1
    fi

    if ! mysql_check_connection; then
        log_error "MySQL connection check failed. Exiting."
        exit 1
    fi

    # Create the schema/table fixtures the tests operate on.
    log_section "Test Setup"
    setup_test_schema

    # Execute the suite in declaration order (order is significant).
    log_section "Running Tests"
    local test_func
    for test_func in "${TEST_SUITE[@]}"; do
        run_test "${test_func}" "${test_func}"
    done

    log_section "Cleanup"
    teardown_test_schema

    print_summary

    # Propagate the failure count as the process exit status.
    if [ "${FAILED_TESTS}" -gt 0 ]; then
        exit 1
    fi
    exit 0
}
|
|
|
|
# Run main, forwarding all command-line arguments to the entry point.
main "$@"
|