A Model Context Protocol server that connects AI assistants to Bittensor validator analysis and scoring
The ValiScore MCP Server provides AI assistants with direct access to Bittensor validator analysis and scoring capabilities. It implements the Model Context Protocol (MCP) standard, enabling seamless integration with any MCP-compatible client.
git clone https://github.com/sonoran-softworks/valiscore.git
cd valiscore
pip install -e .
# Check if MCP server is available
valiscore-cli mcp-server --help
# Show MCP configuration
valiscore-cli mcp-config-show
# Start server with default settings
valiscore-cli mcp-server
# Start with specific network
valiscore-cli mcp-server --network finney
# Start with custom host and port
valiscore-cli mcp-server --host localhost --port 8000 --network finney --verbose
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main():
    server_params = StdioServerParameters(
        command="valiscore-cli",
        args=["mcp-server"],
    )
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()

            # List available tools
            tools = await session.list_tools()
            print(f"Available tools: {[tool.name for tool in tools.tools]}")

            # Analyze a subnet
            result = await session.call_tool(
                "analyze_subnet",
                {"subnet_id": 1, "network": "finney"}
            )
            print(f"Analysis result: {result.content[0].text}")


asyncio.run(main())
Analyze a Bittensor subnet with comprehensive validator and performance data.
analyze_subnet(subnet_id: int, network: str = "finney") -> str
Parameter | Type | Required | Description |
---|---|---|---|
subnet_id | int | Yes | Bittensor subnet ID to analyze |
network | str | No | Network to connect to (finney, test, local) |
Returns: a JSON string containing the comprehensive subnet analysis
result = await session.call_tool("analyze_subnet", {
"subnet_id": 1,
"network": "finney"
})
print(result.content[0].text)
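The tool returns its analysis as a JSON string, so the usual next step is to decode it. A minimal sketch, assuming only that the payload is valid JSON (the helper name and field handling below are illustrative; the exact keys depend on the server version):

```python
import json

from mcp import ClientSession


async def analyze_and_parse(session: ClientSession, subnet_id: int) -> dict:
    """Call analyze_subnet and decode the JSON payload it returns."""
    result = await session.call_tool(
        "analyze_subnet", {"subnet_id": subnet_id, "network": "finney"}
    )
    data = json.loads(result.content[0].text)  # tool output is a JSON string
    print("Top-level keys:", list(data.keys()))  # keys depend on the server version
    return data
```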
Compare multiple Bittensor subnets side-by-side.
compare_subnets(subnet_ids: List[int], network: str = "finney") -> str
Parameter | Type | Required | Description |
---|---|---|---|
subnet_ids | List[int] | Yes | List of subnet IDs to compare |
network | str | No | Network to connect to |
result = await session.call_tool("compare_subnets", {
"subnet_ids": [1, 2, 3],
"network": "finney"
})
Score multiple responses using BLEU, ROUGE, and weighted metrics.
score_responses(reference: str, responses: List[str], weights: Dict = None) -> str
Parameter | Type | Required | Description |
---|---|---|---|
reference | str | Yes | Reference text to compare against |
responses | List[str] | Yes | List of candidate responses to score |
weights | Dict | No | Custom weights for metrics |
result = await session.call_tool("score_responses", {
"reference": "The answer is 42",
"responses": ["The answer is 42", "It's 42", "42 is the answer"],
"weights": {"bleu": 0.4, "rouge1": 0.3, "rouge2": 0.2, "rougeL": 0.1}
})
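To make the role of `weights` concrete, the sketch below shows one plausible way per-metric scores could be combined into a single number, namely a normalised weighted sum. It illustrates the arithmetic only and is not ValiScore's actual aggregation code:

```python
def weighted_score(metrics: dict[str, float], weights: dict[str, float]) -> float:
    """Combine per-metric scores (e.g. BLEU/ROUGE) into one value.

    Illustrative only: normalise the weights so they sum to 1, then
    take the weighted sum of the matching metric scores.
    """
    total = sum(weights.values())
    return sum(metrics[name] * weight / total for name, weight in weights.items())


# Example with hypothetical metric scores for one candidate response
print(weighted_score(
    {"bleu": 0.91, "rouge1": 0.88, "rouge2": 0.75, "rougeL": 0.84},
    {"bleu": 0.4, "rouge1": 0.3, "rouge2": 0.2, "rougeL": 0.1},
))
```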
Calculate Shapley values for fair contribution distribution.
calculate_shapley(contributions: List[float]) -> str
result = await session.call_tool("calculate_shapley", {
"contributions": [0.8, 0.6, 0.9]
})
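The tool takes only a flat list of contribution scores; how ValiScore turns that list into a coalition value function is internal to the server. The sketch below shows the general Shapley computation for a small, explicitly assumed characteristic function (a simple additive one), which is enough to see what "fair contribution distribution" means: with additive values, each participant's Shapley value is just its own contribution.

```python
from itertools import permutations
from typing import Callable


def shapley_values(n: int, value: Callable[[frozenset], float]) -> list[float]:
    """Exact Shapley values: average each player's marginal contribution
    over every ordering of the players. Exponential in n, so only suitable
    for small illustrative examples."""
    totals = [0.0] * n
    orderings = list(permutations(range(n)))
    for order in orderings:
        coalition = frozenset()
        for player in order:
            with_player = coalition | {player}
            totals[player] += value(with_player) - value(coalition)
            coalition = with_player
    return [total / len(orderings) for total in totals]


# Assumed additive characteristic function over the same contributions as above
contributions = [0.8, 0.6, 0.9]
print([round(v, 3) for v in shapley_values(
    len(contributions), lambda s: sum(contributions[i] for i in s)
)])
# -> [0.8, 0.6, 0.9]
```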
Real-time monitoring of subnet activity and performance.
monitor_subnet(subnet_id: int, duration: int = 300, interval: int = 30, network: str = "finney") -> str
result = await session.call_tool("monitor_subnet", {
"subnet_id": 1,
"duration": 300, # 5 minutes
"interval": 30, # 30 seconds
"network": "finney"
})
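Because the call spans the whole monitoring window, it helps to budget at least `duration` seconds on the client side. A minimal sketch, assuming the tool blocks until the window ends and then returns a JSON summary of the collected samples:

```python
import asyncio

from mcp import ClientSession


async def monitor_with_timeout(session: ClientSession, subnet_id: int) -> str:
    """Run a 5-minute monitor, guarded by a client-side timeout."""
    duration, interval = 300, 30
    result = await asyncio.wait_for(
        session.call_tool(
            "monitor_subnet",
            {"subnet_id": subnet_id, "duration": duration, "interval": interval},
        ),
        timeout=duration + 30,  # margin beyond the monitoring window
    )
    return result.content[0].text
```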
Export analysis results to various formats.
export_analysis(subnet_id: int, format: str = "json", network: str = "finney") -> str
result = await session.call_tool("export_analysis", {
"subnet_id": 1,
"format": "json",
"network": "finney"
})
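Assuming the exported document comes back as text in the tool result rather than being written to a file by the server, persisting it is up to the client. A sketch (the helper name and path handling are illustrative):

```python
from pathlib import Path

from mcp import ClientSession


async def export_to_file(session: ClientSession, subnet_id: int, path: Path) -> Path:
    """Call export_analysis and write the returned document to disk.

    Assumes the exported document is returned as text in the tool result.
    """
    result = await session.call_tool(
        "export_analysis",
        {"subnet_id": subnet_id, "format": "json", "network": "finney"},
    )
    path.write_text(result.content[0].text)
    return path
```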
MCP resources provide queryable access to Bittensor data and analysis results.
subnet://{network}/{subnet_id}
Access subnet information, validator data, and performance metrics.
validator://{network}/{subnet_id}/{validator_uid}
Detailed information about specific validators.
analysis://{network}/{analysis_id}
Stored analysis results and reports.
# List available resources
resources = await session.list_resources()
print(f"Available resources: {[r.uri for r in resources.resources]}")

# Read a specific resource
result = await session.read_resource("subnet://finney/1")
print(result.contents[0].text)
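The same pattern works for the other URI templates. For example, a single validator can be read by filling in the `validator://` template; the sketch below assumes the server returns text (not binary) contents for this resource:

```python
from mcp import ClientSession


async def read_validator(
    session: ClientSession, subnet_id: int, validator_uid: int, network: str = "finney"
) -> str:
    """Read one validator resource by filling in its URI template."""
    uri = f"validator://{network}/{subnet_id}/{validator_uid}"
    result = await session.read_resource(uri)
    return result.contents[0].text  # assumes text resource contents
```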
Variable | Default | Description |
---|---|---|
VALISCORE_MCP_HOST | localhost | MCP server host address |
VALISCORE_MCP_PORT | 8000 | MCP server port |
VALISCORE_DEFAULT_NETWORK | finney | Default Bittensor network |
VALISCORE_CONNECTION_TIMEOUT | 30 | Connection timeout in seconds |
VALISCORE_MAX_CONCURRENT | 10 | Maximum concurrent connections |
VALISCORE_LOG_LEVEL | INFO | Logging level |
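When the server is spawned over stdio by a client, these variables can be passed through `StdioServerParameters(env=...)`. The values below are examples only, and merging with `os.environ` keeps `PATH` and the rest of the inherited environment intact:

```python
import os

from mcp import StdioServerParameters

# Example overrides; variable names follow the table above.
server_params = StdioServerParameters(
    command="valiscore-cli",
    args=["mcp-server"],
    env={
        **os.environ,
        "VALISCORE_DEFAULT_NETWORK": "finney",
        "VALISCORE_CONNECTION_TIMEOUT": "60",
        "VALISCORE_LOG_LEVEL": "DEBUG",
    },
)
```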
# config.yaml
mcp_server:
  host: localhost
  port: 8000
  network: finney
  timeout: 30
  max_concurrent: 10
  log_level: INFO

bittensor:
  default_network: finney
  connection_timeout: 30
  retry_attempts: 3

scoring:
  default_weights:
    bleu: 0.4
    rouge1: 0.3
    rouge2: 0.2
    rougeL: 0.1
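A quick way to sanity-check a config file before starting the server is to load it and verify the scoring weights sum to 1. This uses PyYAML and assumes the file sits in the working directory as `config.yaml`; whether valiscore-cli reads it from that location is not specified here.

```python
import yaml  # PyYAML

with open("config.yaml") as fh:
    config = yaml.safe_load(fh)

weights = config["scoring"]["default_weights"]
assert abs(sum(weights.values()) - 1.0) < 1e-9, "metric weights should sum to 1"

print(config["mcp_server"]["host"], config["mcp_server"]["port"])
```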
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

async function main() {
  const transport = new StdioClientTransport({
    command: 'valiscore-cli',
    args: ['mcp-server']
  });

  // Client identity reported to the server
  const client = new Client({ name: 'valiscore-example', version: '1.0.0' });

  await client.connect(transport);
  try {
    // Analyze a subnet
    const result = await client.callTool({
      name: 'analyze_subnet',
      arguments: { subnet_id: 1, network: 'finney' }
    });
    console.log('Analysis result:', result.content[0].text);
  } finally {
    await client.close();
  }
}

main().catch(console.error);
use mcp::client::{ClientSession, StdioServerParameters};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let session = ClientSession::new(
        StdioServerParameters::new("valiscore-cli", vec!["mcp-server"])
    ).await?;

    // Analyze a subnet
    let result = session.call_tool("analyze_subnet", serde_json::json!({
        "subnet_id": 1,
        "network": "finney"
    })).await?;

    println!("Analysis result: {}", result.content[0].text);

    Ok(())
}
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def complete_workflow():
    server_params = StdioServerParameters(
        command="valiscore-cli",
        args=["mcp-server", "--network", "finney"]
    )
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()

            # 1. Analyze subnet
            analysis = await session.call_tool("analyze_subnet", {
                "subnet_id": 1
            })
            print("Subnet Analysis:", analysis.content[0].text)

            # 2. Compare multiple subnets
            comparison = await session.call_tool("compare_subnets", {
                "subnet_ids": [1, 2, 3]
            })
            print("Subnet Comparison:", comparison.content[0].text)

            # 3. Score some responses
            scores = await session.call_tool("score_responses", {
                "reference": "The answer is 42",
                "responses": ["The answer is 42", "It's 42", "42 is the answer"]
            })
            print("Response Scores:", scores.content[0].text)

            # 4. Calculate Shapley values
            shapley = await session.call_tool("calculate_shapley", {
                "contributions": [0.8, 0.6, 0.9]
            })
            print("Shapley Values:", shapley.content[0].text)

            # 5. Export results
            export = await session.call_tool("export_analysis", {
                "subnet_id": 1,
                "format": "json"
            })
            print("Export Result:", export.content[0].text)


asyncio.run(complete_workflow())
If you can't connect to the MCP server, first confirm the CLI and the mcp-server subcommand are installed:
valiscore-cli mcp-server --help
If a tool is not available, list what the server actually exposes:
tools = await session.list_tools()
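A small helper makes the check explicit, printing the advertised tool names and confirming the one you expect is present (run it inside an active session; the helper name is illustrative):

```python
from mcp import ClientSession


async def has_tool(session: ClientSession, name: str) -> bool:
    """Return True if the server advertises a tool with the given name."""
    tools = await session.list_tools()
    names = [tool.name for tool in tools.tools]
    print("Advertised tools:", names)
    return name in names
```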
For slow response times:
# Start server in debug mode
valiscore-cli mcp-server --verbose --log-level DEBUG
# Check server logs
tail -f valiscore_mcp.log