sinc-prompt Integrations v1.0

Use sinc-prompt with LangChain, Python, JavaScript, Claude Code (MCP), and the CLI. Code examples for every platform.

sinc-LLM Integrations: LangChain, Python, Claude Code, CLI, JavaScript

| Integration | Method | Install |
| --- | --- | --- |
| Python LangChain | SincPromptTemplate | pip install sinc-llm langchain |
| Python Raw API | Anthropic / OpenAI SDK | pip install sinc-llm anthropic |
| MCP Claude Code | MCP Server | pip install sinc-llm |
| CLI Terminal | sinc-llm CLI | pip install sinc-llm |
| JS JavaScript | fetch API | No install (browser/Node.js) |

1. LangChain Integration

Build a custom PromptTemplate that enforces sinc structure with all 6 bands.

from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompt_values import StringPromptValue
from sinc_llm import compute_snr
import json


class SincPromptTemplate(BasePromptTemplate):
    """LangChain PromptTemplate that enforces sinc-prompt structure.

    Builds a six-band sinc-prompt JSON payload (PERSONA, CONTEXT, DATA,
    CONSTRAINTS, FORMAT, TASK) and rejects prompts whose computed SNR
    falls below the 0.65 minimum.
    """

    input_variables: list[str] = [
        "persona", "context", "data", "constraints", "format_spec", "task"
    ]

    def format_prompt(self, **kwargs) -> StringPromptValue:
        """Assemble the sinc-prompt JSON and gate it on SNR.

        Raises:
            ValueError: if any of the six required band inputs is missing,
                or if the computed SNR is below the 0.65 minimum.
        """
        # Band order is significant: n is the sample index on the
        # specification axis T.
        bands = [
            ("PERSONA", "persona"),
            ("CONTEXT", "context"),
            ("DATA", "data"),
            ("CONSTRAINTS", "constraints"),
            ("FORMAT", "format_spec"),
            ("TASK", "task"),
        ]
        # Fail with a clear message instead of a bare KeyError when a
        # caller omits one of the six required inputs.
        missing = [key for _, key in bands if key not in kwargs]
        if missing:
            raise ValueError(
                f"Missing sinc band input(s): {', '.join(missing)}"
            )
        sinc_json = {
            "formula": "x(t) = Sigma x(nT) * sinc((t - nT) / T)",
            "T": "specification-axis",
            "fragments": [
                {"n": n, "t": label, "x": kwargs[key]}
                for n, (label, key) in enumerate(bands)
            ],
        }
        snr = compute_snr(sinc_json["fragments"])
        if snr < 0.65:
            raise ValueError(
                f"SNR {snr:.4f} below minimum 0.65. "
                f"Add more CONSTRAINTS content (42.7% of quality)."
            )
        return StringPromptValue(text=json.dumps(sinc_json))

    def format(self, **kwargs) -> str:
        """Return the serialized sinc-prompt as a plain string."""
        return self.format_prompt(**kwargs).text


# Usage
# All six band inputs are required; format() raises ValueError when the
# computed SNR is below the 0.65 minimum.
template = SincPromptTemplate()
prompt = template.format(
    persona="You are a senior Python engineer with 10 years of Django experience.",
    context="We are migrating a Django 3.2 monolith to Django 5.0. The codebase has 340 models across 28 apps. CI runs 4,200 tests in 18 minutes.",
    data="Current test pass rate: 94.2%. Blocked migrations: 12. Deprecated API calls: 847. Python 3.9 to 3.12 upgrade completed last sprint.",
    constraints="State facts directly -- never hedge. Use exact file paths for every code reference. Never suggest changes to models without showing the migration. Always specify which Django version introduced the API change. Test every suggestion against the 4,200-test suite mentally. Never combine unrelated changes in one migration file. Always preserve backward compatibility with the data layer.",
    format_spec="Lead with a priority-ranked table of migration blockers. Then one section per blocker with: root cause, fix (with code diff), migration command, and rollback command. No trailing summary.",
    task="Produce a migration plan for the 12 blocked migrations, ranked by dependency order. Include exact commands to run each migration and verify success."
)

# Use with any LangChain LLM
# prompt is a JSON string here, so it can be passed to invoke() directly.
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(model="claude-sonnet-4-20250514")
response = llm.invoke(prompt)

2. Python Raw API Call

Send a sinc-prompt directly to the Anthropic API using the SDK.

import anthropic
import json
from sinc_llm import scatter, compute_snr

# Reads ANTHROPIC_API_KEY from the environment by default.
client = anthropic.Anthropic()

# Option A: Build manually
# A sinc-prompt is a JSON object with a fixed formula/T header plus six
# ordered fragments (bands n=0..5): PERSONA, CONTEXT, DATA, CONSTRAINTS,
# FORMAT, TASK.
sinc_prompt = {
    "formula": "x(t) = Sigma x(nT) * sinc((t - nT) / T)",
    "T": "specification-axis",
    "fragments": [
        {"n": 0, "t": "PERSONA",     "x": "You are a database performance engineer."},
        {"n": 1, "t": "CONTEXT",     "x": "PostgreSQL 16.2, 48 cores, 256GB RAM, 2TB NVMe. Production database serving 12,000 req/s."},
        {"n": 2, "t": "DATA",        "x": "Slow query log shows 3 queries >500ms. Table sizes: orders=240M rows, users=8.2M rows, products=1.1M rows."},
        {"n": 3, "t": "CONSTRAINTS", "x": "Never suggest EXPLAIN without ANALYZE. Always include index creation cost estimate. Never recommend dropping an index without showing dependent queries. State exact row counts. Use pg_stat_statements data. Never hedge."},
        {"n": 4, "t": "FORMAT",      "x": "Table: Query | Current ms | Target ms | Fix. Then detailed sections per query with EXPLAIN ANALYZE output interpretation."},
        {"n": 5, "t": "TASK",        "x": "Optimize the 3 slow queries to under 50ms each. Provide CREATE INDEX statements and expected improvement percentages."}
    ]
}

# Compute SNR before sending
snr = compute_snr(sinc_prompt["fragments"])
print(f"SNR: {snr:.4f}")  # Should be >= 0.70

# Send as system prompt
# The sinc-prompt travels as the serialized system prompt; the user turn
# just triggers execution of the embedded TASK band.
response = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=4096,
    system=json.dumps(sinc_prompt),
    messages=[{"role": "user", "content": "Execute the task defined in the sinc-prompt."}]
)
print(response.content[0].text)


# Option B: Auto-scatter a raw prompt
# scatter() splits an unstructured prompt into the six bands automatically;
# recompute the SNR afterwards to see how much structure was recovered.
raw = "You are a DB expert. Fix our slow PostgreSQL queries. Be specific."
sinc_prompt = scatter(raw)
snr = compute_snr(sinc_prompt["fragments"])
print(f"Auto-scattered SNR: {snr:.4f}")

3. Claude Code (MCP Server)

The sinc-llm package ships with a built-in MCP server. Configure it in Claude Code:

3.1 Add to MCP Configuration

# .claude/mcp.json
{
  "mcpServers": {
    "sinc-tools": {
      "command": "python",
      "args": ["-m", "sinc_llm.mcp_server"],
      "env": {}
    }
  }
}

3.2 Available MCP Tools

| Tool | Input | Output |
| --- | --- | --- |
| sinc_scatter | Raw prompt string | sinc-prompt JSON with 6 bands |
| sinc_validate | sinc-prompt JSON | Validation result + SNR score |
| sinc_compute_snr | sinc-prompt JSON | SNR score + zone function breakdown |

3.3 Usage in Claude Code

# Claude Code will auto-discover the tools. Ask it:
"Use sinc_scatter to structure this prompt: 'Analyze our React app for performance issues'"

# Or validate existing prompts:
"Use sinc_validate to check this prompt JSON: {...}"

See the MCP Developer Guide for building custom MCP servers with sinc-prompt validation.

4. CLI (Command Line)

The sinc-llm CLI provides terminal access to scatter, validate, and compute SNR.

4.1 Install

pip install sinc-llm

4.2 Commands

# Auto-scatter a raw prompt into sinc format
sinc-llm scatter "You are a security auditor. Review this code for vulnerabilities."

# Validate a .sinc.json file
sinc-llm validate prompt.sinc.json

# Compute SNR for a sinc-prompt file
sinc-llm snr prompt.sinc.json

# Scatter and save to file
sinc-llm scatter "Your raw prompt here" --output task.sinc.json

# Pipe from stdin
echo "Review this PR for bugs" | sinc-llm scatter --stdin

# Validate with verbose zone function output
sinc-llm validate prompt.sinc.json --verbose

4.3 Example Output

$ sinc-llm validate my-prompt.sinc.json

  Nyquist completeness: 6/6 PASS
  SNR: 0.8234 (EXCELLENT)

  Band Analysis:
    n=0 PERSONA     [OK]   24 tokens
    n=1 CONTEXT     [OK]   67 tokens
    n=2 DATA        [OK]   41 tokens
    n=3 CONSTRAINTS [OK]  118 tokens  (longest, correct)
    n=4 FORMAT      [OK]   52 tokens
    n=5 TASK        [OK]   19 tokens

  Zone Functions:
    G(Z1) = 1.0000
    H(Z2) = 0.9847
    R(Z3) = 1.0000
    G(Z4) = 1.0000

5. JavaScript (fetch API)

Use sinc-prompt from any JavaScript environment — browser, Node.js, Deno, or Bun. No dependencies required.

// Build a sinc-prompt
// Fixed header (formula, T) plus the six required bands n=0..5.
// CONSTRAINTS (n=3) is intentionally the longest fragment.
const sincPrompt = {
  formula: "x(t) = Sigma x(nT) * sinc((t - nT) / T)",
  T: "specification-axis",
  fragments: [
    { n: 0, t: "PERSONA",     x: "You are a frontend performance engineer." },
    { n: 1, t: "CONTEXT",     x: "React 19 app with 847 components. Lighthouse score: 34. LCP: 8.2s. CLS: 0.42." },
    { n: 2, t: "DATA",        x: "Bundle size: 2.4MB (gzipped: 680KB). Largest chunks: vendor.js (1.1MB), app.js (890KB). 23 render-blocking resources." },
    { n: 3, t: "CONSTRAINTS", x: "Never suggest changes without measuring impact. Use exact byte counts. Always show before/after bundle sizes. Never recommend lazy loading without specifying the exact component path. State Lighthouse score impact for each fix. Never combine unrelated optimizations. Always verify fixes against Core Web Vitals thresholds: LCP < 2.5s, FID < 100ms, CLS < 0.1." },
    { n: 4, t: "FORMAT",      x: "Priority-ranked table: Fix | Impact | Effort | LCP delta. Then one section per fix with code diff and expected metric change." },
    { n: 5, t: "TASK",        x: "Produce a performance optimization plan to reach Lighthouse score 90+. Rank fixes by LCP impact." }
  ]
};

// Validate locally (Nyquist check)
// Every band index 0..5 must appear among the fragments; throws listing
// the names of any missing bands, returns true otherwise.
function validateSinc(prompt) {
  const names = ["PERSONA", "CONTEXT", "DATA", "CONSTRAINTS", "FORMAT", "TASK"];
  const present = prompt.fragments.map(f => f.n);
  const missing = [];
  for (let n = 0; n < names.length; n++) {
    if (!present.includes(n)) {
      missing.push(names[n]);
    }
  }
  if (missing.length > 0) {
    throw new Error(`Missing bands: ${missing.join(", ")}`);
  }
  return true;
}

// Send to Anthropic API
// Validates the prompt structure, POSTs it as the system prompt, and
// returns the parsed JSON response. Throws on missing bands or HTTP errors.
async function sendSincPrompt(sincPrompt) {
  // Fail fast on structural problems before spending an API call.
  validateSinc(sincPrompt);

  const response = await fetch("https://api.anthropic.com/v1/messages", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-api-key": process.env.ANTHROPIC_API_KEY,
      "anthropic-version": "2023-06-01"
    },
    body: JSON.stringify({
      model: "claude-sonnet-4-20250514",
      max_tokens: 4096,
      // The sinc-prompt travels as the serialized system prompt.
      system: JSON.stringify(sincPrompt),
      messages: [{ role: "user", content: "Execute the task defined in the sinc-prompt." }]
    })
  });

  // Surface HTTP failures instead of silently returning the error payload
  // as if it were a successful completion.
  if (!response.ok) {
    const detail = await response.text();
    throw new Error(`Anthropic API error ${response.status}: ${detail}`);
  }

  return response.json();
}

// Validate against remote schema
// Fetches the published sinc-prompt JSON Schema and validates the given
// prompt against it. Returns the Ajv boolean validation result.
async function validateAgainstSchema(sincPrompt) {
  const res = await fetch("https://tokencalc.pro/schema/sinc-prompt-v1.json");
  // Guard against HTTP failures before attempting to parse the body as
  // JSON (an error page would otherwise throw a confusing parse error).
  if (!res.ok) {
    throw new Error(`Failed to fetch schema: HTTP ${res.status}`);
  }
  const schema = await res.json();
  // Use Ajv or similar JSON Schema validator
  // npm install ajv
  const Ajv = require("ajv");
  const ajv = new Ajv();
  const validate = ajv.compile(schema);
  return validate(sincPrompt);
}

Resources