https://github.com/stackql/stackql-provider-anthropic
generate stackql provider for Anthropic from openapi specs
- Host: GitHub
- URL: https://github.com/stackql/stackql-provider-anthropic
- Owner: stackql
- License: mit
- Created: 2025-09-18T10:04:36.000Z (4 months ago)
- Default Branch: main
- Last Pushed: 2025-09-18T10:28:10.000Z (4 months ago)
- Last Synced: 2025-09-18T12:39:36.762Z (4 months ago)
- Topics: anthropic, stackql, stackql-provider
- Language: JavaScript
- Homepage: https://anthropic-provider.stackql.io/
- Size: 291 KB
- Stars: 1
- Watchers: 0
- Forks: 0
- Open Issues: 0
Metadata Files:
- Readme: README.md
- License: LICENSE
README
# `anthropic` provider for [`stackql`](https://github.com/stackql/stackql)
This repository is used to generate and document the `anthropic` provider for StackQL, allowing you to query and interact with Anthropic's services using SQL-like syntax. The provider is built using the `@stackql/provider-utils` package, which provides tools for converting OpenAPI specifications into StackQL-compatible provider schemas.
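Once the provider is built and published to a registry, you can query it like any other StackQL provider. The query below is a sketch only: the `anthropic.models.models` resource address assumes the service and resource names produced by the generation steps that follow.

```bash
# Hypothetical query; service/resource names depend on the generated mappings
stackql exec "SELECT name, context_window FROM anthropic.models.models"
```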
## Prerequisites
To use the Anthropic provider with StackQL, you'll need:
1. An Anthropic account
2. An Anthropic API key with sufficient permissions for the resources you want to access
3. The StackQL CLI installed on your system (see [StackQL](https://github.com/stackql/stackql)); a minimal environment setup is sketched below
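For example, a minimal environment setup might look like the following. The `ANTHROPIC_API_KEY` variable name is an assumption here; the exact variable the provider reads is defined by the auth configuration generated later in this repository.

```bash
# Assumed variable name; confirm against the generated provider's auth config
export ANTHROPIC_API_KEY="<your-anthropic-api-key>"

# Verify the StackQL CLI is installed and on your PATH
stackql exec "SHOW PROVIDERS"
```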
## 1. Create the OpenAPI Specification
Since Anthropic doesn't currently provide an official OpenAPI specification, we need to create one based on their API documentation:
```bash
mkdir -p provider-dev/downloaded
# Create a JSON OpenAPI spec based on Anthropic's API documentation
cat > provider-dev/downloaded/anthropic-openapi.json << 'EOF'
{
  "openapi": "3.0.0",
  "info": {
    "title": "Anthropic API",
    "description": "Anthropic API for Claude and other AI models",
    "version": "1.0.0"
  },
  "servers": [
    {
      "url": "https://api.anthropic.com"
    }
  ],
  "components": {
    "securitySchemes": {
      "ApiKeyAuth": {
        "type": "apiKey",
        "in": "header",
        "name": "x-api-key"
      },
      "AnthropicVersionHeader": {
        "type": "apiKey",
        "in": "header",
        "name": "anthropic-version"
      }
    }
  },
  "security": [
    {
      "ApiKeyAuth": [],
      "AnthropicVersionHeader": []
    }
  ],
  "paths": {
    "/v1/messages": {
      "post": {
        "operationId": "createMessage",
        "summary": "Create a message",
        "description": "Create a message and receive a response from Claude",
        "tags": ["messages"],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "type": "object",
                "required": ["model", "messages"],
                "properties": {
                  "model": {
                    "type": "string",
                    "description": "The model that will complete your prompt",
                    "example": "claude-3-opus-20240229"
                  },
                  "messages": {
                    "type": "array",
                    "description": "A list of messages comprising the conversation so far",
                    "items": {
                      "type": "object",
                      "required": ["role", "content"],
                      "properties": {
                        "role": {
                          "type": "string",
                          "enum": ["user", "assistant"],
                          "description": "The role of the message's author"
                        },
                        "content": {
                          "type": "string",
                          "description": "The content of the message"
                        }
                      }
                    }
                  },
                  "max_tokens": {
                    "type": "integer",
                    "description": "The maximum number of tokens to generate",
                    "default": 1024
                  },
                  "temperature": {
                    "type": "number",
                    "description": "Amount of randomness injected into the response",
                    "default": 1.0
                  },
                  "system": {
                    "type": "string",
                    "description": "System prompt to guide Claude's behavior"
                  },
                  "metadata": {
                    "type": "object",
                    "description": "An object containing metadata about the request"
                  },
                  "stream": {
                    "type": "boolean",
                    "description": "Whether to stream the response",
                    "default": false
                  }
                }
              }
            }
          }
        },
        "responses": {
          "200": {
            "description": "Message created successfully",
            "content": {
              "application/json": {
                "schema": {
                  "type": "object",
                  "properties": {
                    "id": {
                      "type": "string",
                      "description": "The identifier for the message"
                    },
                    "type": {
                      "type": "string",
                      "description": "The type of object"
                    },
                    "role": {
                      "type": "string",
                      "description": "The role of the message author"
                    },
                    "content": {
                      "type": "array",
                      "description": "The content of the message",
                      "items": {
                        "type": "object",
                        "properties": {
                          "type": {
                            "type": "string",
                            "description": "The type of content"
                          },
                          "text": {
                            "type": "string",
                            "description": "The text content"
                          }
                        }
                      }
                    },
                    "model": {
                      "type": "string",
                      "description": "The model used"
                    },
                    "stop_reason": {
                      "type": "string",
                      "description": "The reason the model stopped generating"
                    },
                    "usage": {
                      "type": "object",
                      "description": "Usage statistics for the request",
                      "properties": {
                        "input_tokens": {
                          "type": "integer",
                          "description": "Number of tokens in the input"
                        },
                        "output_tokens": {
                          "type": "integer",
                          "description": "Number of tokens in the output"
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    },
    "/v1/completions": {
      "post": {
        "operationId": "createCompletion",
        "summary": "Create a completion",
        "description": "Create a completion (legacy API)",
        "tags": ["completions"],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "type": "object",
                "required": ["model", "prompt"],
                "properties": {
                  "model": {
                    "type": "string",
                    "description": "The model that will complete your prompt"
                  },
                  "prompt": {
                    "type": "string",
                    "description": "The prompt to complete"
                  },
                  "max_tokens_to_sample": {
                    "type": "integer",
                    "description": "The maximum number of tokens to generate",
                    "default": 1024
                  },
                  "temperature": {
                    "type": "number",
                    "description": "Amount of randomness injected into the response",
                    "default": 1.0
                  },
                  "stop_sequences": {
                    "type": "array",
                    "description": "Sequences that will cause the model to stop generating",
                    "items": {
                      "type": "string"
                    }
                  },
                  "stream": {
                    "type": "boolean",
                    "description": "Whether to stream the response",
                    "default": false
                  }
                }
              }
            }
          }
        },
        "responses": {
          "200": {
            "description": "Completion created successfully",
            "content": {
              "application/json": {
                "schema": {
                  "type": "object",
                  "properties": {
                    "completion": {
                      "type": "string",
                      "description": "The completion text"
                    },
                    "model": {
                      "type": "string",
                      "description": "The model used"
                    },
                    "stop_reason": {
                      "type": "string",
                      "description": "The reason the model stopped generating"
                    }
                  }
                }
              }
            }
          }
        }
      }
    },
    "/v1/models": {
      "get": {
        "operationId": "listModels",
        "summary": "List models",
        "description": "List available models",
        "tags": ["models"],
        "responses": {
          "200": {
            "description": "List of available models",
            "content": {
              "application/json": {
                "schema": {
                  "type": "object",
                  "properties": {
                    "models": {
                      "type": "array",
                      "description": "List of available models",
                      "items": {
                        "type": "object",
                        "properties": {
                          "name": {
                            "type": "string",
                            "description": "Model identifier"
                          },
                          "description": {
                            "type": "string",
                            "description": "Model description"
                          },
                          "context_window": {
                            "type": "integer",
                            "description": "Context window size in tokens"
                          },
                          "max_output_tokens": {
                            "type": "integer",
                            "description": "Maximum tokens in the output"
                          }
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
EOF
```
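Before splitting the spec, it is worth sanity-checking that the heredoc produced valid JSON. This check is not part of the repo's npm scripts; it is a quick optional step using `jq` (any JSON validator works):

```bash
# Exits non-zero with a parse error if the spec above is not valid JSON
jq empty provider-dev/downloaded/anthropic-openapi.json && echo "spec parses as valid JSON"
```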
## 2. Split into Service Specs
Next, split the OpenAPI specification into service-specific files:
```bash
rm -rf provider-dev/source/*
npm run split -- \
--provider-name anthropic \
--api-doc provider-dev/downloaded/anthropic-openapi.json \
--svc-discriminator tag \
--output-dir provider-dev/source \
--overwrite \
--svc-name-overrides "$(cat <