Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
108 changes: 108 additions & 0 deletions components/cometapi/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
# CometAPI Integration for Pipedream

This integration provides access to CometAPI's powerful AI models through Pipedream workflows. CometAPI offers access to various LLM models including GPT, Claude, Gemini, Grok, DeepSeek, and Qwen series.

## Features

- **Retrieve Available Models**: Get a list of all available models from CometAPI
- **Chat Completion**: Send conversational messages to AI models
- **Text Completion**: Generate text completions from prompts

## Authentication

To use this integration, you'll need a CometAPI API key:

1. Visit [CometAPI Console](https://api.cometapi.com/console/token)
2. Generate your API key
3. Add it to your Pipedream account in the CometAPI app configuration

## Supported Models

CometAPI provides access to state-of-the-art models including:

### GPT Series
- `gpt-5-chat-latest`
- `chatgpt-4o-latest`
- `gpt-5-mini`
- `gpt-4o-mini`

### Claude Series
- `claude-opus-4-1-20250805`
- `claude-sonnet-4-20250514`
- `claude-3-5-haiku-latest`

### Gemini Series
- `gemini-2.5-pro`
- `gemini-2.5-flash`
- `gemini-2.0-flash`

### Other Models
- Grok series (`grok-4-0709`, `grok-3`)
- DeepSeek series (`deepseek-v3.1`, `deepseek-chat`)
- Qwen series (`qwen3-30b-a3b`)

## Usage Examples

### Chat Completion
```javascript
// Example messages array for chat completion
[
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "What is the capital of France?"
}
]
```

### Text Completion
```javascript
// Simple prompt for text completion
"Once upon a time in a land far away"
```

## Configuration Options

### Common Parameters
- **Model**: Choose from available CometAPI models
- **Max Tokens**: Maximum number of tokens to generate (default: varies by model)
- **Temperature**: Controls randomness (0.0 = deterministic, 2.0 = very random)
- **Top P**: Nucleus sampling parameter (0.0 to 1.0)
- **Top K**: Limits vocabulary to top K tokens
- **Frequency Penalty**: Reduces repetition of frequent tokens
- **Presence Penalty**: Encourages discussing new topics
- **Seed**: For deterministic outputs

### Advanced Parameters
- **Stop**: Array of stop sequences
- **Stream**: Enable streaming responses (default: false)
- **Repetition Penalty**: Additional repetition control

## Error Handling

The integration includes comprehensive error handling:
- Authentication errors with clear messages
- API rate limit handling
- Invalid parameter validation
- Network timeout protection (5-minute default)

## Rate Limits

Please refer to [CometAPI Pricing](https://api.cometapi.com/pricing) for current rate limits and usage policies.

## Support

For CometAPI-specific issues:
- [CometAPI Documentation](https://api.cometapi.com/doc)
- [CometAPI Website](https://www.cometapi.com/)

For Pipedream integration issues:
- [Pipedream Community](https://pipedream.com/community)
- [Pipedream Documentation](https://pipedream.com/docs)

## License

This integration follows Pipedream's component licensing terms.
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import cometapi from "../../cometapi.app.mjs";

export default {
  key: "cometapi-retrieve-available-models",
  name: "Retrieve Available Models",
  version: "0.0.1",
  description: "Returns a list of all models available through the CometAPI including GPT, Claude, Gemini, Grok, DeepSeek, and Qwen series. Use this to discover available models before making completion requests. [See the documentation](https://api.cometapi.com/doc)",
  type: "action",
  props: {
    cometapi,
  },
  /**
   * Fetches the model catalog from CometAPI and exports a summary
   * with the number of models found.
   */
  async run({ $ }) {
    const response = await this.cometapi.listModels({
      $,
    });

    // The API returns the model list under `data`; guard against a missing field
    const modelCount = response.data?.length || 0;
    $.export("$summary", `Successfully retrieved ${modelCount} available model(s)!`);
    return response;
  },
};
Original file line number Diff line number Diff line change
@@ -0,0 +1,207 @@
import { ConfigurationError } from "@pipedream/platform";
import { parseObject } from "../../common/utils.mjs";
import cometapi from "../../cometapi.app.mjs";

// Roles accepted by the CometAPI chat endpoint.
const VALID_ROLES = [
  "system",
  "user",
  "assistant",
  "function",
];

/**
 * Converts a raw prop value to a number, treating undefined/null/"" as
 * "not provided" so optional props are omitted from the request payload.
 *
 * @param {*} value - Raw prop value as entered in the Pipedream UI.
 * @returns {number|undefined} The numeric value, or undefined when not provided.
 */
const toNumber = (value) => (value === undefined || value === null || value === ""
  ? undefined
  : Number(value));

/**
 * Asserts that an optional numeric parameter, when provided, is a finite
 * number within [min, max].
 *
 * @param {number|undefined} value - Normalized parameter value.
 * @param {number} min - Inclusive lower bound.
 * @param {number} max - Inclusive upper bound (may be Infinity).
 * @param {string} message - Error message used when validation fails.
 * @throws {ConfigurationError} When the value is out of range or not finite.
 */
const assertInRange = (value, min, max, message) => {
  if (value !== undefined && (!Number.isFinite(value) || value < min || value > max)) {
    throw new ConfigurationError(message);
  }
};

/**
 * Validates the parsed messages array: non-empty, and every entry has a
 * recognized role plus non-empty string content.
 *
 * @param {Array<{role: string, content: string}>} messages - Parsed messages.
 * @throws {ConfigurationError} When any message is malformed.
 */
const validateMessages = (messages) => {
  if (!Array.isArray(messages) || messages.length === 0) {
    throw new ConfigurationError("Messages must be a non-empty array");
  }

  for (const [
    index,
    message,
  ] of messages.entries()) {
    if (!message.role || !message.content) {
      throw new ConfigurationError(
        `Message at index ${index} must have both 'role' and 'content' properties`,
      );
    }

    if (!VALID_ROLES.includes(message.role)) {
      throw new ConfigurationError(
        `Message at index ${index} has invalid role '${message.role}'. ` +
        "Must be 'system', 'user', 'assistant', or 'function'",
      );
    }

    if (typeof message.content !== "string" || message.content.trim() === "") {
      throw new ConfigurationError(
        `Message at index ${index} must have non-empty string content`,
      );
    }
  }
};

export default {
  key: "cometapi-send-chat-completion-request",
  name: "Send Chat Completion Request",
  version: "0.0.1",
  description: "Send a chat completion request to any available CometAPI model. " +
    "Perfect for conversational AI, Q&A systems, and interactive applications. " +
    "Supports system messages, conversation history, and advanced parameters. " +
    "[See the documentation](https://api.cometapi.com/doc)",
  type: "action",
  props: {
    cometapi,
    model: {
      propDefinition: [
        cometapi,
        "model",
      ],
    },
    messages: {
      type: "object[]",
      label: "Messages",
      description: "A list of message objects with 'role' and 'content' properties. " +
        "Roles can be 'system', 'user', 'assistant', or 'function'. " +
        "Example: {\"role\":\"user\",\"content\":\"Hello, how are you?\"}. " +
        "[See docs](https://api.cometapi.com/doc).",
    },
    maxTokens: {
      propDefinition: [
        cometapi,
        "maxTokens",
      ],
    },
    temperature: {
      propDefinition: [
        cometapi,
        "temperature",
      ],
    },
    topP: {
      propDefinition: [
        cometapi,
        "topP",
      ],
    },
    topK: {
      propDefinition: [
        cometapi,
        "topK",
      ],
    },
    frequencyPenalty: {
      propDefinition: [
        cometapi,
        "frequencyPenalty",
      ],
    },
    presencePenalty: {
      propDefinition: [
        cometapi,
        "presencePenalty",
      ],
    },
    repetitionPenalty: {
      propDefinition: [
        cometapi,
        "repetitionPenalty",
      ],
    },
    seed: {
      propDefinition: [
        cometapi,
        "seed",
      ],
    },
    stop: {
      propDefinition: [
        cometapi,
        "stop",
      ],
    },
    stream: {
      propDefinition: [
        cometapi,
        "stream",
      ],
    },
  },
  /**
   * Validates the configured props, builds the chat-completion payload
   * (omitting any parameter the user left blank), and sends it to CometAPI.
   *
   * @returns {object} The raw CometAPI chat-completion response.
   * @throws {ConfigurationError} When a prop fails validation.
   */
  async run({ $ }) {
    if (!this.model) {
      throw new ConfigurationError("Model is required");
    }

    // parseObject handles messages entered as JSON strings in the UI
    const messages = parseObject(this.messages);
    validateMessages(messages);

    // Normalize optional numeric props; blank values become undefined
    const temperature = toNumber(this.temperature);
    const topP = toNumber(this.topP);
    const topK = toNumber(this.topK);
    const frequencyPenalty = toNumber(this.frequencyPenalty);
    const presencePenalty = toNumber(this.presencePenalty);
    const repetitionPenalty = toNumber(this.repetitionPenalty);
    const maxTokens = toNumber(this.maxTokens);
    const seed = toNumber(this.seed);

    assertInRange(temperature, 0, 2, "Temperature must be a number between 0.0 and 2.0");
    assertInRange(topP, 0, 1, "Top P must be a number between 0.0 and 1.0");
    assertInRange(
      frequencyPenalty, -2, 2,
      "Frequency Penalty must be a number between -2.0 and 2.0",
    );
    assertInRange(
      presencePenalty, -2, 2,
      "Presence Penalty must be a number between -2.0 and 2.0",
    );
    assertInRange(topK, 0, Infinity, "Top K must be a non-negative number");
    if (maxTokens !== undefined && (!Number.isFinite(maxTokens) || maxTokens <= 0)) {
      throw new ConfigurationError("Max Tokens must be a positive number");
    }
    // Previously unvalidated: a non-numeric seed became NaN and was
    // serialized into the request body as null
    if (seed !== undefined && !Number.isInteger(seed)) {
      throw new ConfigurationError("Seed must be an integer");
    }

    const data = {
      model: this.model,
      messages,
      stream: this.stream || false,
      max_tokens: maxTokens,
      temperature,
      top_p: topP,
      top_k: topK,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      repetition_penalty: repetitionPenalty,
      seed,
      stop: this.stop,
    };

    // Drop parameters the user left blank so they aren't sent to the API
    const payload = Object.fromEntries(
      Object.entries(data).filter(([
        , value,
      ]) => value !== undefined),
    );

    const response = await this.cometapi.sendChatCompletionRequest({
      $,
      data: payload,
    });

    $.export("$summary", `Successfully sent chat completion request using model ${this.model}`);
    return response;
  },
};
Loading