From 215269b829e1c04b2249c741f40ca58f25fa4a12 Mon Sep 17 00:00:00 2001 From: examples-bot Date: Thu, 2 Apr 2026 11:17:23 +0000 Subject: [PATCH] =?UTF-8?q?feat(examples):=20add=20230=20=E2=80=94=20n8n?= =?UTF-8?q?=20community=20node=20for=20Deepgram=20(STT,=20TTS,=20Audio=20I?= =?UTF-8?q?ntelligence)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../.env.example | 2 + .../README.md | 79 ++++ .../package.json | 47 +++ .../credentials/DeepgramApi.credentials.ts | 41 ++ .../src/example-workflow.json | 94 +++++ .../src/nodes/Deepgram/Deepgram.node.json | 18 + .../src/nodes/Deepgram/Deepgram.node.ts | 377 ++++++++++++++++++ .../src/nodes/Deepgram/deepgram.svg | 4 + .../tests/test.js | 173 ++++++++ .../tsconfig.json | 30 ++ 10 files changed, 865 insertions(+) create mode 100644 examples/230-n8n-deepgram-community-node-typescript/.env.example create mode 100644 examples/230-n8n-deepgram-community-node-typescript/README.md create mode 100644 examples/230-n8n-deepgram-community-node-typescript/package.json create mode 100644 examples/230-n8n-deepgram-community-node-typescript/src/credentials/DeepgramApi.credentials.ts create mode 100644 examples/230-n8n-deepgram-community-node-typescript/src/example-workflow.json create mode 100644 examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/Deepgram.node.json create mode 100644 examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/Deepgram.node.ts create mode 100644 examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/deepgram.svg create mode 100644 examples/230-n8n-deepgram-community-node-typescript/tests/test.js create mode 100644 examples/230-n8n-deepgram-community-node-typescript/tsconfig.json diff --git a/examples/230-n8n-deepgram-community-node-typescript/.env.example b/examples/230-n8n-deepgram-community-node-typescript/.env.example new file mode 100644 index 0000000..99314a3 --- /dev/null +++ 
b/examples/230-n8n-deepgram-community-node-typescript/.env.example @@ -0,0 +1,2 @@ +# Deepgram — https://console.deepgram.com/ +DEEPGRAM_API_KEY= diff --git a/examples/230-n8n-deepgram-community-node-typescript/README.md b/examples/230-n8n-deepgram-community-node-typescript/README.md new file mode 100644 index 0000000..74c16e8 --- /dev/null +++ b/examples/230-n8n-deepgram-community-node-typescript/README.md @@ -0,0 +1,79 @@ +# n8n Community Nodes for Deepgram + +An n8n community node package that exposes Deepgram's core speech AI APIs — transcription (pre-recorded), text-to-speech, and audio intelligence — as drag-and-drop nodes in n8n workflow automations. + +## What you'll build + +A reusable n8n community node package (`n8n-nodes-deepgram`) that adds a **Deepgram** node to n8n with three resources: **Transcription** (speech-to-text via pre-recorded audio URL or file upload), **Text-to-Speech** (Aura 2 voice synthesis), and **Audio Intelligence** (summarization, topic detection, sentiment analysis). An example workflow is included that chains all three: transcribe audio → analyze it → speak the summary. 
+ +## Prerequisites + +- Node.js 18+ +- n8n installed locally or self-hosted — [install guide](https://docs.n8n.io/hosting/installation/) +- Deepgram account — [get a free API key](https://console.deepgram.com/) + +## Environment variables + +| Variable | Where to find it | +|----------|-----------------| +| `DEEPGRAM_API_KEY` | [Deepgram console](https://console.deepgram.com/) → Settings → API Keys | + +## Install and run + +```bash +# Clone and enter the example directory +cd examples/230-n8n-deepgram-community-node-typescript + +# Install dependencies +npm install + +# Build the TypeScript source +npm run build + +# Link the package into your local n8n installation +cd ~/.n8n +mkdir -p custom +cd custom +npm init -y +npm install /path/to/examples/230-n8n-deepgram-community-node-typescript + +# Start n8n — the Deepgram node will appear in the node list +n8n start +``` + +### Import the example workflow + +1. Open n8n in your browser (default: `http://localhost:5678`) +2. Go to **Workflows → Import from File** +3. Select `src/example-workflow.json` +4. Add your Deepgram API key in **Credentials → Deepgram API** +5. Click **Execute Workflow** + +## Key parameters + +| Parameter | Value | Description | +|-----------|-------|-------------| +| `model` | `nova-3` | Deepgram transcription model — best accuracy and speed | +| `smart_format` | `true` | Adds punctuation, capitalization, and number formatting | +| `diarize` | `false` | Speaker identification for multi-speaker audio | +| `voice` | `aura-2-thalia-en` | TTS voice model for speech synthesis | +| `summarize` | `v2` | Audio intelligence summarization engine | +| `topics` | `true` | Topic detection across the transcript | +| `sentiment` | `true` | Sentiment analysis per utterance | +| `tag` | `deepgram-examples` | Tags all API calls for console tracking | + +## How it works + +1. **Credential setup** — The Deepgram API credential type stores your API key and authenticates every request via `Authorization: Token `. 
A built-in test call to `GET /v1/projects` validates the key on save. + +2. **Transcription resource** — Accepts an audio URL or binary file input. Sends a `POST /v1/listen` request with configurable model, formatting, diarization, and language options. Returns the full Deepgram response including transcript, word timings, confidence scores, and metadata. + +3. **Text-to-Speech resource** — Accepts text input and a voice model selection. Sends a `POST /v1/speak` request and returns the synthesized audio as binary data (MP3), ready to be saved or passed to downstream nodes. + +4. **Audio Intelligence resource** — Sends audio to `POST /v1/listen` with intelligence features enabled (summarize, topics, sentiment, intents). Returns the transcript plus structured intelligence results. + +5. **Example workflow** — Chains Transcription → Audio Intelligence → TTS: transcribe a NASA spacewalk recording, generate a summary with topic detection, then speak the summary aloud. + +## Starter templates + +[deepgram-starters](https://github.com/orgs/deepgram-starters/repositories) diff --git a/examples/230-n8n-deepgram-community-node-typescript/package.json b/examples/230-n8n-deepgram-community-node-typescript/package.json new file mode 100644 index 0000000..2040ee8 --- /dev/null +++ b/examples/230-n8n-deepgram-community-node-typescript/package.json @@ -0,0 +1,47 @@ +{ + "name": "n8n-nodes-deepgram", + "version": "0.1.0", + "description": "n8n community nodes for Deepgram speech-to-text, text-to-speech, and audio intelligence", + "license": "MIT", + "keywords": [ + "n8n-community-node-package", + "deepgram", + "speech-to-text", + "text-to-speech", + "audio-intelligence" + ], + "author": { + "name": "Deepgram", + "url": "https://deepgram.com" + }, + "scripts": { + "build": "tsc", + "test": "node tests/test.js" + }, + "files": [ + "dist" + ], + "n8n": { + "n8nNodesApiVersion": 1, + "credentials": [ + "dist/credentials/DeepgramApi.credentials.js" + ], + "nodes": [ + 
"dist/nodes/Deepgram/Deepgram.node.js" + ] + }, + "dependencies": { + "@deepgram/sdk": "5.0.0" + }, + "devDependencies": { + "@types/node": "^25.5.0", + "n8n-workflow": "*", + "typescript": "~5.7.0" + }, + "peerDependencies": { + "n8n-workflow": "*" + }, + "engines": { + "node": ">=18" + } +} diff --git a/examples/230-n8n-deepgram-community-node-typescript/src/credentials/DeepgramApi.credentials.ts b/examples/230-n8n-deepgram-community-node-typescript/src/credentials/DeepgramApi.credentials.ts new file mode 100644 index 0000000..731cc78 --- /dev/null +++ b/examples/230-n8n-deepgram-community-node-typescript/src/credentials/DeepgramApi.credentials.ts @@ -0,0 +1,41 @@ +import type { + IAuthenticateGeneric, + ICredentialTestRequest, + ICredentialType, + INodeProperties, +} from 'n8n-workflow'; + +export class DeepgramApi implements ICredentialType { + name = 'deepgramApi'; + + displayName = 'Deepgram API'; + + documentationUrl = 'https://developers.deepgram.com/docs/authenticating'; + + properties: INodeProperties[] = [ + { + displayName: 'API Key', + name: 'apiKey', + type: 'string', + typeOptions: { password: true }, + default: '', + }, + ]; + + authenticate: IAuthenticateGeneric = { + type: 'generic', + properties: { + headers: { + Authorization: '=Token {{$credentials?.apiKey}}', + }, + }, + }; + + test: ICredentialTestRequest = { + request: { + baseURL: 'https://api.deepgram.com', + url: '/v1/projects', + method: 'GET', + }, + }; +} diff --git a/examples/230-n8n-deepgram-community-node-typescript/src/example-workflow.json b/examples/230-n8n-deepgram-community-node-typescript/src/example-workflow.json new file mode 100644 index 0000000..5664f74 --- /dev/null +++ b/examples/230-n8n-deepgram-community-node-typescript/src/example-workflow.json @@ -0,0 +1,94 @@ +{ + "name": "Deepgram STT → Intelligence → TTS Pipeline", + "nodes": [ + { + "parameters": {}, + "id": "start-node", + "name": "Start", + "type": "n8n-nodes-base.manualTrigger", + "typeVersion": 1, + 
"position": [240, 300] + }, + { + "parameters": { + "resource": "transcription", + "operation": "transcribeUrl", + "audioUrl": "https://dpgr.am/spacewalk.wav", + "model": "nova-3", + "smartFormat": true, + "diarize": true, + "language": "" + }, + "id": "transcribe-node", + "name": "Transcribe Audio", + "type": "n8n-nodes-deepgram.deepgram", + "typeVersion": 1, + "position": [460, 300], + "credentials": { + "deepgramApi": { + "id": "1", + "name": "Deepgram API" + } + } + }, + { + "parameters": { + "resource": "intelligence", + "operation": "analyze", + "intelligenceAudioUrl": "https://dpgr.am/spacewalk.wav", + "summarize": true, + "detectTopics": true, + "sentimentAnalysis": true, + "intents": false + }, + "id": "intelligence-node", + "name": "Analyze Audio", + "type": "n8n-nodes-deepgram.deepgram", + "typeVersion": 1, + "position": [680, 300], + "credentials": { + "deepgramApi": { + "id": "1", + "name": "Deepgram API" + } + } + }, + { + "parameters": { + "resource": "tts", + "operation": "speak", + "text": "={{ $json.results.summary.short }}", + "voice": "aura-2-thalia-en", + "outputBinaryField": "data" + }, + "id": "tts-node", + "name": "Speak Summary", + "type": "n8n-nodes-deepgram.deepgram", + "typeVersion": 1, + "position": [900, 300], + "credentials": { + "deepgramApi": { + "id": "1", + "name": "Deepgram API" + } + } + } + ], + "connections": { + "Start": { + "main": [ + [{ "node": "Transcribe Audio", "type": "main", "index": 0 }] + ] + }, + "Transcribe Audio": { + "main": [ + [{ "node": "Analyze Audio", "type": "main", "index": 0 }] + ] + }, + "Analyze Audio": { + "main": [ + [{ "node": "Speak Summary", "type": "main", "index": 0 }] + ] + } + } +} diff --git a/examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/Deepgram.node.json b/examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/Deepgram.node.json new file mode 100644 index 0000000..41ffee0 --- /dev/null +++ 
b/examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/Deepgram.node.json @@ -0,0 +1,18 @@ +{ + "node": "n8n-nodes-deepgram.deepgram", + "nodeVersion": "1.0", + "codexVersion": "1.0", + "categories": ["AI", "Developer Tools"], + "resources": { + "primaryDocumentation": [ + { + "url": "https://developers.deepgram.com/docs" + } + ], + "credentialDocumentation": [ + { + "url": "https://developers.deepgram.com/docs/authenticating" + } + ] + } +} diff --git a/examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/Deepgram.node.ts b/examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/Deepgram.node.ts new file mode 100644 index 0000000..2b6c42c --- /dev/null +++ b/examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/Deepgram.node.ts @@ -0,0 +1,377 @@ +import type { + IExecuteFunctions, + INodeExecutionData, + INodeType, + INodeTypeDescription, + IDataObject, +} from 'n8n-workflow'; +import { NodeConnectionTypes, NodeOperationError } from 'n8n-workflow'; +import { DeepgramClient } from '@deepgram/sdk'; + +export class Deepgram implements INodeType { + description: INodeTypeDescription = { + displayName: 'Deepgram', + name: 'deepgram', + icon: 'file:deepgram.svg', + group: ['transform'], + version: 1, + subtitle: '={{$parameter["operation"] + ": " + $parameter["resource"]}}', + description: 'Interact with the Deepgram API for speech-to-text, text-to-speech, and audio intelligence', + defaults: { + name: 'Deepgram', + }, + inputs: [NodeConnectionTypes.Main], + outputs: [NodeConnectionTypes.Main], + usableAsTool: true, + credentials: [ + { + name: 'deepgramApi', + required: true, + }, + ], + properties: [ + { + displayName: 'Resource', + name: 'resource', + type: 'options', + noDataExpression: true, + options: [ + { + name: 'Transcription', + value: 'transcription', + }, + { + name: 'Text-to-Speech', + value: 'tts', + }, + { + name: 'Audio Intelligence', + value: 'intelligence', + }, + ], + default: 
'transcription', + }, + + // ── Transcription operations ── + { + displayName: 'Operation', + name: 'operation', + type: 'options', + noDataExpression: true, + displayOptions: { show: { resource: ['transcription'] } }, + options: [ + { + name: 'Transcribe URL', + value: 'transcribeUrl', + action: 'Transcribe audio from a URL', + description: 'Transcribe audio from a publicly accessible URL', + }, + { + name: 'Transcribe File', + value: 'transcribeFile', + action: 'Transcribe an audio file', + description: 'Transcribe audio from binary input data', + }, + ], + default: 'transcribeUrl', + }, + { + displayName: 'Audio URL', + name: 'audioUrl', + type: 'string', + default: '', + required: true, + displayOptions: { show: { resource: ['transcription'], operation: ['transcribeUrl'] } }, + description: 'URL of the audio file to transcribe', + }, + { + displayName: 'Input Binary Field', + name: 'binaryField', + type: 'string', + default: 'data', + required: true, + displayOptions: { show: { resource: ['transcription'], operation: ['transcribeFile'] } }, + description: 'Name of the binary property containing the audio file', + }, + { + displayName: 'Model', + name: 'model', + type: 'options', + displayOptions: { show: { resource: ['transcription'] } }, + options: [ + { name: 'Nova-3 (General)', value: 'nova-3' }, + { name: 'Nova-3 Phone Call', value: 'nova-3-phonecall' }, + { name: 'Nova-3 Medical', value: 'nova-3-medical' }, + { name: 'Nova-2 (General)', value: 'nova-2' }, + ], + default: 'nova-3', + description: 'Deepgram transcription model to use', + }, + { + displayName: 'Smart Format', + name: 'smartFormat', + type: 'boolean', + displayOptions: { show: { resource: ['transcription'] } }, + default: true, + description: 'Whether to add punctuation, capitalization, and paragraph formatting', + }, + { + displayName: 'Diarize', + name: 'diarize', + type: 'boolean', + displayOptions: { show: { resource: ['transcription'] } }, + default: false, + description: 'Whether to 
identify different speakers in the audio', + }, + { + displayName: 'Language', + name: 'language', + type: 'string', + displayOptions: { show: { resource: ['transcription'] } }, + default: '', + description: 'BCP-47 language code (e.g. "en", "es", "fr"). Leave empty for auto-detect.', + }, + + // ── TTS operations ── + { + displayName: 'Operation', + name: 'operation', + type: 'options', + noDataExpression: true, + displayOptions: { show: { resource: ['tts'] } }, + options: [ + { + name: 'Speak', + value: 'speak', + action: 'Convert text to speech', + description: 'Convert text to audio using Deepgram Aura TTS', + }, + ], + default: 'speak', + }, + { + displayName: 'Text', + name: 'text', + type: 'string', + typeOptions: { rows: 4 }, + default: '', + required: true, + displayOptions: { show: { resource: ['tts'] } }, + description: 'Text to convert to speech', + }, + { + displayName: 'Voice', + name: 'voice', + type: 'options', + displayOptions: { show: { resource: ['tts'] } }, + options: [ + { name: 'Aura 2 Thalia (Female, English)', value: 'aura-2-thalia-en' }, + { name: 'Aura 2 Helena (Female, English)', value: 'aura-2-helena-en' }, + { name: 'Aura 2 Andromeda (Female, English)', value: 'aura-2-andromeda-en' }, + { name: 'Aura 2 Orpheus (Male, English)', value: 'aura-2-orpheus-en' }, + { name: 'Aura 2 Arcas (Male, English)', value: 'aura-2-arcas-en' }, + { name: 'Aura 2 Perseus (Male, English)', value: 'aura-2-perseus-en' }, + ], + default: 'aura-2-thalia-en', + description: 'Voice model to use for speech synthesis', + }, + { + displayName: 'Output Binary Field', + name: 'outputBinaryField', + type: 'string', + displayOptions: { show: { resource: ['tts'] } }, + default: 'data', + description: 'Name of the binary property to store the audio output', + }, + + // ── Audio Intelligence operations ── + { + displayName: 'Operation', + name: 'operation', + type: 'options', + noDataExpression: true, + displayOptions: { show: { resource: ['intelligence'] } }, + options: [ 
+ { + name: 'Analyze', + value: 'analyze', + action: 'Analyze audio with intelligence features', + description: 'Transcribe and analyze audio with summarization, topic detection, or sentiment analysis', + }, + ], + default: 'analyze', + }, + { + displayName: 'Audio URL', + name: 'intelligenceAudioUrl', + type: 'string', + default: '', + required: true, + displayOptions: { show: { resource: ['intelligence'] } }, + description: 'URL of the audio file to analyze', + }, + { + displayName: 'Summarize', + name: 'summarize', + type: 'boolean', + displayOptions: { show: { resource: ['intelligence'] } }, + default: true, + description: 'Whether to generate a summary of the audio content', + }, + { + displayName: 'Detect Topics', + name: 'detectTopics', + type: 'boolean', + displayOptions: { show: { resource: ['intelligence'] } }, + default: false, + description: 'Whether to detect topics discussed in the audio', + }, + { + displayName: 'Sentiment Analysis', + name: 'sentimentAnalysis', + type: 'boolean', + displayOptions: { show: { resource: ['intelligence'] } }, + default: false, + description: 'Whether to perform sentiment analysis on the audio', + }, + { + displayName: 'Intents', + name: 'intents', + type: 'boolean', + displayOptions: { show: { resource: ['intelligence'] } }, + default: false, + description: 'Whether to detect intents in the audio', + }, + ], + }; + + async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> { + const items = this.getInputData(); + const returnData: INodeExecutionData[] = []; + const resource = this.getNodeParameter('resource', 0) as string; + const operation = this.getNodeParameter('operation', 0) as string; + + const credentials = await this.getCredentials('deepgramApi'); + const client = new DeepgramClient({ apiKey: credentials.apiKey as string }); + + for (let i = 0; i < items.length; i++) { + try { + if (resource === 'transcription') { + const result = await handleTranscription.call(this, client, i, operation); + returnData.push({ json: 
result as unknown as IDataObject }); + } else if (resource === 'tts') { + const result = await handleTts.call(this, client, i); + returnData.push(result); + } else if (resource === 'intelligence') { + const result = await handleIntelligence.call(this, client, i); + returnData.push({ json: result as unknown as IDataObject }); + } + } catch (error) { + if (this.continueOnFail()) { + returnData.push({ + json: { error: (error as Error).message }, + pairedItem: { item: i }, + }); + continue; + } + throw new NodeOperationError(this.getNode(), error as Error, { itemIndex: i }); + } + } + + return [returnData]; + } +} + +async function handleTranscription( + this: IExecuteFunctions, + client: InstanceType<typeof DeepgramClient>, + itemIndex: number, + operation: string, +): Promise<IDataObject> { + const model = this.getNodeParameter('model', itemIndex) as string; + const smartFormat = this.getNodeParameter('smartFormat', itemIndex) as boolean; + const diarize = this.getNodeParameter('diarize', itemIndex) as boolean; + const language = this.getNodeParameter('language', itemIndex, '') as string; + + const options: IDataObject = { + model, + smart_format: smartFormat, + diarize, + tag: 'deepgram-examples', + }; + if (language) options.language = language; + + if (operation === 'transcribeUrl') { + const audioUrl = this.getNodeParameter('audioUrl', itemIndex) as string; + // SDK v5: flat options object with url included + return (await client.listen.v1.media.transcribeUrl({ + url: audioUrl, + ...options, + })) as unknown as IDataObject; + } + + const binaryField = this.getNodeParameter('binaryField', itemIndex) as string; + const buffer = await this.helpers.getBinaryDataBuffer(itemIndex, binaryField); + // SDK v5: transcribeFile takes buffer and flat options + return (await client.listen.v1.media.transcribeFile(buffer, { + ...options, + })) as unknown as IDataObject; +} + +async function handleTts( + this: IExecuteFunctions, + client: InstanceType<typeof DeepgramClient>, + itemIndex: number, +): Promise<INodeExecutionData> { + const text = 
this.getNodeParameter('text', itemIndex) as string; + const voice = this.getNodeParameter('voice', itemIndex) as string; + const outputField = this.getNodeParameter('outputBinaryField', itemIndex, 'data') as string; + + // SDK v5: speak.v1.audio.generate() returns BinaryResponse + const response = await client.speak.v1.audio.generate({ + text, + model: voice, + tag: 'deepgram-examples', + }); + + const audioBuffer = Buffer.from(await response.arrayBuffer()); + + const binaryData = await this.helpers.prepareBinaryData( + audioBuffer, + 'speech.mp3', + 'audio/mpeg', + ); + + return { + json: { success: true, voice, textLength: text.length }, + binary: { [outputField]: binaryData }, + }; +} + +async function handleIntelligence( + this: IExecuteFunctions, + client: InstanceType<typeof DeepgramClient>, + itemIndex: number, +): Promise<IDataObject> { + const audioUrl = this.getNodeParameter('intelligenceAudioUrl', itemIndex) as string; + const summarize = this.getNodeParameter('summarize', itemIndex) as boolean; + const detectTopics = this.getNodeParameter('detectTopics', itemIndex) as boolean; + const sentimentAnalysis = this.getNodeParameter('sentimentAnalysis', itemIndex) as boolean; + const intents = this.getNodeParameter('intents', itemIndex) as boolean; + + const options: IDataObject = { + model: 'nova-3', + smart_format: true, + tag: 'deepgram-examples', + }; + if (summarize) options.summarize = 'v2'; + if (detectTopics) options.topics = true; + if (sentimentAnalysis) options.sentiment = true; + if (intents) options.intents = true; + + // SDK v5: intelligence features are query params on pre-recorded transcription + return (await client.listen.v1.media.transcribeUrl({ + url: audioUrl, + ...options, + })) as unknown as IDataObject; +} diff --git a/examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/deepgram.svg b/examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/deepgram.svg new file mode 100644 index 0000000..8b0216b --- /dev/null +++ 
b/examples/230-n8n-deepgram-community-node-typescript/src/nodes/Deepgram/deepgram.svg @@ -0,0 +1,4 @@ + + + + diff --git a/examples/230-n8n-deepgram-community-node-typescript/tests/test.js b/examples/230-n8n-deepgram-community-node-typescript/tests/test.js new file mode 100644 index 0000000..076d2b0 --- /dev/null +++ b/examples/230-n8n-deepgram-community-node-typescript/tests/test.js @@ -0,0 +1,173 @@ +'use strict'; + +const fs = require('fs'); +const path = require('path'); +const https = require('https'); + +// ── Credential check — MUST be first ────────────────────────────────────── +const envExample = path.join(__dirname, '..', '.env.example'); +const required = fs.readFileSync(envExample, 'utf8') + .split('\n') + .filter((l) => /^[A-Z][A-Z0-9_]+=/.test(l.trim())) + .map((l) => l.split('=')[0].trim()); + +const missing = required.filter((k) => !process.env[k]); +if (missing.length > 0) { + console.error(`MISSING_CREDENTIALS: ${missing.join(',')}`); + process.exit(2); +} +// ────────────────────────────────────────────────────────────────────────── + +const API_KEY = process.env.DEEPGRAM_API_KEY; +const KNOWN_AUDIO_URL = 'https://dpgr.am/spacewalk.wav'; +const EXPECTED_WORDS = ['spacewalk', 'astronaut', 'nasa']; + +function request(options, body) { + return new Promise((resolve, reject) => { + const req = https.request(options, (res) => { + const chunks = []; + res.on('data', (chunk) => chunks.push(chunk)); + res.on('end', () => { + const buffer = Buffer.concat(chunks); + if (res.headers['content-type']?.includes('application/json')) { + try { + resolve({ status: res.statusCode, data: JSON.parse(buffer.toString()) }); + } catch { + resolve({ status: res.statusCode, data: buffer }); + } + } else { + resolve({ status: res.statusCode, data: buffer }); + } + }); + }); + req.on('error', reject); + if (body) req.write(typeof body === 'string' ? body : JSON.stringify(body)); + req.end(); + }); +} + +async function testCredentialValidation() { + console.log('1. 
Testing credential validation (GET /v1/projects)...'); + const res = await request({ + hostname: 'api.deepgram.com', + path: '/v1/projects', + method: 'GET', + headers: { Authorization: `Token ${API_KEY}` }, + }); + if (res.status !== 200) throw new Error(`Credential test failed: HTTP ${res.status}`); + console.log(' ✓ API key is valid'); +} + +async function testTranscribeUrl() { + console.log('2. Testing pre-recorded transcription (POST /v1/listen)...'); + const res = await request( + { + hostname: 'api.deepgram.com', + path: '/v1/listen?model=nova-3&smart_format=true&tag=deepgram-examples', + method: 'POST', + headers: { + Authorization: `Token ${API_KEY}`, + 'Content-Type': 'application/json', + }, + }, + JSON.stringify({ url: KNOWN_AUDIO_URL }), + ); + if (res.status !== 200) throw new Error(`Transcription failed: HTTP ${res.status}`); + + const transcript = res.data?.results?.channels?.[0]?.alternatives?.[0]?.transcript; + if (!transcript || transcript.length < 20) { + throw new Error(`Transcript too short or empty: "${transcript}"`); + } + + const lower = transcript.toLowerCase(); + const found = EXPECTED_WORDS.filter((w) => lower.includes(w)); + if (found.length === 0) { + throw new Error(`Expected words not found. Got: "${transcript.substring(0, 200)}"`); + } + + console.log(` ✓ Transcript received (${transcript.length} chars, found: ${found.join(', ')})`); +} + +async function testTts() { + console.log('3. Testing text-to-speech (POST /v1/speak)...'); + const res = await request( + { + hostname: 'api.deepgram.com', + path: '/v1/speak?model=aura-2-thalia-en&tag=deepgram-examples', + method: 'POST', + headers: { + Authorization: `Token ${API_KEY}`, + 'Content-Type': 'application/json', + }, + }, + JSON.stringify({ text: 'Hello from the Deepgram n8n community node.' 
}), + ); + if (res.status !== 200) throw new Error(`TTS failed: HTTP ${res.status}`); + + if (!Buffer.isBuffer(res.data) || res.data.length < 1000) { + throw new Error(`TTS response too small: ${res.data?.length ?? 0} bytes`); + } + console.log(` ✓ Audio received (${res.data.length} bytes)`); +} + +async function testAudioIntelligence() { + console.log('4. Testing audio intelligence — summarize (POST /v1/listen?summarize=v2)...'); + const res = await request( + { + hostname: 'api.deepgram.com', + path: '/v1/listen?model=nova-3&smart_format=true&summarize=v2&tag=deepgram-examples', + method: 'POST', + headers: { + Authorization: `Token ${API_KEY}`, + 'Content-Type': 'application/json', + }, + }, + JSON.stringify({ url: KNOWN_AUDIO_URL }), + ); + if (res.status !== 200) throw new Error(`Intelligence failed: HTTP ${res.status}`); + + const summary = res.data?.results?.summary?.short; + if (!summary || summary.length < 10) { + throw new Error(`Summary too short or missing: "${summary}"`); + } + console.log(` ✓ Summary received (${summary.length} chars): "${summary.substring(0, 100)}..."`); +} + +async function testTypeScriptCompilation() { + console.log('5. 
Testing TypeScript compilation...'); + const nodeFile = path.join(__dirname, '..', 'src', 'nodes', 'Deepgram', 'Deepgram.node.ts'); + const credFile = path.join(__dirname, '..', 'src', 'credentials', 'DeepgramApi.credentials.ts'); + + if (!fs.existsSync(nodeFile)) throw new Error(`Node file not found: ${nodeFile}`); + if (!fs.existsSync(credFile)) throw new Error(`Credential file not found: ${credFile}`); + + const nodeSource = fs.readFileSync(nodeFile, 'utf8'); + if (!nodeSource.includes('INodeType')) throw new Error('Node missing INodeType interface'); + if (!nodeSource.includes("tag: 'deepgram-examples'") && !nodeSource.includes("tag: \"deepgram-examples\"")) { + throw new Error('Node missing deepgram-examples tag'); + } + if (!nodeSource.includes('deepgramApi')) throw new Error('Node missing credential reference'); + + const credSource = fs.readFileSync(credFile, 'utf8'); + if (!credSource.includes('ICredentialType')) throw new Error('Credential missing ICredentialType'); + + console.log(' ✓ Source files valid'); +} + +async function run() { + await testCredentialValidation(); + await testTranscribeUrl(); + await testTts(); + await testAudioIntelligence(); + await testTypeScriptCompilation(); +} + +run() + .then(() => { + console.log('\n✓ All tests passed'); + process.exit(0); + }) + .catch((err) => { + console.error(`\n✗ Test failed: ${err.message}`); + process.exit(1); + }); diff --git a/examples/230-n8n-deepgram-community-node-typescript/tsconfig.json b/examples/230-n8n-deepgram-community-node-typescript/tsconfig.json new file mode 100644 index 0000000..a841983 --- /dev/null +++ b/examples/230-n8n-deepgram-community-node-typescript/tsconfig.json @@ -0,0 +1,30 @@ +{ + "compilerOptions": { + "strict": true, + "module": "commonjs", + "moduleResolution": "node", + "target": "es2019", + "lib": ["es2019", "es2020", "es2022.error"], + "removeComments": true, + "useUnknownInCatchVariables": false, + "forceConsistentCasingInFileNames": true, + "noImplicitAny": true, 
+ "noImplicitReturns": true, + "noUnusedLocals": true, + "strictNullChecks": true, + "preserveConstEnums": true, + "esModuleInterop": true, + "resolveJsonModule": true, + "incremental": true, + "declaration": true, + "sourceMap": true, + "skipLibCheck": true, + "outDir": "./dist/" + }, + "include": [ + "src/credentials/**/*", + "src/nodes/**/*", + "src/nodes/**/*.json", + "package.json" + ] +}