Flow Types & Flows API
This module contains flow types and flow management APIs for the telephony system. Related APIs:

Flow Types (Core)

These types are the foundation of the flow system. The frontend needs these to build flow editors and execute flows.
// ============================================
// BRANDED TYPES
// ============================================

// Branded (nominal) string IDs: structurally still strings, but the phantom
// `__brand` property stops a NodeId being passed where a FlowId is expected
// (and vice versa) at compile time. The brand does not exist at runtime.
type FlowId = string & { readonly __brand: 'FlowId' };
type NodeId = string & { readonly __brand: 'NodeId' };

// ============================================
// VARIABLE SYSTEM
// ============================================

/** Namespace a VariableReference is resolved against at runtime. */
enum VariableType {
  FLOW_VARIABLE = 'flow_variable',       // Set during execution (SET_VARIABLE, DTMF, etc.)
  CUSTOM_ATTRIBUTE = 'custom_attribute', // Organization-defined contact attribute
  CONTACT_FIELD = 'contact_field',       // Built-in contact field (see ContactField)
  SYSTEM_VARIABLE = 'system_variable',   // Runtime-provided value (see SYSTEM_VARIABLES)
}

/** Points at a variable by namespace + name; resolved during flow execution. */
interface VariableReference {
  type: VariableType;
  name: string;
}

/** Every value a flow variable can hold (JSON-serializable primitives). */
type VariableValue = string | number | boolean | null;

/** Bag of flow variables, keyed by variable name. */
type FlowVariables = Record<string, VariableValue>;

// System variables available during execution.
// `as const` keeps each value as its string-literal type (instead of widening
// to `string`) and makes the map readonly, so key unions can be derived via
// `typeof SYSTEM_VARIABLES[keyof typeof SYSTEM_VARIABLES]` and accidental
// mutation is a compile error. Runtime values are unchanged.
const SYSTEM_VARIABLES = {
  CALL_ID: 'sys.callId',
  CALL_STATUS: 'sys.callStatus',
  CALL_DIRECTION: 'sys.callDirection',
  ANSWERED_BY: 'sys.answeredBy',
  CONTACT_ID: 'sys.contactId',
  CONTACT_PHONE: 'sys.contactPhone',
  CONTACT_NAME: 'sys.contactName',
  ORGANIZATION_ID: 'sys.organizationId',
  PROGRAM_ID: 'sys.programId',
} as const;

// ============================================
// VALUE SOURCE (Dynamic Value Resolution)
// ============================================

/**
 * Where a node-config value is read from at runtime, discriminated on
 * `source`. Used wherever configs accept `X | ValueSource`.
 */
type ValueSource =
  | FlowVariableReference
  | CustomAttributeReference
  | ContactFieldReference;

/** Reads a flow variable by name. */
interface FlowVariableReference {
  source: 'variable';
  variableName: string;
}

/** Reads a contact custom attribute by name. */
interface CustomAttributeReference {
  source: 'customAttribute';
  attributeName: string;
}

/** Reads a built-in contact field. */
interface ContactFieldReference {
  source: 'contactField';
  field: ContactField;
}

/** Built-in contact fields addressable via ContactFieldReference. */
type ContactField =
  | 'phone'
  | 'email'
  | 'firstName'
  | 'lastName'
  | 'fullName'
  | 'address'
  | 'city'
  | 'state'
  | 'zip'
  | 'occupation'
  | 'gender'
  | 'preferredChannel';

// ============================================
// NODE TYPES ENUM
// ============================================

/**
 * Discriminant for the FlowNode union. Each member corresponds to exactly
 * one concrete node interface defined below.
 */
enum NodeType {
  DIAL = 'dial',           // Initiates outbound call
  ANSWER = 'answer',       // Answers inbound call (NOT needed after DIAL)
  HANGUP = 'hangup',       // Terminates call (terminal node)
  PLAY = 'play',           // Plays audio
  COLLECT_AUDIO = 'collect_audio', // Records audio with transcription
  DTMF = 'dtmf',           // Collects digit input
  CONDITION = 'condition', // Branches based on expression
  SET_VARIABLE = 'set_variable', // Sets a variable
  SAY = 'say',             // Text-to-speech with template variables
  SMS = 'sms',             // Send SMS to contact
  UPDATE_CONTACT = 'update_contact', // Update contact custom attribute
  CONNECT_AGENT = 'connect_agent', // Connect caller to AI agent (Agent Studio)
}

// ============================================
// BASE NODE
// ============================================

/**
 * Fields shared by every flow node. Concrete nodes narrow `type` to a single
 * NodeType literal and add their own `config` / `outputs` shapes.
 */
interface BaseNode {
  id: NodeId;            // Unique within the flow graph
  type: NodeType;        // Discriminant for the FlowNode union
  label?: string;        // Optional display label (for the flow editor UI)
  description?: string;  // Optional free-text description
}

// ============================================
// DIAL NODE
// ============================================

/**
 * Initiates the outbound call leg. Exactly one output edge is followed,
 * chosen by how the dial attempt resolves (AMD result, timeout, busy, …).
 */
interface DialNode extends BaseNode {
  type: NodeType.DIAL;
  config: {
    timeout?: number;      // Default: 30000ms
    enableAMD?: boolean;   // Default: true (Answering Machine Detection)
  };
  outputs: {
    onAnswer: NodeId;      // Human detected or AMD disabled
    onVoicemail?: NodeId;  // Machine detected (only if enableAMD=true)
    onNoAnswer?: NodeId;   // Timeout
    onBusy?: NodeId;       // Busy signal
    onRejected?: NodeId;   // User hangup during ring
    onError?: NodeId;      // System failure
    default?: NodeId;      // Fallback
  };
}

// ============================================
// ANSWER NODE (Inbound calls only)
// ============================================

/**
 * Answers an inbound call. Not needed after DIAL — the outbound leg is
 * already live when DialNode's onAnswer edge fires.
 */
interface AnswerNode extends BaseNode {
  type: NodeType.ANSWER;
  config: Record<string, never>;  // No config needed
  outputs: {
    onComplete: NodeId;
    onError?: NodeId;
    default?: NodeId;
  };
}

// ============================================
// PLAY NODE
// ============================================

/** Languages supported by prerecorded sound dictionaries. */
type SoundLanguage = 'ar' | 'fr';
// 'full' reads the whole number; 'twoByTwo' presumably reads digit pairs —
// confirm against the playback engine.
type NumberMode = 'full' | 'twoByTwo';
/** How a date is spoken: numeric day/month/year, or month as a word. */
type DateMode = 'dayMonthYear' | 'monthAsWord';

/** One item in a PLAY sequence, discriminated on `type`. */
type AudioItem = AudioFileItem | NumberItem | DateItem | WordItem;

/** Plays an uploaded audio file by ID (static or resolved at runtime). */
interface AudioFileItem {
  type: 'audioFile';
  audioId: string | ValueSource;
}

/** Speaks a number using dictionary sounds. */
interface NumberItem {
  type: 'number';
  value: number | ValueSource;
  mode: NumberMode;
  language: SoundLanguage;
  dictionaryId?: string | null;  // null = system dictionary
}

/** Speaks a date using dictionary sounds. */
interface DateItem {
  type: 'date';
  value: string | ValueSource;   // DD/MM/YYYY format
  mode: DateMode;
  language: SoundLanguage;
  dictionaryId?: string | null;
}

/** Plays a single dictionary entry looked up by key. */
interface WordItem {
  type: 'word';
  value: string | ValueSource;   // Dictionary key
  dictionaryId: string | null;
  language: SoundLanguage;
}

/** Optional beep played after the audio sequence (e.g. before input). */
interface BeepConfig {
  enabled: boolean;
  key?: string;  // Dictionary key for beep sound
}

/**
 * Plays a sequence of audio items. With barge-in enabled, a DTMF press
 * interrupts playback and jumps to the configured DTMF node.
 */
interface PlayNode extends BaseNode {
  type: NodeType.PLAY;
  config: {
    audioItems: AudioItem[];
    beepAfter?: BeepConfig;
    allowBargeIn?: boolean;      // Enable DTMF interruption
    bargeInDtmfNodeId?: NodeId;  // DTMF node to jump to on barge-in
  };
  outputs: {
    onComplete: NodeId;
    onError?: NodeId;
    default?: NodeId;
  };
}

// ============================================
// DTMF NODE
// ============================================

/** Collection mode: one keypress, or a multi-digit string. */
enum DTMFMode {
  SINGLE_DIGIT = 'single_digit',
  MULTI_DIGIT = 'multi_digit',
}

/**
 * Collects keypad input into a flow variable. Routing is either per-digit
 * (`outputs.branches`) or a single `onComplete` edge — not both.
 */
interface DTMFNode extends BaseNode {
  type: NodeType.DTMF;
  config: {
    mode: DTMFMode;
    variable: string;            // Where to store input
    timeout: number;             // Overall timeout
    skipIfAlreadySet?: boolean;  // Skip if variable already has value (from barge-in)
    singleDigitConfig?: {
      allowedDigits?: string[];  // Restrict to specific digits
    };
    multiDigitConfig?: {
      minDigits: number;
      maxDigits: number;
      terminators?: string[];    // Default: ['#']
      interDigitTimeout?: number; // Default: 3000ms
    };
    retry?: {
      maxRetries: number;
      invalidAudioId?: string;   // Play on invalid input
      timeoutAudioId?: string;   // Play on timeout
    };
  };
  outputs: {
    branches?: Record<string, NodeId>;  // Branch by digit (e.g., "1" -> nodeId)
    onComplete?: NodeId;         // Collection succeeded (if not using branches)
    onTimeout?: NodeId;          // Timeout after retries
    onInvalid?: NodeId;          // Invalid input after retries
    onMaxRetries?: NodeId;
    onError?: NodeId;
    default?: NodeId;
  };
}

// ============================================
// COLLECT_AUDIO NODE
// ============================================

/** Languages accepted for speech-to-text; 'auto' lets the engine detect. */
type TranscriptionLanguage = 'ar-MA' | 'fr-FR' | 'en-US' | 'auto';

/**
 * Records the caller's speech, stores the recording ID in a flow variable,
 * and optionally transcribes it into a second variable.
 */
interface CollectAudioNode extends BaseNode {
  type: NodeType.COLLECT_AUDIO;
  config: {
    // Recording settings
    maxDuration: number;              // Max recording duration in seconds
    firstSilenceTimeout: number;      // Time in seconds to wait for user to start speaking
    silenceDetection?: boolean;       // Stop on silence (default: true)
    silenceDuration?: number;         // Silence duration to stop in seconds (default: 2)
    beep?: boolean;                   // Play beep before recording (default: false)

    // Retry settings
    maxRetry?: number;                // Max retry attempts if no speech (default: 0)
    fallbackAudio?: AudioItem;        // Audio to play when no speech detected, before retry

    // Variable to store recording ID
    recordingIdVariable: string;      // Variable name to store the recording ID

    // Transcription settings (optional)
    transcription?: boolean;          // Enable speech-to-text (default: false)
    languages?: TranscriptionLanguage[]; // Languages for transcription (default: ['auto'])
    transcriptionVariable?: string;   // Variable name to store transcription text
  };
  outputs: {
    onComplete: NodeId;               // Recording completed successfully (user spoke)
    onMaxDuration?: NodeId;           // Max duration reached (user kept speaking)
    onNoSpeech?: NodeId;              // No speech detected after retries
    onError?: NodeId;                 // System error
    default?: NodeId;                 // Fallback
  };
}

// ============================================
// CONDITION NODE
// ============================================

/** Comparison operators available in SimpleCondition. */
enum ConditionOperator {
  EQ = 'eq',
  NEQ = 'neq',
  GT = 'gt',
  LT = 'lt',
  GTE = 'gte',
  LTE = 'lte',
  CONTAINS = 'contains',
  STARTS_WITH = 'startsWith',
  ENDS_WITH = 'endsWith',
}

// Recursive expression tree: leaves are SimpleConditions, interior nodes
// combine children with AND / OR.
type ConditionExpression = SimpleCondition | AndCondition | OrCondition;

/** Leaf comparison: variable <operator> value (value may itself be a reference). */
interface SimpleCondition {
  type: 'simple';
  variable: VariableReference;
  operator: ConditionOperator;
  value: VariableValue | VariableReference;
}

/** True only if every child condition is true. */
interface AndCondition {
  type: 'and';
  conditions: ConditionExpression[];
}

/** True if any child condition is true. */
interface OrCondition {
  type: 'or';
  conditions: ConditionExpression[];
}

/** Evaluates an expression tree and branches to onTrue / onFalse. */
interface ConditionNode extends BaseNode {
  type: NodeType.CONDITION;
  config: {
    expression: ConditionExpression;
  };
  outputs: {
    onTrue: NodeId;
    onFalse: NodeId;
    onError?: NodeId;
  };
}

// ============================================
// SET_VARIABLE NODE
// ============================================

/**
 * SET_VARIABLE sets a flow variable to a static value or variable reference.
 * References are resolved at execution time before assignment.
 */
interface SetVariableNode extends BaseNode {
  type: NodeType.SET_VARIABLE;
  config: {
    variable: string;                          // Name of the flow variable to set
    value: VariableValue | VariableReference;  // Static value or reference to resolve
  };
  outputs: {
    onComplete: NodeId;
    onError?: NodeId;
    default?: NodeId;
  };
}

// ============================================
// SAY NODE - Text-to-Speech
// ============================================

/**
 * Language codes for TTS synthesis.
 */
type TTSLanguage =
  | 'ar-MA'  // Moroccan Darija (primary)
  | 'ar'     // Modern Standard Arabic
  | 'fr'     // French
  | 'en';    // English

/** Voice gender for TTS voice selection. */
type VoiceGender = 'male' | 'female';
/** Voice tone/style for TTS voice selection. */
type VoiceTone = 'neutral' | 'friendly' | 'formal';

/**
 * SAY Node - Text-to-speech with template variable support.
 *
 * Template syntax: {{ $source.path }}
 * Sources:
 * - $contact.fieldName (contact fields and custom attributes)
 * - $variables.varName (flow variables)
 * - $call.from / $call.to / $call.direction (call info)
 *
 * @example "Hello {{ $contact.firstName }}, your balance is {{ $variables.balance }} dirhams."
 */
interface SayNode extends BaseNode {
  type: NodeType.SAY;
  config: {
    /**
     * Template text with {{ $source.path }} placeholders.
     * Missing variables cause node failure (no silent fallback).
     */
    text: string;

    /**
     * Language/dialect for TTS.
     * Determines voice selection and pronunciation.
     */
    language: TTSLanguage;

    /**
     * Voice gender. Default: 'female'
     */
    gender?: VoiceGender;

    /**
     * Voice tone/style. Default: 'neutral'
     */
    tone?: VoiceTone;

    /**
     * Allow DTMF barge-in (same as PlayNode).
     * A keypress interrupts speech and jumps to bargeInDtmfNodeId.
     */
    allowBargeIn?: boolean;
    bargeInDtmfNodeId?: NodeId;
  };
  outputs: {
    onComplete: NodeId;
    onError?: NodeId;
    default?: NodeId;
  };
}

// ============================================
// SMS NODE - Send SMS during call flow
// ============================================

/**
 * SMS Node - Send SMS to contact during call flow execution.
 *
 * Uses same template syntax as SAY node: {{ $source.path }}
 * Sources:
 * - $contact.fieldName (contact fields and custom attributes)
 * - $variables.varName (flow variables)
 * - $call.from / $call.to / $call.direction (call info)
 *
 * Cost: 1.887 credits per SMS (billed at end of call)
 *
 * @example messageTemplate: "Your confirmation code is {{ $variables.code }}"
 */
interface SmsNode extends BaseNode {
  type: NodeType.SMS;
  config: {
    /**
     * Message template with {{ $source.path }} placeholders.
     * Missing variables cause node failure (no silent fallback).
     */
    messageTemplate: string;

    /**
     * Sender ID for the SMS (phone number or alphanumeric).
     * Can also use template: "{{ $variables.senderId }}"
     */
    senderId: string;
  };
  outputs: {
    onComplete: NodeId;    // SMS sent successfully
    onError?: NodeId;      // SMS sending failed (non-fatal, call continues)
    default?: NodeId;      // Fallback
  };
}

// ============================================
// UPDATE_CONTACT NODE - Update contact custom attribute
// ============================================

/**
 * UPDATE_CONTACT Node - Update a contact's custom attribute during call flow.
 *
 * Uses same template syntax as SAY/SMS nodes for dynamic values: {{ $source.path }}
 * Sources:
 * - $contact.fieldName (contact fields and custom attributes)
 * - $variables.varName (flow variables)
 * - $call.from / $call.to / $call.direction (call info)
 *
 * Value types:
 * - Static values: string, number, or boolean (e.g., "gold", 100, true)
 * - Template values: "{{ $variables.score }}" (resolved at runtime)
 *
 * Cost: Free (0 credits)
 *
 * @example
 * // Static value
 * { attributeName: "tier", value: "gold" }
 *
 * // From flow variable
 * { attributeName: "credit_score", value: "{{ $variables.calculated_score }}" }
 *
 * // From another contact attribute
 * { attributeName: "previous_tier", value: "{{ $contact.tier }}" }
 */
interface UpdateContactNode extends BaseNode {
  type: NodeType.UPDATE_CONTACT;
  config: {
    /**
     * Name of the custom attribute to update.
     * Must be a valid custom attribute defined for the organization.
     */
    attributeName: string;

    /**
     * Value to set. Can be:
     * - Static: string, number, or boolean
     * - Template: "{{ $source.path }}" for dynamic resolution
     *
     * Type must match the attribute's defined type (TEXT, NUMBER, BOOLEAN).
     */
    value: string | number | boolean;
  };
  outputs: {
    onComplete: NodeId;    // Update successful
    onError?: NodeId;      // Update failed (non-fatal, call continues)
    default?: NodeId;      // Fallback
  };
}

// ============================================
// CONNECT_AGENT NODE - AI Voice Conversation
// ============================================

/**
 * Voice configuration is now defined at the agent level (AgentVoiceConfig)
 * and no longer overridden per-node. See Agent Studio API for details.
 *
 * Pipeline modes:
 * - 'batch': Deepgram Nova-3 (STT) → LLM → ElevenLabs Flash v2.5 (TTS)
 * - 'streaming': Same STT/TTS providers with streaming LLM + sentence-chunked TTS
 * - 'sts' (future): Speech-to-speech via OpenAI Realtime or Gemini Live
 */

/**
 * Context variable mapping - passes flow variables to agent context.
 * Read once when the conversation starts.
 */
interface ConnectAgentContextVariable {
  /** Flow variable name to read from */
  flowVariable: string;
  /** Key name in agent context */
  contextKey: string;
  /** Optional description for agent (included in context) */
  description?: string;
}

/**
 * Variable extraction configuration.
 * Extracts data from conversation to flow variables.
 * Applied after the conversation ends, before finalization.
 */
interface ConnectAgentExtractVariable {
  /** Variable name to store extracted value */
  variableName: string;
  /** Extraction method */
  method: 'last_response' | 'pattern' | 'semantic';
  /** Regex pattern (required for 'pattern' method) */
  pattern?: string;
  /** LLM prompt (required for 'semantic' method) - FUTURE */
  prompt?: string;
}

/**
 * Exit reason from connect-agent conversation.
 * Maps to ConnectAgentNode's outputs (onComplete, onExitPhrase, …).
 */
type ConnectAgentExitReason =
  | 'completed'           // Agent signaled completion (phrase_match mode: [COMPLETE])
  | 'function_call_exit'  // Agent called end_conversation tool (function_call mode)
  | 'exit_phrase'         // User said an exit phrase
  | 'max_turns'           // Maximum turns reached
  | 'timeout'             // Conversation or turn timeout
  | 'user_hangup'         // User hung up
  | 'error';              // System/agent error

/**
 * CONNECT_AGENT Node - Connects caller to an AI agent.
 *
 * Flow:
 * 1. Load agent config from Agent Studio
 * 2. Create conversation with Mastra
 * 3. Play initial greeting (if configured)
 * 4. Loop: Listen (STT) → Agent Response (LLM) → Speak (TTS)
 * 5. Check exit conditions after each turn
 * 6. Extract variables and finalize (billing, persistence)
 *
 * Exit conditions (see ConnectAgentExitReason):
 * - completed: Agent signals completion (phrase_match mode: response contains [COMPLETE])
 * - function_call_exit: Agent called end_conversation tool (function_call mode)
 * - exit_phrase: User says a configured exit phrase
 * - max_turns: Maximum turn limit reached
 * - timeout: Conversation or turn timeout
 * - user_hangup: User hung up the call
 * - error: System/agent error occurred
 *
 * See: Agent Studio API for agent configuration.
 *
 * @example
 * {
 *   "id": "node_agent_1",
 *   "type": "connect_agent",
 *   "config": {
 *     "agentId": "550e8400-e29b-41d4-a716-446655440000",
 *     "maxTurns": 10,
 *     "conversationTimeout": 300000,
 *     "turnTimeout": 10000,
 *     "exitPhrases": ["goodbye", "bye"],
 *     "exitMode": "function_call",
 *     "initialMessage": "Hello! How can I help you today?",
 *     "contextVariables": [
 *       { "flowVariable": "customerName", "contextKey": "customer_name" }
 *     ],
 *     "extractVariables": [
 *       { "variableName": "intent", "method": "last_response" }
 *     ]
 *   },
 *   "outputs": {
 *     "onComplete": "node_next",
 *     "onExitPhrase": "node_goodbye",
 *     "onHangup": "node_end"
 *   }
 * }
 */
interface ConnectAgentNode extends BaseNode {
  type: NodeType.CONNECT_AGENT;
  config: {
    /**
     * ID of the agent to connect (from Agent Studio).
     * Agent must be active and belong to the same organization.
     */
    agentId: string;

    /**
     * Maximum conversation turns before auto-exit.
     * Default: 10, Range: 1-50
     */
    maxTurns?: number;

    /**
     * Total conversation timeout in milliseconds.
     * Default: 300000 (5 minutes), Range: 30000-600000
     */
    conversationTimeout?: number;

    /**
     * Per-turn silence timeout in milliseconds.
     * How long to wait for user to speak before timing out.
     * Default: 10000 (10 seconds), Range: 3000-30000
     */
    turnTimeout?: number;

    /**
     * Phrases that trigger immediate exit.
     * Case-insensitive partial match against user transcripts.
     * Default: ['goodbye', 'bye', 'thank you goodbye']
     */
    exitPhrases?: string[];

    /**
     * Flow variables to pass as agent context.
     * Available to agent in system prompt context.
     */
    contextVariables?: ConnectAgentContextVariable[];

    /**
     * Variables to extract from conversation.
     * Extracted after conversation ends, before finalization.
     */
    extractVariables?: ConnectAgentExtractVariable[];

    /**
     * Initial message for agent to speak.
     * Played before first user turn (greeting message).
     */
    initialMessage?: string;

    /**
     * Whether to record the full conversation audio.
     * Default: false (FUTURE FEATURE)
     */
    recordConversation?: boolean;

    /**
     * Exit detection mode.
     * - 'function_call': LLM calls end_conversation tool to exit (default).
     *   Phrase matching still works as fallback. [COMPLETE] matching is disabled.
     * - 'phrase_match': Legacy string matching. No tools injected.
     *   Exit phrases + [COMPLETE] string matching.
     * Default: 'function_call'
     */
    exitMode?: 'function_call' | 'phrase_match';
  };
  // One edge per ConnectAgentExitReason (error/unhandled fall back to default).
  outputs: {
    /** Normal completion path (agent completed or function call exit) */
    onComplete: NodeId;
    /** Exit phrase detected path */
    onExitPhrase?: NodeId;
    /** Max turns reached path */
    onMaxTurns?: NodeId;
    /** Timeout path (conversation or turn timeout) */
    onTimeout?: NodeId;
    /** User hangup path */
    onHangup?: NodeId;
    /** Error path (system/agent failures) */
    onError?: NodeId;
    /** Fallback for unhandled cases */
    default?: NodeId;
  };
}

// ============================================
// HANGUP NODE (Terminal)
// ============================================

/**
 * Terminates the call. Terminal node: it has no outgoing edges.
 *
 * NOTE: the previous declaration used `outputs: never`, which makes the
 * interface uninhabitable — a *required* property of type `never` can never
 * be satisfied, so no valid HangupNode value could ever be constructed.
 * `outputs?: never` keeps the intent (providing outputs is a compile error)
 * while allowing the property to simply be omitted.
 */
interface HangupNode extends BaseNode {
  type: NodeType.HANGUP;
  config: {
    /** Optional reason for audit trail */
    reason?: string;
  };
  outputs?: never;  // Terminal node - no outputs allowed
}

// ============================================
// FLOW NODE (Discriminated Union)
// ============================================

// Discriminated union over all node kinds; narrow on `type` (NodeType)
// in a switch for exhaustive handling of every node variant.
type FlowNode =
  | DialNode
  | AnswerNode
  | PlayNode
  | CollectAudioNode
  | DTMFNode
  | ConditionNode
  | SetVariableNode
  | SayNode
  | SmsNode
  | UpdateContactNode
  | ConnectAgentNode
  | HangupNode;

// ============================================
// FLOW DEFINITION
// ============================================

/** The executable graph: all nodes plus the entry point. */
interface FlowGraph {
  nodes: FlowNode[];
  startNodeId: NodeId;  // Entry node; should match the id of one of `nodes`
}

/** Free-form flow metadata; known keys are typed, extras allowed. */
interface FlowMetadata {
  tags?: string[];
  category?: string;
  author?: string;
  version?: string;
  [key: string]: unknown;
}

/**
 * Declares the flow's expected input variables (name -> spec), used to
 * validate/default `initialVariables` at execution time.
 */
interface VariableSchema {
  [key: string]: {
    type: 'string' | 'number' | 'boolean';
    required?: boolean;
    defaultValue?: VariableValue;
    description?: string;
  };
}

/** A stored, versioned flow owned by an organization. */
interface FlowDefinition {
  id: FlowId;
  organizationId: string;
  name: string;
  description?: string;
  version: number;       // Incremented on update (see "Get latest version" endpoint)
  metadata: FlowMetadata;
  graph: FlowGraph;
  variableSchema?: VariableSchema;
  createdAt: string;   // ISO date
  updatedAt: string;   // ISO date
}

// ============================================
// EXECUTION TYPES
// ============================================

/** Terminal outcome of a flow execution. */
enum FlowExecutionOutcome {
  COMPLETED = 'completed',
  USER_HANGUP = 'user_hangup',
  FAILED = 'failed',
  TIMEOUT = 'timeout',
  NO_ANSWER = 'no_answer',
  BUSY = 'busy',
  REJECTED = 'rejected',
  CANCELLED = 'cancelled',
}

/** Full result of one flow execution, returned by the execute endpoint. */
interface FlowExecutionResult {
  callId: string;
  flowId: string;
  outcome: FlowExecutionOutcome;
  outcomeReason: string;                // Human-readable explanation of the outcome
  finalVariables: FlowVariables;        // Variable state when the flow ended (incl. sys.* vars)
  error?: FlowExecutionErrorDetails;    // Present only when the execution failed
  sessionSnapshot: CallSessionSnapshot;
  timing: ExecutionTiming;
}

/** Error details attached to failed executions. */
interface FlowExecutionErrorDetails {
  type: string;
  message: string;
  code?: string;
  nodeId?: string;   // Node where the failure occurred, when known
  stack?: string;
}

/** Snapshot of the underlying call session at flow end. */
interface CallSessionSnapshot {
  callId: string;
  status: string;
  direction: string;
  createdAt: string;      // ISO date
  answeredAt?: string;    // ISO date; absent if never answered
  terminatedAt?: string;  // ISO date
  durationMs?: number;
}

/** Wall-clock timing of the flow execution itself. */
interface ExecutionTiming {
  startedAt: string;        // ISO date
  completedAt: string;      // ISO date
  durationMs: number;
  nodeExecutionCount: number;  // Total nodes executed (loops count repeats)
}

// ============================================
// VALIDATION TYPES
// ============================================

/** One validation problem found in a flow. */
interface ValidationError {
  field: string;    // Path of the offending field
  message: string;  // Human-readable description
  code?: string;    // Machine-readable error code
}

/** Response of the validate endpoint; `errors` is empty when valid. */
interface ValidateFlowResponse {
  valid: boolean;
  errors: ValidationError[];
}

Flows API

Base path: /flows

Request/Response Types

/** Payload for creating a new flow. */
interface CreateFlowInput {
  name: string;
  description?: string;
  graph: FlowGraph;
  metadata?: FlowMetadata;
  variableSchema?: VariableSchema;
}

/** Payload for updating a flow; every field is optional (partial update). */
interface UpdateFlowInput {
  name?: string;
  description?: string;
  graph?: FlowGraph;
  metadata?: FlowMetadata;
  variableSchema?: VariableSchema;
}

interface ExecuteFlowRequest {
  flowId: string;
  contactId: string;
  fromPhone: string;
  initialVariables?: Record<string, string | number | boolean | null>;
}

// Response is FlowExecutionResult (see above)

Endpoints

Create a new flow


Get all flows


Get flow by ID


Get latest version of a flow by name


Update an existing flow


Delete a flow



Validate a flow without saving


Execute a flow

curl -X POST https://api.gomobile.ma/api/flows/execute \
  -H "x-api-key: YOUR_API_KEY" \
  -H "Content-Type: application/json" \
  -d '
{
  "flowId": "flow_789",
  "contactId": "contact_123",
  "fromPhone": "+212987654321",
  "initialVariables": {
    "campaign": "winter_promo"
  }
}
'
Response:
{
  "callId": "call_abc123",
  "flowId": "flow_789",
  "outcome": "COMPLETED",
  "outcomeReason": "Flow completed successfully",
  "finalVariables": {
    "sys.callId": "call_abc123",
    "sys.callDirection": "outbound",
    "campaign": "winter_promo",
    "dtmf.response": "1"
  },
  "sessionSnapshot": {
    "callId": "call_abc123",
    "status": "terminated",
    "direction": "outbound",
    "createdAt": "2025-01-15T10:30:00.000Z",
    "answeredAt": "2025-01-15T10:30:05.000Z",
    "terminatedAt": "2025-01-15T10:31:30.000Z",
    "durationMs": 85000
  },
  "timing": {
    "startedAt": "2025-01-15T10:30:00.000Z",
    "completedAt": "2025-01-15T10:31:30.000Z",
    "durationMs": 90000,
    "nodeExecutionCount": 5
  }
}