Skip to the content.

Please note that there might be a delay in loading the embedding_model. It is advisable to set up your own caching mechanism for it. For example, you may see the following line:
Fetching 5 files: 100%|██████████| 5/5 [00:00<00:00, 15318.86it/s]

Step 1: Create an Agent using the POST API http://localhost:5001/agent with the payload:

{
  "agent_config": {
      "agent_name": "Alfred",
      "agent_type": "other",
      "agent_welcome_message": "How are you doing Bruce?",
      "tasks": [
          {
              "task_type": "conversation",
              "toolchain": {
                  "execution": "parallel",
                  "pipelines": [
                      [
                          "transcriber",
                          "llm",
                          "synthesizer"
                      ]
                  ]
              },
              "tools_config": {
                  "input": {
                      "format": "wav",
                      "provider": "twilio"
                  },
                  "llm_agent": {
                      "agent_type": "simple_llm_agent",
                      "agent_flow_type": "streaming",
                      "routes": {
                        "embedding_model": "snowflake/snowflake-arctic-embed-m",
                        "routes": [
                          {
                            "route_name": "politics",
                            "utterances": [
                              "Who do you think will win the elections?",
                              "Whom would you vote for?"
                            ],
                            "response": "Hey, thanks but I do not have opinions on politics",
                            "score_threshold": 0.9
                          }
                        ]
                      },
                      "llm_config": {
                          "agent_flow_type": "streaming",
                          "provider": "openai",
                          "request_json": true,
                          "model": "gpt-4o-mini"
                      }
                  },
                  "output": {
                      "format": "wav",
                      "provider": "twilio"
                  },
                  "synthesizer": {
                      "audio_format": "wav",
                      "provider": "elevenlabs",
                      "stream": true,
                      "provider_config": {
                          "voice": "George",
                          "model": "eleven_turbo_v2_5",
                          "voice_id": "JBFqnCBsd6RMkjVDRZzb"
                      },
                      "buffer_size": 100.0
                  },
                  "transcriber": {
                      "encoding": "linear16",
                      "language": "en",
                      "provider": "deepgram",
                      "stream": true
                  }
              },
              "task_config": {
                  "hangup_after_silence": 30.0
              }
          }
      ]
  },
  "agent_prompts": {
      "task_1": {
          "system_prompt": "Why Do We Fall, Sir? So That We Can Learn To Pick Ourselves Up."
      }
  }
}


Step 2: The response of the previous API will contain the agent_id.
Use this agent_id to initiate a call via the telephony server running on 8001 port (for Twilio) http://localhost:8001/call.

  {
    "agent_id": "4c19700b-227c-4c2d-8b9f-42dfe4b240fc",
    "recipient_phone_number": "+19876543210"
  }