Powered by AppSignal & Oban Pro

SubAgent Examples

livebooks/ptc_runner_llm_agent.livemd

SubAgent Examples

# Dependency bootstrap: a local checkout of the repo uses path deps so the
# notebook exercises the working tree; otherwise fall back to the Hex release.
repo_root = Path.expand("..", __DIR__)
local_checkout? = repo_root |> Path.join("mix.exs") |> File.exists?()

deps =
  if local_checkout? do
    [{:ptc_runner, path: repo_root}, {:llm_client, path: Path.join(repo_root, "llm_client")}]
  else
    [{:ptc_runner, "~> 0.5.0"}]
  end

# consolidate_protocols: false keeps recompilation fast while iterating locally.
Mix.install(deps ++ [{:req_llm, "~> 1.0"}, {:kino, "~> 0.14"}], consolidate_protocols: false)

Setup

# For testing locally and reloading the library
# IEx.Helpers.recompile()

# Load LLM setup: local file if available, otherwise fetch from GitHub
# Load LLM setup helpers: prefer the copy sitting next to this notebook,
# otherwise fetch it from the project's GitHub repository.
setup_path = Path.join(__DIR__, "llm_setup.exs")
setup_url = "https://raw.githubusercontent.com/andreasronge/ptc_runner/main/livebooks/llm_setup.exs"

if File.exists?(setup_path) do
  Code.require_file(setup_path)
else
  # NOTE(review): this evaluates remote code; acceptable only because the URL
  # points at this project's own repository.
  %{body: source} = Req.get!(setup_url)
  Code.eval_string(source)
end

"LLM Setup loaded"

Choose your provider:

# Read the provider choice, configure it, then read the model choice and
# build the LLM handle used by every example below.
provider = Kino.Input.read(LLMSetup.provider_input())
LLMSetup.configure_provider(provider)
model = Kino.Input.read(LLMSetup.model_input(provider))
my_llm = LLMSetup.create_llm(model)
"Ready: #{model}"

Output Modes

SubAgents support two output modes:

| Mode | Use When | Output |
| --- | --- | --- |
| `:json` | Classification, extraction, summarization | Structured JSON |
| `:ptc_lisp` (default) | Computation, tool orchestration, multi-step reasoning | PTC-Lisp program result |

JSON Mode - Direct LLM Tasks

Use output: :json when the LLM can answer directly without computation:

alias PtcRunner.SubAgent
alias PtcRunner.SubAgent.Debug

# Sample text to classify; interpolated into the prompt via {{review}}.
review = "Great product, fast shipping! Would buy again."

prompt = "Classify as positive/negative/neutral with confidence 0.0-1.0: {{review}}"

# output: :json asks the LLM for structured JSON matching the signature.
{:ok, step} =
  SubAgent.run(prompt,
    llm: my_llm,
    context: %{review: review},
    signature: "(review :string) -> {sentiment :string, confidence :float}",
    output: :json
  )

Debug.print_trace(step, raw: true)
step.return

PTC-Lisp Mode - Computational Tasks

The default mode. The LLM writes a program to solve tasks that need accurate computation:

# Single-shot (max_turns: 1): one LLM call that produces a program whose
# evaluation yields the answer, rather than the LLM guessing a count.
{:ok, step} =
  "How many r's are in raspberry?"
  |> SubAgent.run(llm: my_llm, max_turns: 1)

Debug.print_trace(step, raw: true)
step.return

Execution Modes

| `max_turns` | Mode | Behavior |
| --- | --- | --- |
| 1 | Single-shot | One LLM call, answer immediately |
| > 1 (default: 10) | Multi-turn | Can iterate, fix errors, explore data |

Single-shot is faster and cheaper - use when the task is straightforward.

Multi-turn allows the LLM to inspect results with println, retry on errors, and call return when confident.

Signatures

Signatures define input/output types. They work with both output modes.

Format: `(input1 :type, input2 :type) -> output_type`

| Type | Description |
| --- | --- |
| `:string`, `:int`, `:float`, `:bool` | Primitives |
| `{field :type, ...}` | Object with named fields |
| `[element_type]` | List of elements |
| `{:optional, :type}` | Optional field |
# Two string inputs -> object holding a score plus an explanation.
sig1 = "(text1 :string, text2 :string) -> {similarity :float, explanation :string}"

# A list-of-objects input -> object holding two categorized lists.
sig2 = "(items [{name :string, price :float}]) -> {expensive [{name :string}], cheap [{name :string}]}"

# Output-only signature: nothing is pulled from the context.
sig3 = "{count :int, items [:string]}"

:ok

Compiled SubAgents

Compile an agent once to derive reusable PTC-Lisp logic. Runs without further LLM calls:

# Compile once: the LLM derives a reusable PTC-Lisp program up front, which
# can then be executed on arbitrary inputs with no further LLM calls.
agent =
  SubAgent.new(
    prompt: "Count r's in {{word}}",
    signature: "(word :string) -> :int",
    max_turns: 1
  )

{:ok, compiled} = SubAgent.compile(agent, llm: my_llm)

IO.puts("Compiled source:\n#{compiled.source}")
# Run the compiled program over several inputs - no LLM calls here.
for word <- ["strawberry", "raspberry", "program", "error"] do
  step = compiled.execute.(%{"word" => word}, [])
  "#{word}: #{step.return}"
end

Working with Tools

Tools let agents fetch external data or perform actions:

# Sample expense records, shaped with string keys like a real tool/API payload.
expenses =
  for {id, category, amount, vendor} <- [
        {1, "travel", 450.00, "Airlines Inc"},
        {2, "food", 32.50, "Cafe Luna"},
        {3, "travel", 189.00, "Hotel Central"},
        {4, "office", 299.99, "Tech Store"},
        {5, "food", 28.00, "Deli Express"}
      ] do
    %{"id" => id, "category" => category, "amount" => amount, "vendor" => vendor}
  end

# Expose the expense data to agents as a tool. Each tool maps a name to a
# {function, metadata} pair; the signature tells the LLM the return shape.
list_expenses = fn _args -> expenses end

tools = %{
  "list-expenses" =>
    {list_expenses,
     signature: "() -> [{id :int, category :string, amount :float, vendor :string}]",
     description: "Returns all expense records"}
}

Kino.DataTable.new(expenses)
# The agent calls list-expenses, then computes the total itself.
{:ok, step} =
  SubAgent.run("What is the total travel expense?",
    llm: my_llm,
    signature: "{total :float}",
    tools: tools
  )

Debug.print_trace(step, raw: true)
step.return

Interactive Query

question_input = Kino.Input.textarea("Question", default: "Show spending by category")
question = Kino.Input.read(question_input)

# Run the ad-hoc question; the trace is printed either way, and a failed run
# surfaces the agent's failure message instead of a return value.
result = SubAgent.run(question, tools: tools, llm: my_llm)

case result do
  {:ok, step} ->
    Debug.print_trace(step)
    step.return

  {:error, step} ->
    Debug.print_trace(step)
    "Failed: #{step.fail.message}"
end

Debug Options

# Preview the prompt before running
agent = SubAgent.new(prompt: "What is 2 + 2?")
SubAgent.preview_prompt(agent).system |> IO.puts()

print_trace options:

| Option | Description |
| --- | --- |
| `raw: true` | Show raw LLM input/output |
| `messages: true` | Show all messages including system prompt |
| `usage: true` | Show token usage |
| `view: :compressed` | Show what LLM sees (compressed format) |

Learn More