# Distributed AI client

```elixir
Mix.install(
  [
    {:kino_bumblebee, "~> 0.5"},
    {:exla, "~> 0.9", override: true}
  ],
  config: [nx: [default_backend: EXLA.Backend]]
)
```
## Connect to server

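Livebook already runs this notebook's runtime as a distributed Erlang node, so the client only needs the servers' node names and cookies. If you want to double-check the client's own identity first, here is a minimal sketch using plain `Node` functions (not part of the original flow):

```elixir
# The client's own node name and distribution cookie, for reference/debugging.
{node(), Node.get_cookie()}
```
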
```elixir
# One node-name input and one cookie input per server.
server_node_inputs =
  ["A", "B", "C"]
  |> Enum.into(%{}, fn node_id ->
    {
      node_id,
      %{
        node: Kino.Input.text("SERVER_#{node_id}_NODE_NAME"),
        cookie: Kino.Input.text("SERVER_#{node_id}_COOKIE")
      }
    }
  end)

# Render the inputs in a two-column grid so they can be filled in.
server_node_inputs
|> Enum.map(fn {_, inputs} ->
  [inputs.node, inputs.cookie]
end)
|> List.flatten()
|> Kino.Layout.grid(columns: 2)
```
```elixir
# Read each server's node name and cookie, set the cookie, and connect.
server_node_inputs
|> Enum.map(fn {_, inputs} ->
  node_name =
    inputs.node
    |> Kino.Input.read()
    |> String.to_atom()

  cookie =
    inputs.cookie
    |> Kino.Input.read()
    |> String.to_atom()

  Node.set_cookie(node_name, cookie)
  Node.connect(node_name)
end)
```
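`Node.connect/1` returns `true` or `false` per server, so the list produced by the cell above already shows which connections succeeded. As an optional sanity check (a minimal sketch, not in the original flow):

```elixir
# Nodes this client is currently connected to; every server entered above
# should appear here. A missing entry usually means a wrong name or cookie.
Node.list()
```
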
## Call server AI

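The form below sends the uploaded image to a serving registered under the name `ViT` on one of the connected servers; when no serving with that name runs locally, `Nx.Serving.batched_run/2` forwards the request to a cluster node that runs it. The server side is not part of this notebook, but it is assumed to run something along these lines (the model repository, batch size, and timeout here are placeholder choices):

```elixir
# Hypothetical server-side cell: load a ViT image classifier and register
# the serving under the name ViT, which the client looks up below.
{:ok, model_info} = Bumblebee.load_model({:hf, "google/vit-base-patch16-224"})
{:ok, featurizer} = Bumblebee.load_featurizer({:hf, "google/vit-base-patch16-224"})

serving =
  Bumblebee.Vision.image_classification(model_info, featurizer,
    compile: [batch_size: 4],
    defn_options: [compiler: EXLA]
  )

Kino.start_child({Nx.Serving, serving: serving, name: ViT, batch_timeout: 100})
```

Any image-classification serving registered as `ViT` would work equally well; the client only relies on the name and on the `predictions` list of `%{label: ..., score: ...}` maps in the result.
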
```elixir
image_input = Kino.Input.image("Image", size: {224, 224})
form = Kino.Control.form([image: image_input], submit: "Run")
frame = Kino.Frame.new()

Kino.listen(form, fn %{data: %{image: image}} ->
  if image do
    Kino.Frame.render(frame, Kino.Text.new("Running..."))

    # Kino v0.11+ image inputs return a file reference, not the raw binary.
    image =
      image.file_ref
      |> Kino.Input.file_path()
      |> File.read!()
      |> Nx.from_binary(:u8)
      |> Nx.reshape({image.height, image.width, 3})

    output = Nx.Serving.batched_run(ViT, image)

    output.predictions
    |> Enum.map(&{&1.label, &1.score})
    |> Kino.Bumblebee.ScoredList.new()
    |> then(&Kino.Frame.render(frame, &1))
  end
end)

Kino.Layout.grid([form, frame], boxed: true, gap: 16)
```