# Image Generation
```elixir
Mix.install(
  [
    {:kino_bumblebee, "~> 0.3.0"},
    {:exla, "~> 0.5.1"}
  ],
  # Use the XLA build with CUDA 11.8 support (requires an NVIDIA GPU)
  system_env: [
    {"XLA_TARGET", "cuda118"}
  ],
  # Make EXLA the default backend for all Nx operations
  config: [nx: [default_backend: EXLA.Backend]]
)
```
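
The `XLA_TARGET` value above assumes an NVIDIA GPU with CUDA 11.8 installed. As a sketch for machines without a GPU, the `system_env` entry can simply be omitted, in which case EXLA falls back to its CPU build (generation will be much slower):

```elixir
# CPU-only variant of the setup (assumption: no CUDA-capable GPU available).
# With XLA_TARGET unset, EXLA compiles for the host CPU.
Mix.install(
  [
    {:kino_bumblebee, "~> 0.3.0"},
    {:exla, "~> 0.5.1"}
  ],
  config: [nx: [default_backend: EXLA.Backend]]
)
```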

## Section

```elixir
repository_id = "CompVis/stable-diffusion-v1-4"

# Stable Diffusion v1-4 uses CLIP as its text encoder, so the tokenizer
# comes from the original CLIP repository
{:ok, tokenizer} = Bumblebee.load_tokenizer({:hf, "openai/clip-vit-large-patch14"})
{:ok, clip} = Bumblebee.load_model({:hf, repository_id, subdir: "text_encoder"})

# The U-Net predicts the noise to remove from the latents at each diffusion step
{:ok, unet} =
  Bumblebee.load_model({:hf, repository_id, subdir: "unet"},
    params_filename: "diffusion_pytorch_model.bin"
  )

# Only the VAE decoder is needed, to turn the final latents into pixel images
{:ok, vae} =
  Bumblebee.load_model({:hf, repository_id, subdir: "vae"},
    architecture: :decoder,
    params_filename: "diffusion_pytorch_model.bin"
  )

{:ok, scheduler} = Bumblebee.load_scheduler({:hf, repository_id, subdir: "scheduler"})
{:ok, featurizer} = Bumblebee.load_featurizer({:hf, repository_id, subdir: "feature_extractor"})
{:ok, safety_checker} = Bumblebee.load_model({:hf, repository_id, subdir: "safety_checker"})
```
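
Each successful `Bumblebee.load_model/2` call yields a map holding the Axon model, its trained parameters, and a spec struct describing the architecture. A quick sketch for inspecting what was loaded:

```elixir
# A model_info map holds the Axon :model, trained :params, and a :spec struct
# whose fields mirror the Hugging Face configuration.
%{spec: spec} = unet
IO.inspect(spec.__struct__, label: "unet architecture")
```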

```elixir
serving =
  Bumblebee.Diffusion.StableDiffusion.text_to_image(clip, unet, vae, tokenizer, scheduler,
    num_steps: 20,
    num_images_per_prompt: 2,
    safety_checker: safety_checker,
    safety_checker_featurizer: featurizer,
    # Compile ahead of time for fixed shapes, so requests reuse one compiled graph
    compile: [batch_size: 1, sequence_length: 50],
    defn_options: [compiler: EXLA]
  )

# Start the serving as a supervised process, registered under the
# ImageGeneration name so any cell can call it
Kino.start_child({Nx.Serving, serving: serving, name: ImageGeneration})
```
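
With the serving registered, it can be exercised directly before any UI is attached. A minimal sketch (the prompt is arbitrary), using the same `Nx.Serving.batched_run/2` call the form handler below relies on:

```elixir
# Direct call to the named serving; the output has the shape %{results: [...]}
output =
  Nx.Serving.batched_run(
    ImageGeneration,
    "a castle on a hill, high quality, detailed, digital art"
  )

for result <- output.results do
  Kino.Image.new(result.image)
end
|> Kino.Layout.grid(columns: 2)
```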

```elixir
text_input =
  Kino.Input.textarea("Text",
    default: "Elixir changes the world, high quality, detailed, digital art"
  )

form = Kino.Control.form([text: text_input], submit: "Run")
frame = Kino.Frame.new()

# Handle each form submission in its own task, so the UI stays responsive
Kino.async_listen(form, fn %{data: %{text: text}, origin: origin} ->
  # Show a status message while the serving runs
  Kino.Frame.render(frame, Kino.Text.new("Running..."))
  output = Nx.Serving.batched_run(ImageGeneration, text)

  # Render the generated images in a two-column grid, only to the client
  # that submitted the form
  for result <- output.results do
    Kino.Image.new(result.image)
  end
  |> Kino.Layout.grid(columns: 2)
  |> then(&Kino.Frame.render(frame, &1, to: origin))
end)

Kino.Layout.grid([form, frame], boxed: true, gap: 16)
```
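
Outside a named, supervised process, for example in a plain script, the same serving struct can be executed inline with `Nx.Serving.run/2`; a hedged sketch:

```elixir
# Inline, unsupervised execution of the serving built above.
# Each result carries the generated :image tensor (and, with a safety
# checker configured, a flag marking whether the image passed the check).
output = Nx.Serving.run(serving, "a watercolor painting of a fox")
Kino.Image.new(hd(output.results).image)
```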