
Bumblebee_StableDiffusion_Example.livemd

Bumblebee Examples

Mix.install(
  [
    {:bumblebee, "~> 0.5"},
    {:axon, "~> 0.6"},
    {:nx, "~> 0.7"},
    {:kino_bumblebee, "~> 0.5"},
    {:exla, "~> 0.7.1"}
  ],
  config: [
    nx: [default_backend: EXLA.Backend]
  ],
  system_env: [
    # Build XLA for CUDA 12 GPUs; drop this (or set it to "cpu") to run on CPU only
    XLA_TARGET: "cuda120"
  ]
)
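
Optional sanity check, as a minimal sketch: confirm Nx is using the EXLA backend before loading any models, so the heavy work runs on the configured device.

# Should report {EXLA.Backend, ...}; the small computation confirms EXLA compiles and runs
Nx.default_backend() |> IO.inspect(label: "default backend")
Nx.add(Nx.tensor([1, 2, 3]), 1)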

Text to image

Set up Stable Diffusion

repo_id = "CompVis/stable-diffusion-v1-4"
opts = [params_variant: "fp16", type: :bf16, backend: EXLA.Backend]

# Load each pipeline component: CLIP text encoder, U-Net, VAE decoder,
# noise scheduler, and the safety checker with its featurizer
{:ok, tokenizer} = Bumblebee.load_tokenizer({:hf, "openai/clip-vit-large-patch14"})
{:ok, clip} = Bumblebee.load_model({:hf, repo_id, subdir: "text_encoder"}, opts)
{:ok, unet} = Bumblebee.load_model({:hf, repo_id, subdir: "unet"}, opts)
{:ok, vae} = Bumblebee.load_model({:hf, repo_id, subdir: "vae"}, [architecture: :decoder] ++ opts)
{:ok, scheduler} = Bumblebee.load_scheduler({:hf, repo_id, subdir: "scheduler"})
{:ok, featurizer} = Bumblebee.load_featurizer({:hf, repo_id, subdir: "feature_extractor"})
{:ok, safety_checker} = Bumblebee.load_model({:hf, repo_id, subdir: "safety_checker"}, opts)

Start serving

serving =
  Bumblebee.Diffusion.StableDiffusion.text_to_image(clip, unet, vae, tokenizer, scheduler,
    num_steps: 20,
    num_images_per_prompt: 2,
    safety_checker: safety_checker,
    safety_checker_featurizer: featurizer,
    compile: [batch_size: 1, sequence_length: 30],
    # Option 1
    defn_options: [compiler: EXLA]
    # Option 2 (reduces GPU usage, but runs noticeably slower)
    # Also remove `backend: EXLA.Backend` from the loading options above
    # defn_options: [compiler: EXLA, lazy_transfers: :always]
  )

Kino.start_child({Nx.Serving, name: StableDiffusion, serving: serving})
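
If you want a quick smoke test before wiring up the inputs below, the serving can also be invoked directly. This is a one-off call with a placeholder prompt, shown commented out because it runs a full generation:

# Nx.Serving.run(serving, %{prompt: "a lighthouse at sunset", negative_prompt: ""})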

Set up the text inputs

prompt_input =
  Kino.Input.text("Prompt", default: "pig, medium quality")

negative_prompt_input = Kino.Input.text("Negative Prompt", default: "")

Kino.Layout.grid([prompt_input, negative_prompt_input])

Generate images

prompt = Kino.Input.read(prompt_input)
negative_prompt = Kino.Input.read(negative_prompt_input)

output =
  Nx.Serving.batched_run(StableDiffusion, %{prompt: prompt, negative_prompt: negative_prompt})

for result <- output.results do
  Kino.Image.new(result.image)
end
|> Kino.Layout.grid(columns: 2)
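
Because the serving was configured with a safety checker, each result should also carry an is_safe flag alongside the image. A minimal sketch that renders only the images the checker approved:

safe_images =
  for result <- output.results, result.is_safe do
    Kino.Image.new(result.image)
  end

Kino.Layout.grid(safe_images, columns: 2)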