Powered by AppSignal & Oban Pro
Would you like to see your link here? Contact us

Benchmark for Nx

colab/bench.livemd

Benchmark for Nx

# Dependencies for the benchmark: Benchee plus every Nx backend under test.
deps = [
  {:benchee, "~> 1.3"},
  {:nx, "~> 0.9"},
  {:exla, "~> 0.9"},
  {:torchx, "~> 0.9"},
  {:evision, "~> 0.2"},
  {:kino, "~> 0.14"}
]

# Build-time environment so EXLA, Torchx and Evision compile with CUDA support.
cuda_env = [
  {"XLA_TARGET", "cuda12"},
  {"EXLA_TARGET", "cuda"},
  {"LIBTORCH_TARGET", "cu121"},
  {"EVISION_ENABLE_CUDA", "true"},
  {"EVISION_ENABLE_CONTRIB", "true"},
  {"EVISION_CUDA_VERSION", "12"},
  {"EVISION_CUDNN_VERSION", "9"}
]

Mix.install(deps, system_env: cuda_env)

Generate a tensor on each backend

# Warm up every backend once: allocate a small tensor on it and run one add,
# so lazy initialization (JIT compilation, device setup) happens before benchmarking.
warmup_backends = [
  Nx.BinaryBackend,
  {EXLA.Backend, device_id: 0},
  {Torchx.Backend, device: :cuda},
  {Torchx.Backend, device: :cpu},
  Evision.Backend
]

for backend <- warmup_backends do
  tensor = Nx.tensor([1.0, 2.0, 3.0], backend: backend)
  Nx.add(tensor, tensor)
end

Benchmark

# Benchmark body: materialize a 200x200 f64 iota tensor on the given backend
# and perform a single elementwise add.
bench = fn backend ->
  t = Nx.iota({200, 200}, type: :f64, backend: backend)
  Nx.add(t, t)
end

# One Benchee job per backend under comparison.
jobs = %{
  "binary" => fn -> bench.(Nx.BinaryBackend) end,
  "exla" => fn -> bench.({EXLA.Backend, device_id: 0}) end,
  "torchx_cpu" => fn -> bench.({Torchx.Backend, device: :cpu}) end,
  "torchx" => fn -> bench.({Torchx.Backend, device: :cuda}) end,
  "evision" => fn -> bench.(Evision.Backend) end
}

Benchee.run(jobs)