
Resnet18 image classification

demo_resnet18/Resnet18.livemd

File.cd!(__DIR__)
# on Windows with a Japanese locale, switch the console code page to UTF-8:
# System.shell("chcp 65001")

Mix.install(
  [
    {:axon_interp, "~> 0.1.0"},
    {:cimg, "~> 0.1.14"},
    {:nx, "~> 0.4.0"},
    # EXLA is referenced by the :nx compiler option in the config below
    {:exla, "~> 0.4.0"},
    {:kino, "~> 0.7.0"}
  ],
  config: [
    nx: [default_defn_options: [compiler: EXLA]]
  ]
  # system_env: [{"NNINTERP", "Axon"}]
)

0.Original work

torchvision.models.resnet18 - a pre-trained model included in PyTorch.

Thanks a lot!!!


Implementation with AxonInterp in Elixir

1.Defining the inference module: ResNet18

defmodule Resnet18 do
  @width 224
  @height 224

  use AxonInterp,
    model: "./model/resnet18.onnx",
    url: "https://github.com/shoz-f/axon_interp/releases/download/0.0.1/resnet18.onnx",
    inputs: [f32: {1, 3, @height, @width}],
    outputs: [f32: {1, 1000}]

  # build an index => label map from the ImageNet-1000 class list
  @imagenet1000 (for item <- File.stream!("./model/imagenet1000.label") do
                   String.trim_trailing(item)
                 end)
                |> Enum.with_index(&{&2, &1})
                |> Enum.into(%{})

  def apply(img, top \\ 1) do
    # preprocess
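    # :gauss normalizes each channel as (x - mean) / std with the ImageNet
    # statistics scaled to 0..255; :nchw lays the pixels out channel-first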
    input0 =
      CImg.builder(img)
      |> CImg.resize({@width, @height})
      |> CImg.to_binary([{:gauss, {{123.7, 58.4}, {116.3, 57.1}, {103.5, 57.4}}}, :nchw])

    # prediction
    output0 =
      session()
      |> AxonInterp.set_input_tensor(0, input0, [:binary])
      |> AxonInterp.invoke()
      |> AxonInterp.get_output_tensor(0)
      |> Nx.squeeze()

    # postprocess
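    # softmax over the logits, then take the indices of the `top` largest
    # probabilities and translate them into class labels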
    then(Nx.exp(output0), fn exp -> Nx.divide(exp, Nx.sum(exp)) end)
    |> Nx.argsort(direction: :desc)
    |> Nx.slice([0], [top])
    |> Nx.to_flat_list()
    |> Enum.map(&@imagenet1000[&1])
  end
end
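
For reference, here is the same softmax / top-k post-processing that apply/2 performs, run on a tiny made-up tensor (a minimal sketch; the four logits below are arbitrary, a real ResNet18 output has 1000 of them):

# made-up logits standing in for the 1000-element model output
logits = Nx.tensor([2.0, 0.5, 1.0, 3.0])

# softmax: exponentiate and normalize into probabilities
probs = Nx.divide(Nx.exp(logits), Nx.sum(Nx.exp(logits)))

# indices of the two highest scores -> [3, 0] for the logits above
probs
|> Nx.argsort(direction: :desc)
|> Nx.slice([0], [2])
|> Nx.to_flat_list()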

Launch Resnet18.

# AxonInterp.stop(Resnet18)
Resnet18.start_link([])

Display the properties of the Resnet18 model.

AxonInterp.info(Resnet18)

2.Let’s try it

Load a photo and apply Resnet18 to it.

img = CImg.load("lion.jpg")
Kino.render(CImg.display_kino(img, :jpeg))

Resnet18.apply(img, 3)
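
To classify several photos in one go, a minimal sketch along the same lines (the extra file name is a placeholder; use any images that CImg can load):

# "tiger.jpg" is a placeholder file name - swap in your own photos
for path <- ["lion.jpg", "tiger.jpg"] do
  img = CImg.load(path)
  {path, Resnet18.apply(img, 3)}
end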

3.Appendix - How to convert a PyTorch model to Axon

STEP1.

Create an ONNX model from torchvision.models.resnet18.

mk_resnet18_onnx.py

import os
import torch
import torchvision.models as models

model = models.resnet18(weights='DEFAULT')
model.eval()

dummy_input = torch.randn(1, 3, 224, 224)

os.makedirs("data", exist_ok=True)
torch.onnx.export(model,
   dummy_input,
   "data/resnet18.onnx",
   verbose=False,
   input_names=["input.0"],
   output_names=["output.0"],
   export_params=True
   )
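
Running the script (for example with python mk_resnet18_onnx.py) should leave the exported model at data/resnet18.onnx, which the next step reads.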

STEP2.

Convert the ONNX model to an Axon graph and parameters.

mk_resnet18_axon.exs

Mix.install([
  {:axon_onnx, "~> 0.3.0"}
])

AxonOnnx.import("data/resnet18.onnx")
|> then(fn {model, params} -> Axon.serialize(model, params) end)
|> (&File.write("data/resnet18.axon", &1)).()
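
To load the converted model back in Elixir, the counterpart of Axon.serialize/2 can be used - a minimal sketch, assuming Axon ~> 0.3:

# Axon.deserialize/2 is the inverse of the Axon.serialize/2 call above
{model, params} =
  File.read!("data/resnet18.axon")
  |> Axon.deserialize()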