m3 committed
Commit 22fe4c5 · Parent: 43b5bf5

feat: stable releases

model.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:535fb5c7cda692ca2caad3c6d15260f7ebe398f7b16556d038b49e34ced0a3e2
+oid sha256:4e25f635133a806a543cc19b1fae70ae4ade6a30d2770a58fae8a69834b5428e
 size 235
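Only the LFS pointer's oid changes here; the payload size stays at 235 bytes. A quick local check that a downloaded model.onnx matches the new pointer (a sketch, not part of the commit; the local path is an assumption):

import hashlib

# The LFS oid is the SHA-256 of the actual file content, so a freshly
# downloaded model.onnx should hash to the value in the new pointer.
expected = '4e25f635133a806a543cc19b1fae70ae4ade6a30d2770a58fae8a69834b5428e'
with open('model.onnx', 'rb') as f:  # assumed local path to the smudged file
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest == expected)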
src/base_model.py DELETED
File without changes
src/create_model.py CHANGED
@@ -0,0 +1,57 @@
+from transformers import PretrainedConfig, PreTrainedModel
+import torch
+import torch.nn as nn
+
+
+class ONNXBaseConfig(PretrainedConfig):
+    model_type = "onnx-base"
+
+    def __init__(self, model_path=None, **kwargs):
+        self.model_path = model_path
+        super().__init__(**kwargs)
+
+
+model_directory = './new_model'
+
+config = ONNXBaseConfig(model_path='model.onnx')
+config.save_pretrained(save_directory=model_directory)
+
+
+class ONNXBaseModel(PreTrainedModel):
+    config_class = ONNXBaseConfig
+    def __init__(self, config):
+        super().__init__(config)
+        self.dummy_param = nn.Parameter(torch.zeros(0))
+
+    def forward(self, inputs):
+        return torch.zeros_like(inputs)
+
+    def save_pretrained(self, save_directory: str, **kwargs):
+        super().save_pretrained(save_directory=save_directory, **kwargs)
+        onnx_file_path = save_directory + '/model.onnx'
+        dummy_input = torch.tensor([[1, 2], [3, 4]], dtype=torch.float32)
+        torch.onnx.export(self, dummy_input, onnx_file_path,
+                          input_names=['input'], output_names=['output'],
+                          dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
+
+
+# Initialize model
+model = ONNXBaseModel(config)
+# Save model
+model.save_pretrained(save_directory=model_directory)
+
+model = model.from_pretrained(model_directory)
+
+# Test model
+dummy_input = torch.tensor([[1, 2], [3, 4]], dtype=torch.float32)
+output_tensor = model(dummy_input)
+print(output_tensor)
+
+# Test the onnx model
+onnx_file_path = model_directory + '/model.onnx'
+import onnx
+import onnxruntime as ort
+
+ort_session = ort.InferenceSession(onnx_file_path)
+outputs = ort_session.run(None, {'input': dummy_input.numpy()})
+print("Model output:", outputs)
src/demo.py CHANGED
@@ -7,14 +7,9 @@ pipe = pipeline(
     task='onnx-base',
     model='m3/onnx-base',
     batch_size=10,
-    device='cuda',
+    device='cpu',
 )
-
-dummy_input = torch.tensor([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]], dtype=torch.float32)
-input_data = dummy_input.numpy()
-result = pipe(
-    inputs=input_data, device='cuda',
-)
-
+input = torch.tensor([[1, 2], [3, 4]], dtype=torch.float32).numpy()
+result = pipe(input)
 print(result)
 
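The pipeline wrapper can also be bypassed for debugging by loading the model class directly; a sketch, assuming src/pipeline.py is importable and the files written by src/create_model.py are available under ./new_model (both assumptions, not part of the demo):

import torch
from pipeline import ONNXBaseModel  # class defined in src/pipeline.py

model = ONNXBaseModel.from_pretrained('./new_model')  # assumed local save directory
x = torch.tensor([[1, 2], [3, 4]], dtype=torch.float32).numpy()
print(model(x))  # forward() feeds the array to the ONNX session and returns its outputs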
src/init_model.py DELETED
@@ -1,34 +0,0 @@
-import torch
-import os
-import torch.nn as nn
-from pipeline import ONNXBaseConfig, ONNXBaseModel
-
-local_model_path = './custom_model'
-config = ONNXBaseConfig(model_path='model.onnx',
-                        id2label={0: 'label_0', 1: 'label_1'},
-                        label2id={0: 'label_1', 1: 'label_0'})
-model = ONNXBaseModel(config, base_path='./custom_mode')
-config.save_pretrained(local_model_path)
-# make sure have model_type
-import json
-config_path = local_model_path + '/config.json'
-with open(config_path, 'r') as f:
-    config_data = json.load(f)
-config_data['model_type'] = 'onnx-base'
-del config_data['transformers_version']
-with open(config_path, 'w') as f:
-    json.dump(config_data, f, indent=2)
-
-# save onnx
-dummy_input = torch.tensor([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]], dtype=torch.float32)
-onnx_file_path = './custom_model' + '/' + 'model.onnx'
-class ZeroModel(nn.Module):
-    def __init__(self):
-        super(ZeroModel, self).__init__()
-    def forward(self, x):
-        return torch.zeros_like(x)
-zero_model = ZeroModel()
-torch.onnx.export(zero_model, dummy_input, onnx_file_path,
-                  input_names=['input'], output_names=['output'],
-                  dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
-
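The deleted script edited config.json by hand to inject model_type. That step is unnecessary once the config class declares model_type, as src/create_model.py now does: PretrainedConfig serializes the class attribute automatically. For example:

from transformers import PretrainedConfig

class ONNXBaseConfig(PretrainedConfig):
    model_type = "onnx-base"

# model_type is written into config.json by save_pretrained, no manual JSON edit needed
print(ONNXBaseConfig().to_dict()['model_type'])  # -> 'onnx-base'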
src/init_onnx.py DELETED
@@ -1,37 +0,0 @@
-import torch
-import torch.nn as nn
-
-class BaseModel(nn.Module):
-    def __init__(self):
-        super(BaseModel, self).__init__()
-
-    def forward(self, x):
-        return torch.zeros_like(x)
-
-# create a model
-model = BaseModel()
-
-dummy_input = torch.tensor([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]], dtype=torch.float32)
-
-safetensors_file_path = "model.safetensors"
-
-from safetensors.torch import save_file
-save_file(model.state_dict(), 'model.safetensors')
-
-import torch.onnx
-onnx_file_path = "model.onnx"
-torch.onnx.export(model, dummy_input, onnx_file_path,
-                  input_names=['input'], output_names=['output'],
-                  dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
-
-print(f"Model has been exported to {onnx_file_path}")
-
-import onnx
-import onnxruntime as ort
-onnx_model = onnx.load(onnx_file_path)
-onnx.checker.check_model(onnx_model)
-ort_session = ort.InferenceSession(onnx_file_path)
-input_data = dummy_input.numpy()
-outputs = ort_session.run(None, {'input': input_data})
-print("Model output:", outputs)
-
src/pipeline.py CHANGED
@@ -26,6 +26,14 @@ class ONNXBaseModel(PreTrainedModel):
         outs = self.session.run(None, {'input': input})
         return outs
 
+    def save_pretrained(self, save_directory: str, **kwargs):
+        super().save_pretrained(save_directory=save_directory, **kwargs)
+        onnx_file_path = save_directory + '/model.onnx'
+        dummy_input = torch.tensor([[1, 2], [3, 4]], dtype=torch.float32)
+        torch.onnx.export(self, dummy_input, onnx_file_path,
+                          input_names=['input'], output_names=['output'],
+                          dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
+
     @classmethod
     def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
         config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
@@ -45,6 +53,7 @@ class ONNXBaseModel(PreTrainedModel):
         device = 'cuda' if torch.cuda.is_available() else 'cpu'
         return torch.device(device)
 
+
 AutoModel.register(ONNXBaseConfig, ONNXBaseModel)
 
 # 2. register Pipeline
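The "# 2. register Pipeline" step falls outside this hunk. A sketch of what such a registration typically looks like with transformers' PIPELINE_REGISTRY; the ONNXBasePipeline class and its method bodies are assumptions, since only the 'onnx-base' task id and the AutoModel registration appear in the diff:

from transformers import AutoModel, Pipeline
from transformers.pipelines import PIPELINE_REGISTRY

class ONNXBasePipeline(Pipeline):  # assumed name, not taken from the diff
    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, inputs):
        return inputs  # pass the numpy input straight through

    def _forward(self, model_inputs):
        return self.model(model_inputs)  # ONNXBaseModel.forward runs the ONNX session

    def postprocess(self, model_outputs):
        return model_outputs

PIPELINE_REGISTRY.register_pipeline(
    'onnx-base',
    pipeline_class=ONNXBasePipeline,
    pt_model=AutoModel,
)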