pankajmathur committed on
Commit
7980a4c
·
verified ·
1 Parent(s): 35f6fcb

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -44,7 +44,7 @@ Below shows a code example on how to use this model in default half precision (b
44
  import torch
45
  from transformers import pipeline
46
 
47
- model_slug = "pankajmathur/orca_mini_v9_2_70B"
48
  pipeline = pipeline(
49
  "text-generation",
50
  model=model_slug,
@@ -64,7 +64,7 @@ Below shows a code example on how to use this model in 4-bit format via bitsandb
64
  import torch
65
  from transformers import BitsAndBytesConfig, pipeline
66
 
67
- model_slug = "pankajmathur/orca_mini_v9_2_70B"
68
  quantization_config = BitsAndBytesConfig(
69
  load_in_4bit=True,
70
  bnb_4bit_quant_type="nf4",
@@ -92,7 +92,7 @@ Below shows a code example on how to use this model in 8-bit format via bitsandb
92
  import torch
93
  from transformers import BitsAndBytesConfig, pipeline
94
 
95
- model_slug = "pankajmathur/orca_mini_v9_2_70B"
96
  quantization_config = BitsAndBytesConfig(
97
  load_in_8bit=True
98
  )
 
44
  import torch
45
  from transformers import pipeline
46
 
47
+ model_slug = "pankajmathur/orca_mini_v9_2_70b"
48
  pipeline = pipeline(
49
  "text-generation",
50
  model=model_slug,
 
64
  import torch
65
  from transformers import BitsAndBytesConfig, pipeline
66
 
67
+ model_slug = "pankajmathur/orca_mini_v9_2_70b"
68
  quantization_config = BitsAndBytesConfig(
69
  load_in_4bit=True,
70
  bnb_4bit_quant_type="nf4",
 
92
  import torch
93
  from transformers import BitsAndBytesConfig, pipeline
94
 
95
+ model_slug = "pankajmathur/orca_mini_v9_2_70b"
96
  quantization_config = BitsAndBytesConfig(
97
  load_in_8bit=True
98
  )