pablo-rf commited on
Commit
d1cb9f9
1 Parent(s): 9ee29d8

[MOD] Change pipeline loading between Carballo models

Browse files
Files changed (2) hide show
  1. app.py +9 -15
  2. requirements.txt +1 -2
app.py CHANGED
@@ -56,30 +56,24 @@ fronted_theme = 'Soft'
56
 
57
# Model loading ---------------------------------------------------------
model_id_bloom = "proxectonos/Carballo-bloom-1.3B"
generator_model_bloom = pipeline("text-generation", model=model_id_bloom)
model_id_carvalho = "Nos-PT/Carvalho_pt-gl-1.3B"
generator_model_carvalho = pipeline("text-generation", model=model_id_carvalho)

# Quantized Carballo-Cerebras (least-used model) to save memory:
# load it in 4-bit NF4 with float16 compute.
model_id_cerebras = "proxectonos/Carballo-cerebras-1.3B"
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
model_quantizied = AutoModelForCausalLM.from_pretrained(
    model_id_cerebras, quantization_config=quantization_config
)
tokenizer = AutoTokenizer.from_pretrained(model_id_cerebras)
# BUG FIX: previously the pipeline was built from the model-id string,
# which re-downloaded/loaded a full-precision copy and left the quantized
# model unused. Build it from the quantized model object instead.
generator_model_cerebras = pipeline(
    "text-generation", model=model_quantizied, tokenizer=tokenizer
)
74
 
75
  # Generation functions ---------------------------------------------------------
76
def get_model(model_selection):
    """Map the UI model selection to its preloaded text-generation pipeline.

    Unknown selections fall back to the Portuguese/Galician Carvalho model,
    matching the original else-branch behavior.
    """
    dispatch = {
        "Carballo-bloom-1.3B": generator_model_bloom,
        "Carballo-cerebras-1.3B": generator_model_cerebras,
    }
    return dispatch.get(model_selection, generator_model_carvalho)
83
 
84
  def remove_empty_lines(text):
85
  lines = text.strip().split("\n")
 
56
 
57
# Model loading ---------------------------------------------------------
# Hugging Face Hub ids for the three supported checkpoints.
model_id_bloom = "proxectonos/Carballo-bloom-1.3B"
model_id_carvalho = "Nos-PT/Carvalho_pt-gl-1.3B"
model_id_cerebras = "proxectonos/Carballo-cerebras-1.3B"

# Only one Galician (gl) pipeline is kept resident at a time (bloom by
# default); the Portuguese (pt) pipeline is loaded once and kept.
generator_model_gl = pipeline("text-generation", model=model_id_bloom)
generator_model_pt = pipeline("text-generation", model=model_id_carvalho)
 
64
 
65
  # Generation functions ---------------------------------------------------------
66
def get_model(model_selection):
    """Return the pipeline for *model_selection*, swapping the resident
    Galician pipeline when a different Galician model is requested.

    BUG FIX: this function rebinds ``generator_model_gl``, which (without a
    ``global`` declaration) made the name function-local, so the read of
    ``generator_model_gl.model.name_or_path`` raised UnboundLocalError for
    every Galician selection. Declaring it global restores the intended
    lazy-swap behavior.
    """
    global generator_model_gl

    if model_selection == "Carballo-bloom-1.3B":
        # Reload only if a different Galician model is currently resident.
        if generator_model_gl.model.name_or_path != model_id_bloom:
            generator_model_gl = pipeline("text-generation", model=model_id_bloom)
        return generator_model_gl
    elif model_selection == "Carballo-cerebras-1.3B":
        if generator_model_gl.model.name_or_path != model_id_cerebras:
            generator_model_gl = pipeline("text-generation", model=model_id_cerebras)
        return generator_model_gl
    else:
        # Portuguese model is always resident.
        return generator_model_pt
77
 
78
  def remove_empty_lines(text):
79
  lines = text.strip().split("\n")
requirements.txt CHANGED
@@ -1,4 +1,3 @@
1
  transformers
2
  torch
3
- accelerate
4
- bitsandbytes
 
1
  transformers
2
  torch
3
+ accelerate