alakxender committed on
Commit
ededc7b
·
1 Parent(s): 951a9a6
Files changed (2) hide show
  1. .gitignore +1 -1
  2. app.py +7 -5
.gitignore CHANGED
@@ -1 +1 @@
1
- hf_cache
 
1
+ .hf_cache
app.py CHANGED
@@ -5,16 +5,18 @@ from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, pipeli
5
  import gradio as gr
6
 
7
  # Load Goldfish model for Dhivehi
8
- language = 'Dhivehi'
 
9
 
10
  # Load model
11
- goldfish_model = 'goldfish-models/div_thaa_full'
12
- config = AutoConfig.from_pretrained(goldfish_model)
13
- tokenizer = AutoTokenizer.from_pretrained(goldfish_model)
14
- model = AutoModelForCausalLM.from_pretrained(goldfish_model, config=config)
15
  if torch.cuda.is_available():
16
  model = model.cuda() # Load onto GPU
17
 
 
18
  # Create text generation pipeline
19
  text_generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
20
 
 
5
  import gradio as gr
6
 
7
  # Load Goldfish model for Dhivehi
8
+ model_name = 'div_thaa_full'
9
+ HF_CACHE = '.hf_cache'
10
 
11
  # Load model
12
+ goldfish_model = 'goldfish-models/' + model_name
13
+ config = AutoConfig.from_pretrained(goldfish_model, cache_dir=HF_CACHE)
14
+ tokenizer = AutoTokenizer.from_pretrained(goldfish_model, cache_dir=HF_CACHE)
15
+ model = AutoModelForCausalLM.from_pretrained(goldfish_model, config=config, cache_dir=HF_CACHE)
16
  if torch.cuda.is_available():
17
  model = model.cuda() # Load onto GPU
18
 
19
+
20
  # Create text generation pipeline
21
  text_generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
22