imperialwool committed on
Commit
c13a85e
·
1 Parent(s): 968018c

Update gradio_app.py

Browse files
Files changed (1) hide show
  1. gradio_app.py +19 -19
gradio_app.py CHANGED
@@ -5,16 +5,16 @@ import gradio as gr
5
  import psutil
6
 
7
  # Initing things
8
- print("! DOWNLOADING TOKENIZER AND SETTING ALL UP !")
9
- translator_tokenizer = M2M100Tokenizer.from_pretrained( # tokenizer for translator
10
- "facebook/m2m100_418M", cache_dir="translator/"
11
- )
12
- print("! DOWNLOADING MODEL AND SETTING ALL UP !")
13
- translator_model = M2M100ForConditionalGeneration.from_pretrained( # translator model
14
- "facebook/m2m100_418M", cache_dir="translator/"
15
- )
16
- print("! SETTING MODEL IN EVALUATION MODE !")
17
- translator_model.eval()
18
  print("! INITING LLAMA MODEL !")
19
  llm = Llama(model_path="./model.bin") # LLaMa model
20
  llama_model_name = "TheBloke/Llama-2-13B-chat-GGUF"
@@ -56,15 +56,15 @@ def generate_answer(request: str, max_tokens: int = 256, language: str = "en", c
56
  text = output["choices"][0]["text"]
57
  # i allowed only certain languages (its not discrimination, its just other popular language on my opinion!!!):
58
  # russian (ru), ukranian (uk), chinese (zh)
59
- if language in ["ru", "uk", "zh"]:
60
- encoded_input = translator_tokenizer(output, return_tensors="pt")
61
- generated_tokens = translator_model.generate(
62
- **encoded_input, forced_bos_token_id=translator_tokenizer.get_lang_id(language)
63
- )
64
- translated_text = translator_tokenizer.batch_decode(
65
- generated_tokens, skip_special_tokens=True
66
- )[0]
67
- return translated_text
68
  return text
69
  except Exception as e:
70
  print(e)
 
5
  import psutil
6
 
7
  # Initing things
8
+ #print("! DOWNLOADING TOKENIZER AND SETTING ALL UP !")
9
+ #translator_tokenizer = M2M100Tokenizer.from_pretrained( # tokenizer for translator
10
+ # "facebook/m2m100_418M", cache_dir="translator/"
11
+ #)
12
+ #print("! DOWNLOADING MODEL AND SETTING ALL UP !")
13
+ #translator_model = M2M100ForConditionalGeneration.from_pretrained( # translator model
14
+ # "facebook/m2m100_418M", cache_dir="translator/"
15
+ #)
16
+ #print("! SETTING MODEL IN EVALUATION MODE !")
17
+ #translator_model.eval()
18
  print("! INITING LLAMA MODEL !")
19
  llm = Llama(model_path="./model.bin") # LLaMa model
20
  llama_model_name = "TheBloke/Llama-2-13B-chat-GGUF"
 
56
  text = output["choices"][0]["text"]
57
  # i allowed only certain languages (its not discrimination, its just other popular language on my opinion!!!):
58
  # russian (ru), ukranian (uk), chinese (zh)
59
+ #if language in ["ru", "uk", "zh"]:
60
+ #encoded_input = translator_tokenizer(output, return_tensors="pt")
61
+ #generated_tokens = translator_model.generate(
62
+ # **encoded_input, forced_bos_token_id=translator_tokenizer.get_lang_id(language)
63
+ #)
64
+ #translated_text = translator_tokenizer.batch_decode(
65
+ # generated_tokens, skip_special_tokens=True
66
+ #)[0]
67
+ #return translated_text
68
  return text
69
  except Exception as e:
70
  print(e)