6yuru99 committed on
Commit
62eeaec
·
verified ·
1 Parent(s): d78018d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -38,7 +38,7 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
38
  # Load the tokenizer and model
39
  model_name = "6yuru99/medical-nllb-200-en2zh_hant"
40
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
41
- tokenizer = AutoTokenizer.from_pretrained(model_name, src_lang="eng_Latn")
42
 
43
  # Example Chinese medical text
44
  input_text = "患者有高血壓病史,需定期檢查血壓。"
@@ -47,7 +47,7 @@ input_text = "患者有高血壓病史,需定期檢查血壓。"
47
  inputs = tokenizer(input_text, return_tensors="pt")
48
 
49
  # Generate translation
50
- translated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("zho_Hant"))
51
 
52
  # Decode the translated text
53
  outputs = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
 
38
  # Load the tokenizer and model
39
  model_name = "6yuru99/medical-nllb-200-en2zh_hant"
40
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
41
+ tokenizer = AutoTokenizer.from_pretrained(model_name, src_lang="zho_Hant")
42
 
43
  # Example Chinese medical text
44
  input_text = "患者有高血壓病史,需定期檢查血壓。"
 
47
  inputs = tokenizer(input_text, return_tensors="pt")
48
 
49
  # Generate translation
50
+ translated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("eng_Latn"))
51
 
52
  # Decode the translated text
53
  outputs = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]