xzuyn committed
Commit ffe6c58 · verified · 1 Parent(s): 24c02e9

Update app.py

Files changed (1):
  1. app.py +3 -6

app.py CHANGED
@@ -18,8 +18,7 @@ def tokenize(input_text):
     codeqwen_tokens = len(codeqwen_tokenizer(input_text, add_special_tokens=True)["input_ids"])
     rwkv4_tokens = len(rwkv4_tokenizer(input_text, add_special_tokens=True)["input_ids"])
     rwkv5_tokens = len(rwkv5_tokenizer(input_text, add_special_tokens=True)["input_ids"])
-    deepseekllm_tokens = len(deepseekllm_tokenizer(input_text, add_special_tokens=True)["input_ids"])
-    deepseekv2_tokens = len(deepseekv2_tokenizer(input_text, add_special_tokens=True)["input_ids"])
+    deepseek_tokens = len(deepseek_tokenizer(input_text, add_special_tokens=True)["input_ids"])
 
     results = {
         "LLaMa-1/LLaMa-2": llama_tokens,
@@ -37,8 +36,7 @@ def tokenize(input_text):
         "CodeQwen": codeqwen_tokens,
         "RWKV-v4": rwkv4_tokens,
         "RWKV-v5/RWKV-v6": rwkv5_tokens,
-        "DeepSeek-LLM": deepseekllm_tokens,
-        "DeepSeek-V2": deepseekv2_tokens
+        "DeepSeek": deepseek_tokens
     }
 
     # Sort the results in descending order based on token length
@@ -63,8 +61,7 @@ if __name__ == "__main__":
     codeqwen_tokenizer = AutoTokenizer.from_pretrained("Qwen/CodeQwen1.5-7B")
     rwkv4_tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-4-14b-pile", trust_remote_code=True)
     rwkv5_tokenizer = AutoTokenizer.from_pretrained("RWKV/v5-EagleX-v2-7B-HF", trust_remote_code=True)
-    deepseekllm_tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-llm-7b-base", trust_remote_code=True)
-    deepseekv2_tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V2", trust_remote_code=True)
+    deepseek_tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V2", trust_remote_code=True)
 
     iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(label="Input Text", lines=17), outputs="text")
     iface.launch()
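
For reference, the net effect of this commit is that a single tokenizer loaded from deepseek-ai/DeepSeek-V2 replaces the separate DeepSeek-LLM and DeepSeek-V2 entries. Below is a minimal standalone sketch of the same counting pattern: the model ID, the trust_remote_code flag, and the add_special_tokens=True call are taken from the diff, while the sample text and the sort line are only illustrative (the Space's actual sorting code is not shown in these hunks).

from transformers import AutoTokenizer

if __name__ == "__main__":
    # Same model ID and flags as the updated app.py; loading requires network
    # access, and trust_remote_code=True because the repo ships custom tokenizer code.
    deepseek_tokenizer = AutoTokenizer.from_pretrained(
        "deepseek-ai/DeepSeek-V2", trust_remote_code=True
    )

    input_text = "Hello, world!"  # illustrative sample, not from the Space

    # Count tokens the same way tokenize() does: encode the text and take the
    # length of input_ids, including special tokens.
    deepseek_tokens = len(
        deepseek_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    )

    # One way to realize the "sort in descending order" step named in the
    # diff's context comment, assuming a results dict like the one in app.py.
    results = {"DeepSeek": deepseek_tokens}
    for name, count in sorted(results.items(), key=lambda kv: kv[1], reverse=True):
        print(f"{name}: {count} tokens")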