Tonic committed on
Commit
45e595b
·
verified ·
1 Parent(s): 2688c47

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -20
app.py CHANGED
@@ -12,9 +12,7 @@ description = """
12
  You can use this Space to test out the current model [intfloat/e5-mistral-7b-instruct](https://huggingface.co/intfloat/e5-mistral-7b-instruct). e5mistral has a larger context window, a different prompting/return mechanism and generally better results than other embedding models.
13
  You can also use 🐣e5-mistral🛌🏻 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/e5?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
14
  Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [Poly](https://github.com/tonic-ai/poly) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
15
- You can use this space in **two ways !** either select an embeddings mode or 'None' to speak with the e5mistral LLM 🤗
16
  """
17
-
18
  os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:30'
19
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
20
 
@@ -49,18 +47,10 @@ def last_token_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tenso
49
 
50
  @spaces.GPU
51
  def compute_embeddings(selected_task, input_text, system_prompt):
52
- max_length = 2042 # Define max_length here
53
-
54
- if selected_task == "None":
55
- if system_prompt:
56
- processed_texts = [f'Instruct: {system_prompt}\nQuery: {input_text}']
57
- else:
58
- processed_texts = [f'Query: {input_text}']
59
- else:
60
- task_description = tasks[selected_task]
61
- processed_texts = [f'Instruct: {task_description}\nQuery: {input_text}']
62
 
63
- task = tasks[selected_task]
64
  batch_dict = tokenizer(processed_texts, max_length=max_length - 1, return_attention_mask=False, padding=False, truncation=True)
65
  batch_dict['input_ids'] = [input_ids + [tokenizer.eos_token_id] for input_ids in batch_dict['input_ids']]
66
  batch_dict = tokenizer.pad(batch_dict, padding=True, return_attention_mask=True, return_tensors='pt')
@@ -76,8 +66,8 @@ def app_interface():
76
  gr.Markdown(title)
77
  gr.Markdown(description)
78
 
79
- task_dropdown = gr.Dropdown(list(tasks.keys()) + ["None"], label="Select a Task (Optional)", value="None")
80
-
81
  input_text_box = gr.Textbox(label="📖Input Text")
82
  system_prompt_box = gr.Textbox(label="🤖System Prompt (Optional)")
83
 
@@ -93,11 +83,12 @@ def app_interface():
93
  compute_button
94
  output_display
95
 
96
- compute_button.click(
97
- fn=compute_embeddings,
98
- inputs=[task_dropdown, input_text_box, system_prompt_box],
99
- outputs=output_display
100
- )
 
101
 
102
  return demo
103
 
 
12
  You can use this Space to test out the current model [intfloat/e5-mistral-7b-instruct](https://huggingface.co/intfloat/e5-mistral-7b-instruct). e5mistral has a larger context window, a different prompting/return mechanism and generally better results than other embedding models.
13
  You can also use 🐣e5-mistral🛌🏻 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/e5?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
14
  Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [Poly](https://github.com/tonic-ai/poly) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
 
15
  """
 
16
  os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:30'
17
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
18
 
 
47
 
48
  @spaces.GPU
49
  def compute_embeddings(selected_task, input_text, system_prompt):
50
+ max_length = 2042
51
+ task_description = tasks[selected_task]
52
+ processed_texts = [f'Instruct: {task_description}\nQuery: {input_text}']
 
 
 
 
 
 
 
53
 
 
54
  batch_dict = tokenizer(processed_texts, max_length=max_length - 1, return_attention_mask=False, padding=False, truncation=True)
55
  batch_dict['input_ids'] = [input_ids + [tokenizer.eos_token_id] for input_ids in batch_dict['input_ids']]
56
  batch_dict = tokenizer.pad(batch_dict, padding=True, return_attention_mask=True, return_tensors='pt')
 
66
  gr.Markdown(title)
67
  gr.Markdown(description)
68
 
69
+ task_dropdown = gr.Dropdown(list(tasks.keys()), label="Select a Task", value=list(tasks.keys())[0])
70
+
71
  input_text_box = gr.Textbox(label="📖Input Text")
72
  system_prompt_box = gr.Textbox(label="🤖System Prompt (Optional)")
73
 
 
83
  compute_button
84
  output_display
85
 
86
+ compute_button.click(
87
+ fn=compute_embeddings,
88
+ inputs=[task_dropdown, input_text_box],
89
+ outputs=output_display
90
+ )
91
+
92
 
93
  return demo
94