Thziin committed
Commit b72192d · verified · Parent: 6d0e654

Update app.py

Files changed (1)
  1. app.py +13 -43
app.py CHANGED
@@ -4,8 +4,10 @@ from huggingface_hub import InferenceClient
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("microsoft/orca-agentinstruct-1M-v1")
-#client = InferenceClient("microsoft/Orca-2-13b")
+#client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+#client = InferenceClient("meta-llama/Llama-3.2-1B-Instruct")
+#client = InferenceClient("microsoft/Phi-3.5-mini-instruct")
+client = InferenceClient("unsloth/Llama-3.2-1B-Instruct")


 def respond(
@@ -29,47 +31,15 @@ def respond(
     response = ""


-    try:
-        for message in client.chat_completion(
-            messages,
-            max_tokens=max_tokens,
-            stream=True,
-            temperature=temperature,
-            top_p=top_p,
-        ):
-            # Ensure the message has a valid structure
-            if not message or not isinstance(message, dict):
-                continue
-
-            try:
-                # Extract content and finish reason
-                content = message.choices[0].delta.content
-                finish_reason = message.choices[0].finish_reason
+    mensagens = client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+    )
+    response = mensagens.choices[0].message.content

-                # Check if the content is empty
-                if content.strip() == "":
-                    # If the finish reason is 'stop', it's expected and we can break the loop
-                    if finish_reason == "stop":
-                        print("Stream ended normally.")
-                        break
-                    else:
-                        print("Received unexpected empty content, skipping...")
-                        continue
-
-                response += content
-                yield response
-
-            except (AttributeError, IndexError, KeyError) as e:
-                print(f"Error processing message: {e}")
-                continue
-
-    except Exception as e:
-        print(f"Unexpected error: {e}")
-        yield "An error occurred while generating the response."
-
-    # Final check if the response is empty
-    if response.strip() == "":
-        yield "No response generated. Please try again or adjust the settings."
+    return response


 """
@@ -83,7 +53,7 @@ demo = gr.ChatInterface(
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
             minimum=0.1,
-            maximum=2.0,
+            maximum=1.0,
             value=0.95,
             step=0.05,
             label="Top-p (nucleus sampling)",