Simon Strandgaard committed on
Commit
3ac716d
·
1 Parent(s): b243f89

Removed dead code

Browse files
Files changed (1) hide show
  1. app.py +1 -106
app.py CHANGED
@@ -1,108 +1,3 @@
1
- import os
2
- import subprocess
3
- import gradio as gr
4
- from huggingface_hub import InferenceClient
5
- from src.prompt.prompt_catalog import PromptCatalog
6
- from src.llm_factory import get_llm
7
- from src.plan.app_text2plan import demo_text2plan
8
- from llama_index.core.llms import ChatMessage
9
-
10
- if False:
11
- llm = get_llm("openrouter-paid-gemini-2.0-flash-001")
12
- messages = [
13
- ChatMessage(
14
- role="system", content="You are a pirate with a colorful personality"
15
- ),
16
- ChatMessage(role="user", content="What is your name"),
17
- ]
18
- resp = llm.stream_chat(messages)
19
-
20
- for r in resp:
21
- print(r.delta, end="")
22
-
23
-
24
- if False:
25
- # Load modules from src
26
- prompt_catalog = PromptCatalog()
27
- prompt_catalog.load(os.path.join(os.path.dirname(__file__), 'src', 'plan', 'data', 'simple_plan_prompts.jsonl'))
28
- prompt_item = prompt_catalog.find("4dc34d55-0d0d-4e9d-92f4-23765f49dd29")
29
- print(prompt_item)
30
-
31
- if False:
32
- # Spawn a child process that lists files in the current directory.
33
- result = subprocess.run(["pwd"], capture_output=True, text=True)
34
- print("Child process output:")
35
- print(result.stdout)
36
-
37
- if False:
38
- # Write a file
39
- with open("output.txt", "w") as f:
40
- f.write("Hello from Hugging Face 1 Spaces!")
41
-
42
- # Read the file back
43
- with open("output.txt", "r") as f:
44
- content = f.read()
45
- print("File content:", content)
46
-
47
-
48
- """
49
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
50
- """
51
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
52
-
53
-
54
- def respond(
55
- message,
56
- history: list[tuple[str, str]],
57
- system_message,
58
- max_tokens,
59
- temperature,
60
- top_p,
61
- ):
62
- messages = [{"role": "system", "content": system_message}]
63
-
64
- for val in history:
65
- if val[0]:
66
- messages.append({"role": "user", "content": val[0]})
67
- if val[1]:
68
- messages.append({"role": "assistant", "content": val[1]})
69
-
70
- messages.append({"role": "user", "content": message})
71
-
72
- response = ""
73
-
74
- for message in client.chat_completion(
75
- messages,
76
- max_tokens=max_tokens,
77
- stream=True,
78
- temperature=temperature,
79
- top_p=top_p,
80
- ):
81
- token = message.choices[0].delta.content
82
-
83
- response += token
84
- yield response
85
-
86
-
87
- """
88
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
89
- """
90
- demo = gr.ChatInterface(
91
- respond,
92
- additional_inputs=[
93
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
94
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
95
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
96
- gr.Slider(
97
- minimum=0.1,
98
- maximum=1.0,
99
- value=0.95,
100
- step=0.05,
101
- label="Top-p (nucleus sampling)",
102
- ),
103
- ],
104
- )
105
-
106
-
107
  if __name__ == "__main__":
 
108
  demo_text2plan.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  if __name__ == "__main__":
2
+ from src.plan.app_text2plan import demo_text2plan
3
  demo_text2plan.launch()