Ankit Yadav committed on
Commit
b47bd05
·
1 Parent(s): 2746e4c

Jarvis Model

Browse files
Files changed (1) hide show
  1. app.py +49 -49
app.py CHANGED
@@ -49,55 +49,55 @@ async def generate1(prompt):
49
  # await communicate.save(tmp_path)
50
  yield output
51
 
52
- client2 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
53
-
54
- system_instructions2 = "[SYSTEM] Answer as Real Jarvis JARVIS, Made by 'Tony Stark', Must answer in friendly style and Easy Manner. You can answer Complex Questions. Do not say who are you or Hi, Hello, Just Start answering. Stop, as answer ends. [USER]"
55
-
56
- async def generate2(prompt):
57
- generate_kwargs = dict(
58
- temperature=0.6,
59
- max_new_tokens=512,
60
- top_p=0.95,
61
- repetition_penalty=1,
62
- do_sample=True,
63
- )
64
- formatted_prompt = system_instructions2 + prompt + "[ASSISTANT]"
65
- stream = client2.text_generation(
66
- formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
67
- output = ""
68
- for response in stream:
69
- output += response.token.text
70
-
71
- # communicate = edge_tts.Communicate(output)
72
- # with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
73
- # tmp_path = tmp_file.name
74
- # await communicate.save(tmp_path)
75
- yield output
76
-
77
- client3 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
78
-
79
- system_instructions3 = "[SYSTEM]The text provided is a request for a specific type of response from me, the virtual assistant. The request asks me to provide detailed and friendly responses as if I am the character Jarvis, inspired by 'Tony Stark.' The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, my task is to generate responses that appear as if they are created by the user and not an AI-powered assistant.[USER]"
80
-
81
- async def generate3(prompt):
82
- generate_kwargs = dict(
83
- temperature=0.6,
84
- max_new_tokens=2048,
85
- top_p=0.95,
86
- repetition_penalty=1,
87
- do_sample=True,
88
- )
89
- formatted_prompt = system_instructions3 + prompt + "[ASSISTANT]"
90
- stream = client3.text_generation(
91
- formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
92
- output = ""
93
- for response in stream:
94
- output += response.token.text
95
-
96
- communicate = edge_tts.Communicate(output)
97
- with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
98
- tmp_path = tmp_file.name
99
- await communicate.save(tmp_path)
100
- yield tmp_path
101
 
102
  with gr.Blocks(css="style.css") as demo:
103
  gr.Markdown(DESCRIPTION)
 
49
  # await communicate.save(tmp_path)
50
  yield output
51
 
52
+ # client2 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
53
+ #
54
+ # system_instructions2 = "[SYSTEM] Answer as Real Jarvis JARVIS, Made by 'Tony Stark', Must answer in friendly style and Easy Manner. You can answer Complex Questions. Do not say who are you or Hi, Hello, Just Start answering. Stop, as answer ends. [USER]"
55
+ #
56
+ # async def generate2(prompt):
57
+ # generate_kwargs = dict(
58
+ # temperature=0.6,
59
+ # max_new_tokens=512,
60
+ # top_p=0.95,
61
+ # repetition_penalty=1,
62
+ # do_sample=True,
63
+ # )
64
+ # formatted_prompt = system_instructions2 + prompt + "[ASSISTANT]"
65
+ # stream = client2.text_generation(
66
+ # formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
67
+ # output = ""
68
+ # for response in stream:
69
+ # output += response.token.text
70
+ #
71
+ # # communicate = edge_tts.Communicate(output)
72
+ # # with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
73
+ # # tmp_path = tmp_file.name
74
+ # # await communicate.save(tmp_path)
75
+ # yield output
76
+ #
77
+ # client3 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
78
+ #
79
+ # system_instructions3 = "[SYSTEM]The text provided is a request for a specific type of response from me, the virtual assistant. The request asks me to provide detailed and friendly responses as if I am the character Jarvis, inspired by 'Tony Stark.' The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, my task is to generate responses that appear as if they are created by the user and not an AI-powered assistant.[USER]"
80
+ #
81
+ # async def generate3(prompt):
82
+ # generate_kwargs = dict(
83
+ # temperature=0.6,
84
+ # max_new_tokens=2048,
85
+ # top_p=0.95,
86
+ # repetition_penalty=1,
87
+ # do_sample=True,
88
+ # )
89
+ # formatted_prompt = system_instructions3 + prompt + "[ASSISTANT]"
90
+ # stream = client3.text_generation(
91
+ # formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
92
+ # output = ""
93
+ # for response in stream:
94
+ # output += response.token.text
95
+ #
96
+ # communicate = edge_tts.Communicate(output)
97
+ # with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
98
+ # tmp_path = tmp_file.name
99
+ # await communicate.save(tmp_path)
100
+ # yield tmp_path
101
 
102
  with gr.Blocks(css="style.css") as demo:
103
  gr.Markdown(DESCRIPTION)