curiouscurrent committed on
Commit 4450af9 · verified · 1 Parent(s): 31ccb9f

Update app.py

Files changed (1)
  1. app.py +39 -29
app.py CHANGED
@@ -1,30 +1,40 @@
 
 
+ import requests
+ import json
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
-
- # Load your text generation model from Hugging Face using its identifier
- model_identifier = "curiouscurrent/omnicode"
- model = AutoModelForCausalLM.from_pretrained(model_identifier)
- tokenizer = AutoTokenizer.from_pretrained(model_identifier)
-
- def generate_response(input_prompt):
-     # Tokenize input prompt
-     input_ids = tokenizer.encode(input_prompt, return_tensors="pt", max_length=512, truncation=True)
-
-     # Generate response
-     output_ids = model.generate(input_ids, max_length=100, num_return_sequences=1)
-     response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
-
-     return response
-
- # Create Gradio interface
- input_prompt = gr.inputs.Textbox(lines=5, label="Input Prompt")
- output_text = gr.outputs.Textbox(label="Response")
-
- gr.Interface(
-     generate_response,
-     inputs=input_prompt,
-     outputs=output_text,
-     title="OmniCode",
-     description="Multi programming coding assistant",
-     theme="compact"
- ).launch()
+
+ url="https://huggingface.co/curiouscurrent/omnicode"
+
+ headers={
+
+     'Content-Type':'application/json'
+ }
+
+ history=[]
+
+ def generate_response(prompt):
+     history.append(prompt)
+     final_prompt="\n".join(history)
+
+     data={
+         "model":"curiouscurrent/omnicode",
+         "prompt":final_prompt,
+         "stream":False
+     }
+
+     response=requests.post(url,headers=headers,data=json.dumps(data))
+
+     if response.status_code==200:
+         response=response.text
+         data=json.loads(response)
+         actual_response=data['response']
+         return actual_response
+     else:
+         print("error:",response.text)
+
+
+ interface=gr.Interface(
+     fn=generate_response,
+     inputs=gr.Textbox(lines=4,placeholder="Enter your Prompt"),
+     outputs="text"
+ )
+ interface.launch()
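
For context: the JSON payload the new code builds ("model"/"prompt"/"stream") and the "response" field it reads back follow the schema of Ollama's /api/generate endpoint, while the URL it actually posts to is the model's Hugging Face page, which is unlikely to return that JSON. Below is a minimal sketch of the same Gradio wrapper pointed at a locally running Ollama server; the localhost URL, the "omnicode" model tag, the request timeout, and the error handling are illustrative assumptions, not part of this commit.

import json

import requests
import gradio as gr

# Assumed local Ollama endpoint; adjust the model tag to whatever `ollama list` reports.
OLLAMA_URL = "http://localhost:11434/api/generate"
HEADERS = {"Content-Type": "application/json"}
MODEL_TAG = "omnicode"  # hypothetical tag for the OmniCode model

history = []

def generate_response(prompt):
    # Keep a simple running transcript so earlier turns stay in the prompt.
    history.append(prompt)
    final_prompt = "\n".join(history)

    payload = {
        "model": MODEL_TAG,
        "prompt": final_prompt,
        "stream": False,  # request a single JSON object instead of a token stream
    }
    resp = requests.post(OLLAMA_URL, headers=HEADERS, data=json.dumps(payload), timeout=120)

    if resp.status_code == 200:
        return json.loads(resp.text)["response"]
    # Surface the failure in the UI instead of returning None.
    return f"Error {resp.status_code}: {resp.text}"

interface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=4, placeholder="Enter your prompt"),
    outputs="text",
    title="OmniCode",
    description="Multi-programming-language coding assistant",
)

if __name__ == "__main__":
    interface.launch()

Returning the error text (rather than printing it and falling through to None) means a failed request shows up directly in the Gradio output box.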