curiouscurrent committed
Commit 76bd158 · verified · 1 Parent(s): 4450af9

Update app.py

Files changed (1)
  1. app.py +29 -34
app.py CHANGED
@@ -1,40 +1,35 @@
- import requests
- import json
  import gradio as gr

- url="https://huggingface.co/curiouscurrent/omnicode"

- headers={
-
-     'Content-Type':'application/json'
- }
-
- history=[]

  def generate_response(prompt):
      history.append(prompt)
-     final_prompt="\n".join(history)
-
-     data={
-         "model":"curiouscurrent/omnicode",
-         "prompt":final_prompt,
-         "stream":False
-     }
-
-     response=requests.post(url,headers=headers,data=json.dumps(data))
-
-     if response.status_code==200:
-         response=response.text
-         data=json.loads(response)
-         actual_response=data['response']
-         return actual_response
-     else:
-         print("error:",response.text)
-
-
- interface=gr.Interface(
-     fn=generate_response,
-     inputs=gr.Textbox(lines=4,placeholder="Enter your Prompt"),
-     outputs="text"
- )
- interface.launch()
 
 
 
  import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer

+ # Load your text generation model from Hugging Face using its identifier
+ model_identifier = "curiouscurrent/omnicode"
+ model = AutoModelForCausalLM.from_pretrained(model_identifier)
+ tokenizer = AutoTokenizer.from_pretrained(model_identifier)

+ history = []

  def generate_response(prompt):
      history.append(prompt)
+     final_prompt = "\n".join(history)
+
+     # Tokenize input prompt
+     input_ids = tokenizer.encode(final_prompt, return_tensors="pt", max_length=512, truncation=True)
+
+     # Generate response
+     output_ids = model.generate(input_ids, max_length=100, num_return_sequences=1)
+     response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+     return response
+
+ # Create Gradio interface
+ input_prompt = gr.inputs.Textbox(lines=4, label="Input Prompt")
+ output_text = gr.outputs.Textbox(label="Response")
+
+ gr.Interface(
+     generate_response,
+     inputs=input_prompt,
+     outputs=output_text,
+     title="OmniCode",
+     description="Multi programming coding assistant",
+     theme="compact"
+ ).launch()
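
Note for anyone adapting this commit: the added interface code uses the older gr.inputs / gr.outputs component namespaces and the theme="compact" shortcut, which newer Gradio releases no longer provide. The sketch below is not part of the commit; it shows roughly the same app wired up with the current component API (gr.Textbox used for both input and output), keeping the generation logic from the committed app.py.

# Sketch only, assuming a recent Gradio release where gr.inputs / gr.outputs
# and theme="compact" were removed; generation logic mirrors the commit above.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

model_identifier = "curiouscurrent/omnicode"
model = AutoModelForCausalLM.from_pretrained(model_identifier)
tokenizer = AutoTokenizer.from_pretrained(model_identifier)
history = []

def generate_response(prompt):
    # Same behavior as the committed app.py: accumulate prompts and regenerate.
    history.append(prompt)
    final_prompt = "\n".join(history)
    input_ids = tokenizer.encode(final_prompt, return_tensors="pt", max_length=512, truncation=True)
    output_ids = model.generate(input_ids, max_length=100, num_return_sequences=1)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=4, label="Input Prompt"),   # replaces gr.inputs.Textbox
    outputs=gr.Textbox(label="Response"),               # replaces gr.outputs.Textbox
    title="OmniCode",
    description="Multi programming coding assistant",
).launch()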