Update app.py
app.py
CHANGED
@@ -1,15 +1,7 @@
 import os
 import subprocess
 import streamlit as st
-from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, AutoConfig, AutoModel
-import black
-from pylint import lint
-from io import StringIO
-import sys
-from huggingface_hub import notebook_login
-
-notebook_login()
-hf_token = "YOUR_HF_TOKEN"
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, AutoConfig, AutoModel
 
 HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
 PROJECT_ROOT = "projects"
@@ -67,9 +59,6 @@ I am confident that I can leverage my expertise to assist you in developing and
 
     return summary, next_step
 
-def load_hf_token():
-    return hf_token
-
 def save_agent_to_file(agent):
     """Saves the agent's prompt to a file."""
     if not os.path.exists(AGENT_DIRECTORY):
@@ -104,9 +93,7 @@ def chat_interface_with_agent(input_text, agent_name):
     try:
         model = AutoModelForCausalLM.from_pretrained(model_name)
         tokenizer = AutoTokenizer.from_pretrained(model_name)
-        generator = pipeline("text-generation", model=model, tokenizer=tokenizer
-            framework="pt",
-            model_kwargs={'load_in_8bit': True, 'torch_dtype': torch.float16})
+        generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
     except EnvironmentError as e:
         return f"Error loading model: {e}"
 
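
Note: the removed pipeline() call could never have run as shown: it is missing a comma after tokenizer=tokenizer and references torch.float16 without torch being imported anywhere in this file. The replacement collapses it to a default pipeline. A minimal usage sketch of the new call, assuming model_name names a causal LM (the "gpt2" id and the prompt are illustrative, not values from this file):

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "gpt2"  # illustrative; app.py supplies its own model_name
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# The pipeline tokenizes, generates, and decodes in one call.
print(generator("Hello, agent!", max_new_tokens=20)[0]["generated_text"])
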
@@ -117,7 +104,7 @@ def chat_interface_with_agent(input_text, agent_name):
     if input_ids.shape[1] > max_input_length:
         input_ids = input_ids[:, :max_input_length]
 
-    outputs =
+    outputs = model.generate(
         input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True,
         pad_token_id=tokenizer.eos_token_id # Set pad_token_id to eos_token_id
     )
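
Note: the old line assigned outputs = with no callable in front of the argument list, another syntax error; the new line routes the truncated input_ids through model.generate. A sketch of the full round-trip as it plausibly runs after this change (the tokenization and decode steps and the max_input_length value are assumptions from the surrounding context, not shown in this diff):

# Assumes model, tokenizer, and input_text as in the hunks above.
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
max_input_length = 900  # illustrative; the real limit is defined elsewhere in app.py
if input_ids.shape[1] > max_input_length:
    input_ids = input_ids[:, :max_input_length]

outputs = model.generate(
    input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True,
    pad_token_id=tokenizer.eos_token_id  # avoids pad-token warnings for GPT-style models
)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)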
|