Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -11,17 +11,21 @@ from urllib.parse import urlparse
 
 import pandas as pd
 from selenium import webdriver
-from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.chrome.service import Service
+
 from selenium.webdriver.chrome.options import Options
 from selenium.webdriver.common.by import By
-from selenium.webdriver.support.ui
+from selenium.webdriver.support.ui
+import WebDriverWait
 from selenium.webdriver.support import expected_conditions as EC
 from selenium.common.exceptions import (
-    TimeoutException,
+    TimeoutException,
+
     NoSuchElementException,
     StaleElementReferenceException,
 )
-from webdriver_manager.chrome import ChromeDriverManager
+from webdriver_manager.chrome import ChromeDriverManager
+
 from transformers import AutoTokenizer, OpenLlamaForCausalLM, pipeline
 import gradio as gr
 import xml.etree.ElementTree as ET
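Note: even after this commit, the WebDriverWait import is still split across two lines ("from selenium.webdriver.support.ui" on one line, "import WebDriverWait" on the next), which is a SyntaxError in Python and by itself enough to keep the Space in the Runtime error state. A minimal sketch of what the import and driver setup presumably intend; the headless flag and the driver construction are assumptions, not the committed code:

# Sketch of the intended imports and driver setup (assumed, not committed):
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait  # one line, not two
from webdriver_manager.chrome import ChromeDriverManager

options = Options()
options.add_argument("--headless=new")  # assumption: a Space has no display
driver = webdriver.Chrome(
    service=Service(ChromeDriverManager().install()),
    options=options,
)
wait = WebDriverWait(driver, timeout=10)  # pairs with expected_conditions (EC)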
@@ -31,6 +35,22 @@ from mysql.connector import errorcode, pooling
 from dotenv import load_dotenv
 from huggingface_hub import login
 
+model_name = "openlm-research/open_llama_3b_v2"
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, legacy=False)
+model = OpenLlamaForCausalLM.from_pretrained(model_name)
+
+# Determine the maximum supported length for the model
+max_supported_length = 8000  # You might need to adjust this
+
+openllama_pipeline = pipeline(
+    "GenerationMixin",
+    model=model,
+    tokenizer=tokenizer,
+    truncation=True,
+    max_length=max_supported_length,
+    # ... other parameters
+)
+
 model_name = "openlm-research/open_llama_3b_v2"  # Or another OpenLlama variant
 tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, legacy=False)
 model = OpenLlamaForCausalLM.from_pretrained(model_name)
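Note: pipeline("GenerationMixin", ...) will fail, because "GenerationMixin" is a class name, not a pipeline task; text generation uses the "text-generation" task. Two further hedged observations: openlm-research checkpoints use the standard LLaMA architecture, so AutoModelForCausalLM is the usual loader (transformers' OpenLlamaForCausalLM targets the separate s-JoL/Open-Llama architecture and was later deprecated), and max_supported_length = 8000 exceeds the 2048-token context that open_llama_3b_v2 was trained with. A sketch of the block this hunk appears to be aiming for:

# Working equivalent of the added block, under the assumptions above:
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "openlm-research/open_llama_3b_v2"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, legacy=False)
model = AutoModelForCausalLM.from_pretrained(model_name)

openllama_pipeline = pipeline(
    "text-generation",  # valid task name; "GenerationMixin" is not
    model=model,
    tokenizer=tokenizer,
    truncation=True,
    max_length=2048,  # assumed trained context window of open_llama_3b_v2
)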
@@ -39,7 +59,7 @@ openllama_pipeline = pipeline(
     "GenerationMixin",
     model=model,
     tokenizer=tokenizer,
-    truncation=True,
+    truncation=True,
     max_length=max_tokens,  # Assuming max_tokens is your max_length variable
     # ... other parameters
 )
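Note: after this commit the file defines the model, tokenizer, and pipeline twice, and this second pipeline call still references max_tokens, which is not defined anywhere in the shown hunks, so it would raise a NameError at import time even once the syntax error above is fixed. Keeping only one pipeline definition and calling it along these lines should work; the prompt below is purely illustrative:

# Hypothetical usage once a single pipeline definition is kept:
result = openllama_pipeline("Q: What does WebDriverWait do?\nA:", max_new_tokens=64)
print(result[0]["generated_text"])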