Update app.py
app.py CHANGED
@@ -361,17 +361,17 @@ from transformers import GPTNeoForCausalLM, GPT2Tokenizer
 def generate_project_proposal(prompt):    # Generate the proposal
     default_proposal = "Hyper-local Sustainability Projects would lead to Longevity of the self and Prosperity of the community. Therefore UNSDGs coupled with Longevity initiatives should be focused upon."
 
-    model_Name = "EleutherAI/gpt-neo-2.7B"
-    tempareCHUR = uniform(0.3,0.6)
+    # model_Name = "EleutherAI/gpt-neo-2.7B"
+    # tempareCHUR = uniform(0.3,0.6)
 
-
-
+    model_Name = "EleutherAI/gpt-neo-1.3B"
+    tempareCHUR = uniform(0.5,0.8)
 
     consoleMessage_and_Print(f"Trying to access {model_Name} model. The Prompt is: \n{prompt}")
 
     model = GPTNeoForCausalLM.from_pretrained(model_Name)
     tokenizer = GPT2Tokenizer.from_pretrained(model_Name)
-    model_max_token_limit =
+    model_max_token_limit = 2000 #2048 #1500
 
     try:
         # input_ids = tokenizer.encode(prompt, return_tensors="pt")
@@ -770,7 +770,7 @@ def process_excel(file):
 
 
     example_files = []
-    example_files.append('#TaxDirection (Responses)_BasicExample.xlsx')
+    # example_files.append('#TaxDirection (Responses)_BasicExample.xlsx')
     example_files.append('#TaxDirection (Responses)_IntermediateExample.xlsx')
     # example_files.append('#TaxDirection (Responses)_UltimateExample.xlsx')
 
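
For context, this commit swaps the generation backend from EleutherAI/gpt-neo-2.7B to the smaller EleutherAI/gpt-neo-1.3B, raises the per-call sampling temperature from uniform(0.3, 0.6) to uniform(0.5, 0.8), sets model_max_token_limit to 2000, and drops the basic example workbook from process_excel's example list. The diff only shows the setup lines of generate_project_proposal, so the following is a minimal, self-contained sketch of how those settings might be wired into a generate call; the generate() arguments and the fallback-to-default_proposal path are assumptions, not the app's actual code.

# Minimal sketch, assuming the new settings feed a standard transformers generate() call.
from random import uniform
from transformers import GPTNeoForCausalLM, GPT2Tokenizer

def generate_project_proposal_sketch(prompt: str) -> str:
    default_proposal = "Hyper-local Sustainability Projects would lead to Longevity of the self and Prosperity of the community. Therefore UNSDGs coupled with Longevity initiatives should be focused upon."

    model_Name = "EleutherAI/gpt-neo-1.3B"   # smaller checkpoint chosen in this commit
    tempareCHUR = uniform(0.5, 0.8)          # new per-call sampling temperature range
    model_max_token_limit = 2000             # cap on prompt plus generated tokens

    model = GPTNeoForCausalLM.from_pretrained(model_Name)
    tokenizer = GPT2Tokenizer.from_pretrained(model_Name)

    try:
        # Tokenize the prompt and sample a continuation up to the token limit.
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        output_ids = model.generate(
            input_ids,
            max_length=model_max_token_limit,   # assumption: limit applied via max_length
            do_sample=True,
            temperature=tempareCHUR,
            pad_token_id=tokenizer.eos_token_id,
        )
        return tokenizer.decode(output_ids[0], skip_special_tokens=True)
    except Exception:
        # Assumed fallback: return the canned proposal if loading or generation fails.
        return default_proposal

The try block and default_proposal in the diff suggest a fail-soft design, where any model or generation error still yields a usable proposal; the explicit except clause above is an illustrative assumption about how that fallback is handled.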