Update app.py
app.py
CHANGED
@@ -44,7 +44,8 @@ PROMPT_DICT = {
         "Instruction:\n{instruction}\n\nResponse:"
     ),
 }
-
+model = None
+tokenizer = None
 def generate_prompt(instruction, input=None):
     if input:
         return PROMPT_DICT["prompt_input"].format(instruction=instruction,input=input)
@@ -68,7 +69,6 @@ def generator(input_ids, generation_config, max_new_tokens):
     return generation_output
 
 def loadModel():
-    global model, tokenizer
     if model is None:
         from llama_rope_scaled_monkey_patch import replace_llama_rope_with_scaled_rope
         replace_llama_rope_with_scaled_rope()