Spaces: Runtime error

Commit 00f2fc9 · EC2 Default User committed · 1 parent: 64bec8f
added a note on SSL access to app.py

Files changed:
- __pycache__/InstructionTextGenerationPipeline.cpython-37.pyc  +0 -0
- app.py  +6 -61
- nohup.out  +56 -0
__pycache__/InstructionTextGenerationPipeline.cpython-37.pyc CHANGED
Binary files a/__pycache__/InstructionTextGenerationPipeline.cpython-37.pyc and b/__pycache__/InstructionTextGenerationPipeline.cpython-37.pyc differ
app.py CHANGED
@@ -32,66 +32,6 @@ PROMPT_FOR_GENERATION_FORMAT = """{intro}
 )
 
 
-#class InstructionTextGenerationPipeline:
-#    def __init__(
-#        self,
-#        model_name,
-#        torch_dtype=torch.bfloat16,
-#        trust_remote_code=True,
-#        use_auth_token=None,
-#    ) -> None:
-#        self.model = AutoModelForCausalLM.from_pretrained(
-#            model_name,
-#            torch_dtype=torch_dtype,
-#            trust_remote_code=trust_remote_code,
-#            use_auth_token=use_auth_token,
-#        )
-#
-#        tokenizer = AutoTokenizer.from_pretrained(
-#            model_name,
-#            trust_remote_code=trust_remote_code,
-#            use_auth_token=use_auth_token,
-#        )
-#        if tokenizer.pad_token_id is None:
-#            warnings.warn(
-#                "pad_token_id is not set for the tokenizer. Using eos_token_id as pad_token_id."
-#            )
-#            tokenizer.pad_token = tokenizer.eos_token
-#        tokenizer.padding_side = "left"
-#        self.tokenizer = tokenizer
-#
-#        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-#        self.model.eval()
-#        self.model.to(device=device, dtype=torch_dtype)
-#
-#        self.generate_kwargs = {
-#            "temperature": 0.5,
-#            "top_p": 0.92,
-#            "top_k": 0,
-#            "max_new_tokens": 512,
-#            "use_cache": True,
-#            "do_sample": True,
-#            "eos_token_id": self.tokenizer.eos_token_id,
-#            "pad_token_id": self.tokenizer.pad_token_id,
-#            "repetition_penalty": 1.1,  # 1.0 means no penalty, > 1.0 means penalty, 1.2 from CTRL paper
-#        }
-#
-#    def format_instruction(self, instruction):
-#        return PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
-#
-#    def __call__(
-#        self, instruction: str, **generate_kwargs: Dict[str, Any]
-#    ) -> Tuple[str, str, float]:
-#        s = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
-#        input_ids = self.tokenizer(s, return_tensors="pt").input_ids
-#        input_ids = input_ids.to(self.model.device)
-#        gkw = {**self.generate_kwargs, **generate_kwargs}
-#        with torch.no_grad():
-#            output_ids = self.model.generate(input_ids, **gkw)
-#            # Slice the output_ids tensor to get only new tokens
-#        new_tokens = output_ids[0, len(input_ids[0]) :]
-#        output_text = self.tokenizer.decode(new_tokens, skip_special_tokens=True)
-#        return output_text
 ##
 from InstructionTextGenerationPipeline import *
 from timeit import default_timer as timer
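The commented-out implementation removed above now lives behind the wildcard import that follows it. For orientation, a minimal usage sketch of that pipeline class, assuming its constructor matches the removed comments; the model id is taken from the nohup.out traceback below, and the prompt string is made up for illustration:

import torch
from InstructionTextGenerationPipeline import InstructionTextGenerationPipeline

# Sketch only: instantiate the pipeline the same way the removed comment block did.
pipeline = InstructionTextGenerationPipeline(
    "mosaicml/mpt-7b-instruct",   # assumed model id (see the nohup.out traceback)
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
# __call__ formats the instruction with PROMPT_FOR_GENERATION_FORMAT and returns the decoded new tokens.
print(pipeline("Summarize what this Space does in one sentence."))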
@@ -200,7 +140,7 @@ demo = gr.Interface(
         gr.Slider(256, 3072,value=1024, step=256, label="Tokens" ),
         gr.Slider(0.0, 1.0, value=0.1, step=0.1, label='temperature:'),
         gr.Slider(0, 1, value=0, step=1, label='top_k:'),
-        gr.Slider(0.0, 1.0, value=0.
+        gr.Slider(0.0, 1.0, value=0.05, step=0.05, label='top_p:')
 
     ],
     outputs=["text"],
@@ -209,3 +149,8 @@ demo.launch(share=True,
     server_name="0.0.0.0",
     server_port=7860
 )
+# Note on how we can run on SSL
+# See: https://github.com/gradio-app/gradio/issues/563
+# a = gr.Interface(lambda x:x, "image", "image", examples=["lion.jpg"]).launch(
+#     share=False, ssl_keyfile="key.pem", ssl_certfile="cert.pem")
+# seems like we need an appropriate NON SELF SIGNED cert that the customer will accept on their net
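The new comments only sketch the SSL option. A hedged example of what an HTTPS launch of this demo might look like, assuming a CA-signed key.pem/cert.pem pair is available on the instance (the file names and the stand-in Interface are placeholders, not part of this commit):

import gradio as gr

# Stand-in for the real Interface defined earlier in app.py.
demo = gr.Interface(lambda x: x, "text", "text")

# Sketch only: serve over HTTPS instead of the share tunnel. key.pem / cert.pem are
# assumed paths to a certificate the client network will trust (per the note above,
# a self-signed certificate is likely not acceptable).
demo.launch(
    share=False,
    server_name="0.0.0.0",
    server_port=7860,
    ssl_keyfile="key.pem",
    ssl_certfile="cert.pem",
)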
nohup.out ADDED
@@ -0,0 +1,56 @@
+sys.path : ['/home/ec2-user/Gradio/MPT7BTest', '/usr/lib64/python37.zip', '/usr/lib64/python3.7', '/usr/lib64/python3.7/lib-dynload', '/home/ec2-user/.local/lib/python3.7/site-packages', '/usr/lib64/python3.7/site-packages', '/usr/lib/python3.7/site-packages', '/home/ec2-user/workspace/Notebooks/lib']
+
+Cell imports done
+
+Cell start generate
+You are using config.init_device='cpu', but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.
+Traceback (most recent call last):
+  File "app.py", line 68, in <module>
+    trust_remote_code=True,
+  File "/home/ec2-user/Gradio/MPT7BTest/InstructionTextGenerationPipeline.py", line 44, in __init__
+    use_auth_token=use_auth_token,
+  File "/home/ec2-user/.local/lib/python3.7/site-packages/transformers/models/auto/auto_factory.py", line 463, in from_pretrained
+    pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+  File "/home/ec2-user/.local/lib/python3.7/site-packages/transformers/modeling_utils.py", line 2611, in from_pretrained
+    model = cls(config, *model_args, **model_kwargs)
+  File "/home/ec2-user/.cache/huggingface/modules/transformers_modules/mosaicml/mpt-7b-instruct/ff5f3989b2585668930aeba501e83dfb306fc78d/modeling_mpt.py", line 222, in __init__
+    self.transformer = MPTModel(config)
+  File "/home/ec2-user/.cache/huggingface/modules/transformers_modules/mosaicml/mpt-7b-instruct/ff5f3989b2585668930aeba501e83dfb306fc78d/modeling_mpt.py", line 55, in __init__
+    self.apply(self.param_init_fn)
+  File "/home/ec2-user/.local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 728, in apply
+    module.apply(fn)
+  File "/home/ec2-user/.local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 728, in apply
+    module.apply(fn)
+  File "/home/ec2-user/.local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 728, in apply
+    module.apply(fn)
+  [Previous line repeated 1 more time]
+  File "/home/ec2-user/.local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 729, in apply
+    fn(self)
+  File "/home/ec2-user/.cache/huggingface/modules/transformers_modules/mosaicml/mpt-7b-instruct/ff5f3989b2585668930aeba501e83dfb306fc78d/modeling_mpt.py", line 208, in param_init_fn
+    MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)
+  File "/home/ec2-user/.cache/huggingface/modules/transformers_modules/mosaicml/mpt-7b-instruct/ff5f3989b2585668930aeba501e83dfb306fc78d/param_init_fns.py", line 167, in kaiming_normal_param_init_fn_
+    generic_param_init_fn_(module=module, init_fn_=kaiming_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
+  File "/home/ec2-user/.cache/huggingface/modules/transformers_modules/mosaicml/mpt-7b-instruct/ff5f3989b2585668930aeba501e83dfb306fc78d/param_init_fns.py", line 51, in generic_param_init_fn_
+    init_fn_(module.weight)
+  File "/home/ec2-user/.local/lib/python3.7/site-packages/torch/nn/init.py", line 451, in kaiming_normal_
+    return tensor.normal_(0, std)
+KeyboardInterrupt
+sys.path : ['/home/ec2-user/Gradio/MPT7BTest', '/usr/lib64/python37.zip', '/usr/lib64/python3.7', '/usr/lib64/python3.7/lib-dynload', '/home/ec2-user/.local/lib/python3.7/site-packages', '/usr/lib64/python3.7/site-packages', '/usr/lib/python3.7/site-packages', '/home/ec2-user/workspace/Notebooks/lib']
+
+Cell imports done
+
+Cell start generate
+
+
+During handling of the above exception, another exception occurred:
+
+Traceback (most recent call last):
+  File "app.py", line 68, in <module>
+    trust_remote_code=True,
+  File "/home/ec2-user/Gradio/MPT7BTest/InstructionTextGenerationPipeline.py", line 44, in __init__
+    use_auth_token=use_auth_token,
+  File "/home/ec2-user/.local/lib/python3.7/site-packages/transformers/models/auto/auto_factory.py", line 463, in from_pretrained
+    pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+  File "/home/ec2-user/.local/lib/python3.7/site-packages/transformers/modeling_utils.py", line 2611, in from_pretrained
+    model = cls(config, *model_args, **model_kwargs)
+KeyboardInterrupt
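The log shows the runs being interrupted while MPT-7B's weights were still being randomly initialized on the CPU (the kaiming_normal_ calls in param_init_fns.py), and the warning mentions config.init_device. A hedged sketch of loading the model with an explicit init_device to avoid the slow CPU path, assuming a GPU is present; the device string and dtype are assumptions, not part of this commit:

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

name = "mosaicml/mpt-7b-instruct"

# Sketch only: the MPT remote code reads config.init_device when building the model,
# so initializing directly on the GPU (when available) avoids the CPU init seen above.
config = AutoConfig.from_pretrained(name, trust_remote_code=True)
config.init_device = "cuda:0" if torch.cuda.is_available() else "cpu"  # assumed device choice

model = AutoModelForCausalLM.from_pretrained(
    name,
    config=config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(name, trust_remote_code=True)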