Update README.md
README.md (CHANGED)
@@ -48,65 +48,26 @@ llm = Llama.from_pretrained(
 ```
 For inference:
 ```
-def
-
-
-
-         num_beams=1,
-         top_k = 50,
-         top_p =0.9,repetition_penalty=1.,eos_token_id=2,verbatim=False,
-         exponential_decay_length_penalty_fac=None,add_special_tokens =True,
-         ):
-    inputs = tokenizer(text_input, add_special_tokens = add_special_tokens, return_tensors ='pt').to(device)
-
-    with torch.no_grad():
-
-        outputs = model.generate (input_ids = inputs["input_ids"],
-            attention_mask = inputs["attention_mask"] , # This is usually done automatically by the tokenizer
-            max_new_tokens=max_new_tokens,
-            temperature=temperature, #value used to modulate the next token probabilities.
-            num_beams=num_beams,
-            top_k = top_k,
-            top_p = top_p,
-            num_return_sequences = num_return_sequences,
-            eos_token_id=eos_token_id,
-            pad_token_id = eos_token_id,
-            do_sample =True,#skip_prompt=True,
-            repetition_penalty=repetition_penalty,
-            )
-
-    return tokenizer.batch_decode(outputs[:,inputs["input_ids"].shape[1]:].detach().cpu().numpy(), skip_special_tokens=True)
-
-def generate_BioMixtral (system_prompt='You a helpful assistant. You are familiar with materials science, especially biological and bioinspired materials. ',
-        prompt='What is spider silk in the context of bioinspired materials?',
-        repetition_penalty=1.,
-        top_p=0.9, top_k=256,
-        temperature=0.5, max_tokens=512, verbatim=False, eos_token=None,
-        prepend_response='',
-        ):
-
-    if eos_token==None:
-        eos_token= tokenizer.eos_token_id
+def generate_BioMixtral (system_prompt='You are an expert in biological materials, mechanics and related topics.', prompt="What is spider silk?",
+        temperature=0.0,
+        max_tokens=10000,
+        ):
 
     if system_prompt==None:
-        messages=[
+        messages=[
             {"role": "user", "content": prompt},
         ]
     else:
-        messages=[
-
-
+        messages=[
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": prompt},
         ]
-
-
-
-
-
-
-        top_p=top_p, top_k=top_k,
-        temperature=temperature,max_new_tokens=max_tokens, verbatim=verbatim,
-        )
-    return output_text[0]
+
+    result=llm.create_chat_completion(
+        messages=messages,
+        temperature=temperature,
+        max_tokens=max_tokens,
+    )
 
 start_time = time.time()
 result=generate_BioMixtral(system_prompt='You respond accurately.',
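For reference, a minimal end-to-end sketch of how the updated inference snippet could be run with llama-cpp-python. The repo_id and GGUF filename passed to Llama.from_pretrained are placeholders (the commit only shows that call in the hunk header), and the explicit return of the reply text inside generate_BioMixtral is an assumption added here: the committed snippet stops after the create_chat_completion call, which returns an OpenAI-style dict whose text sits under choices[0]["message"]["content"].

```
import time
from llama_cpp import Llama

# Placeholder repo id / filename -- substitute the values used earlier in the README.
# Llama.from_pretrained downloads from the Hugging Face Hub and needs huggingface_hub installed.
llm = Llama.from_pretrained(
    repo_id="lamm-mit/BioMixtral",      # assumption, not taken from this commit
    filename="BioMixtral-Q4_K_M.gguf",  # assumption: whichever GGUF quantization was downloaded
    verbose=False,
)

def generate_BioMixtral(system_prompt='You are an expert in biological materials, mechanics and related topics.',
                        prompt="What is spider silk?",
                        temperature=0.0,
                        max_tokens=10000,
                        ):
    # Build an OpenAI-style chat message list, with or without a system turn.
    if system_prompt is None:
        messages = [
            {"role": "user", "content": prompt},
        ]
    else:
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ]

    result = llm.create_chat_completion(
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    # Returning the reply text is an addition here; the committed snippet ends after the call.
    return result["choices"][0]["message"]["content"]

start_time = time.time()
result = generate_BioMixtral(system_prompt='You respond accurately.',
                             prompt="What is spider silk?")
print(result)
print(f"Inference took {time.time() - start_time:.1f} s")
```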