kenyano committed
Commit 2535bc8 · verified · 1 Parent(s): 5be0fb8

Update README.md

Files changed (1)
  1. README.md +51 -1
README.md CHANGED
@@ -235,4 +235,54 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
 
  ## Model Card Contact
 
- [More Information Needed]
+ ```python
+ from vllm import LLM, SamplingParams
+ 
+ model_name = "kenyano/ELAINE_medLLM"
+ 
+ # Number of GPUs to use for tensor parallelism.
+ vllm_parallel = 1
+ 
+ # The same medical question in Japanese, English, and Chinese.
+ questions_ja = [
+     "尿酸値の値はどこまでが正常値ですか?",  # "Up to what value is uric acid considered normal?"
+ ]
+ 
+ questions_en = [
+     "What is the normal level of uric acid?",
+ ]
+ 
+ questions_zh = [
+     "尿酸的正常水平是多少?",  # "What is the normal level of uric acid?"
+ ]
+ 
+ llm = LLM(model=model_name,
+           trust_remote_code=True,
+           tensor_parallel_size=vllm_parallel,
+           dtype="half",
+           max_model_len=8192)
+ 
+ sampling_params = SamplingParams(temperature=0.2, top_p=0.8, max_tokens=200, min_tokens=50)
+ 
+ def generate(questions):
+     # Format each question with the Human:/Assistant: prompt template.
+     prompts = [f"Human: \n{question}\n\nAssistant: \n" for question in questions]
+     outputs = llm.generate(prompts, sampling_params)
+ 
+     for output in outputs:
+         prompt = output.prompt
+         generated_text = output.outputs[0].text
+ 
+         print("-" * 5, "prompt", "-" * 5)
+         print(f'{prompt}')
+         print("-" * 5, "generated", "-" * 5)
+         print(f'{generated_text}\n')
+ 
+ generate(questions_ja)
+ generate(questions_en)
+ generate(questions_zh)
+ ```
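+ 
+ The vLLM example above assumes `vllm` is installed and a CUDA-capable GPU is available; raise `vllm_parallel` to shard the model across multiple GPUs. If vLLM is not available, a minimal, untested sketch using the Hugging Face `transformers` library is given below. It assumes the checkpoint loads with the standard `AutoModelForCausalLM` / `AutoTokenizer` classes and reuses the same `Human:` / `Assistant:` prompt template and sampling settings.
+ 
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ 
+ model_name = "kenyano/ELAINE_medLLM"
+ 
+ # Assumption: the checkpoint is compatible with the standard auto classes.
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name, torch_dtype=torch.float16, device_map="auto"
+ )
+ 
+ # Same prompt template as in the vLLM example above.
+ prompt = "Human: \nWhat is the normal level of uric acid?\n\nAssistant: \n"
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ 
+ output_ids = model.generate(
+     **inputs, max_new_tokens=200, do_sample=True, temperature=0.2, top_p=0.8
+ )
+ # Decode only the newly generated tokens.
+ print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
+ ```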