Stevross committed
Commit: d7c24b8 · Parent: 9410bfe

Update README.md

Files changed (1): README.md (+6 -6)
README.md CHANGED

@@ -8,7 +8,7 @@ tags:
 - large language model
 - PAIX.Cloud
 inference: true
-thumbnail: https://h2o.ai/etc.clientlibs/h2o/clientlibs/clientlib-site/resources/images/favicon.ico
+thumbnail: https://static.wixstatic.com/media/bdee4e_8aa5cefc86024bc88f7e20e3e19d9ff3~mv2.png/v1/fill/w_192%2Ch_192%2Clg_1%2Cusm_0.66_1.00_0.01/bdee4e_8aa5cefc86024bc88f7e20e3e19d9ff3~mv2.png
 ---
 # Model Card
 ## Summary
@@ -38,7 +38,7 @@ import torch
 from transformers import pipeline
 
 generate_text = pipeline(
-    model="Stevross/Astrid-1B-1",
+    model="Stevross/Astrid-1B",
     torch_dtype="auto",
     trust_remote_code=True,
     use_fast=True,
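The hunk above catches only the opening of the `pipeline(...)` call; the rest of the call sits outside the diff context. For reference, a minimal end-to-end sketch with the renamed repo id; the `device_map` line and the generation arguments are assumptions based on the usual H2O LLM Studio card template, not part of this commit:

```python
from transformers import pipeline

# Build the text-generation pipeline against the renamed repo id.
# trust_remote_code=True is needed because the card ships a custom
# pipeline implementation (h2oai_pipeline.py) alongside the weights.
generate_text = pipeline(
    model="Stevross/Astrid-1B",
    torch_dtype="auto",
    trust_remote_code=True,
    use_fast=True,
    device_map={"": "cuda:0"},  # assumption: a single CUDA device
)

# Illustrative generation arguments, not taken from the diff.
res = generate_text("Why is drinking water so healthy?", max_new_tokens=256, do_sample=False)
print(res[0]["generated_text"])
```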
@@ -76,13 +76,13 @@ from h2oai_pipeline import H2OTextGenerationPipeline
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained(
-    "Stevross/Astrid-1B-1",
+    "Stevross/Astrid-1B",
     use_fast=True,
     padding_side="left",
     trust_remote_code=True,
 )
 model = AutoModelForCausalLM.from_pretrained(
-    "Stevross/Astrid-1B-1",
+    "Stevross/Astrid-1B",
     torch_dtype="auto",
     device_map={"": "cuda:0"},
     trust_remote_code=True,
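Both `from_pretrained` calls above now point at `Stevross/Astrid-1B`. Once loaded, the README wraps the pair in `H2OTextGenerationPipeline` (imported in the hunk header); as a sketch that avoids the custom wrapper, plain `model.generate` works too, using the prompt format shown in the `@@ -108` hunk below:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "Stevross/Astrid-1B", use_fast=True, padding_side="left", trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    "Stevross/Astrid-1B", torch_dtype="auto", device_map={"": "cuda:0"}, trust_remote_code=True
)

# Prompt format taken from the @@ -108 hunk of this same diff.
prompt = "<|prompt|>How are you?<|endoftext|><|answer|>"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)

# Drop the prompt tokens before decoding so only the answer remains.
answer = tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(answer)
```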
@@ -108,7 +108,7 @@ You may also construct the pipeline from the loaded model and tokenizer yourself
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_name = "Stevross/Astrid-1B-1"  # either local folder or huggingface model name
+model_name = "Stevross/Astrid-1B"  # either local folder or huggingface model name
 # Important: The prompt needs to be in the same format the model was trained with.
 # You can find an example prompt in the experiment logs.
 prompt = "<|prompt|>How are you?<|endoftext|><|answer|>"
@@ -182,7 +182,7 @@ This model was trained using H2O LLM Studio and with the configuration in [cfg.y
 Model validation results using [EleutherAI lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness).
 
 ```bash
-CUDA_VISIBLE_DEVICES=0 python main.py --model hf-causal-experimental --model_args pretrained=Stevross/Astrid-1B-1 --tasks openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq --device cuda &> eval.log
+CUDA_VISIBLE_DEVICES=0 python main.py --model hf-causal-experimental --model_args pretrained=Stevross/Astrid-1B --tasks openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq --device cuda &> eval.log
 ```
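The same rename applies to the evaluation command. For anyone scripting the benchmark instead of shelling out, a sketch of the programmatic route; it assumes an lm-evaluation-harness checkout of that era where `lm_eval.evaluator.simple_evaluate` and the `hf-causal-experimental` adapter are available:

```python
from lm_eval import evaluator

# Programmatic equivalent of the CLI call above (assumed API: an
# lm-evaluation-harness version exposing evaluator.simple_evaluate).
results = evaluator.simple_evaluate(
    model="hf-causal-experimental",
    model_args="pretrained=Stevross/Astrid-1B",
    tasks=["openbookqa", "arc_easy", "winogrande", "hellaswag",
           "arc_challenge", "piqa", "boolq"],
    device="cuda",
)
print(results["results"])
```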