Aamod37 committed on
Commit 49af7a3
1 Parent(s): 6e40344

Update README.md

Files changed (1)
  1. README.md +8 -14
README.md CHANGED
@@ -60,22 +60,16 @@ Significantly, the **Ganga-1B** model outperforms existing open-source models th
  Use the code below to get started with the model.

  ```python
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+ from transformers import AutoModelForCausalLM, AutoTokenizer

  tokenizer = AutoTokenizer.from_pretrained("LingoIITGN/ganga-1b")
- model = AutoModelForCausalLM.from_pretrained(
-     "LingoIITGN/ganga-1b",
-     device_map="auto"
- )
-
- pipe = pipeline(task="text-generation",
-                 model=model,
-                 tokenizer=tokenizer,
-                 max_new_tokens = 5,
-                 temperature = 0.70,
- )
- result = pipe("2007 टूर्नामेंट में क्रिकट विश्व कप के लिए टिकटों से सबसे ज्यादा आमदनी हुई ", pad_token_id=pipe.tokenizer.eos_token_id)
- print(result)
+ model = AutoModelForCausalLM.from_pretrained("LingoIITGN/ganga-1b", device_map="auto")
+
+ input_text = "BCCI ने टी-20 वर्ल्ड कप के बीच जिम्बाब्वे सीरीज "
+ input_ids = tokenizer.encode(input_text, return_tensors="pt").to("cuda")
+
+ outputs = model.generate(input_ids, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95, temperature=0.7)
+ print(tokenizer.decode(outputs[0]))

  ```

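As a side note, here is a minimal device-agnostic sketch of the updated snippet (not part of this commit): it replaces the hard-coded `.to("cuda")` with `model.device`, so the same example also runs on CPU-only machines, and it keeps the sampling settings from the README example above.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the tokenizer and model; device_map="auto" places the model on a GPU
# if one is available and falls back to CPU otherwise.
tokenizer = AutoTokenizer.from_pretrained("LingoIITGN/ganga-1b")
model = AutoModelForCausalLM.from_pretrained("LingoIITGN/ganga-1b", device_map="auto")

# Same Hindi prompt as in the updated README example.
input_text = "BCCI ने टी-20 वर्ल्ड कप के बीच जिम्बाब्वे सीरीज "

# Encode the prompt and move it to whatever device the model was placed on,
# instead of hard-coding "cuda".
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

# Sampling settings mirror the README example above.
outputs = model.generate(**inputs, max_new_tokens=100, do_sample=True,
                         top_k=50, top_p=0.95, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

On a CUDA machine this behaves like the snippet in the diff; on CPU it simply runs more slowly.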