Saboorhsn committed on
Commit c7cc986 · verified · 1 Parent(s): 5ab3939

Update app.py

Files changed (1)
  1. app.py +27 -65
app.py CHANGED
@@ -1,65 +1,27 @@
- from transformers import AutoTokenizer, AutoModelForCausalLM
- from datasets import load_dataset
- from transformers import TrainingArguments, Trainer
-
- # Load LLAMA3 8B model
- tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
- model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
-
- # Load datasets
- python_codes_dataset = load_dataset('flytech/python-codes-25k', split='train')
- streamlit_issues_dataset = load_dataset("andfanilo/streamlit-issues")
- streamlit_docs_dataset = load_dataset("sai-lohith/streamlit_docs")
-
- # Combine datasets
- combined_dataset = python_codes_dataset['text'] + streamlit_issues_dataset['text'] + streamlit_docs_dataset['text']
-
- # Define training arguments
- training_args = TrainingArguments(
-     per_device_train_batch_size=2,
-     num_train_epochs=3,
-     logging_dir='./logs',
-     output_dir='./output',
-     overwrite_output_dir=True,
-     report_to="none"  # Disable logging to avoid cluttering output
- )
-
- # Define training function
- def tokenize_function(examples):
-     return tokenizer(examples["text"])
-
- def group_texts(examples):
-     # Concatenate all texts.
-     concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
-     total_length = len(concatenated_examples[list(examples.keys())[0]])
-     # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can customize this part to your needs.
-     total_length = (total_length // tokenizer.max_len) * tokenizer.max_len
-     # Split by chunks of max_len.
-     result = {
-         k: [t[i : i + tokenizer.max_len] for i in range(0, total_length, tokenizer.max_len)]
-         for k, t in concatenated_examples.items()
-     }
-     return result
-
- # Tokenize dataset
- tokenized_datasets = combined_dataset.map(tokenize_function, batched=True, num_proc=4)
-
- # Group texts into chunks of max_len
- tokenized_datasets = tokenized_datasets.map(
-     group_texts,
-     batched=True,
-     num_proc=4,
- )
-
- # Train the model
- trainer = Trainer(
-     model=model,
-     args=training_args,
-     train_dataset=tokenized_datasets,
-     tokenizer=tokenizer,
- )
-
- trainer.train()
-
- # Save the trained model
- trainer.save_model("PyStreamlitGPT")
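For context, the removed training script would likely have failed before reaching `trainer.train()`: `tokenizer.max_len` no longer exists in current transformers releases (`model_max_length` is the attribute now), and concatenating the `'text'` columns produces a plain Python list, which has no `.map` method. A minimal sketch of the chunking step with the attribute fixed, assuming the same (gated) Llama 3 tokenizer and the standard fixed-block grouping pattern:

```python
# Sketch only: not the committed code. Assumes access to the gated Llama 3 repo.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
# model_max_length can be a very large sentinel value, so cap the block size.
block_size = min(tokenizer.model_max_length, 1024)

def group_texts(examples):
    # Concatenate every tokenized column, then split into fixed-size blocks,
    # dropping the tail that does not fill a whole block.
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = (len(concatenated[list(examples.keys())[0]]) // block_size) * block_size
    return {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated.items()
    }
```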
 
+ import streamlit as st
+ from datasets import load_dataset, concatenate_datasets
+
+ def load_and_combine_datasets():
+     # Load the datasets
+     python_codes_dataset = load_dataset('flytech/python-codes-25k', split='train')
+     streamlit_issues_dataset = load_dataset("andfanilo/streamlit-issues")
+     streamlit_docs_dataset = load_dataset("sai-lohith/streamlit_docs")
+
+     # Combine the datasets
+     combined_dataset = concatenate_datasets([python_codes_dataset, streamlit_issues_dataset, streamlit_docs_dataset])
+     return combined_dataset
+
+ def main():
+     st.title("Combined Dataset Viewer")
+
+     # Load and combine datasets
+     combined_dataset = load_and_combine_datasets()
+
+     # Display random sample from the combined dataset
+     random_sample = combined_dataset.shuffle(seed=42).select(range(10))
+
+     st.header("Random Sample from Combined Dataset")
+     st.write(random_sample)
+
+ if __name__ == "__main__":
+     main()
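A caveat on the new loader, kept verbatim above: calling `load_dataset` without a `split` returns a `DatasetDict`, while `concatenate_datasets` expects `Dataset` objects with matching features, so the combine step may raise if the three repos expose different schemas. A defensive sketch, assuming each repo has a `train` split and that collapsing everything to a single `text` column is acceptable (the column handling below is an assumption, not the committed code):

```python
from datasets import load_dataset, concatenate_datasets

def load_and_combine_datasets():
    # Split names are assumptions; adjust if a repo uses a different split.
    sources = [
        ("flytech/python-codes-25k", "train"),
        ("andfanilo/streamlit-issues", "train"),
        ("sai-lohith/streamlit_docs", "train"),
    ]
    parts = []
    for name, split in sources:
        ds = load_dataset(name, split=split)  # explicit split -> Dataset, not DatasetDict
        if "text" not in ds.column_names:
            # Fall back to joining the string columns into a single "text" field.
            str_cols = [c for c, f in ds.features.items() if getattr(f, "dtype", None) == "string"]
            ds = ds.map(lambda ex: {"text": " ".join(str(ex[c]) for c in str_cols)})
        # Keep only "text" so the three schemas match before concatenation.
        ds = ds.remove_columns([c for c in ds.column_names if c != "text"])
        parts.append(ds)
    return concatenate_datasets(parts)
```

With matching single-column schemas, `concatenate_datasets` returns one `Dataset`, and the `shuffle(...).select(range(10))` sample in `main()` can be displayed by Streamlit as before.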