Testing the LLM pipeline

- app.py +39 -25
- transformers_setup.py +0 -11
app.py CHANGED

```diff
@@ -5,34 +5,48 @@ import streamlit as st
 import os
 from trainer import train
 from tester import test
-
+import transformers
+from transformers import TFAutoModelForCausalLM, AutoTokenizer
 
 
 def main():
-    st.
-    … [the remaining 23 removed lines of the old main() body are truncated in the source view]
+    st.subheader("Generating Insights of the DRL-Training")
+    model_name = "tiiuae/falcon-7b-instruct"
+    model = TFAutoModelForCausalLM.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
+                                     temperature=0.7)
+    text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
+    st.write(text)
+    # st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
+    #
+    # st.sidebar.header("Make Your Environment Configuration")
+    # mode = st.sidebar.radio("Choose Mode", ["Auto", "Manual"])
+    #
+    # if mode == "Auto":
+    #     jammer_type = "dynamic"
+    #     channel_switching_cost = 0.1
+    # else:
+    #     jammer_type = st.sidebar.selectbox("Select Jammer Type", ["constant", "sweeping", "random", "dynamic"])
+    #     channel_switching_cost = st.sidebar.selectbox("Select Channel Switching Cost", [0, 0.05, 0.1, 0.15, 0.2])
+    #
+    # st.sidebar.subheader("Configuration:")
+    # st.sidebar.write(f"Jammer Type: {jammer_type}")
+    # st.sidebar.write(f"Channel Switching Cost: {channel_switching_cost}")
+    #
+    # start_button = st.sidebar.button('Start')
+    #
+    # if start_button:
+    #     agent = perform_training(jammer_type, channel_switching_cost)
+    #     st.subheader("Generating Insights of the DRL-Training")
+    #     model_name = "tiiuae/falcon-7b-instruct"
+    #     model = TFAutoModelForCausalLM.from_pretrained(model_name)
+    #     tokenizer = AutoTokenizer.from_pretrained(model_name)
+    #     pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
+    #                                      temperature=0.7)
+    #     text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
+    #     st.write(text)
+    #     test(agent, jammer_type, channel_switching_cost)
 
 
 def perform_training(jammer_type, channel_switching_cost):
```
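Two details of the added block are worth noting. A transformers text-generation pipeline returns a list of dicts of the form `[{"generated_text": "..."}]`, so `st.write(text)` renders that raw structure rather than the generated string, and `temperature` only takes effect when sampling is enabled. It may also be worth checking that the checkpoint loads through the TF auto class at all, since tiiuae/falcon-7b-instruct ships PyTorch weights. A minimal sketch of the same call with the output unpacked; the `do_sample=True` flag and the variable names are assumptions, not part of this commit:

```python
# Sketch only (not in this commit): the pipeline call with its output unpacked.
import streamlit as st
import transformers
from transformers import TFAutoModelForCausalLM, AutoTokenizer

model_name = "tiiuae/falcon-7b-instruct"
model = TFAutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
pipe = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=100,
    do_sample=True,   # assumption: enables sampling so temperature=0.7 applies
    temperature=0.7,  # ignored under the default greedy decoding
)

outputs = pipe("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
st.write(outputs[0]["generated_text"])  # show the string, not the list of dicts
```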
transformers_setup.py DELETED

```diff
@@ -1,11 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-
-import transformers
-from transformers import TFAutoModelForCausalLM, AutoTokenizer
-
-model_name = "tiiuae/falcon-7b-instruct"
-model = TFAutoModelForCausalLM.from_pretrained(model_name)
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100, temperature=0.7)
```
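With transformers_setup.py deleted, the model is now constructed inside `main()`, which Streamlit re-executes on every user interaction, so the 7B checkpoint would be reloaded on each rerun. A possible refinement, sketched under the assumption that the app runs on Streamlit >= 1.18 (the `load_pipeline` helper name is made up here), is to cache the loader:

```python
# Sketch only (not in this commit): cache the model/pipeline so Streamlit's
# rerun-on-interaction loop doesn't reload the checkpoint every time.
# st.cache_resource keeps one shared instance per process.
import streamlit as st
import transformers
from transformers import TFAutoModelForCausalLM, AutoTokenizer

@st.cache_resource
def load_pipeline(model_name: str = "tiiuae/falcon-7b-instruct"):
    model = TFAutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return transformers.pipeline(
        "text-generation", model=model, tokenizer=tokenizer,
        max_length=100, temperature=0.7,
    )

def main():
    st.subheader("Generating Insights of the DRL-Training")
    pipe = load_pipeline()  # loaded once, reused across reruns
    text = pipe("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
    st.write(text)
```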