Testing the LLM pipeline
app.py CHANGED
@@ -8,55 +8,55 @@ from tester import test
import transformers
from transformers import TFAutoModelForCausalLM, AutoTokenizer

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def perform_training(jammer_type, channel_switching_cost):
-
-
-
-
-def perform_testing(agent, jammer_type, channel_switching_cost):
-
-
-
-if __name__ == "__main__":
-
+st.subheader("Generating Insights of the DRL-Training")
+model_name = "tiiuae/falcon-7b-instruct"
+model = TFAutoModelForCausalLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
+                                 temperature=0.7)
+text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
+st.write(text)
+# def main():
+#
+# # st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
+# #
+# # st.sidebar.header("Make Your Environment Configuration")
+# # mode = st.sidebar.radio("Choose Mode", ["Auto", "Manual"])
+# #
+# # if mode == "Auto":
+# # jammer_type = "dynamic"
+# # channel_switching_cost = 0.1
+# # else:
+# # jammer_type = st.sidebar.selectbox("Select Jammer Type", ["constant", "sweeping", "random", "dynamic"])
+# # channel_switching_cost = st.sidebar.selectbox("Select Channel Switching Cost", [0, 0.05, 0.1, 0.15, 0.2])
+# #
+# # st.sidebar.subheader("Configuration:")
+# # st.sidebar.write(f"Jammer Type: {jammer_type}")
+# # st.sidebar.write(f"Channel Switching Cost: {channel_switching_cost}")
+# #
+# # start_button = st.sidebar.button('Start')
+# #
+# # if start_button:
+# # agent = perform_training(jammer_type, channel_switching_cost)
+# # st.subheader("Generating Insights of the DRL-Training")
+# # model_name = "tiiuae/falcon-7b-instruct"
+# # model = TFAutoModelForCausalLM.from_pretrained(model_name)
+# # tokenizer = AutoTokenizer.from_pretrained(model_name)
+# # pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
+# # temperature=0.7)
+# # text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
+# # st.write(text)
+# # test(agent, jammer_type, channel_switching_cost)
+#
+#
+# def perform_training(jammer_type, channel_switching_cost):
+# agent = train(jammer_type, channel_switching_cost)
+# return agent
+#
+#
+# def perform_testing(agent, jammer_type, channel_switching_cost):
+# test(agent, jammer_type, channel_switching_cost)
+#
+#
+# if __name__ == "__main__":
+# main()