File size: 1,805 Bytes
ab02957
71a012f
 
 
 
ab02957
e1500b4
f3e7ebc
c53942e
01e87d5
c53942e
 
 
01e87d5
ebf1864
01e87d5
 
 
 
 
 
 
c53942e
f3e7ebc
 
 
01e87d5
e1500b4
c53942e
 
 
 
 
 
f3e7ebc
ca624af
 
71a012f
ab02957
71a012f
 
 
 
 
 
 
 
 
ab02957
71a012f
 
ca624af
c7598d9
e468ee0
9fc0643
ab02957
836f4b5
ab02957
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import gradio as gr
import os
os.environ["KERAS_BACKEND"] = "tensorflow"                                                                           
import keras
import keras_nlp


# Custom CSS injected into the Gradio page: pins a full-viewport JWST
# background image behind the UI (z-index -1) and centers the interface
# both vertically and horizontally. Runtime content — do not reformat.
css = """
html, body {
    margin: 0;
    padding: 0;
    height: 100%;
    overflow: hidden;
}

body::before {
    content: '';
    position: fixed;
    top: 0;
    left: 0;
    width: 100vw;
    height: 100vh;
    background-image: url('https://stsci-opo.org/STScI-01J5E849R5W27ZZ2C3QAE9ET75.png');
    background-size: cover;
    background-repeat: no-repeat;
    background-position: center;
    z-index: -1;    /* Keep the background behind text */
}
.gradio-container {
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh;  /* Ensure the content is vertically centered */
}
"""


gemma_lm = keras_nlp.models.CausalLM.from_preset("hf://sultan-hassan/CosmoGemma_2b_en")

def launch(input):
    """Answer a cosmology question using the fine-tuned Gemma model.

    Args:
        input: Question text from the Gradio textbox. (The name shadows
            the ``input`` builtin; kept unchanged for interface stability.)

    Returns:
        The generated answer — the text after the "Response:" marker —
        or the raw generation if the marker is unexpectedly absent.
    """
    template = "Instruction:\n{instruction}\n\nResponse:\n{response}"
    prompt = template.format(
        instruction=input,
        response="",
    )
    out = gemma_lm.generate(prompt, max_length=256)
    # Partition on the full marker instead of index arithmetic: the
    # original `out.index('Response') + len('Response') + 2` encoded the
    # ":\n" suffix as a magic +2 and raised ValueError whenever the model
    # output lacked the marker. partition() is explicit and never raises;
    # fall back to the raw output if the marker is missing.
    _, marker, answer = out.partition("Response:\n")
    return answer if marker else out

# Assemble the Gradio UI: one text box in, one text box out, with the
# custom background CSS, then start the local web server (blocking call).
_TITLE = "Hey I am CosmoGemma 👋 I can answer cosmology questions from astroph.CO research articles. Try me :)"
_DESCRIPTION = "Gemma_2b_en fine-tuned on QA pairs (~3.5k) generated from Cosmology and Nongalactic Astrophysics articles (arXiv astro-ph.CO) from 2018-2022 and tested on QA pairs (~1k) generated from 2023 articles, scoring over 75% accuracy."

iface = gr.Interface(
    fn=launch,
    inputs="text",
    outputs="text",
    css=css,
    title=_TITLE,
    description=_DESCRIPTION,
)
iface.launch()