File size: 11,723 Bytes
fbf2452
 
9cfea04
fbf2452
 
 
 
 
 
 
 
54f5fbc
fbf2452
 
 
 
 
 
 
 
 
 
54f5fbc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fbf2452
 
 
54f5fbc
 
fbf2452
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54f5fbc
fbf2452
54f5fbc
fbf2452
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54f5fbc
fbf2452
 
 
 
 
 
 
 
 
 
 
 
54f5fbc
 
 
fbf2452
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54f5fbc
fbf2452
 
54f5fbc
fbf2452
 
 
 
 
54f5fbc
 
 
 
fbf2452
 
 
 
 
 
 
 
 
 
 
 
54f5fbc
fbf2452
 
 
 
 
 
0db5c80
 
54f5fbc
fbf2452
 
54f5fbc
 
 
 
 
 
 
fbf2452
 
 
 
 
 
54f5fbc
b701828
fbf2452
 
54f5fbc
fbf2452
 
 
 
54f5fbc
 
 
 
fbf2452
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5053ea5
fbf2452
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
import os
import requests
from contextlib import asynccontextmanager
from bs4 import BeautifulSoup
from fastapi import FastAPI, HTTPException
from neo4j import GraphDatabase, basic_auth
import google.generativeai as genai
import logging # Import logging module

# --- Logging Configuration ---
# Basic logger configuration to display INFO messages and above.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__) # Create a logger instance for this module

# --- Environment Variable Configuration ---
# Neo4j connection details are read once at import time; any of them may be None.
NEO4J_URI = os.getenv("NEO4J_URI")
NEO4J_USER = os.getenv("NEO4J_USER")
NEO4J_PASSWORD = os.getenv("NEO4J_PASSWORD")

# Validation of essential configurations.
# NOTE: missing credentials are only logged here, not fatal — the app still
# starts, and the /add_research_paper endpoint re-checks them per request
# and returns a 500 if they are absent.
if not NEO4J_URI or not NEO4J_USER or not NEO4J_PASSWORD:
    logger.critical("CRITICAL ERROR: NEO4J_URI, NEO4J_USER, and NEO4J_PASSWORD environment variables must be set.")

# --- Application Lifecycle (Startup/Shutdown) ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Handles startup and shutdown events.

    On startup, configures the Gemini client from the GEMINI_API_KEY
    environment variable. On shutdown, logs teardown progress (the Neo4j
    driver used per-request is closed by the endpoint itself).
    """
    # --- Startup ---
    logger.info("Initializing Gemini client...")
    if genai:
        try:
            # BUG FIX: the previous fallback read `getattr(settings, ...)`,
            # but `settings` is not defined anywhere in this module, so any
            # run without GEMINI_API_KEY in the environment raised NameError
            # (silently swallowed by the broad except below). Read the
            # environment variable only.
            api_key = os.getenv("GEMINI_API_KEY")
            if not api_key:
                raise ValueError("GEMINI_API_KEY not found in environment.")
            genai.configure(api_key=api_key)
            logger.info("Gemini client configured successfully.")
        except Exception as e:
            logger.error(f"Failed to configure Gemini client: {e}", exc_info=True)
    else:
        logger.warning("Gemini library not imported. Endpoints requiring Gemini will not work.")

    yield  # API runs here

    # --- Shutdown ---
    logger.info("API shutting down...")
    # Close Neo4j connection (handled by atexit in graph_client.py)
    logger.info("Neo4j client closed (likely via atexit).")
    logger.info("API shutdown complete.")

# Initialize FastAPI application.
# BUG FIX: the `lifespan` context manager defined above was never wired into
# the app, so the Gemini startup configuration code never ran. Pass it here.
app = FastAPI(
    title="Neo4j Importer",
    description="API to fetch documents, summarize it with Gemini, and add it to Neo4j.",
    version="1.0.0",
    lifespan=lifespan,
)

# --- Utility Functions (Adapted from your script) ---

def get_content(number: str, node_type: str, timeout: float = 30.0) -> str:
    """Fetches raw HTML content for a document from Google Patents or Arxiv.

    Args:
        number: The document identifier (patent number or Arxiv ID).
        node_type: Either "Patent" or "ResearchPaper"; anything else yields "".
        timeout: Seconds to wait for the HTTP response (new, defaults to 30).

    Returns:
        The decoded page content with newlines stripped, or "" on any failure.
    """
    redirect_links = {
        "Patent": f"https://patents.google.com/patent/{number}/en",
        "ResearchPaper": f"https://arxiv.org/abs/{number}"
    }

    url = redirect_links.get(node_type)
    if not url:
        logger.warning(f"Unknown node type: {node_type} for number {number}")
        return ""

    try:
        # FIX: requests.get without a timeout can block a worker forever if
        # the remote host stalls; bound the wait explicitly.
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()  # Raises HTTPError for bad responses (4XX or 5XX)
        return response.content.decode('utf-8', errors='replace').replace("\n", "")
    except requests.exceptions.RequestException as e:
        logger.error(f"Request error for {node_type} number: {number} at URL {url}: {e}")
        return ""
    except Exception as e:
        logger.error(f"An unexpected error occurred in get_content for {number}: {e}")
        return ""

def extract_research_paper_arxiv(rp_number: str, node_type: str) -> dict:
    """Extracts title/abstract from an Arxiv abs page and generates a Gemini summary.

    Args:
        rp_number: The Arxiv identifier (e.g. "2401.12345").
        node_type: Passed through to get_content (expected "ResearchPaper").

    Returns:
        A dict with keys "document", "title", "abstract", "summary". On fetch or
        parse failure, the corresponding fields hold descriptive error strings
        instead of raising, so callers can inspect them.
    """
    raw_content = get_content(rp_number, node_type)

    # Defaults double as error markers when fetching/parsing fails.
    rp_data = {
        "document": f"Arxiv {rp_number}", # ID for the paper
        "title": "Error fetching content or content not found",
        "abstract": "Error fetching content or content not found",
        "summary": "Summary not yet generated" # Default summary
    }

    if not raw_content:
        logger.warning(f"No content fetched for Arxiv ID: {rp_number}")
        return rp_data # Returns default error data

    try:
        soup = BeautifulSoup(raw_content, 'html.parser')

        # Extract Title: Arxiv renders it as <h1 class="title"> with a
        # "Title:" descriptor span preceding the actual text.
        title_tag = soup.find('h1', class_='title')
        if title_tag and title_tag.find('span', class_='descriptor'):
            title_text = title_tag.find('span', class_='descriptor').next_sibling
            if title_text and isinstance(title_text, str):
                 rp_data["title"] = title_text.strip()
            else:
                rp_data["title"] = title_tag.get_text(separator=" ", strip=True).replace("Title:", "").strip()
        elif title_tag : # Fallback if the span descriptor is not there but h1.title exists
             rp_data["title"] = title_tag.get_text(separator=" ", strip=True).replace("Title:", "").strip()


        # Extract Abstract and strip the leading "Abstract:" prefix if present.
        abstract_tag = soup.find('blockquote', class_='abstract')
        if abstract_tag:
            abstract_text = abstract_tag.get_text(strip=True)
            if abstract_text.lower().startswith('abstract'): # Check if "abstract" (case-insensitive) is at the beginning
                # Find the first occurrence of ':' after "abstract" or just remove "abstract" prefix
                prefix_end = abstract_text.lower().find('abstract') + len('abstract')
                if prefix_end < len(abstract_text) and abstract_text[prefix_end] == ':':
                    prefix_end += 1 # Include the colon in removal
                abstract_text = abstract_text[prefix_end:].strip()
            rp_data["abstract"] = abstract_text

        # Mark if title or abstract are still not found
        if rp_data["title"] == "Error fetching content or content not found" and not title_tag:
            rp_data["title"] = "Title not found on page"
        if rp_data["abstract"] == "Error fetching content or content not found" and not abstract_tag:
            rp_data["abstract"] = "Abstract not found on page"

        # Generate summary with Gemini API if available and abstract exists
        if rp_data["abstract"] and \
           not rp_data["abstract"].startswith("Error fetching content") and \
           not rp_data["abstract"].startswith("Abstract not found"):

            # BUG FIX: the closing tag was written as "<document>" instead of
            # "</document>", leaving the XML-style delimiter unbalanced in the
            # prompt sent to the model.
            prompt = f"""You are a 3GPP standardization expert. Summarize the key information in the provided document in simple technical English relevant to identifying potential Key Issues.
            Focus on challenges, gaps, or novel aspects.
            Here is the document: <document>{rp_data['abstract']}</document>"""

            try:
                model_name = "gemini-2.5-flash-preview-05-20"
                model = genai.GenerativeModel(model_name)

                response = model.generate_content(prompt)
                rp_data["summary"] = response.text
                logger.info(f"Summary generated for Arxiv ID: {rp_number}")
            except Exception as e:
                logger.error(f"Error generating summary with Gemini for Arxiv ID {rp_number}: {e}")
                rp_data["summary"] = "Error generating summary (API failure)"
        else:
            rp_data["summary"] = "Summary not generated (Abstract unavailable or problematic)"

    except Exception as e:
        logger.error(f"Error parsing content for Arxiv ID {rp_number}: {e}")
    return rp_data

def add_nodes_to_neo4j(driver, data_list: list, node_type: str) -> int:
    """Adds a list of nodes to Neo4j in a single write transaction.

    Args:
        driver: An open neo4j GraphDatabase driver.
        data_list: List of property dicts, one per node to create.
        node_type: Label for the created nodes; must be a valid identifier
            because Cypher labels cannot be bound as query parameters.

    Returns:
        The number of nodes actually created (0 for an empty data_list).

    Raises:
        ValueError: If node_type is not a safe identifier.
        HTTPException: 500 on any database failure.
    """
    # SECURITY: node_type is interpolated directly into the Cypher text
    # (labels are not parameterizable), so reject anything that is not a
    # plain identifier to rule out query injection.
    if not node_type.isidentifier():
        raise ValueError(f"Invalid node label: {node_type!r}")

    if not data_list:
        logger.warning("No data provided to add_nodes_to_neo4j.")
        return 0

    query = (
        "UNWIND $data as properties "
        f"CREATE (n:{node_type}) "
        "SET n = properties"
    )

    try:
        with driver.session(database="neo4j") as session: # Specify database if not default
            result = session.execute_write(lambda tx: tx.run(query, data=data_list).consume())
            nodes_created = result.counters.nodes_created

            if nodes_created > 0:
                logger.info(f"{nodes_created} new {node_type} node(s) added successfully.")

            return nodes_created # Return the number of nodes actually created
    except Exception as e:
        logger.error(f"Neo4j Error - Failed to add/update {node_type} nodes: {e}")
        raise HTTPException(status_code=500, detail=f"Neo4j database error: {e}")


# --- FastAPI Endpoint ---
# Liveness probe: confirms the API process is up.
@app.get("/")
def read_root():
    """Return a minimal health-check payload."""
    return dict(status="ok")

@app.post("/add_research_paper/{arxiv_id}", status_code=201) # 201 Created for successful creation
async def add_single_research_paper(arxiv_id: str):
    """
    Fetches a research paper from Arxiv by its ID, extracts information,
    generates a summary, and adds it as a 'ResearchPaper' node in Neo4j.

    Raises:
        HTTPException: 500 if Neo4j is unconfigured or a database/server
            error occurs; 404 if the Arxiv page cannot be fetched or parsed.
    """
    node_type = "ResearchPaper"
    logger.info(f"Processing request for Arxiv ID: {arxiv_id}")

    if not NEO4J_URI or not NEO4J_USER or not NEO4J_PASSWORD:
        logger.error("Neo4j database connection details are not configured on the server.")
        raise HTTPException(status_code=500, detail="Neo4j database connection details are not configured on the server.")

    # Step 1: Extract paper data
    paper_data = extract_research_paper_arxiv(arxiv_id, node_type)

    if paper_data["title"].startswith("Error fetching content") or paper_data["title"] == "Title not found on page":
        logger.warning(f"Could not fetch or parse content for Arxiv ID {arxiv_id}. Title: {paper_data['title']}")
        raise HTTPException(status_code=404, detail=f"Could not fetch or parse content for Arxiv ID {arxiv_id}. Title: {paper_data['title']}")

    # Step 2: Add/Update in Neo4j
    driver_instance = None # Initialize for the finally block
    try:
        auth_token = basic_auth(NEO4J_USER, NEO4J_PASSWORD)
        driver_instance = GraphDatabase.driver(NEO4J_URI, auth=auth_token)
        driver_instance.verify_connectivity()
        logger.info("Successfully connected to Neo4j.")

        nodes_created_count = add_nodes_to_neo4j(driver_instance, [paper_data], node_type)

        # BUG FIX: `message` was never assigned (guaranteed NameError at
        # L736/L743 of the original), and `status_code_response` was only
        # assigned when a node was created. Define both for every outcome.
        if nodes_created_count > 0:
            message = f"Research paper {arxiv_id} was successfully added to Neo4j."
            status_code_response = 201 # Created
        else:
            message = f"Research paper {arxiv_id} was not added (no new node created)."
            status_code_response = 200 # OK, nothing created

        logger.info(message)
        # Note: FastAPI uses the status_code from the decorator or HTTPException.
        # This custom status_code_response is informational, returned in the JSON
        # body; the actual HTTP response status stays 201 (from the decorator)
        # unless an HTTPException overrides it.
        return {
            "message": message,
            "data": paper_data,
            "response_status_info": status_code_response
        }

    except HTTPException as e: # Re-raise HTTPExceptions
        logger.error(f"HTTPException during Neo4j operation for {arxiv_id}: {e.detail}")
        raise e
    except Exception as e:
        logger.error(f"An unexpected error occurred during Neo4j operation for {arxiv_id}: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"An unexpected server error occurred: {e}")
    finally:
        if driver_instance:
            driver_instance.close()
            logger.info("Neo4j connection closed.")