Update README.md

README.md
Each record in the dataset represents a news article about technology companies:

- embedding: An array of numerical values representing the vector embedding for the article, generated using the OpenAI `EMBEDDING_MODEL`.
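This card does not pin the exact model behind `EMBEDDING_MODEL`, so if you want to embed your own text for comparison against this field, treat the following as a minimal sketch rather than the dataset's authoritative pipeline; the model name below is an assumption.

```python
# Minimal sketch (not the dataset's authoritative pipeline): embed a query
# string with the OpenAI Python client (>= 1.0). The model name is an
# assumption; this dataset card does not pin the exact EMBEDDING_MODEL.
import os
from openai import OpenAI

EMBEDDING_MODEL = "text-embedding-ada-002"  # assumption

openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

def get_embedding(text: str) -> list[float]:
    """Return the embedding vector for `text` using the assumed model."""
    response = openai_client.embeddings.create(model=EMBEDDING_MODEL, input=text)
    return response.data[0].embedding

query_vector = get_embedding("Apple announces new M-series chips")
print(len(query_vector))  # dimensionality should match the stored embeddings
```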
## Data Ingestion (Partitioned)

The snippet below downloads the dataset's six train-split Parquet files from the Hugging Face API, concatenates them into a single DataFrame, and inserts the resulting records into a MongoDB Atlas collection.
```python
import os
import requests
import pandas as pd
from io import BytesIO
from pymongo import MongoClient

# MongoDB Atlas URI and client setup
uri = os.environ.get('MONGODB_ATLAS_URI')
client = MongoClient(uri)

# Change to the appropriate database and collection names for the tech news embeddings
db_name = 'your_database_name'  # Change this to your actual database name
collection_name = 'tech_news_embeddings'  # Change this to your actual collection name
tech_news_embeddings_collection = client[db_name][collection_name]

# Hugging Face token for authenticated requests
hf_token = os.environ.get('HF_TOKEN')
headers = {
    "Authorization": f"Bearer {hf_token}"
}

# Downloads 228012 data points across the six train-split partitions
parquet_files = [
    "https://huggingface.co/api/datasets/AIatMongoDB/tech-news-embeddings/parquet/default/train/0000.parquet",
    "https://huggingface.co/api/datasets/AIatMongoDB/tech-news-embeddings/parquet/default/train/0001.parquet",
    "https://huggingface.co/api/datasets/AIatMongoDB/tech-news-embeddings/parquet/default/train/0002.parquet",
    "https://huggingface.co/api/datasets/AIatMongoDB/tech-news-embeddings/parquet/default/train/0003.parquet",
    "https://huggingface.co/api/datasets/AIatMongoDB/tech-news-embeddings/parquet/default/train/0004.parquet",
    "https://huggingface.co/api/datasets/AIatMongoDB/tech-news-embeddings/parquet/default/train/0005.parquet",
]

all_dataframes = []
combined_df = None

# Download each Parquet partition and load it into a DataFrame
for parquet_file_url in parquet_files:
    response = requests.get(parquet_file_url, headers=headers)
    if response.status_code == 200:
        parquet_bytes = BytesIO(response.content)
        df = pd.read_parquet(parquet_bytes)
        all_dataframes.append(df)
    else:
        print(f"Failed to download Parquet file from {parquet_file_url}: {response.status_code}")

if all_dataframes:
    combined_df = pd.concat(all_dataframes, ignore_index=True)
else:
    raise RuntimeError("No dataframes to concatenate.")

# Ingest to database
dataset_records = combined_df.to_dict('records')
tech_news_embeddings_collection.insert_many(dataset_records)
```
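As a quick sanity check after `insert_many` completes, you can count the documents and inspect one record. A sketch, reusing the `db_name`, `collection_name`, and collection handle defined above:

```python
# Sketch: verify the ingestion using the collection handle created above.
doc_count = tech_news_embeddings_collection.count_documents({})
print(f"Documents in {db_name}.{collection_name}: {doc_count}")  # expect 228012 if all partitions loaded

# Inspect one record to confirm the embedding field round-tripped intact.
sample = tech_news_embeddings_collection.find_one()
print(list(sample.keys()))
print(len(sample["embedding"]))  # dimensionality of the stored vector
```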
## Data Ingestion (All Records)
[Create a free MongoDB Atlas Account](https://www.mongodb.com/cloud/atlas/register?utm_campaign=devrel&utm_source=community&utm_medium=organic_social&utm_content=Hugging%20Face%20Dataset&utm_term=richmond.alake)
```python