Dataset columns:
image_url: string (113-131 chars)
tags: sequence
discussion: list
title: string (8-254 chars)
created_at: string (24 chars)
fancy_title: string (8-396 chars)
views: int64 (73-422k)
null
[ "queries", "node-js" ]
[ { "code": "import { MongoClient, ServerApiVersion } from \"mongodb\";\n// Variables de entorno\nconst DB_USER = import.meta.env.VITE_DB_USER;\n\nconst MONGO_HOST = `mongodb+srv://${DB_USER}:${DB_PASSWORD}@${DB_HOST}?retryWrites=true&w=majority`;\n\nexport const mongoClient = new MongoClient(MONGO_HOST, {\n useNewUrlParser: true,\n useUnifiedTopology: true,\n serverApi: ServerApiVersion.v1,\n});\n\nconst clientPromise = mongoClient.connect();\n\nconst connectDB = async () => {\n try {\n const database = (await clientPromise).db(DB_NAME);\n console.log(\"[db] Conectada con éxito\", database);\n const collection = database.collection(MONGODB_COLLECTION);\n const results = await collection.find({}).toArray();\n return {\n statusCode: 200,\n body: JSON.stringify(results),\n };\n } catch (err) {\n console.error(\"[db] Error\", MONGO_HOST, err);\n return { statusCode: 500, body: err.toString() };\n }\n};\n\nexport default connectDB;\napp.get(\"/api/allnames\", async (req, res) => {\n const docs = await connectDB();\n console.log(\"Call docs\", docs);\n res.json(docs);\n});\npnpm startfetch(\"http://localhost:5173/api/allnames/\")pnpm buildfetch(\"https://nexasoft.netlify.app/api/allnames/\")[ERROR] Could not resolve \"/opt/build/repo/adaptors/express/vite.config.ts\"netlify devnetlify devpnpm startTypeError: Failed to parse URL from /.netlify/functions/get_contacts. \nTypeError [ERR_INVALID_URL]: Invalid URL\n", "text": "Hi, really I need your help with this.\nI am building a web app with Qwik framework, Express, Mongodb, and Netlify (also implementing netlify functions). See my site here: https://nexasoft.netlify.app/. Then I am trying to read data from Mongo Atlas so I created a function to connect this DataBase.Then from express I use a route to implement this function and get data for my component with a fetch request. But I have not achieve to read my database info.When I run pnpm start with my local config fetch(\"http://localhost:5173/api/allnames/\"), I get that it returns as response a 404 code with no documents. Then If I run pnpm build with my production config fetch(\"https://nexasoft.netlify.app/api/allnames/\"), my app build correctly in my computer, then I push it to github where the Netlify deploy fails with this:[ERROR] Could not resolve \"/opt/build/repo/adaptors/express/vite.config.ts\"I also run the command, netlify dev, witch opens the content from my site but still I get the 404 message trying to access my data from Mongo.So I decided to install, Netlyfy functions to serverless. I created a new function get_contacts witch made my MongoDB conection and call (there I import my .env variables like, process.env…). Again running with this configuration, netlify dev or even pnpm start, the explorer shows me, Could not proxy request. 
Then the terminal closed with this:I have not any idea witch could be my mistake, here my github repository: GitHub - Maikpwwq/nexasoft: Sitio Web de NexaSoft, el futuro en soluciones de software.", "username": "Michael_Arias_Fajardo" }, { "code": "fetch(\"https://nexasoft.netlify.app/.netlify/functions/get_contactsimport mongoose from \"mongoose\"; // ,\nimport * as dotenv from \"dotenv\";\ndotenv.config();\n\n// Variables de entorno\nconst DB_USER = `${process.env.VITE_DB_USER}`;\nconst DB_PASSWORD = `${process.env.VITE_DB_PASSWORD}`;\nconst DB_HOST = `${process.env.VITE_DB_HOST}`;\nconst DB_NAME = `${process.env.VITE_DB_NAME}`;\nconst MONGODB_COLLECTION = `${process.env.VITE_MONGODB_COLLECTION}`;\n\nconst MONGO_HOST = `mongodb+srv://${DB_USER}:${DB_PASSWORD}@${DB_HOST}/?retryWrites=true&w=majority`;\n\nconst schema = new mongoose.Schema({ name: \"string\", email: \"string\" });\n\nconsole.log(\"MONGOOSE_HOST\", MONGO_HOST);\nconst clientPromise = mongoose.createConnection(MONGO_HOST, {\n dbName: DB_NAME,\n useNewUrlParser: true,\n useUnifiedTopology: true,\n});\n\nconst Contactos = clientPromise.model(MONGODB_COLLECTION, schema);\n\nconst handler = async () => {\n console.log(\"hi mongoose\");\n try {\n await Contactos.find().exec().then((res) => {\n console.log(\"res\", res);\n }); // .toArray(); .exec() .clone()\n // You don't need callbacks in Mongoose, because Mongoose supports promises and async/await.\n // , function (err, docs) { if (err) console.log(\"Error getting the data\", err);\n // docs.forEach\n const results = [];\n console.log(\"mongoClient\", results);\n return {\n statusCode: 200,\n body: JSON.stringify(results),\n };\n } catch (err) {\n console.error(\"[db] Error\", MONGO_HOST, err);\n return { statusCode: 500, body: err.toString() };\n }\n};\n\nexport { clientPromise, handler };\n", "text": "My fetch called was falling because I don’t used full URL: (host + function), ie. fetch(\"https://nexasoft.netlify.app/.netlify/functions/get_contacts. I made this change, then my consult with Mongo was taking a little over 10 seconds to respond to database query. And Netlify serverless function has this restriction in time. Then I tried to optimize my Mongo queries installing mongoose, I update my serverless function, but in the explorer I get this just a empty array (res). Maybe someone have idea of how to debug this.This is my function get_mongoose:", "username": "Michael_Arias_Fajardo" }, { "code": "npm run devnpm run build.serverpnpm run build[commonjs] Cannot bundle Node.js built-in \"stream\" imported from \"node_modules/mongodb/lib/cursor/abstract_cursor.js\". Consider disabling ssr.noExternal or remove the built-in dependency.`\nssr: { \n noExternal: ['mongodb'],\n }\n", "text": "Hi,\nI’m working with Qwik and Mongodb as well and everything is working as expected locally with npm run dev but when I run npm run build.server or pnpm run build I get this error: [commonjs] Cannot bundle Node.js built-in \"stream\" imported from \"node_modules/mongodb/lib/cursor/abstract_cursor.js\". Consider disabling ssr.noExternal or remove the built-in dependency. I tried addingto vite.config.ts but it didn’t solve it.\nDid you have this issue?", "username": "Pavel_Kerbel" } ]
Cannot connect MongoDB Atlas data with Qwik framework and Express server
2023-01-27T22:23:08.054Z
Cannot connect MongoDB Atlas data with Qwik framework and Express server
1,385
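A minimal sketch of the pattern discussed in the thread above: one cached MongoClient reused across serverless invocations, with every value read from process.env. The names MONGODB_URI, DB_NAME and DB_COLLECTION are placeholder assumptions, not values from the original project, and the function must be fetched with its full path (host + /.netlify/functions/<name>) when called from server-side code.

const { MongoClient } = require("mongodb");

const uri = process.env.MONGODB_URI; // e.g. mongodb+srv://user:pass@host/?retryWrites=true&w=majority
let clientPromise; // cached so warm invocations skip a fresh TCP/TLS handshake

function getClient() {
  if (!clientPromise) {
    clientPromise = new MongoClient(uri).connect();
  }
  return clientPromise;
}

exports.handler = async () => {
  try {
    const client = await getClient();
    const docs = await client
      .db(process.env.DB_NAME)
      .collection(process.env.DB_COLLECTION)
      .find({})
      .limit(100)
      .toArray();
    return { statusCode: 200, body: JSON.stringify(docs) };
  } catch (err) {
    return { statusCode: 500, body: err.toString() };
  }
};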
null
[ "app-services-user-auth" ]
[ { "code": "", "text": "Is there any way I can provide the dual password for the same single user in MongoDB community version?", "username": "Shaktisinh_Jhala" }, { "code": "pwdcreateUser", "text": "As far as i recall, the manual doesn’t mention such feature. pwd in createUser is one string only, not an array.", "username": "Kobe_W" } ]
Support for dual Password for single user
2023-05-19T12:36:19.486Z
Support for dual Password for single user
722
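A short mongosh sketch illustrating the point above: createUser accepts a single pwd string, so the usual workaround for rotating credentials is a second user with the same roles. User names, passwords and the database here are made-up examples.

use admin
db.createUser({ user: "app_v1", pwd: "oldSecret", roles: [{ role: "readWrite", db: "shop" }] })
db.createUser({ user: "app_v2", pwd: "newSecret", roles: [{ role: "readWrite", db: "shop" }] })
// once every client has been switched over to app_v2, retire the old credential
db.dropUser("app_v1")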
null
[]
[ { "code": "", "text": "I’m starting a new app services + realm project and have a couple questions. When I purchase a dedicated cluster and go into production, do the Functions / Endpoints I create in my app services incur additional billing/compute charges? If not, does that mean then that all of the compute overhead of running Functions/Endpoints for my middleware run on the same server as my Mongo instance?Since I’m new maybe these questions are covered somewhere but also, when I deploy Mongo on Atlas dedicated instance, does that mean there are 3 instances of Mongo running (the default redundant configuration of Mongo) and therefore Mongo actually runs on 3 separate EC2 instances? And, if so, do the functions I create run on one of those instances?If app services Functions run on say Lambda then I would expect to see a pricing page for Functions. But, I don’t see one.So, this is really a question about cost and also about scaling. Depending on where my Functions run / get deployed, how would I make plans on how to scale them? What if my Functions get extremely high use but my data tier / the actual Mongo activity isn’t very high relatively speaking. If I need to scale out my Functions but keep my Mongo server running on modest server, how would I do this?If the Functions actually run on the Mongo servers themselves (which I guess they do bc I don’t see additional pricing info for Functions), I would need to purchase more Mongo instances even if all I really need is more compute capacity for my Functions layer?Thanks, basically I’m considering Functions for nearly all of my middleware needs. I’m an experienced developer and am comfortable creating Lambda functions + API Gateway, etc. But, after learning as much as I can about Realm (which is absolutely perfect for my application) and the App Services product + node Functions, it seems as though I could just deploy nearly all of my middleware logic into App Services + Functions.Any input greatly appreciated, thanks", "username": "d33p" }, { "code": "", "text": "Not sure how I missed this when I was reading Realm/Atlas docs but I found the pricing link here in another forum post: https://www.mongodb.com/docs/atlas/app-services/billing/So, this makes more sense. Functions/Triggers/Sync’ing/etc etc are all billed separately and therefore, scaled separately from my Atlas Mongo Cluster.", "username": "d33p" } ]
App services billing and scaling
2023-05-20T21:41:56.794Z
App services billing and scaling
289
https://www.mongodb.com/…8_2_1024x576.png
[ "aggregation", "lebanon-mug", "conference" ]
[ { "code": "", "text": "\nThe MongoDB Weekender1920×1080 173 KB\nBecome a MongoDB master! Join the #Lebanon MongoDB User Group and unlock your potential with four new MongoDB topics every month.Kicking off in May 2023, we’ll begin our first month with the following exciting topics: 2023-05-05T21:00:00Z→2023-05-26T21:00:00Z 5:00 (pm) —> 7:00(pm) (CEST)Seats are limited, book your FREE one now\n bit.ly/the-mongodb-weekender", "username": "eliehannouch" }, { "code": "", "text": "Our first session of the MongoDB Weekender series in #Lebanon MongoDB user group, talking about data Modeling and schema design patterns is done and published on our YouTube channel.Today Session Link: MongoDB The Weekender 2023 - Data Modeling - YouTubeIf you are interested to attend the coming sessions, secure your FREE spot by filling the below form: The MongoDB Weekender Registration Form", "username": "eliehannouch" }, { "code": "", "text": "This is great @eliehannouch. Thanks for leading this up!", "username": "Veronica_Cooley-Perry" }, { "code": "", "text": "Our second session of the MongoDB Weekender series in #Lebanon MongoDB user group, talking about indexes, their usage, different types (Single, Compound, TTL, Partial, Multi-key, Text…) and how to implement them in real scenario’s is done and published in our YouTube channel.Session Link: MongoDB The Weekender 2023 - Indexing - YouTubeIf you are interested to attend the coming sessions, secure your FREE spot by filling the below form: The MongoDB Weekender Registration Form", "username": "eliehannouch" } ]
Lebanon MUG: The MongoDB Weekender Series
2023-04-28T15:01:07.224Z
Lebanon MUG: The MongoDB Weekender Series
1,545
null
[ "atlas-cluster" ]
[ { "code": "", "text": "I’m getting this error when I’m using render or railway. So far I’ve tested both the connection string and network connectivity which both work. mongodb+srv://Username:[email protected]/?retryWrites=true&w=majority", "username": "Sara_Rutherfurd" }, { "code": "", "text": "This error indicates the driver fails to find a server based on your preference (e.g. read preference).What read preference you use? by default it should be primary, can you check if you have a primary runningi never use atlas. is your deployment a replica set or sharded cluster?check this. https://www.mongodb.com/docs/manual/core/read-preference-mechanics/", "username": "Kobe_W" }, { "code": "", "text": "It’s a replica set with two secondary and one primary cluster.", "username": "Sara_Rutherfurd" } ]
Error connecting to database: MongoServerSelectionError: connection <monitor> to 52.88.89.97:27017 closed
2023-05-19T20:18:41.153Z
Error connecting to database: MongoServerSelectionError: connection <monitor> to 52.88.89.97:27017 closed
782
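A small Node.js sketch of the diagnostics suggested above: fail fast instead of waiting the default 30 seconds for server selection, and print the full error, which lists each replica set member and why it was rejected. The URI is a placeholder, and readPreference: "primaryPreferred" is only an assumption for the case where no primary is currently elected.

const { MongoClient } = require("mongodb");

const client = new MongoClient("mongodb+srv://user:[email protected]", {
  serverSelectionTimeoutMS: 5000,   // surface selection problems quickly
  readPreference: "primaryPreferred",
});

async function ping() {
  try {
    await client.db("admin").command({ ping: 1 });
    console.log("server selected and reachable");
  } catch (err) {
    console.error(err); // the error's reason describes every member the driver tried
  } finally {
    await client.close();
  }
}

ping();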
null
[ "replication", "compass", "mongodb-shell", "containers" ]
[ { "code": "docker-compose.ymlversion: '3.9'\n\nservices:\n mongo1:\n image: mongo:6\n container_name: mongo1\n command: --replSet my-replica-set\n ports:\n - 27017:27017\n networks:\n - mongo-network\n\n mongo2:\n image: mongo:6\n container_name: mongo2\n command: --replSet my-replica-set\n ports:\n - 27018:27017\n networks:\n - mongo-network\n\n mongo3:\n image: mongo:6\n container_name: mongo3\n command: --replSet my-replica-set\n ports:\n - 27019:27017\n networks:\n - mongo-network\n\nnetworks:\n mongo-network:\n driver: bridge\ndocker-compose up -ddocker exec -it mongo1 mongosh --eval \"rs.initiate({ _id: \"myReplicaSet\", members: [ {_id: 0, host: \"mongo1\"}, {_id: 1, host: \"mongo2\"}, {_id: 2, host: \"mongo3\"} ] })\"\ndocker exec -it mongo1 mongosh --eval \"rs.status()\"\ndocker exec -it mongo1 mongosh\ndb.myCollection.insertOne({name: \"John Doe\", age: 30})\nexitmongodb://127.0.0.1:27017,127.0.0.1:27018,127.0.0.1:27019/?replicaSet=my-replica-set\nmongodb://127.0.0.1:27017/?directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+1.8.2\ndocker exec -it mongo2 mongosh --eval \"rs.status()\"\nmongodb://127.0.0.1:27017,127.0.0.1:27018,127.0.0.1:27019/?replicaSet=my-replica-set\n", "text": "I am using Windows 10. I followed this process:andThis creates on “test” database a collection named “myCollection” and adds some data. Finally I exit the app by typing exit.Does not work:WorksThe issue: The main idea was to create a replication cluster, where I can stop the main container from running, so that the new primary source would be elected from the remaining two sources. If I do that and run:I indeed get the answer, that mongo1 container has stopped working and mongo2 is now the primary, while mongo3 is the secondary. HOWEVER, MongoDB Compass stops working, because I used a direct connection to access the database and it does not understand, that a new database has been elected as the primary.According to all the articles, I should be using this URI:But it just does not work. It always throws this error:getaddrinfo ENOTFOUND mongo1Please help.", "username": "Pie_Leos" }, { "code": "mongodb://127.0.0.1:27017,127.0.0.1:27018,127.0.0.1:27019/?replicaSet=my-replica-set\n", "text": "Really this comes down to the docker networking namespace and being able to resolve the hostnames. It also relates to the replicaset discovery when a client connects, the hostnames and ports returned by db.hello() is what the client will try and connect to.According to all the articles, I should be using this URI:This would work if it wasn’t for docker. If you had multiple mongod on your local environment for example.This is how I have have run a replset in docker and be able to connect from a local application.", "username": "chris" } ]
Getting error "getaddrinfo ENOTFOUND mongo1" when trying to reach replication cluster on Docker through MongoDB Compass on Windows 10
2023-05-19T14:20:14.494Z
Getting error “getaddrinfo ENOTFOUND mongo1” when trying to reach replication cluster on Docker through MongoDB Compass on Windows 10
2,064
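One concrete way to apply the advice above, sketched as mongosh plus comments for the compose and hosts-file changes. The port layout and the hosts-file edit are assumptions about the poster's setup, not steps taken from the thread.

// docker-compose: give each member a unique port and publish that same port, e.g.
//   mongo2:  command: --replSet my-replica-set --port 27018
//            ports: ["27018:27018"]
// hosts file (C:\Windows\System32\drivers\etc\hosts on Windows):
//   127.0.0.1 mongo1 mongo2 mongo3
// then initiate the set with the exact host:port pairs clients will dial:
rs.initiate({
  _id: "my-replica-set",
  members: [
    { _id: 0, host: "mongo1:27017" },
    { _id: 1, host: "mongo2:27018" },
    { _id: 2, host: "mongo3:27019" },
  ],
});
// Compass can now use discovery instead of directConnection:
//   mongodb://mongo1:27017,mongo2:27018,mongo3:27019/?replicaSet=my-replica-set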
null
[ "aggregation" ]
[ { "code": "userIds[\"1\", \"2\"][{user1Obj}, {user2Obj}][\n {\n \"_id\": \"8b7baf2cebaa41c9aff9ed9f97f661e9\",\n \"messages\": [],\n \"userIds\": [\n \"64656d299920c3e937f4d6cc\",\n \"64656be5ba1496a420fdce38\"\n ],\n \"chatInitiator\": \"64656be5ba1496a420fdce38\",\n \"createdAt\": \"2023-05-19T03:00:25.447Z\",\n \"updatedAt\": \"2023-05-19T03:00:25.447Z\",\n \"__v\": 0,\n }\n ]\n[\n {\n \"_id\": \"8b7baf2cebaa41c9aff9ed9f97f661e9\",\n \"messages\": [],\n \"chatInitiator\": \"64656be5ba1496a420fdce38\",\n \"createdAt\": \"2023-05-19T03:00:25.447Z\",\n \"updatedAt\": \"2023-05-19T03:00:25.447Z\",\n \"__v\": 0,\n \"users\": [\n user1Obj,\n user2Obj\n ]\n }\n]\nModel.aggregate([\n { $match: { userIds: { $in: [userId] } } },\n { $sort: { createdAt: -1 } },\n {\n $lookup: {\n from: \"users\",\n localField: \"userIds[]\",\n foreignField: \"_id\",\n as: \"users\"\n }\n },\n {\n $addFields: {\n users: {\n $map: {\n input: \"$users\",\n as: \"user\",\n in: \"$$user\"\n }\n }\n }\n },\n {\n $unset: \"userIds\"\n },\n // pagination\n { $skip: options.page * options.limit },\n { $limit: options.limit },\n { $sort: { createdAt: 1 } },\n ]);`\n", "text": "Hi everyone. I’m new here and also new to MongoDBI have an array of user ids userIds = [\"1\", \"2\"]I want to be able to get the user’s object from the user’s collection using the above array, and save the user objects in a new array called ‘users’. That is users = [{user1Obj}, {user2Obj}]I want it transformed toBelow is what I have done but it’s not giving me the expected resultAny help?", "username": "Pro_Help" }, { "code": " localField: \"userIds[]\",", "text": "1 - Try with `localField: “userIds” rather than: localField: \"userIds[]\",2 - your $addFields is probably useless since you $map each $$user to itself3 - your 2nd $sort is useless", "username": "steevej" } ]
Facing difficulty with aggregation
2023-05-19T08:14:41.222Z
Facing difficulty with aggregation
337
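Putting the corrections above together, with one hedged extra step: if the userIds strings hold ObjectId hex values while users._id is a real ObjectId, they must be converted before the $lookup. The userObjectIds helper field below is introduced purely for illustration; drop that stage if both sides already share the same type.

Model.aggregate([
  { $match: { userIds: userId } },   // matching a scalar against an array field already means "contains"
  { $sort: { createdAt: -1 } },
  { $set: { userObjectIds: { $map: { input: "$userIds", as: "id", in: { $toObjectId: "$$id" } } } } },
  {
    $lookup: {
      from: "users",
      localField: "userObjectIds",   // an array localField matches each element against users._id
      foreignField: "_id",
      as: "users",
    },
  },
  { $unset: ["userIds", "userObjectIds"] },
  { $skip: options.page * options.limit },
  { $limit: options.limit },
]);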
null
[ "kafka-connector" ]
[ { "code": "name=mongo-source\nconnector.class=com.mongodb.kafka.connect.MongoSourceConnector\ntasks.max=1\n# Connection and source configuration\nconnection.uri=<Connection.uri>\ndatabase=<database.name>\ncollection=<coll.name>\ntopic.prefix=Topic1\n", "text": "Hi Team,\nIm trying to connect MongoDB with AWS Managed kafka using the source connector (downloaded from Confluent Hub), but while creating AWS connector im getting the error message “The connector configuration is invalid, Message: Request failed.”configurations file in connector:", "username": "Nabeel_Raza" }, { "code": "", "text": "There is a blog post that discusses some of the considerations including network configuration that might be the issue.Code, content, tutorials, programs and community to enable developers of all skill levels on the MongoDB Data Platform. Join or follow us here to learn more!", "username": "Robert_Walters" } ]
MongoDB Kafka source connector
2023-05-18T19:20:00.858Z
MongoDB Kafka source connector
866
null
[ "swift", "app-services-user-auth", "realm-web" ]
[ { "code": "", "text": "Hello,I am trying to implement Sign in with Apple for my IOS app. I am using the sign in to authenticate the user into realm. The thing I do not understand is that I am following these steps from the mongodb docs. In Step 2 on creating a services ID for the app in dev portal of apple. We must activate the Sign in with Apple service within the service identifier. In order to do this we have to provide domains and return URLs? I am confused by this - isn’t this only for web apps? Do I have to setup a whole website that provides a return url and everything.Not trying to give attitude or anything. I genuinely don’t understand what I am supposed to do here. If there are any other tutorials that people have for this or advice please let me know. I do not have a website or anything to provide a return url thus cannot activate the apple sign in on the Atlas realm side.", "username": "Peter_Raad" }, { "code": "", "text": "+1, genuinely confused about which domains/subdomains/return URLs to set?", "username": "Turbulent_Stability" }, { "code": "", "text": "+1, The documentation is not clear at all.", "username": "Timothy_Tati" } ]
Sign In With Apple Setup on Realm for iOS
2022-11-01T04:54:52.848Z
Sign In With Apple Setup on Realm for iOS
2,707
null
[ "python", "crud" ]
[ { "code": " event_set_upsert = pymongo.UpdateOne({\n 'events_id': event.get(\"_id\")\n },{\n \"$setOnInsert\": { \"created_at\": datetime.now() },\n '$set': event_set\n }, upsert=True)\n event_golden_upsert_list.append(event_set_upsert)\n try:\n events_golden_collection.bulk_write(event_golden_upsert_list, ordered=False)\n except BulkWriteError as bwe:\n print(\"ERROR: Bulk Write Error\")\n print(bwe)\n", "text": "Hello,I’m working with a match-string to make sure, that I avoid duplicates. I assumed, it would be a good idea, to make that unique in the DB, so I created a unique index on my match string field.Now, for data loads, I sometimes would like to do updates via BulkWrites (pymongo).After adding all the events that I’d like to update to the list, I’m trying for a bulk write:Now, it seems as if none of my updates have been executed, if there is one bulk write error.\nHow can I make sure, that the updates are still executed for the matching IDs - just avoid inserting new documents with the same match-string?", "username": "Chris_Haus" }, { "code": "eventevent_set", "text": "Hi @Chris_Haus, can you please share the traceback you are getting, and also a representative event and event_set?", "username": "Steve_Silvester" }, { "code": "----- Loop: 47 Event: 63dfd2e5b162cd1e47e16338\nUPSERT: 63dfd2e5b162cd1e47e16338 - 18059_2023-02-18_00-00\n\n----- Loop: 48 Event: 63dfd2e5b162cd1e47e16339\nUPSERT: 63dfd2e5b162cd1e47e16339 - 18057_2023-02-23_00-00\n\n----- Loop: 49 Event: 63dfd2e5b162cd1e47e1633a\nUPSERT: 63dfd2e5b162cd1e47e1633a - 18059_2023-02-24_00-00\n\n----- Loop: 50 Event: 63dfd2e5b162cd1e47e1633b\nUPSERT: 63dfd2e5b162cd1e47e1633b - 18057_2023-02-24_00-00\n\n\nERROR: Bulk Write Error\n\nE11000 duplicate key error collection: events_golden index: match_zip_street_date_time_index dup key: { match_zip_street_date_time: \"18057_2023-02-24_00-00\" }\n\nE11000 duplicate key error collection: events_golden index: match_zip_street_date_time_index dup key: { match_zip_street_date_time: \"18057_2023-02-24_00-00\" }\n\nE11000 duplicate key error collection: events_golden index: match_zip_street_date_time_index dup key: { match_zip_street_date_time: \"18057_2023-02-24_00-00\" }\n\nE11000 duplicate key error collection: events_golden index: match_zip_street_date_time_index dup key: { match_zip_street_date_time: \"18057_2023-02-24_00-00\" }\n\n", "text": "Just some excerpts from the data structure and print.\nI’m looping through the events and check, whether to put them into the golden records collection.\nSo for every event I check:When I print the errors, I iterate through all of them. There I expected an individual error per upsert element in the list.\nBut it seems as if one upsert failed, and the same error is being printed for all upsert elements in the list.[UPDATE]When I restructure the code to use individual inserts and updates, it’s working fine.\nJust the bulk seems to be not updating, when there is a duplicate index / value for any of the records to upsert.", "username": "Chris_Haus" }, { "code": "", "text": "I think the difference when you’re doing the individual inserts and updates is that the datetime string ends up being different for each entry, avoiding the duplicates, because there is a delay for each write.", "username": "Steve_Silvester" }, { "code": "", "text": "By default bulk_write operations are ordered and stop after the first error. 
To continue after an error you can use an unordered bulk_write by passing the ordered=False argument: Bulk Write Operations — PyMongo 4.3.3 documentation", "username": "Shane" }, { "code": "", "text": "OP’s code is already using ordered=False", "username": "Kobe_W" } ]
E11000 duplicate key error collection - with BulkWrite correct?
2023-05-18T13:59:27.114Z
E11000 duplicate key error collection - with BulkWrite correct?
1,095
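A PyMongo sketch expanding on the replies above: with ordered=False the upserts that do not violate the unique index are still applied, and the exception's details list exactly which operations failed, so nothing is silently lost. Variable names follow the original post.

from pymongo.errors import BulkWriteError

try:
    result = events_golden_collection.bulk_write(event_golden_upsert_list, ordered=False)
    print("upserted:", result.upserted_count, "modified:", result.modified_count)
except BulkWriteError as bwe:
    details = bwe.details
    # counts for the writes that DID go through despite the duplicates
    print("upserted:", details.get("nUpserted"), "modified:", details.get("nModified"))
    # one entry per failed operation, so only the true duplicates need attention
    for write_error in details.get("writeErrors", []):
        print(write_error["index"], write_error["code"], write_error["errmsg"])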
null
[ "node-js" ]
[ { "code": "MongoError: MongoClient must be connected before calling MongoClient.prototype.db\n at MongoClient.db (/home/pgewvedi/yp/node_modules/mongodb/lib/mongo_client.js:314:11)\n at Object.exports.model (/home/pgewvedi/yp/views/Content/Model/index.js:167:38)\n at Function.$.getModel (/home/pgewvedi/yp/app.js:121:101)\n at Object.index (/home/pgewvedi/yp/views/Home/Controller/index.js:3:11)\n at Function.$.setController (/home/pgewvedi/yp/app.js:110:73)\n at /home/pgewvedi/yp/views/Home/Config.js:3:11\n at Layer.handle [as handle_request] (/home/pgewvedi/yp/node_modules/express/lib/router/layer.js:95:5)\n at next (/home/pgewvedi/yp/node_modules/express/lib/router/route.js:131:13)\n at Route.dispatch (/home/pgewvedi/yp/node_modules/express/lib/router/route.js:112:3)\n at Layer.handle [as handle_request] (/home/pgewvedi/yp/node_modules/express/lib/router/layer.js:95:5)\n", "text": "I have a web resourcewhen localhost then goodbut when online show a DB errorcan you provide a simple web resource connected to DB success?", "username": "Lam_Cao_I_nh" }, { "code": "import { MongoClient } from 'mongodb';\n\nconst uri = 'uri'; // Replace 'uri' with your actual MongoDB connection string\nconst client = new MongoClient(uri, { useNewUrlParser: true, useUnifiedTopology: true });\n\nclient.connect(err => {\n if (err) {\n console.error('Error connecting to MongoDB:', err);\n return;\n }\n\n console.log('Connected to MongoDB server...');\n\n const db = client.db('testdb'); // Replace 'testdb' with your database name\n const collection = db.collection('coll'); // Replace 'coll' with your collection name\n\n collection.find({}).toArray((err, result) => {\n if (err) {\n console.error('Error executing find query:', err);\n return;\n }\n\n console.log('Find query executed...');\n console.log(result);\n\n client.close(); // Close the MongoDB connection\n });\n});\n", "text": "Hello @Lam_Cao_I_nh,Could you please try the following code to establish a connection to your MongoDB database?The provided code is an example of connecting to a MongoDB database and performing a basic query. Please make sure to adapt the code by replacing the placeholders with your actual MongoDB connection string, database name, and collection name.If the issue persists, it would be helpful to provide additional context by sharing the following details:Best regards,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "", "text": "thank you ^^ ", "username": "Lam_Cao_I_nh" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MongoError: MongoClient must be connected before calling
2023-05-19T04:23:35.649Z
MongoError: MongoClient must be connected before calling
470
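An async/await variant of the reply above, since this error usually means .db() ran before connect() had resolved. The URI, database and collection names are placeholders.

const { MongoClient } = require("mongodb");

const client = new MongoClient(process.env.MONGODB_URI);

async function main() {
  await client.connect();                 // .db() is only safe once this has resolved
  const docs = await client.db("testdb").collection("coll").find({}).toArray();
  console.log(docs);
  await client.close();
}

main().catch(console.error);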
null
[ "flutter" ]
[ { "code": "class RealmService with ChangeNotifier {\n final String appId = \"xxxx\";\n late final appConfig = AppConfiguration(appId);\n late final _app = App(appConfig);\n\n late final user = _app.currentUser;\n final updatedTimestamp = DateTime.now().millisecondsSinceEpoch;\n var logger = Logger();\n late String _token = '';\n final storage = FlutterSecureStorage();\n\n late userModel.User user2;\n\n static final RealmService _singleton = RealmService._internal();\n\n\n factory RealmService() {\n return _singleton;\n }\n\n RealmService._internal();\n\n User? get currentUser {\n return _app.currentUser;\n }\n @override\n Widget build(BuildContext context) {\n final realmService = Provider.of<RealmService>(context, listen: false);\n customUserData = realmService.currentUser!.customData;\n logger.d(realmService.currentUser!.profile); //<-works\n logger.d(realmService.currentUser!.customData); // <-shows null\n", "text": "Currently i am trying to get the customdata of my loggedin userThe login works fine. But when i try to fetch the customData it for somereason shows null. Meanwhile the profile of the current user shows just fine.Any idea why this is the case. I already had the “Enable Custom User Data” enabled in Atlas.", "username": "Salman_lartey" }, { "code": "user.refreshCustomData()", "text": "Hi @Salman_lartey!\nThe customData are embedded in user access token. You have to be sure that your table has data populated for the user before logging with this user to the app. It is important that the field set to “user ID field” has the user id inserted as data.\nI’m sending one of our samples, which is based on customData security roles. You can find custom data configuration file here.\nHere is an example how the roles, which are set as custom data, are populated for the purpose of this sample.\nFeel free to write back in case you still receive null.\nBy the way, we provide a method user.refreshCustomData() that could also be used in case your custom data items have been added after user login.", "username": "Desislava_St_Stefanova" }, { "code": "class RealmService with ChangeNotifier {\n final String appId = \"xxx\";\n late final appConfig = AppConfiguration(appId);\n late final _app = App(appConfig);\n\n late final user = _app.currentUser;\n final updatedTimestamp = DateTime.now().millisecondsSinceEpoch;\n var logger = Logger();\n late String _token = '';\n final storage = FlutterSecureStorage();\n\n late userModel.User user2;\n\n static final RealmService _singleton = RealmService._internal();\n\nfactory RealmService() {\n return _singleton;\n }\n\n RealmService._internal();\n\n User? get currentUser {\n return _app.currentUser;\n }\n\n Future<userModel.User> logInWithEmail(String email) async {\n\n await _app.emailPasswordAuthProvider\n .registerUser(email, 'myStr0ngPassw0rd');\n print('User registration successful!');\n\n final user =\n await _app.logIn(Credentials.emailPassword(email, 'myStr0ngPassw0rd'));\n print('User login successful!');\n\n logger.d(user.customData);\n final AuthLink authLink = AuthLink(getToken: () async {\n final currentUser = _app.currentUser;\n final accessToken = await currentUser?.accessToken;\n return 'Bearer $accessToken'; // Assuming your server expects the token in the \"Bearer\" format\n });\n\n // user.customData;\n logger.i(currentUser!.customData);\n", "text": "Hi,I still get that it shows null. 
My login method looks like:but even in the method i get that customData = Null.", "username": "Salman_lartey" }, { "code": "Role", "text": "Could you please check whether the id of the user is the same as the “user ID field” in the collection that is set for custom data? In this example custom data collection is Role and “user ID field” is owner_id .\n\nimage1373×997 68.4 KB\n\n\nimage1803×870 73.3 KB\n", "username": "Desislava_St_Stefanova" }, { "code": "", "text": "\nScreenshot 2023-05-17 1106511337×291 14.8 KB\nUsers is my cusom data collection.\nimage1202×622 34.9 KB\n\nimage1591×618 12.3 KB\n", "username": "Salman_lartey" }, { "code": "", "text": "The _id should be equal to user.id.", "username": "Desislava_St_Stefanova" }, { "code": "", "text": "I have changed it to the following:\n\nimage786×707 44.5 KB\n\nand:\n\nimage1557×807 26.9 KB\n\nbut the issue still persists strange enough", "username": "Salman_lartey" }, { "code": "", "text": "I would advice you to add additional field for user_id that is from type String.", "username": "Desislava_St_Stefanova" }, { "code": "", "text": "Even when adding “user_id” to the field and adding it to the database it doesnt work. Do you have a screenshot on how it looks like in the Atlas when you add the userfield?\n\nimage747×327 10.7 KB\n\nimage497×698 31.7 KB\nand\n\nimage1552×841 26.9 KB\n", "username": "Salman_lartey" }, { "code": "", "text": "Now it looks correct to me. Could you please also check if “user_id” is set as queryable field? See the last image.\n\nimage954×870 30.5 KB\n\n\nimage957×969 54.2 KB\n", "username": "Desislava_St_Stefanova" }, { "code": "{\n \"roles\": [\n {\n \"name\": \"readOwnWriteOwn\",\n \"apply_when\": {},\n \"document_filters\": {\n \"write\": {\n \"user_id\": \"%%user.id\"\n },\n \"read\": {\n \"user_id\": \"%%user.id\"\n }\n },\n \"read\": true,\n \"write\": true,\n \"insert\": true,\n \"delete\": true,\n \"search\": true\n }\n ]\n}\n{\n \"roles\": [\n {\n \"name\": \"readOwnWriteOwn\",\n \"apply_when\": {},\n \"document_filters\": {\n \"write\": true,\n \"read\": true\n },\n \"read\": true,\n \"write\": true,\n \"insert\": true,\n \"delete\": true,\n \"search\": true\n }\n ]\n}\n", "text": "Also make sure that the users have permissions to read these data.\nimage1050×838 44.4 KB\nYou can use either filter by user or permissions for all.or for all users:", "username": "Desislava_St_Stefanova" }, { "code": "", "text": "It is not. I have:\n\nimage1367×617 31.4 KB\n\nand:\n\nimage1145×720 34.3 KB\n\n\nimage1596×717 35.9 KB\n", "username": "Salman_lartey" }, { "code": "", "text": "The database name in Device Sync tab looks wrong. It is “todo”.\nAnd what is the permission Rule, can you switch to View mode, so that we can check the json content?", "username": "Desislava_St_Stefanova" }, { "code": "", "text": "Okay i have changed the Device Sync to:\n\nimage867×612 24.8 KB\nAnd the json looks like:\n\nimage966×410 20 KB\n", "username": "Salman_lartey" }, { "code": "owner_iduser_idwrite:false and read:true", "text": "You can remove owner_id queryable field. It was from my example. For you it is user_id. By the way you need this queryable field in case you have set permissions rules based on this field. If your rule is write:false and read:true the queryable field shouldn’t be required.\nIs it working now?", "username": "Desislava_St_Stefanova" }, { "code": "", "text": "No, it isn’t working. 
Is there a reference where i can see an implementation of the custom User data?", "username": "Salman_lartey" }, { "code": "", "text": "Yes, here is that sample that I sent earlier users_permissions.\nHere is the place where the custom data is used in isAdmin extension property .", "username": "Desislava_St_Stefanova" }, { "code": " @override\n void initState() {\n super.initState();\n\n final realmService = Provider.of<RealmService>(context, listen: false);\n final user = realmService.currentUser;\n socketClient.emit('setUser', user!.profile.email);\n\n try {\n setState(() {\n final customUserData = realmService.currentUser?.customData;\n logger.d(customUserData);\n });\n } catch (e) {}\n", "text": "Oke so I manage to find something. When i login with google for the first time, it is null. When i then try to logout and login with the same Google account i get it.but how and why is that possible? Isn’t it the case that you get the customData once you login?", "username": "Salman_lartey" }, { "code": "Credentials.emailPassword", "text": "We will try to reproduce this.\nYou mentioned google, but I saw in the code above you are using Credentials.emailPassword, right?", "username": "Desislava_St_Stefanova" }, { "code": "", "text": "That is correct. First i started with the email/password. But as i was troubleshooting, i thought let me try google. and voilla. I notice that the first login attempt is null. But after the 2nd login, it will give me the customData. With email/password i havent tried it .", "username": "Salman_lartey" } ]
Flutter Realm: User.Customdata is always null
2023-05-16T23:40:47.707Z
Flutter Realm: User.Customdata is always null
1,505
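A Dart sketch of the behaviour described at the end of the thread: customData is embedded in the access token, so it can come back null on the very first login if the custom user data document was created afterwards, and calling refreshCustomData() re-reads it without logging out. The app ID and credentials are placeholders.

import 'package:realm/realm.dart';

final app = App(AppConfiguration('your-app-id'));

Future<void> logInAndLoadCustomData(String email, String password) async {
  final user = await app.logIn(Credentials.emailPassword(email, password));

  var data = user.customData;       // may be null right after the first login
  if (data == null) {
    await user.refreshCustomData(); // pulls the current custom user data document
    data = user.customData;
  }
  print(data);
}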
null
[]
[ { "code": "class Book: Object, ObjectKeyIdentifiable {\n @Persisted(primaryKey: true) var _id: UUID\n @Persisted var name: String\n @Persisted var pages = List<Page>()\n \n @Persisted var ownerId = \"\"\n}\n\nclass Page: Object, ObjectKeyIdentifiable {\n @Persisted(primaryKey: true) var _id: UUID\n @Persisted var label: String\n @Persisted var paragraphs = List<Paragraph>()\n}\n\nclass Paragraph: Object, ObjectKeyIdentifiable {\n @Persisted(primaryKey: true) var _id: UUID\n @Persisted var text: String\n}\n", "text": "I’m using realm flexible sync and I don’t know how to solve the following problem:My app consists of the following domain architecture:The models would look like this:When opening the realm, I subscribe to each of those collections and have defined a role at the atlas backend so every user is allowed to read and write documents where the ownerId is identical to there userId.No I’m wondering how to deal with the related collections. I think it’s not necessary to define the ownerId there as well, because they’re not accessed on their own and just accessible via their parent, at least in the UI. But I think that wouldn’t work considering the defined backend role because there’s no ownerId present.Can someone help me out?", "username": "Andreas_Teich" }, { "code": "bookpagesparagraphs", "text": "Hello @Andreas_Teich ,Welcome to the MongoDB Community You may not need to have separate collections for book, pages, and paragraphs. They could be embedded objects in your book collection.When you subscribe to the queryable fields and perform the query, the embedded objects will be downloaded. Please follow the embedded object section in the documentation.I hope provided information is helpful.Cheers,\nHenna", "username": "henna.s" }, { "code": "", "text": "Many thanks. I already considered that, but from my point of view, pages could get massive and I don‘t want to produce a massive array anti pattern with that. So I would like to solve that problem using relationships Would you have any suggestions here as well?", "username": "Andreas_Teich" }, { "code": "", "text": "Hi @Andreas_Teich ,If I understood you correctly, you have subscribed to separate collections already. You may need to have some id to connect all collections.\nThe defined backend role with ownderid is an example to show how the rules can be defined. You are free to change them as per your requirements.You can define roles and permissions to allow documents you need. You can read more on roles in the documentation.I hope provided information is helpful.Cheers,\nhenna ", "username": "henna.s" } ]
How to sync relationships in realm flexible sync
2023-05-18T10:22:01.949Z
How to sync relationships in realm flexible sync
613
null
[]
[ { "code": "", "text": "Hello,I’m facing a problem with the connection limit on my M0 during a PoC using Event Ordering disabled.\nUnfortunately, I couldn’t see how to or even if it is possible to limit the number of connections used by an Atlas Trigger when processing a large number of requests with the Event Ordering disabled.In a test with 20,000 requests, the connections reached the maximum of 500 connections in 4 minutes resulting in some data loss, where only 18,973 requests were processed.", "username": "Caio_Paula" }, { "code": "", "text": "Hi, we highly recommend not using the shared tier clusters when doing any kind of performance or load testing. There are various limitations placed on both the cluster and app services for M0/M2/M5 clusters.Can you let me know if this is still an issue once you upgrade to an M10/M30?", "username": "Tyler_Kaye" } ]
Limit connections used by Realm Triggers
2023-05-19T05:57:29.029Z
Limit connections used by Realm Triggers
309
null
[]
[ { "code": "", "text": "sir,\n“I am in a big trouble. Can you please help me to solve this? When I try to connect MongoDB to my local server, it’s not connecting but the code is still running. So, I decided to change the IP address from “127.0.0.1:27017” to “localhost”. However, this time it shows an error “ECONNREFUSED”. What should I do? I have watched more than 30 videos on YouTube and tried everything, but still have not found a solution. I have also checked after turning off the firewall and tried another network connection. Please help.”", "username": "Sayandh_Ms" }, { "code": "", "text": "Not able to connect but code is running means?\nHow did you try to connect? By mongo/mongosh?\nIs your mongod up?", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Thank you for your reply. I am showing my source code here. However, when I run this code, my terminal doesn’t display any output or response.\n\nScreenshot (3)1920×1080 189 KB\n", "username": "Sayandh_Ms" }, { "code": "mongodTest-NetConnection 127.0.0.1 -Port 27017", "text": "", "username": "Doug_Duncan" }, { "code": "", "text": "I’ve tried all the solutions to connect to MongoDB Community Server on my system, but it isn’t working at allI tried even using 127.0.0.1:27017\n\nimage912×250 6.8 KB\nCompass and MongoShell are also able to connect,\n\nimage750×587 14.1 KB\n\nimage1630×158 9.4 KB\nHow should I solve this ?\n\nimage998×403 14.5 KB\n", "username": "Arvind_Iyer" }, { "code": "", "text": "(::1) in your error message indicates it is trying to connect with Ipv6 address Switching to 127.0.0.1 should work\nTry to remove ipv6 entry from your config and use ipv4 only\nor it could be your node.js version\nDid you try upgrade/downgrade version?", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Last night when I tried all these solutions it didn’t work\nBut it miraculously worked today. ", "username": "Arvind_Iyer" }, { "code": "", "text": "\ndb1366×768 91.7 KB\n\nplz help everythings is ok username and password", "username": "Subhan_Hassan" }, { "code": "", "text": "Is your cluster up?\nCan you connect to your cluster with shell?", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Can you show your MongoDB config file?", "username": "Aneesh_Augustine" }, { "code": "", "text": "same scene happened to me, literally!! ", "username": "david_dubz" } ]
ECONNREFUSED - 127.0.0.1 to localhost
2023-04-27T17:41:13.386Z
ECONNREFUSED - 127.0.0.1 to localhost
739
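A small sketch of the IPv4 point made above: newer Node.js releases may resolve localhost to the IPv6 address ::1, which a mongod listening only on 127.0.0.1 refuses, so spelling out the IPv4 loopback in the URI avoids the problem. The database name is a placeholder.

const mongoose = require("mongoose");

async function main() {
  await mongoose.connect("mongodb://127.0.0.1:27017/shopDB", {
    serverSelectionTimeoutMS: 5000, // fail fast instead of hanging silently
  });
  console.log("connected");
  await mongoose.disconnect();
}

main().catch(console.error);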
null
[ "aggregation" ]
[ { "code": "[\n {\n $facet: {\n delivered: [\n {\n $match: {\n status: \"DELIVERED\"\n }\n },\n {\n $count: \"count\"\n }\n ],\n total: [{ $count: \"count\" }]\n }\n },\n {\n $unwind: \"$delivered\"\n },\n {\n $unwind: \"$total\"\n },\n {\n $set: {\n deliveredPercentage: {\n $divide: [\"$delivered.count\", \"$total.count\"]\n }\n }\n }\n]\n", "text": "We have a collection of ‘messages’ documents. I’d like to use Charts to present my customers with a visualisation of the number of messages that were delivered. I’ve used answers in this forum to put together the aggregation pipeline shown below, which gives me the percentage delivered for ALL customers. I would like to be able to use an injected function to filter the documents by the existing customerid field, so that each customer can see just their own delivered percentage. How can I do that?", "username": "Phil_Warner" }, { "code": "[\n {\n $group: {\n _id: \"$customerId\",\n total: { $sum: 1 },\n delivered: { $sum: { $cond: [ { $eq: [\"$status\", \"DELIVERED\"]}, 1, 0 ] } }\n }\n },\n {\n $set: {\n deliveredPercentage: {\n $divide: [\"$delivered\", \"$total\"]\n }\n }\n }\n]\n", "text": "Hi @Phil_Warner, thanks for using Charts!\nThe injected filter is applied after the query bar pipeline, which makes this a little tricky but hopefully not unsolvable. Rather than use a $facet, you could change the query to something like this to count the totals per customer ID:This will give you a row per customer ID (see my example below although I’m using different fields). From here you can use an injected filter so that each viewer only sees their own customerid document.\n\nimage1128×863 51.3 KB\n", "username": "tomhollander" }, { "code": "", "text": "Right on the button Tom! Thank you very much Charts was a big factor in the decision to move the Atlas. Thank you.", "username": "Phil_Warner" } ]
Filtering the output of a faceted aggregation pipeline
2023-05-18T14:37:47.240Z
Filtering the output of a faceted aggregation pipeline
679
https://www.mongodb.com/…a292ae2fc413.png
[ "node-js", "containers" ]
[ { "code": "const MongoClient = require(\"mongodb\").MongoClient;\nconst url = \"mongodb://localhost:27017/\";\n//const url = \"mongodb://127.0.0.1:27017\";\n\nMongoClient.connect(url, { useUnifiedTopology: true }, function (err, client) {\n if (err) {\n console.log(\"err\");\n } else {\n console.log(\"Database Connected\");\n }\n});\n", "text": "Hi,\nI am a beginner in mongodb, trying to establish a connection to mongodb from nodejs. There is no error and it just hangs in the terminal window in VS code.\n\nscreeshot854×114 2.66 KB\nI set up the environment variable Path to C:\\Program Files\\MongoDB\\Server\\6.0\\bin\nand ran ‘npm install mongodb’.\n“mongodb://localhost:27017/” is running in my local docker desktop.\n“mongodb://localhost:27017/” succcessfully connects from Mongodb for VS code tool but not connecting from VS code nodejs. Below is my code and any help would be greatly appreciated!!Thanks\nChitra", "username": "Chitra_Krishnan1" }, { "code": "", "text": "Did you try with the line 127.0.0.1 which is commented in your code?", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Hi,\nThanks for your response. Yes I did try 127.0.0.1 but not connecting. I am seeing below errors in the mongod log files.“msg”:“Failed to check socket connectivity”,“attr”:{“error”:{“code”:6,“codeName”:“HostUnreachable”,“errmsg”:“Connection reset by peer”}}}\n“msg”:“Interrupted operation as its client disconnected”,Thanks\nChitra", "username": "Chitra_Krishnan1" } ]
MongoClient.connect not working from nodejs
2023-05-18T16:03:56.943Z
MongoClient.connect not working from nodejs
1,079
https://www.mongodb.com/…8a233858a0b8.png
[ "node-js", "connecting" ]
[ { "code": "", "text": "When I try to connect mongodb with node I get this error.In picture\nIn many tutorial I have seen the solution of the problem can be found by whitelisting the ip address. but trust me I did but nothing happened.\n\nmongohelp2889×322 44.2 KB\n", "username": "Riyad_Sm" }, { "code": "mongosh0.0.0.0/0", "text": "Hello @Riyad_Sm and welcome to the MongoDB community forums. Can you use mongosh from that machine to connect to your Atlas cluster? Have you temporarily added 0.0.0.0/0 to the allow list to see if your Node app could connect? Are you behind a firewall or proxy that is not allowing your connection to go out to the MongoDB cluster?", "username": "Doug_Duncan" }, { "code": "", "text": "I am a beginner . I never used Mongoose.As I said I am a beginner I just did what ever was in the documentation and I am confident I did not commit any error in the code.(In picture I have given My code)\nHow can I find out if I am behind a proxy or firewall?\n-Thanks\n\nmongohelp1841×527 27.1 KB\n", "username": "Riyad_Sm" }, { "code": "", "text": "I forgot to tag you @Doug_Duncan", "username": "Riyad_Sm" }, { "code": "", "text": "How can I find out if I am behind a proxy or firewall?If you are running the code from your house you probably don’t have a firewall or proxy in place, especially if you’re not sure. If you’re running code from a work location however, you might be going through a proxy. You would have to ask your network team.Note that I am able to connect to that cluster (I can’t do anything as I don’t have the credentials), so it appears that you do indeed have the server open to the world for anyone to access.\nimage746×255 28.2 KB\nI would recommend restricting access only to the IP addresses that should be able to access this data. This is extremely important if you plan on having sensitive data stored in the cluster. I would recommend this even with authentication enabled.As for your code, I’m not a Node developer (I’m an admin/ops guy) so I’m not sure if your code is valid or not.", "username": "Doug_Duncan" }, { "code": "", "text": "@Doug_Duncan Yes. I am using from home. My code is correct as far as I know. But still I am getting the error.!!", "username": "Riyad_Sm" }, { "code": "mongosh mongodb+srv://cluster0.hd6phlm.mongodb.net", "text": "I am not sure why you’re getting a time out error. As you can see I was able to connect to the server so the server is accessible from anywhere. I just copied your code and was able to connect to the server without issue, so the code works as well.If you are getting a time out error from running the code then something on your end is blocking the connection from your machine to your atlas server.What happens if you run mongosh mongodb+srv://cluster0.hd6phlm.mongodb.net? Do you get a connection or do you get a timeout?", "username": "Doug_Duncan" }, { "code": "", "text": "I have no idea about mongosh. can you be more specific like what should I change in my code to connect? I would be grateful !", "username": "Riyad_Sm" }, { "code": "", "text": "@Doug_Duncan I used mongoose to connect, now I am getting this error:\nerror : MongooseServerSelectionError: Could not connect to any servers in your MongoDB Atlas cluster. One common reason is that you’re trying to access the database from an IP that isn’t whitelisted. 
Make sure your current IP address is on your Atlas cluster’s IP whitelist: https://docs.atlas.mongodb.com/security-whitelist/\nplease help!", "username": "Riyad_Sm" }, { "code": "", "text": "mongosh is a mongo shell\nA command line tool to connect to your mongodb\nBefore you connect with your app/mongoose check if you can connect by shell\nYou don’t have add anything in your code\nFrom command prompt\nJust run mongosh “your connect string”\nAre you using any VPN,anti virus or other softwares which may be blocking your connection or it could be firewall/proxy issues\nDid you try from another network or mobile hotspot just to check if it works or not on a different network", "username": "Ramachandra_Tummala" }, { "code": "", "text": "@Ramachandra_Tummala I tried with mongoose with proper documentation. Nothing happened . And also I tried with my mobile network as well.still the same result.\nHow can I know that if is a proxy or firewall issue.? I did already try turning of my firewall though. How can I check if it is for proxy issue? Is there anyway to check that?", "username": "Riyad_Sm" }, { "code": "mongosh mongodb+srv://cluster0.hd6phlm.mongodb.net", "text": "What happens if you run mongosh mongodb+srv://cluster0.hd6phlm.mongodb.net ?What is the result of above?\nEven i can connect to your cluster like Doug_Duncan was able to connect\nYou have to contact your network admin if any firewall/proxy causing blocking", "username": "Ramachandra_Tummala" }, { "code": "", "text": "I Don’t have any network admin. I am using an local ISP here. What specific question should I ask to them about proxy? They Don’t seem to know the answer though. And Where exactly should I put that part in my code?", "username": "Riyad_Sm" }, { "code": "", "text": "Do you have mongosh installed on your system?\nJust run mongosh from your terminal command prompt and show the output\nIf shell is available all you have to do is run below\n#mongosh mongodb+srv://cluster0.hd6phlm.mongodb.netAlso check this linkTo see if you’re using a proxy/VPN online, go to www.whatismyproxy.com. It will say if you’re connected to a proxy or not.\n\nPC: Check under you", "username": "Ramachandra_Tummala" }, { "code": "", "text": "I checked the link. It says no proxy was used.\nAnd also I Don’t have mongosh installed and when i searched about that I found no article Where they show how to connect with mongosh with a node.js app. All I found was mongoose Which I already tried.", "username": "Riyad_Sm" }, { "code": "", "text": "@Doug_Duncan @Ramachandra_Tummala\nI tried with mongosh still got the error. I am sending you the screenshot\n\nmongo3956×222 7 KB\n", "username": "Riyad_Sm" }, { "code": "", "text": "As both I and Ramachandra can connect to your Atlas instance, there is nothing from the MongoDB Atlas side that is blocking your connection. This would mean that there is something on your side that is blocking the connections. If you have disabled the firewall and you’re still not able to connect, you might be going through a VPN that is causing issues with you making a connection.", "username": "Doug_Duncan" }, { "code": "", "text": "Hi @Riyad_Sm,Could you try the solution mentioned in the given thread https://www.mongodb.com/community/forums/t/error-in-connect-to-atlas/180790/7?u=kushagra_kesav\nimage.png3170×714 135 KB\nI hope it helps!Thanks,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "", "text": "@Kushagra_Kesav @Ramachandra_Tummala @Doug_Duncan\nThanks for helping me my brothers.\nI solved the problem. 
Basically created another mongodb account with another gmail. And it worked! Maybe something was wrong with my previous gmail.\nPeace!", "username": "Riyad_Sm" }, { "code": "", "text": "11 posts were split to a new topic: ECONNREFUSED - 127.0.0.1 to localhost", "username": "Kushagra_Kesav" } ]
Can't connect MongoDB with Node.js due to MongoServerSelectionError
2022-08-26T17:11:50.601Z
Can't connect MongoDB with Node.js due to MongoServerSelectionError
15,509
null
[]
[ { "code": "", "text": "Would Mongodb be too heavy for a small business to run for something like Etsy stores or a drop shipping business?", "username": "UrDataGirl" }, { "code": "", "text": "", "username": "psram" }, { "code": "", "text": "In general, MongoDB can be more resource-intensive, need three node cluster minimumYou may run with a single instance if data replication and constant availability is not a hard requirement. Backups may be sufficient in some cases. For some low traffic the Atlas free tier can be a viable solution to start with because you can easily migrate to more powerful tier without hardware investment.It uses a document-oriented data model that allows for flexible and dynamic data structures.Absolutely and that is why I use it.", "username": "steevej" }, { "code": "", "text": "Have there been any big problems you’ve experienced?", "username": "UrDataGirl" } ]
Small home business
2023-04-08T18:25:59.015Z
Small home business
1,128
null
[]
[ { "code": "", "text": "I am trying to pass on my MongoDB Atlas lab task in the online course.\nI try to do the following:atlas setup --clusterName myAtlasClusterEDU --provider AWS --currentIp --skipSampleData --username myAtlasDBUser --password myatlas-001 | tee atlas_cluster_details.txtBut I am getting the following error message:Error: unknown flag: --currentIPIdo not know what should be the problem, because I did everything as the description said.", "username": "Anna_Kovacs" }, { "code": "", "text": "Please check what exactly you passed in your command\ncurrentIp vs currentIP", "username": "Ramachandra_Tummala" }, { "code": "", "text": "This topic was automatically closed 60 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Unknown flag --currentIP
2023-05-18T07:26:43.382Z
Unknown flag --currentIP
801
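For reference, the same command with the flag spelled the way the Atlas CLI expects it (--currentIp, lower-case p):

atlas setup --clusterName myAtlasClusterEDU --provider AWS --currentIp --skipSampleData --username myAtlasDBUser --password myatlas-001 | tee atlas_cluster_details.txt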
null
[ "queries" ]
[ { "code": "", "text": "Please help me find the correct wording for the solution I am looking for:Briefly:\nI would like to make API queries from MongoDb to an external API, and then save the data to a collection.More elaborate:\nI would like a backend service that queries a price API and saves the price data to a MongoDb. Then within MongoDb, I can manipulate the data as needed, then provide the “new” data to be exposed to an API for my users.This seems like a simple task, but after extensive research, I cannot find the correct solution. Can someone point me in the right direction?", "username": "enjoywithouthey" }, { "code": "", "text": "Just write a program in Node or Python or Go or PHP or C/C++ that does the REST API and then updates the MongoDB.", "username": "Jack_Woehr" }, { "code": "", "text": "It took me less than 5 minutes to do this on Acho.io (including sign-up for an account)The MongoDb documentation and examples on this are horrific.", "username": "enjoywithouthey" }, { "code": " return EJSON.parse(response.body.text());\n", "text": "I figured it out.", "username": "enjoywithouthey" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Query an external API from within MongoDB
2023-05-19T02:53:04.228Z
Query an external API from within MongoDB
356
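A sketch of an Atlas App Services function built around the one-line solution the poster shared. The price API URL, database, collection and data source name ("mongodb-atlas" is the default) are placeholder assumptions; a scheduled trigger can invoke the function periodically.

exports = async function () {
  const response = await context.http.get({ url: "https://example.com/api/prices" });
  const prices = EJSON.parse(response.body.text()); // as in the poster's solution

  const collection = context.services
    .get("mongodb-atlas")       // default linked data source name
    .db("market")
    .collection("prices");

  await collection.insertOne({ fetchedAt: new Date(), prices });
  return prices;
};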
null
[ "schema-validation" ]
[ { "code": "", "text": "Hi,Is “if-then-else” introduced in Draft 07 supported by MongoDB? My first attempt failed.https://json-schema.org/understanding-json-schema/reference/conditionals.html#if-then-elseThank you,\nPJ", "username": "cloudsurfer22_N_A" }, { "code": "if-then-else", "text": "MongoDB supports draft 4 of JSON Schema, including core specification and validation specification, with some differences. For details, see Extensions and Omissions.if-then-else is draft-07", "username": "Jack_Woehr" } ]
Is “if-then-else” supported in schema validation?
2023-05-18T18:48:11.137Z
Is “if-then-else” supported in schema validation?
736
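Since $jsonSchema stops at draft 4, the usual workaround is to express the conditional with keywords that are supported (anyOf, not). A made-up example enforcing "if country is US then postalCode must be a 5-digit ZIP":

db.createCollection("addresses", {
  validator: {
    $jsonSchema: {
      bsonType: "object",
      required: ["country"],
      properties: { country: { bsonType: "string" } },
      anyOf: [
        // either the "if" part is false (country is not US, or absent)...
        { properties: { country: { not: { enum: ["US"] } } } },
        // ...or the "then" part must hold
        { required: ["postalCode"], properties: { postalCode: { pattern: "^[0-9]{5}$" } } },
      ],
    },
  },
});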
null
[ "aggregation", "atlas-search" ]
[ { "code": "/* 1 */\n{\n \"startDate\" : ISODate(\"2022-11-29T14:28:38.166Z\"),\n \"startTimestamp\" : 1669732118166,\n \"customerName\" : \"Eduardo Bechtelar\"\n}\n\n/* 2 */\n{\n \"startDate\" : ISODate(\"2022-11-29T14:28:38.258Z\"),\n \"startTimestamp\" : 1669732118258,\n \"customerName\" : \"Sylvia Wolf\"\n}\n\n/* 3 */\n{\n \"startDate\" : ISODate(\"2022-11-29T14:28:38.284Z\"),\n \"startTimestamp\" : 1669732118284,\n \"customerName\" : \"Jeremy McLaughlin\"\n}\n\n/* 4 */\n{\n \"startDate\" : ISODate(\"2022-11-29T14:28:38.298Z\"),\n \"startTimestamp\" : 1669732118298,\n \"customerName\" : \"Laura Lynch\"\n}\n\n/* 5 */\n{\n \"startDate\" : ISODate(\"2022-11-29T14:28:38.311Z\"),\n \"startTimestamp\" : 1669732118311,\n \"customerName\" : \"Noel Lubowitz\"\n}\n\n/* 6 */\n{\n \"startDate\" : ISODate(\"2022-11-29T14:28:38.435Z\"),\n \"startTimestamp\" : 1669732118435,\n \"customerName\" : \"Natalie Cummerata DVM\"\n}\n{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"customerName\": {\n \"type\": \"autocomplete\"\n },\n \"startDate\": {\n \"type\": \"date\"\n },\n \"startTimestamp\": {\n \"representation\": \"int64\",\n \"type\": \"number\"\n }\n }\n }\n}\nnearvar now = Date.now()\n\ndb.getCollection('search-tests').aggregate([\n {\n $search: {\n index: \"testingSearchIndex\",\n compound: {\n should: [\n {\n near: {\n path: \"startTimestamp\",\n origin: now,\n pivot: 1,\n }\n },\n ]\n }\n }\n },\n {\n $project: {\n startDate: 1,\n startTimestamp: 1,\n score: { $meta: \"searchScore\" },\n }\n }\n])\n", "text": "I have multiple documents that have dates and timestamps really close relative to each other. Here is the data I have:Here is the index definition:As you can see the timestamps are really closed to each other. The difference is in milliseconds. What I’m trying to do is to sort the items asc or desc based on start timestamp using near operator. What I saw is that search index cannot handle big numbers using timestamps.I tried to sort items descending using the near operator, like this:I was expecting to see items sorted descending based on startTimestamp. But the sorting is kind of random. What am I doing wrong?", "username": "Andrei_Batinas" }, { "code": "nowstartTimestampscore", "text": "Hi @Andrei_Batinas,I believe what you’re seeing is because of the score being the same assuming the variable now is relatively large amount away from the startTimestamp values for your sample documents - Since you projected the score field I presume the score for all those documents were the same?Will send a DM with some more information that may help.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "", "username": "Jason_Tran" } ]
How can I sort MongoDB documents by timestamp using an Atlas Search index?
2023-02-24T19:39:15.846Z
How can I sort MongoDB documents by timestamp using an Atlas Search index?
1,488
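Regarding the sorting thread above: if the goal is simply ascending/descending order by startTimestamp rather than relevance scoring, a plain $sort stage after $search avoids the identical-score problem entirely. This is only a sketch reusing the collection and index names from the thread; newer Atlas Search releases also offer a sort option inside $search itself.

```javascript
db.getCollection("search-tests").aggregate([
  {
    $search: {
      index: "testingSearchIndex",
      exists: { path: "startTimestamp" } // match documents that carry the field
    }
  },
  { $sort: { startTimestamp: -1 } },     // deterministic ordering, newest first
  {
    $project: {
      startDate: 1,
      startTimestamp: 1,
      score: { $meta: "searchScore" }
    }
  }
])
```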
null
[]
[ { "code": "", "text": "I have an RHEL 9 server.\nIs there any way to get Mongodb installed there ?\nI can see “MongoDB Repositories”\nSo it makes me believe version 6 is available for RHEL 9 ? But it cannot be reached by yum ?Installation docs for RHEL only mentions RHEL 8 I tried installing the RHEL 8 package via yum but got these errors. Is there a workaround ?Problem: package mongodb-org-6.0.1-1.el8.x86_64 requires mongodb-org-database, but none of the providers can be installed", "username": "Johann_Carstens" }, { "code": "", "text": "When I have the following baseURL\n“MongoDB Repositories”I get error No match for argument: mongodb-orgsudo yum install -y mongodb-orgUpdating Subscription Management repositories.\nMongoDB Repository 2.1 kB/s | 4.2 kB 00:02\nNode.js Packages for Enterprise Linux 9 - x86_64 36 kB/s | 132 kB 00:03\nNo match for argument: mongodb-org\nError: Unable to find a match: mongodb-org", "username": "Johann_Carstens" }, { "code": "", "text": "ALso tried base URL to 9Server … same error", "username": "Johann_Carstens" }, { "code": "", "text": "where you ever able to resolver this?\nI am having this same issue rigth now", "username": "Felix_Roberto_Read_Rivero" } ]
Installing MongoDB on RHEL 9
2022-09-14T12:28:38.346Z
Installing MongoDB on RHEL 9
3,870
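The RHEL 9 thread above predates official el9 packages. Once MongoDB publishes packages for a given series (this sketch assumes the 6.0 series), the usual fix is a repo file pointing at the redhat/9 path; the exact version in the baseurl and gpgkey is an assumption you should adjust.

```ini
# /etc/yum.repos.d/mongodb-org-6.0.repo
[mongodb-org-6.0]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/redhat/9/mongodb-org/6.0/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-6.0.asc
```

After adding the file, `sudo yum install -y mongodb-org` should resolve the meta-package instead of failing with "Unable to find a match".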
https://www.mongodb.com/…5_2_1023x404.png
[ "atlas-device-sync" ]
[ { "code": "Source:\n\nWrite originated from MongoDB\nLogs:\n[\n \"Upload message contained 2 changeset(s)\",\n \"Integrating upload required conflict resolution to be performed on 0 of the changesets\",\n \"Latest server version is now 36\"\n]\nPartition:\n\nOrder Tracker\nWrite Summary:\n{\n \"OrderClass\": {\n \"deleted\": [\n \"60bb92c3a654cddf5736c5fb\",\n \"60bb893757d59169fcdae14d\"\n ]\n }\n}\n", "text": "The issue is that even though data has been deleted from both Atlas as well as locally, old data continues to reappear and sync. I have deleted the local files and terminated sync in the console and at that point, there is no local data. As soon as Sync is Enabled in the console, data sync’s (two objects) even though there is no data showing in the console.Here’s a screenshot of the collection that previously had a couple of objects but as you can see, there is currently no data\nConsole2024×800 80.8 KB\nWhen the app is run, the local Realm files are created and contain no data as expected. However, as soon as Sync is Enabled in the console, it appears that the old ‘ghost’ data is pushed to the app.I am seeing this message in the console log but it’s not clear if this is indicating the client is pushing data UP to the server or if the server is pushing old deleted data to the client.The local files were manually deleted when the app was not running so this isn’t cached data.Any ideas?", "username": "Jay" }, { "code": "", "text": "Hi, would you mind sharing your application URL (App Services) or just the app_id string that the console provides to you. Both are safe to send, but if you prefer, my email is [email protected]. I should be able to decipher what is going on hopefully by looking at your logs.The UI logs you are sharing here suggest that 2 objects were deleted by a MongoDB user and those writes are being replicated to Sync. Can you confirm that you are terminating sync (in the UI which gives you a warning), waiting 1-2 minutes, enabling sync, and then this is happening? If so I can take a look to see what might be going on.Thanks,\nTyler", "username": "Tyler_Kaye" }, { "code": "", "text": "Thanks @Tyler_KayeHere’s the URLApp ServicesI am the sole user and the only objects (two of them) were deleted in the console when the app was not running. The local files were manually deleted so there should be no data to sync that originates locally.Yes, sync was terminated in the console, I waited the normal 1-2 minutes and the enabled sync and the files appeared locally the next time the app was run.One fun little additional nugget of info:Since I drafted this email yesterday the app has not been used - I am the only person that has the app. There were no objects showing in the console yesterday per my screen shot above.This morning I log into the console to get that URL I posted above and by chance, I checked that same collection and now it has an object - it just appeared out of the blue.I checked the console and I see activity - none of it was app generated so it’s not clear (to me) what that activity means or why it’s there since there are no users and the app is not running. It’s probably server related but why would an object suddenly appear?\nConsole Activity2480×704 78.4 KB\n", "username": "Jay" }, { "code": "", "text": "Hi, looking at your app now. It does seem like there is activity coming from the cluster itself. 
Sync opens a changestream to your synced collections to send down any changes made (updates, inserts, replaces, deletes) by any MongoDB users (via the Atlas Data Explorer / Drivers / Shell). You can tell that the write that you mention is created in Atlas/MongoDB and inserted into sync because the log says that “Write originated from MongoDB”. Can you confirm that no one else is writing to these collections? Because that is what it seems like is happening which is “works as designed”, but if not I can dig deeper into our logging for your application.Thanks,\nTyler", "username": "Tyler_Kaye" }, { "code": "", "text": "@Tyler_KayeI can absolutely guarantee there are literally no other users or anyone reading or writing data. I hold the source code and this is not a published app so nobody has it.The app has not been used since my original post.", "username": "Jay" }, { "code": "", "text": "Ok, I will take another look now, but I was not suggesting that another app was inserting the data but rather that someone was inserting data into the MongoDB collection, which can be done from any Shell/Driver/Atlas-UI", "username": "Tyler_Kaye" }, { "code": "", "text": "I see. Nobody else has access to the account or console.", "username": "Jay" }, { "code": "", "text": "Hey Jay, I took a look at the logs as well and I think I can explain this. When you reset sync, it doesn’t clear the data in the backing collections it just resets the sync history and builds up a synthetic history from the existing state on the server. Expanding the log that says “Write originated from MongoDB” which is an unexpected write from your app’s perspective you can see the object id (60c0c6b0203ccf72b42d2363). Looking further down through the logs, you can see that this object id actually originated from a previous write from your app a day earlier. So it looks like this object was written by the original sync history after you deleted everything from the console. Does that make any sense? If not, at what point did you delete the data from the console, before/during/after sync was disabled?", "username": "James_Stone" }, { "code": "", "text": "actually originated from a previous write from your app a day earlierYes, I would agree it was from a previous write from two days ago - after that two other objects where written. The first object never appeared in the console->collections but the two following ones did which lead me to just blowing out everything and starting over.The sequence I always follow to reset and clear everything is the same. These steps are required any time there’s a destructive change to the models so it happens frequently (and is really overly complex might I add)thenFrom there, we are usually good to go; we can start the app, the fresh object models are created locally and then shortly after the schema appears in the console.However, in this case, we started the app and had two objects sync down from the server (the ghost objects)Then, after crafting the initial post here, and then doing that entire process again yesterday (you can see that in the logs), that object you’re referring to (60c0c6b0203ccf72b42d2363 ) appeared in the console (Collections)The app has not been run so I don’t know where the data is coming from.", "username": "Jay" }, { "code": "", "text": "@James_StoneWe have stepped away for a few days, came back and deleted everything via the console twice, and the ghost objects appear to have abated. 
So… ?", "username": "Jay" }, { "code": "Jun 05 10:24:56-04:00\nWrite from a sync client: https://realm.mongodb.com/groups/5ee10c5ab73ba7359e3b9fa1/apps/5f8afc185d3fe2e9bf73a7da/logs?co_id=60bb8938fcb69e2f9abc2a63\n{\n \"OrderClass\": {\n \"inserted\": [\n \"60bb893757d59169fcdae14d\"\n ]\n }\n}\n\n\nJun 05 11:05:39-04:00\nWrite from a sync client: https://realm.mongodb.com/groups/5ee10c5ab73ba7359e3b9fa1/apps/5f8afc185d3fe2e9bf73a7da/logs?co_id=60bb92c12cb8623cdf9b868c\n{\n \"OrderClass\": {\n \"inserted\": [\n \"60bb92c3a654cddf5736c5fb\"\n ]\n }\n}\n\n\nJun 09 9:48:32-04:00\nWrite from a sync client: https://realm.mongodb.com/groups/5ee10c5ab73ba7359e3b9fa1/apps/5f8afc185d3fe2e9bf73a7da/logs?co_id=60c0c6b0af199486503ae04c\n{\n \"OrderClass\": {\n \"inserted\": [\n \"60c0c6b0203ccf72b42d2363\"\n ]\n }\n}\n\n\nJun 09 9:49:56-04:00\nWrite from the translator: https://realm.mongodb.com/groups/5ee10c5ab73ba7359e3b9fa1/apps/5f8afc185d3fe2e9bf73a7da/logs?co_id=60c0c704fb915d82bea7dafe\n{\n \"OrderClass\": {\n \"deleted\": [\n \"60bb92c3a654cddf5736c5fb\"\n ]\n }\n}\n\nJun 09 9:53:59-04:00\nWrite from a sync client: https://realm.mongodb.com/groups/5ee10c5ab73ba7359e3b9fa1/apps/5f8afc185d3fe2e9bf73a7da/logs?co_id=60c0c7f7af199486504eb917\n{\n \"OrderClass\": {\n \"inserted\": [\n \"60c0c7f6e7fd1cc2012873e7\"\n ]\n }\n}\n\nJun 09 10:17:41-04:00\nWrite from the translator: https://realm.mongodb.com/groups/5ee10c5ab73ba7359e3b9fa1/apps/5f8afc185d3fe2e9bf73a7da/logs?co_id=60c0cd85af19948650635082\n{\n \"OrderClass\": {\n \"deleted\": [\n \"60bb92c3a654cddf5736c5fb\",\n \"60bb893757d59169fcdae14d\"\n ]\n }\n}\n\n\n\nJun 10 10:12:05-04:00\nWrite from the translator: https://realm.mongodb.com/groups/5ee10c5ab73ba7359e3b9fa1/apps/5f8afc185d3fe2e9bf73a7da/logs?co_id=60c21db5d5e92f62ef7ff6f7\n{\n \"OrderClass\": {\n \"replaced\": [\n \"60c0c6b0203ccf72b42d2363\"\n ]\n }\n}\n", "text": "Hi, I’ve been taking a look at your logs and wanted to post an update. Your termination procedure seems good, though I don’t think you should have to restart your Mac (that seems unnecessary), and if you want to simplify the process of making destructive changes, you can terminate sync, make any changes that you want, and then just enable sync (takes some steps / warnings out of the equation).As for this issue you are seeing, I have been unable to reproduce this issue and I do not believe this to be a caching issue because there is no cache that lives between terminating and re-enabling sync. It still just looks to me like there is a document in MongoDB (60c0c6b0203ccf72b42d2363) and that document is modified by the Atlas UI / Data Explorer (those show up as Replaces) at Jun 10 10:12:05-04:00.Has this been happening numerous times or just on a single occasion? It could be an issue with the Free Tier / perhaps there was a lot of replication lag, but that would only cause additional latency (and we cant see this per-user on free-tier clusters, only for dedicated clusters).Blockquote", "username": "Tyler_Kaye" }, { "code": "", "text": "document is modified by the Atlas UI / Data Explorer (those show up as Replaces) at Jun 10 10:12:05-04:00.@Tyler_Kaye Thanks for looking into this. I didn’t want this to turn into a specific tech support thing but was hoping others who have experienced something like this might benefit from the process.I am the only one who has the app and has access to the console so those changes were not don’t by me on the client side. 
It has happened before but resolved itself within a few hours but in this case those objects just kept showing up. In fact, as a test, I opened a brand new Mac that has never been used, created a new user an dropped the app on it and ran it and the ghost objects appeared soon after, even through nothing was showing in the console (as previously mentioned).At this point it has resolved itself and I cannot duplicate it at this time either. Let’s close this out and re-open/file a bug report if I can duplicate in the future.Thanks for all your help.", "username": "Jay" }, { "code": "", "text": "This post was flagged by the community and is temporarily hidden.", "username": "Andrew_Scott" } ]
Ghost Data Syncing
2021-06-09T15:07:40.705Z
Ghost Data Syncing
4,987
null
[ "production", "server" ]
[ { "code": "", "text": "MongoDB 4.4.22 is out and is ready for production deployment. This release contains only fixes since 4.4.21, and is a recommended upgrade for all 4.4 users.Fixed in this release:4.4 Release Notes | All Issues | All DownloadsAs always, please let us know of any issues.– The MongoDB Team", "username": "Britt_Snyman" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB 4.4.22 is released
2023-05-18T20:13:16.408Z
MongoDB 4.4.22 is released
1,071
null
[ "production", "c-driver" ]
[ { "code": "", "text": "Announcing 1.23.4 of libbson and libmongoc, the libraries constituting the MongoDB C Driver.No changes since 1.23.3. Version incremented to match the libmongoc version.Fixes:Thanks to everyone who contributed to this release.", "username": "Kevin_Albertson" }, { "code": "", "text": "Hello all!\nI am trying to install C drivers libmongoc and libbson to use them in Visual Studio Code. I have installed gcc as compiler (MingGW-W64) and install cmake too. Also I downloaded mongo-c-driver-1-23-4-tar.gz and decompress it. When I try to follow the step in documentation to install drivers I recieved errors: C:\\mongo-c-driver-1.23.4\\cmake-build>cmake -G “Visaul Studio 14 2015 Win64\"\"-DCMAKE_INSTALL_PREFIX=c:\\mongo-c-driver”\"-DCMAKE_PREFIX_PATH=c:\\mongo-c-driver\"\nCMake Error: Could not create named generator Visaul Studio 14 2015 Win64\"-DCMAKE_INSTALL_PREFIX=c:\\mongo-c-driver\"-DCMAKE_PREFIX_PATH=c:\\mongo-c-driver\\Well, I do not know if there is more documentation that can help me to use the: #include <mongoc.h>\n#include <bson.h> in my app because at present when I compile the program the headers libraries are not found, eventhough I include the path in the environment variables.\nThank you for your time.", "username": "Alonso_Marquez" }, { "code": "", "text": "Visaul StudioThis may be a typo try: “Visual Studio”", "username": "Kevin_Albertson" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB C Driver 1.23.4 Released
2023-05-08T12:42:35.529Z
MongoDB C Driver 1.23.4 Released
961
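For the build question in the C driver thread: the generator name was misspelled and the arguments were run together without spaces. A corrected invocation, run from the cmake-build directory, would look roughly like this (the install prefix is whatever you chose; with MinGW instead of Visual Studio you would pass -G "MinGW Makefiles"):

```
cmake -G "Visual Studio 14 2015 Win64" "-DCMAKE_INSTALL_PREFIX=C:\mongo-c-driver" "-DCMAKE_PREFIX_PATH=C:\mongo-c-driver" ..
```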
null
[ "swift" ]
[ { "code": "Sync: Realm sync client ([realm-core-13.10.1])\nSync: Connection[1]: Session[1]: Binding '[obfuscatedpath]/flx_sync_default.realm' to ''\nSync: Connection[1]: Session[1]: client_reset_config = false, Realm exists = true, client reset = false\nSync: Connected to endpoint 'x.xx.xxx.xx:xxx' (from 'xxx.xxx.x.xx:xxxxx')\nSync: Connection[1]: Closing the websocket with status='SystemError: invalid certificate chain', was_clean='false'\nSync: Connection[1]: Connection closed due to error\n", "text": "The SwiftUI template todo app is unable to maintain a connection with MongoDB. The app itself works just fine, but it is unable to sync. The steps I’m taking are:I can see the created user in App Services, but no todos are ever synced. Opening the app on a different simulator confirms that there is no syncing happening. The following is output when the app launches and attempts to connect to flexible sync (I’ve removed some identifying info from these logs):", "username": "BartHarleyJarvis" }, { "code": "", "text": "If you’re using Realm, I would suggest going through the SwiftUI Getting Started guide and the following the instructions in the Add Sync section as those have been thoroughly vetted.Ensure appId and appUrl are correct in atlasConfig.plistI am a bit unsure if that’s a correct step - where are you seeing those instructions?", "username": "Jay" }, { "code": "", "text": "Thanks Jay, I’ll give it the Getting Started guide a shot.Those instructions are in the README of the template app, see here:Atlas Template Starter App - SwiftUI. Contribute to mongodb/template-app-swiftui-todo development by creating an account on GitHub.The template app actually downloads with the correct values set in the plist file, so this step was just confirming the values are correct; I didn’t actually have to change anything.", "username": "BartHarleyJarvis" }, { "code": "Sync: Connected to endpoint 'x.xx.xxx.xx:xxx' (from 'xxx.xxx.x.xx:xxxxx')", "text": "Ah - yes, I see. A little different approach than what’s outlined in the guide but the result is the same./// When you create your own Atlas Device Sync app, use your preferred method\n/// to store and access app configuration details.Thanks for the link!This message is certainly vague:Sync: Connected to endpoint 'x.xx.xxx.xx:xxx' (from 'xxx.xxx.x.xx:xxxxx')It’s almost like it doesn’t recognize the endpoint. Did you go through and create the App etc in the Realm Console - maybe you did in step 1?See Create a Template App if notThen, log into the console, navigate to your app, scroll to to Logs and then View All Log Activity and see if there are any message or errors indicated there.Jay", "username": "Jay" } ]
I'm unable to get the SwiftUI template app to maintain a connection over flexible sync
2023-05-16T13:26:27.760Z
I&rsquo;m unable to get the SwiftUI template app to maintain a connection over flexible sync
726
null
[]
[ { "code": "", "text": "Hi there, I am very new to mongodb, I have managed to create a cluster composed by 3 servers 1 primary and 2 secondary, replication is working but I am stuck on how I will connect my application to that cluster and make sure if te primary mongodb server fails my application can connect to the other servers in the cluster.Probably I am not asking a very smart question, but I need some help to where look for the solution, thanks.", "username": "jonathan_canales" }, { "code": "", "text": "Hi @jonathan_canalesWhen a driver connects to a replicaSet it discovers the topology of the replicaSet and will become aware of the other member it can use.Typically all hosts that could become primary(priority > 0 and can vote in replica set elections) are in the connection string, these are used as the seed nodes in the discovery during connection.You can read up on this at, also be sure so check the driver documentation for driver specific options and nuances.", "username": "chris" } ]
Self-hosted cluster connector to application
2023-05-17T15:11:41.912Z
Self-hosted cluster connector to application
641
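To make the replica-set advice above concrete, an application usually lists all three members as seeds and names the replica set; the driver then fails over automatically when the primary goes down. Host names, port and rs0 below are placeholders for your own topology.

```javascript
const { MongoClient } = require("mongodb");

// All three members act as seed nodes; the driver discovers the topology from them
// and reconnects to the new primary after an election.
const uri =
  "mongodb://host1.example.net:27017,host2.example.net:27017,host3.example.net:27017/?replicaSet=rs0";

async function main() {
  const client = new MongoClient(uri);
  await client.connect();
  const count = await client.db("appdb").collection("items").countDocuments();
  console.log(count);
  await client.close();
}

main().catch(console.error);
```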
https://www.mongodb.com/…296c8b28ba46.png
[ "node-js", "mongoose-odm", "compass", "database-tools", "backup" ]
[ { "code": "-dmongorestore -d NHM-local NHM-db-prod/\n", "text": "I restored a DB in my local machine but I made a mistake and put an extra space between -d flag and DB name. command I used looks like thisso the database that was created also had an extra space\nand now I am unable to connect to this DB via mongoose and nodeJS or DROP this via mongoDB Compassputting space while dropping this database makes no difference.please suggest me some way to drop this DB.", "username": "Yashvardhan_N_A" }, { "code": "", "text": "Can you post the versions of your mongodb and mongorestore ?", "username": "chris" }, { "code": "", "text": "MongoDB version - 6.0.5\nmongorestore version - 100.7.0", "username": "Yashvardhan_N_A" }, { "code": "", "text": "I’m struggling to reproduce. What are the OS and shell where you are running mongorestore?Are you able to reproduce from dump through to restore?", "username": "chris" } ]
How to drop a database whose name starts with a space
2023-05-10T11:41:30.893Z
How to drop a database whose name starts with a space
966
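The thread above never posts a resolution, so as a possible approach (untested against that exact setup): mongosh can address a database whose name begins with a space via getSiblingDB with the literal name, which sidesteps the shell helpers and Compass. The database name below assumes a single leading space.

```javascript
// In mongosh, connected to the deployment that holds the mis-named database
const damaged = db.getSiblingDB(" NHM-local"); // note the leading space
damaged.dropDatabase();
```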
null
[]
[ { "code": "", "text": "Hello, everyone.I am building a system using the MERN stack to do it. Is there a way for me to store pdf files or docs files in MongoDB?I’m new to MongoDB. Appreciate any help or suggestions.", "username": "Jason_Bsng" }, { "code": "", "text": "", "username": "steevej" } ]
How to Store Document File (pdf, docs)
2023-05-17T05:09:42.722Z
How to Store Document File (pdf, docs)
564
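For the file-storage question above, the usual options are GridFS (for files that may exceed the 16 MB document limit) or a binary field on a normal document for small files. A minimal GridFS sketch with the Node.js driver, using placeholder database, bucket and file names:

```javascript
const fs = require("fs");
const { MongoClient, GridFSBucket } = require("mongodb");

async function uploadPdf(uri, filePath) {
  const client = new MongoClient(uri);
  await client.connect();

  const bucket = new GridFSBucket(client.db("filesDemo"), { bucketName: "documents" });

  // Stream the file into GridFS; it is stored as chunks plus a files metadata document.
  await new Promise((resolve, reject) => {
    fs.createReadStream(filePath)
      .pipe(bucket.openUploadStream("contract.pdf", { metadata: { kind: "pdf" } }))
      .on("finish", resolve)
      .on("error", reject);
  });

  await client.close();
}

uploadPdf("mongodb://localhost:27017", "./contract.pdf").catch(console.error);
```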
null
[]
[ { "code": "filterthis.sdk = new ChartsEmbedSDK({\n baseUrl: 'BASE_URL',\n height: 500,\n filter: [{\n\t\t$match: matchQuery }\n\t}, {\n\t\t$unwind: unwindQuery\n\t}, {\n\t\t$project: projectQuery\n\t}, {\n\t\t$group: groupQuery\n\t}],\n getUserToken: () => {\n return token\n }\n });\n\n this.chart = this.sdk.createChart({\n chartId: 'CHART_ID'\n });\n\n this.chart.render(document.getElementById(\"element_id\")).catch(() => window.alert('Chart failed to initialise')); \n$matchError loading data for this chart (error code: 7). User filter is not allowed.", "text": "Hi, I am using ChartsEmbedSDK to embed a chart I have created using authenticated JWT token. In MongoDB atlas, I have entered an aggregate query in the query bar to pre-process data.Now, while embedding the chart, the aggregate query has some dynamic values which I want to pass from my app. I could not get the reference to this use-case anywhere in documentation so I tried passing aggregate query in the filter key.\nThis is my code:I have added the field inside $match query in the User Specified filters in Authenticated Embed chart.But, I am still getting this error: Error loading data for this chart (error code: 7). User filter is not allowed.", "username": "Rahul_Chouhan1" }, { "code": "", "text": "Hi @Rahul_Chouhan1,Welcome to MongoDB community.I don’t think you can pass an aggregation pipeline as a filter.The way to try and do that is to use a pipeline in your data source and build the data in a way that you pass field filtering as a plain simple query.Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "_user: { $in: [ObjectID1, ObjectID2, ...] }$match$unwind$project$group$in$cond$gte$sum$last", "text": "\nAgg. query in mongodb charts1920×691 72.1 KB\nThis is how I am processing my data in charts.Now, if I write this aggregate query in the pipeline in the data source as you suggested, I need to pass an array of ObjectIds as you can see in the image, like this_user: { $in: [ObjectID1, ObjectID2, ...] }So, the problem isIt will be really helpful if you explain with an example.", "username": "Rahul_Chouhan1" }, { "code": "$match$project$group", "text": "Hi @Rahul_Chouhan1 -Right now there are a few limitations in what you can do here:This means that while it is possible for you to pass in your ObjectID query through the SDK, it will be inserted after the query bar query, the $project and $group stages will execute before the filter (which probably isn’t what you want).Grouping and projections are automatically generated when you encode fields, so perhaps you could find a way to make this work. 
We do realise the current behaviour can be limiting for advanced scenarios, and we hope to find ways to make it more flexible in the future.Tom", "username": "tomhollander" }, { "code": "\n[{$match: {\n borough: 'Manhattan',\n cuisine: {\n $in: [\n 'Mexican',\n 'Japanese',\n 'Thai',\n 'Southwestern',\n 'Steak',\n 'Soups',\n 'Tapas'\n ]\n }\n}}, {$group: {\n _id: {\n __alias_0: '$geometry',\n __alias_1: '$name',\n __alias_2: '$cuisine'\n }\n}}, {$project: {\n _id: 0,\n __alias_0: '$_id.__alias_0',\n __alias_1: '$_id.__alias_1',\n __alias_2: '$_id.__alias_2'\n}}, {$project: {\n geopoint: '$__alias_0',\n detail: '$__alias_1',\n color: '$__alias_2',\n _id: 0\n}}, {$match: {\n 'geopoint.type': 'Point',\n 'geopoint.coordinates': {\n $type: 'array'\n },\n 'geopoint.coordinates.0': {\n $type: 'number',\n $ne: {\n $numberDouble: 'NaN'\n },\n $gte: -180,\n $lte: 180\n },\n 'geopoint.coordinates.1': {\n $type: 'number',\n $ne: {\n $numberDouble: 'NaN'\n },\n $gte: -90,\n $lte: 90\n }\n}}, {$limit: 1000}]\n", "text": "I just noticed that you can obtain the aggregation pipeline used by MongoDB Charts when you edit a chart, but what are you supposed to do with it, and more importantly could it be used to make the chart more dynamic (and not just in the future). Is it available now as a diagnostic only? This is more just for curiosity than anything practical.", "username": "Ilan_Toren" }, { "code": "", "text": "Hi @Ilan_Toren -Some people use the pipeline to diagnose why the chart is showing the data it is, or as an educational tool to learn more about the aggregation framework. Advanced users may also want to copy and paste the pipeline into the chart query bar, so they can make their own tweaks to the pipeline.There isn’t currently any way of programmatically modifying a chart’s pipeline, although we are thinking about some of these scenarios. If you have any specific suggestions on what you’d like to see, feel free to suggest them here or in the MongoDB Feedback Engine.Tom", "username": "tomhollander" }, { "code": "", "text": "We re-use the exposed aggregation pipeline in our back end as the basis for a full export of the resulting data. This is helpful when the chart can only reasonably display 20-30 rows of data without becoming too crowded. Our customers can then do what they want with the full data set.", "username": "Phil_Warner" } ]
How to pass aggregate query while embedding MongoDB chart to pre-process the data?
2021-05-27T16:06:21.690Z
How to pass aggregate query while embedding MongoDB chart to pre-process the data?
5,483
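To make the conclusion of the Charts thread concrete: the SDK cannot inject a whole pipeline, but it can pass a plain $match-style filter whose fields are whitelisted under "User Specified Filters" on the embedded chart; grouping and projection stay in the chart's query bar or data source pipeline. The field name and ids below are placeholders.

```javascript
import ChartsEmbedSDK from "@mongodb-js/charts-embed-dom";

const sdk = new ChartsEmbedSDK({
  baseUrl: "BASE_URL",
  getUserToken: () => token, // same authenticated-embedding setup as in the thread
});

const chart = sdk.createChart({
  chartId: "CHART_ID",
  // A plain query document, not a pipeline; "_user" must be listed
  // under the chart's "User Specified Filters".
  filter: { _user: { $in: [userId1, userId2] } },
});

await chart.render(document.getElementById("element_id"));
```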
null
[ "node-js", "data-modeling" ]
[ { "code": "", "text": "Hello everyone,I have added a new field in my existing schema and tried to insert the data. Insertion is successful without my newly added field in the schema model.I can’t delete my collection or move the data to a new collection. I want to create documents with my updated schema model.Instruct me guys, how can I get my new entry with my updated schema?Your support is appreciable!", "username": "Deepanshu_Bedi" }, { "code": "", "text": "Hey @Deepanshu_Bedi,Welcome to the MongoDB Community Forums! In order to better able to understand your problem and help you, can you please share the following with us:Regards,\nSatyam", "username": "Satyam" } ]
Why are documents not inserting with the updated schema model?
2023-05-11T05:08:10.618Z
Why are documents not inserting with the updated schema model?
687
null
[ "aggregation" ]
[ { "code": "", "text": "My collection has 100 million documents. There is a counter field with values from 1 to 100 million uniquely assigned to each document and indexed.If a query needs to be run on this collection to find documents that match certain filter conditions, how do enable currency?For example, the collection could be divided into 100 parts. And then there would be 100 concurrent threads searching those 100 parts separately.How can the above be achieved?", "username": "Big_Cat_Public_Safety_Act" }, { "code": "", "text": "I can’t believe such thing exists in any well known DBMS, but let me know if there’s one.In most cases, all indexes only sit in memory when db is running, so why bother using multiple threads on it?the collection could be divided into 100 parts. And then there would be 100 concurrent threads searching those 100 parts separatelythis is much easier said than done. And i even doubt if this can really make the queries faster.", "username": "Kobe_W" } ]
How to enable concurrency in queries?
2023-05-17T23:54:16.783Z
How to enable concurrency in queries?
315
null
[ "aggregation", "python", "crud" ]
[ { "code": "", "text": "Hi,I have multiple threads using UpdateOne inside a bulk write operation to update documents inside the database. I am trying to modify an item inside an array based on whether it exists or not and I also have an additional check inside the query parameter of UpdateOne which checks whether the added item is the latest or not based on the timestamp. For some reason, the update picks up the older timestamped item even though it receives the new timestamp item. My hypothesis is that, two updates happen at the same time, checks that the document does not contain the item inside the array and each of them overwrite on one another. This only happens for a very small number of times. My questions is, is there a way to make it thread safe such that if two updates happen at the same time, both of them happen serially, rather than in parallel? Is the query param and the aggregation pipeline param thread safe, do they happen in one lock step? If not, what can I do to mitigate this?For ex: I have UpdateOne({id: ‘1234’, … additional condition to check if the item exists inside the array or not}, [{…aggregation pipeline to set the item inside the array which uses concatArray}]}Let me know if you need additional clarifications.", "username": "Sangeet_K" }, { "code": "", "text": "Single updates are atomic, but you should also learn about Transactions in MongoDB", "username": "Jack_Woehr" }, { "code": "", "text": "Hi Jack,Thank you for getting back. The thing that I am curious about is whether transaction will do the trick for me. Assuming I have two transactions trying to update the same document, the query condition in UpdateOne, how does it work? Will it check the database and see that a record does not exist and both of the transactions will update the same document hence a race condition since I have the condition to check whether a document exists or not is inside the query filter. Or will it happen in a serialized fashion, meaning transaction 1 will complete in one lock step followed by another transaction reading that value. Sorry for my lack of understanding here.", "username": "Sangeet_K" }, { "code": "updateupsertupdatefalse", "text": "There is update and there is the upsert boolean option to an update.An update of any sort is atomic. It may succeed or fail (e.g., if you update with upsert set to false and the document does not exist), but if it succeeds, no other action was performed on that document until the update was complete.With multiple threads in your application, there is no way of telling what order the updates will happen, but you know that they didn’t write at the same time.In many situations, there is no such thing as “just an update”, there’s more happening, so you want to wrap the steps in a transaction.", "username": "Jack_Woehr" }, { "code": "", "text": "Check my answers in this post.Two single doc update operation will not conflict with each other. If only one doc matches the filter condition and it is updated by the first update op, then second op will do nothing. in this case, they will happen sequentially.", "username": "Kobe_W" } ]
Is UpdateOne thread safe?
2023-05-17T00:59:43.437Z
Is UpdateOne thread safe?
1,244
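Since the accepted direction in the thread above is "wrap the steps in a transaction", here is a minimal PyMongo sketch of a read-then-write done atomically. Names such as doc_id and new_item are placeholders, and transactions require a replica set or sharded cluster.

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017/?replicaSet=rs0")  # placeholder URI
items = client["mydb"]["items"]

def apply_latest(doc_id, new_item):
    with client.start_session() as session:
        with session.start_transaction():
            current = items.find_one({"_id": doc_id}, session=session)
            # Only keep the newest item; two sessions writing the same document
            # raise a write conflict instead of silently overwriting each other.
            if current is None or current.get("latest", {}).get("ts", 0) < new_item["ts"]:
                items.update_one(
                    {"_id": doc_id},
                    {"$set": {"latest": new_item}},
                    upsert=True,
                    session=session,
                )
```

PyMongo's session.with_transaction(callback) variant adds automatic retry on transient conflict errors.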
null
[]
[ { "code": "`{\n \"_id\": 1,\n \"item\": \"abc\",\n \"price\": 10,\n \"quantity\": 2,\n \"product\": \"A\"\n },\n {\n \"_id\": 2,\n \"item\": \"jkl\",\n \"price\": 20,\n \"quantity\": 1,\n \"product\": \"B\"\n }`\n`{$set:{\n \"product\": { $switch : {\n branches: [\n {case : \"A\", then : \"B\"},\n {case : \"B\", then : \"A\"},\n ]\n }}\n }}`\n`{\n \"_id\": 1,\n \"item\": \"abc\",\n \"price\": 10,\n \"quantity\": 2,\n \"product\": {\n \"$switch\": {\n \"branches\": [\n {\n \"case\": \"A\",\n \"then\": \"B\"\n },\n {\n \"case\": \"B\",\n \"then\": \"A\"\n }\n ]\n }\n }\n }`\n`{\n \"_id\": 1,\n \"item\": \"jkl\",\n \"price\": 20,\n \"quantity\": 1,\n \"product\": \"B\"\n }`\n", "text": "My original document was like thisI used the below query to swap the product A and B, using the $set and $switch operatorsMy document got updated like thisCan any one explain this behaviour? I was expecting a result like below", "username": "sandeep_s1" }, { "code": "updatedb.collection.updateMany()`{\n \"_id\": 1,\n \"item\": \"abc\",\n \"price\": 10,\n \"quantity\": 2,\n \"product\": \"A\"\n }\n`{\n \"_id\": 1,\n \"item\": \"jkl\",\n \"price\": 20,\n \"quantity\": 1,\n \"product\": \"B\"\n }`\n$set_id: 1item\"abc\"item[\n { _id: 1, item: 'abc', price: 10, quantity: 2, product: 'B' }, /// <-- 'A' switched with 'B' for document _id: 1\n { _id: 2, item: 'jkl', price: 20, quantity: 1, product: 'A' } /// <-- 'B' switched with 'A' for document _id: 2\n]\nmyFirstDatabase> db.docs.find()\n[\n { _id: 1, item: 'abc', price: 10, quantity: 2, product: 'A' },\n { _id: 2, item: 'jkl', price: 20, quantity: 1, product: 'B' }\n]\nupdateMany()myFirstDatabase> db.docs.updateMany({},\n[\n {\n '$set': {\n product: {\n '$switch': {\n branches: [\n { case: { '$eq': [ '$product', 'A' ] }, then: 'B' },\n { case: { '$eq': [ '$product', 'B' ] }, then: 'A' }\n ]\n }\n }\n }\n }\n])\n{\n acknowledged: true,\n insertedId: null,\n matchedCount: 2,\n modifiedCount: 2,\n upsertedCount: 0\n}\n/// Output AFTER the update above\nmyFirstDatabase> db.docs.find()\n[\n { _id: 1, item: 'abc', price: 10, quantity: 2, product: 'B' },\n { _id: 2, item: 'jkl', price: 20, quantity: 1, product: 'A' }\n]\n", "text": "Hi @sandeep_s1,Can you share the full command? I believe it’s working as expected if you’re passing through a document for the update - Refer to the db.collection.updateMany() parameters for more details.My original document was like thisCan any one explain this behaviour? I was expecting a result like belowBased off your $set details, the document with _id: 1 should have the item value equal to \"abc\" still. I assume this might’ve been a typo and your expected output would be the following instead but please correct me if I am wrong since there is no details of the item field value being changed in the update opreation:On my test environment I have the same 2 sample documents:I perform the following updateMany() with the below output:Hope this helps.If you believe this works for you, please test thoroughly on a test environment first to verify it suits all your use case and requirements.Regards,\nJason", "username": "Jason_Tran" }, { "code": "{\n \"_id\": 1,\n \"item\": \"abc\",\n \"price\": 10,\n \"quantity\": 2,\n \"product\": \"B\"\n }\n`db.testCollection.update(\n {},\n { $set: {\n \"product\": { $switch : {\n branches: [\n { case : \"A\", then : \"B\" },\n { case : \"B\", then : \"A\" },\n ]\n }}\n }}\n)`\n", "text": "@Jason_Tran , the “item” field value is “abc” only its just a typo from my side. My concern was with product field. 
My expectation was like thisMy query was like this", "username": "sandeep_s1" }, { "code": "", "text": "@Jason_Tran , thank you for the correct query. Now it is working.", "username": "sandeep_s1" }, { "code": "", "text": "Glad to hear it. FWIW I would refer to the Update Operators documentation to understand why the original query was generating the results you were experiencing.Thanks for updating this post as well.Jason", "username": "Jason_Tran" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
$set operator not working as given in the documentation
2023-05-17T07:39:23.388Z
$set operator not working as given in the documentation
513
null
[ "aggregation", "queries", "java", "morphia-odm" ]
[ { "code": " return Engine.getInstance().getDatabaseManager().datastore.find(EngineKingdom.class).filter(\n Filters.and(\n Filters.lte(\"rootLocation.x\", location.x - 1024)\n )\n ).first();\n@Entity(value = \"kingdoms\")\n@NoArgsConstructor\n@Setter\n@Getter\npublic class EngineKingdom extends PKingdom {\n public @Id ObjectId id;\n private long timestamp;\n private String server;\n@Getter\n@Setter\npublic class PKingdom {\n private UUID uuid = UUID.randomUUID();\n private Location rootLocation;\n private Location spawnLocation;\n private int size;\npublic class Location {\n public String world;\n public int x;\n public int y;\n public int z;\n[16:25:47 ERROR]: java.lang.reflect.InvocationTargetException\n[16:25:47 ERROR]: at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n[16:25:47 ERROR]: at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)\n[16:25:47 ERROR]: at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n[16:25:47 ERROR]: at java.base/java.lang.reflect.Method.invoke(Method.java:568)\n[16:25:47 ERROR]: at dev.lightdream.redismanager.manager.RedisEventManager$EventClass.fire(RedisEventManager.java:199)\n[16:25:47 ERROR]: at dev.lightdream.redismanager.manager.RedisEventManager$EventClass.access$500(RedisEventManager.java:166)\n[16:25:47 ERROR]: at dev.lightdream.redismanager.manager.RedisEventManager$EventObject.fire(RedisEventManager.java:161)\n[16:25:47 ERROR]: at dev.lightdream.redismanager.manager.RedisEventManager$EventObject.access$200(RedisEventManager.java:114)\n[16:25:47 ERROR]: at dev.lightdream.redismanager.manager.RedisEventManager.fire(RedisEventManager.java:106)\n[16:25:47 ERROR]: at dev.lightdream.redismanager.event.RedisEvent.fireEvent(RedisEvent.java:72)\n[16:25:47 ERROR]: at dev.lightdream.redismanager.manager.RedisManager$1.lambda$onMessageReceive$0(RedisManager.java:152)\n[16:25:47 ERROR]: at java.base/java.lang.Thread.run(Thread.java:833)\n[16:25:47 ERROR]: Caused by: dev.morphia.query.ValidationException: Could not resolve path 'root_location.x' against 'com.pokeninjas.kingdoms.velocity.database.impl.EngineKingdom'. 
Unknown path element: 'root_location'.\n[16:25:47 ERROR]: at dev.morphia.internal.PathTarget.failValidation(PathTarget.java:155)\n[16:25:47 ERROR]: at dev.morphia.internal.PathTarget.resolve(PathTarget.java:184)\n[16:25:47 ERROR]: at dev.morphia.internal.PathTarget.translatedPath(PathTarget.java:124)\n[16:25:47 ERROR]: at dev.morphia.query.filters.Filter.path(Filter.java:167)\n[16:25:47 ERROR]: at dev.morphia.query.filters.Filter.encode(Filter.java:66)\n[16:25:47 ERROR]: at dev.morphia.query.filters.LogicalFilter.lambda$encode$0(LogicalFilter.java:40)\n[16:25:47 ERROR]: at dev.morphia.aggregation.codecs.ExpressionHelper.document(ExpressionHelper.java:81)\n[16:25:47 ERROR]: at dev.morphia.query.filters.LogicalFilter.encode(LogicalFilter.java:40)\n[16:25:47 ERROR]: at dev.morphia.query.MorphiaQuery.lambda$getQueryDocument$0(MorphiaQuery.java:392)\n[16:25:47 ERROR]: at dev.morphia.aggregation.codecs.ExpressionHelper.document(ExpressionHelper.java:81)\n[16:25:47 ERROR]: at dev.morphia.query.MorphiaQuery.getQueryDocument(MorphiaQuery.java:389)\n[16:25:47 ERROR]: at dev.morphia.query.MorphiaQuery.toDocument(MorphiaQuery.java:265)\n[16:25:47 ERROR]: at dev.morphia.query.MorphiaQuery.iterable(MorphiaQuery.java:351)\n[16:25:47 ERROR]: at dev.morphia.query.MorphiaQuery.prepareCursor(MorphiaQuery.java:371)\n[16:25:47 ERROR]: at dev.morphia.query.MorphiaQuery.iterator(MorphiaQuery.java:228)\n[16:25:47 ERROR]: at dev.morphia.query.MorphiaQuery.first(MorphiaQuery.java:200)\n[16:25:47 ERROR]: at dev.morphia.query.MorphiaQuery.first(MorphiaQuery.java:195)\n[16:25:47 ERROR]: at com.pokeninjas.kingdoms.velocity.database.impl.EngineKingdom.get(EngineKingdom.java:84)\n[16:25:47 ERROR]: at com.pokeninjas.kingdoms.velocity.module.kingdoms.manager.RedisListener.getKingdomFromLocation(RedisListener.java:56)\n[16:25:47 ERROR]: at com.pokeninjas.kingdoms.velocity.module.kingdoms.manager.RedisListener.onKingdomRequest(RedisListener.java:38)\n[16:25:47 ERROR]: ... 12 more\n", "text": "I am trying to get every EngineKingdom instance with the rootLocation.x smaller than 1000\nI have tried", "username": "Radu_Voinea" }, { "code": "Location@Entity", "text": "Morphia isn’t aware of that Location type. Put @Entity on it so that Morphia knows to map it and you should be good to go.", "username": "Justin_Lee" } ]
Morphia nested documents
2023-05-17T23:32:44.269Z
Morphia nested documents
831
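To spell out the fix from the reply above: the embedded Location type has to be mapped so Morphia can resolve paths like rootLocation.x. Assuming Morphia 2.x package names:

```java
import dev.morphia.annotations.Entity;

@Entity // lets Morphia map the type, so "rootLocation.x" resolves in filters
public class Location {
    public String world;
    public int x;
    public int y;
    public int z;
}
```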
null
[]
[ { "code": "", "text": "Hello,\nI have MongoDB running on Ubuntu 22.04 and have my drive formatted as XFS. Some time in the past few days my 512GB drive was suddenly full because of the /var/log/mongodb log file. Now MongoDB won’t start.\nI’ve read about enabling log rotation, but I cannot start MongoDB to enable this.\nI have a lot of data in MongoDB that I have not backed up in a few weeks so really want to be careful with any steps I take.What options are possible to get it working?", "username": "Scott_N_A2" }, { "code": "", "text": "I suppose you can just remove the log file. It’s for logging only, and doesn’t have any real user data.At startup mongodb will create the log file if not exist.", "username": "Kobe_W" }, { "code": "", "text": "That’s what I was wondering. But I had read that removing the log file could result in data loss or problems with the database. Do you think that is a real concern?", "username": "Scott_N_A2" }, { "code": "", "text": "But I had readfrom where? log files are for troubleshooting only and any operation on it should never cause user data loss.", "username": "Kobe_W" }, { "code": "truncate -s0 /var/log/mongodb/mongod.loggzippkill -USR1 -x mongod", "text": "If you’re feeling paranoid you can:truncate -s0 /var/log/mongodb/mongod.log this will keep everything about the file the same except its contents.If I ever come accross asituation like this I gzip the offending file in place and pkill -USR1 -x mongod as the mongod is not running in your installation just start mongod as @Kobe_W has suggested.", "username": "chris" }, { "code": "", "text": "Thanks for the suggestions so far.I gzipped the file to another directory to clear the space off my XFS /var mount. It compressed down to just 20mb.I then ran the truncate command and then systemctl started mongod. Just connected with Compass and confirmed all is just fine.Thanks a lot!\nNow I can set up log rotation to prevent this happening again.", "username": "Scott_N_A2" } ]
Disk out of space, Can't start MongoDB
2023-05-17T18:39:54.798Z
Disk out of space, Can&rsquo;t start MongoDB
503
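For the full-disk thread above, one common follow-up setup (assuming the stock Ubuntu package paths and service user) is to let logrotate rotate and compress the file while mongod reopens it on SIGUSR1. The size and retention numbers are arbitrary placeholders, and systemLog.logRotate should be set to reopen (with logAppend: true) in /etc/mongod.conf for this pairing to work as intended.

```
# /etc/logrotate.d/mongod
/var/log/mongodb/mongod.log {
    daily
    size 100M
    rotate 7
    compress
    delaycompress
    missingok
    notifempty
    sharedscripts
    postrotate
        /usr/bin/pkill -USR1 -x mongod || true
    endscript
}
```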
null
[ "aggregation" ]
[ { "code": "const preliminaryResults = await Entity.aggregate(prePipeline).exec();\n\nconst results = await Entity.aggregate([ ...prePipeline, ...postPipeline ]).exec();\nconst postSortPipeline = [{ $skip: skip }, { $limit: limit }]; // skip = 3 and limit = 3", "text": "I have an aggregation pipeline that is returning unexpected results. It’s a long pipeline so I started splitting it and seeing what the results from the first half were before applying both pipelines together. like so:each entity has a field called serialNumber and the results after the preliminary pipeline are getting what I would expect, in the order I would expect. They’re listed in order (serialNumber: 1, then serialNumber: 2, etc.). However, when I apply the postPipeline:const postSortPipeline = [{ $skip: skip }, { $limit: limit }]; // skip = 3 and limit = 3I get weird results. I get the document where the serialNumber is 1, the document where the serialNumber is 3 and the document where the serialNumber is 6. Any ideas what might be going on with this?", "username": "Daniel_Geiger" }, { "code": "", "text": "If you want a specific order you must sort.", "username": "steevej" } ]
$skip and $limit are not behaving as expected. They seem to skip random results in the list of results
2023-05-17T19:56:53.744Z
$skip and $limit are not behaving as expected. They seem to skip random results in the list of results
379
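The answer to the $skip/$limit thread is one line in practice: without an explicit $sort the server may return matching documents in any order, so paging must sort first. Using the variable names from the question:

```javascript
const postSortPipeline = [
  { $sort: { serialNumber: 1 } }, // make the order deterministic before paging
  { $skip: skip },
  { $limit: limit },
];

const results = await Entity.aggregate([...prePipeline, ...postSortPipeline]).exec();
```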
null
[]
[ { "code": "", "text": "Looking at the support policies: MongoDB Software Lifecycle Schedules | MongoDB this page doesn’t seem to have much information about what kind of support to expect related to a product that has reached the end of its support lifecycle.For example, we are currently using a mongoDB 4.4 cluster hosted with mongoDB Atlas, what kind of changes in support and maintenance can we expect come the end of 4.4’s lifecycle in February of 2024?", "username": "Adam_Stone" }, { "code": "", "text": "Hi @Adam_StoneWhen EoL is approaching I believe the Org Owner and Project Owner roles should be receiving emails on the impending EoL and encouraging them to upgrade.If no extension agreement has been reached with MongoDB then the cluster would be automatically upgraded to the next major release on the EoL date.", "username": "chris" } ]
MongoDB software lifecycle schedules, what do they actually mean?
2023-05-16T06:18:14.911Z
MongoDB software lifecycle schedules, what do they actually mean?
686
null
[]
[ { "code": "", "text": "Hi all!I am requesting a mirror for fastdl.mongodb.org downloads in my company’s Artifactory server and I would need to know which is the repository type.The only thing I have found about this on the internet is: MVN repo and downloads of mongo artifact · Issue #69 · joelittlejohn/embedmongo-maven-plugin · GitHub, would it just be a Generic repository with that custom layout?Thank you in advance!\nFrancisco Robles Martín", "username": "froblesmartin" }, { "code": "", "text": "Hi,\nI would also like to add mongodb binaries to my company’s Artifactory using a remote repository.\nAnyone knows how to that ?", "username": "Paul_Souteyrat" } ]
Which repository type is fastdl.mongodb.org?
2020-08-10T08:23:05.173Z
Which repository type is fastdl.mongodb.org?
3,998
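Following up on the Artifactory thread: since fastdl.mongodb.org serves plain tarballs and zips, a remote repository of type generic pointed at that host is the usual shape. The repository key, host and credentials below are assumptions, and the exact JSON accepted by your Artifactory version should be checked against its REST API docs.

```
# Create a generic remote repository that proxies fastdl.mongodb.org (sketch)
curl -u admin:password -X PUT \
  "https://artifactory.example.com/artifactory/api/repositories/mongodb-remote" \
  -H "Content-Type: application/json" \
  -d '{
        "key": "mongodb-remote",
        "rclass": "remote",
        "packageType": "generic",
        "url": "https://fastdl.mongodb.org"
      }'
```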
null
[ "java", "change-streams", "spring-data-odm" ]
[ { "code": "", "text": "Hello,I use a Mongodb Kafka source connector to do CDC, and I would like to transform the fullDocument of the CDC messages before saving them to the target collection. The transformation is quite complex, so I choose to do it in a Kafka Stream instead of a mongo pipeline, and to push the transformed messages in a new topic and save them to mongo with kafka connect sink.\nI use Spring Cloud Stream for the streaming of the CDC messages, and all works fine, but the deserialization of the BSON document in POJO needs “a lot” of custom code with codec registries etc (compared to Spring Data Mongo queries for example). As Kafka seems recommanded by Mongo for CDC, is there a simple and automatic way to deserialize CDC messages from topics?Thanks in advance,", "username": "David_BERTRAND" }, { "code": "", "text": "Hello @David_BERTRAND,Short answer - No.I’m sorry it’s taken so long for this to get answered, but in short no, you will need to use Pojo, if you really want to be an amazing help in the future, it would be to take Pojo and launch some of your own pushes to it with custom code templates to initiate this.I’ve been looking at integrating Kafka with Java, as well as implementing Kafka with MongoDB Device Sync, but like you’re noticing it takes a lot of custom code and time.", "username": "Brock" }, { "code": "", "text": "Hello,I would be happy to help improving this part. My idea was to provide a Kafka Stream Serdes for class com.mongodb.client.model.changestream.ChangeStreamDocument to be able to serialize/deserialize automatically the CDC message by configuration. But I don’t know if it can be a mongodb related code?", "username": "David_BERTRAND" } ]
Mongo CDC and Kafka Stream
2023-03-29T15:47:03.546Z
Mongo CDC and Kafka Stream
1,098
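There is no ready-made Serde for change-stream documents in the thread above, but if the source connector publishes the change event as a JSON string (the connector's default output format), a Kafka Streams mapValues step can pull out the fullDocument with the plain BSON Document API and far less codec wiring. Topic names are placeholders and string keys/values are assumed.

```java
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.KStream;
import org.bson.Document;

public class CdcTopology {
    public static Topology build() {
        StreamsBuilder builder = new StreamsBuilder();

        // "cdc.source" and "cdc.transformed" are placeholder topic names.
        KStream<String, String> events = builder.stream("cdc.source");

        KStream<String, String> fullDocs = events.mapValues(json -> {
            Document event = Document.parse(json);                      // extended-JSON change event
            Document fullDocument = event.get("fullDocument", Document.class);
            return fullDocument == null ? null : fullDocument.toJson(); // payload for the sink topic
        });

        fullDocs.to("cdc.transformed");
        return builder.build();
    }
}
```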
null
[]
[ { "code": "", "text": "Hi Everyone,\nI am new here,\nI have a scenarion, say I am using updateAndModify method and marking the status as “processing” for that document and at the same time if a get request comes for the same document will the old data will be returned for the get request or it will wait for the update operation to complete and then pass new data back,\nAlso wanted to understand, if we are writing in master server, at the same time will the slave get updated or not.", "username": "Shubhankar_Ghosh" }, { "code": "", "text": "if we are writing in master server, at the same time will the slave get updated or not.Yes, but not at the same time. Replication takes time, even in light speed.will the old data will be returned for the get request or it will wait for the update operation to complete and then pass new data backCan be either. The read either happens before the update or happens after the update.\nThat being said, you will never see a “tearing” value. (meaning, only half write goes through).", "username": "Kobe_W" }, { "code": "", "text": "Thanks Kobe for clarifying my doubts. Much Appreciated.", "username": "Shubhankar_Ghosh" } ]
How update works internally
2023-05-11T11:28:59.324Z
How update works internally
398
https://www.mongodb.com/…e_2_1024x611.png
[ "aggregation", "java", "containers", "change-streams", "spring-data-odm" ]
[ { "code": "ocean-docker-server-elastic-search-1 | 2023-05-04 20:03:16,697 ERROR r.c.p.Operators@[annel-group-0-handler-executor] - Operator called default onErrorDropped\nocean-docker-server-elastic-search-1 | reactor.core.Exceptions$ErrorCallbackNotImplemented: com.mongodb.MongoCommandException: Command failed with error 8000 (AtlasError): 'Error modifying $match value for change streams / $currentOp. err=Oplog ns RegEx queries must begin with ^' on server ac-3htpqvx-shard-00-02.hor3ln3.mongodb.net:27017. The full response is {\"ok\": 0, \"errmsg\": \"Error modifying $match value for change streams / $currentOp. err=Oplog ns RegEx queries must begin with ^\", \"code\": 8000, \"codeName\": \"AtlasError\"}\nocean-docker-server-elastic-search-1 | Caused by: com.mongodb.MongoCommandException: Command failed with error 8000 (AtlasError): 'Error modifying $match value for change streams / $currentOp. err=Oplog ns RegEx queries must begin with ^' on server ac-3htpqvx-shard-00-02.hor3ln3.mongodb.net:27017. The full response is {\"ok\": 0, \"errmsg\": \"Error modifying $match value for change streams / $currentOp. err=Oplog ns RegEx queries must begin with ^\", \"code\": 8000, \"codeName\": \"AtlasError\"}\nocean-docker-server-elastic-search-1 | at com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException(ProtocolHelper.java:198)\nocean-docker-server-elastic-search-1 | at com.mongodb.internal.connection.InternalStreamConnection$2$1.onResult(InternalStreamConnection.java:512)\nocean-docker-server-elastic-search-1 | at com.mongodb.internal.connection.InternalStreamConnection$2$1.onResult(InternalStreamConnection.java:498)\nocean-docker-server-elastic-search-1 | at com.mongodb.internal.connection.InternalStreamConnection$MessageHeaderCallback$MessageCallback.onResult(InternalStreamConnection.java:821)\nocean-docker-server-elastic-search-1 | at com.mongodb.internal.connection.InternalStreamConnection$MessageHeaderCallback$MessageCallback.onResult(InternalStreamConnection.java:785)\nocean-docker-server-elastic-search-1 | at com.mongodb.internal.connection.InternalStreamConnection$5.completed(InternalStreamConnection.java:645)\nocean-docker-server-elastic-search-1 | at com.mongodb.internal.connection.InternalStreamConnection$5.completed(InternalStreamConnection.java:642)\nocean-docker-server-elastic-search-1 | at com.mongodb.internal.connection.AsynchronousChannelStream$BasicCompletionHandler.completed(AsynchronousChannelStream.java:250)\nocean-docker-server-elastic-search-1 | at com.mongodb.internal.connection.AsynchronousChannelStream$BasicCompletionHandler.completed(AsynchronousChannelStream.java:233)\nocean-docker-server-elastic-search-1 | at com.mongodb.internal.connection.tlschannel.async.AsynchronousTlsChannel.lambda$read$4(AsynchronousTlsChannel.java:122)\nocean-docker-server-elastic-search-1 | at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source)\nocean-docker-server-elastic-search-1 | at java.base/java.util.concurrent.FutureTask.run(Unknown Source)\nocean-docker-server-elastic-search-1 | at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)\nocean-docker-server-elastic-search-1 | at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)\nocean-docker-server-elastic-search-1 | at java.base/java.lang.Thread.run(Unknown Source)\n", "text": "I have a spring boot microservice that communicates with the MongoDB cluster deployed on MongoDB Atlas. 
The mongodb driver version is 4.6.1 In this microservice, we actually trigger some functions if data is inserted, updated, deleted in some specific collections. For catching these events, we are using mongodb-driver-reactivestreams library. I am facing a srange issue. I have 2 clusters in MongoDB Atlas that can be seen below in the following image:image2560×1528 249 KBThe microservice starts without any errors if i try to connect it with the first cluster Cooperants-DE but when i connect it with the second cluster ScopeSet I see this error in the logs when microservice startsCan someone guide me what I am doing wrong? I have also tried to run the microservice with the MongoDB version 6.0.5 that I deployed locally on my machine and microservice starts successfully without any errors but only when I try to connect it with the ScopeSet cluster the above error appears in the logs. Can some one guide me in this regard what i am doing wrong? Thanks", "username": "Usman_Sajid" }, { "code": "Cooperants-DEScopeSet{\"ok\": 0, \"errmsg\": \"Error modifying $match value for change streams / $currentOp. \nerr=Oplog ns RegEx queries must begin with ^\", \"code\": 8000, \"codeName\": \"AtlasError\"}\n^Atlas atlas-gbccyn-shard-0 [primary] local> db.oplog.rs.findOne({ns:{$regex:\"sample\"}})\nMongoServerError: Oplog ns RegEx queries must begin with ^\nAtlas atlas-gbccyn-shard-0 [primary] local> db.oplog.rs.findOne({ns:{$regex:\"^sample\"}})\n{\n op: 'c',\n ns: 'sample_analytics.$cmd',\n ui: new UUID(\"9e2aa592-eb09-4b4d-9f89-e03036b45540\"),\n o: {\n startIndexBuild: 'transactions',\n indexBuildUUID: new UUID(\"487cd2d1-ca36-400e-b654-a10754eb9d57\"),\n indexes: [\n {\n v: 2,\n key: { 'transactions.amount': 1 },\n name: 'transactions.amount_1',\n sparse: false\n }\n ]\n },\n ts: Timestamp({ t: 1664519274, i: 2 }),\n t: Long(\"96\"),\n v: Long(\"2\"),\n wall: ISODate(\"2022-09-30T06:27:54.871Z\")\n}\n", "text": "Hello @Usman_Sajid,Apologies for the late response.we actually trigger some functions if data is inserted, updated, or deleted in some specific collections.Could you please share if you have any Atlas database triggers associated with this and clarify what the functions are doing?The microservice starts without any errors if I try to connect it with the first cluster Cooperants-DE but when I connect it with the second cluster ScopeSet I see this error in the logs when the microservice startsI see you have two different MongoDB Cluster tiers i.e., Cooperants-DE with M30 (dedicated tier) and ScopeSet with M0 (shared tier).Based on the error message, it appears that there is an issue with your aggregation pipeline. Could you please share the aggregation pipeline with us so that we can reproduce it in our environment?Also, please specify the specific database and collection you are querying. Based on the error, it seems that you are trying to access the oplog from the local database of MongoDB Atlas.Shared tiers (M0/M2/M5) have certain restrictions in place that make them slightly different from a dedicated clusters. Please refer to the Manage Clusters and Atlas M0 (Free Cluster), M2, and M5 Limitations documentation for more details. Although we strive to make them functionally equivalent, some features are exclusively available on dedicated clusters. In this particular case, one restriction is that when performing a regex query on an oplog in a shared tier deployment, it must be preceded by an anchor (^).Could you modify your query so that it includes this anchor and see if it works? 
For example:Best,\nKushagra", "username": "Kushagra_Kesav" } ]
MongoDB change streams not working with one specific cluster
2023-05-04T22:15:28.275Z
MongoDB change streams not working with one specific cluster
955
https://www.mongodb.com/…dc7dbe9dfa8a.svg
[ "queries" ]
[ { "code": "executionStats", "text": "I check the manual because I got curious while conducting the explain(“executionStats”) test.\nBut I have a question because there is a slightly ambiguous sentence for me to understand.executionStats ModeMongoDB runs the query optimizer to choose the winning plan, executes the winning plan to completion, and returns statistics describing the execution of the winning plan.Does this sentence mean that you actually run the query (not sample) using the winning plan selected by the optimizer and show the statistics of the results?If I’m running a real query, then .explain(“executionStats”) will use the same amount of resources as when I run the query.", "username": "Kim_Hakseon" }, { "code": "explain()", "text": "Does this sentence mean that you actually run the query (not sample) using the winning plan selected by the optimizer and show the statistics of the results?Yes, it actually runs the query and will show the statistics of the query execution, apart from the results.If I’m running a real query, then .explain(“executionStats”) will use the same amount of resources as when I run the query.explain() has overhead.", "username": "Jack_Woehr" }, { "code": "", "text": "Hi, @Jack_Woehr \nThank you very much for your reply.", "username": "Kim_Hakseon" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Q. explain() executionStats mode
2023-05-17T01:12:46.754Z
Q. explain() executionStats mode
456
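A minimal mongosh sketch of the behaviour discussed in the explain() thread above (the collection and filter are illustrative, not taken from the thread): because "executionStats" executes the winning plan to completion, the counters it returns reflect work that was actually performed, roughly the same cost as running the query itself.

    // hypothetical collection and filter, shown only to illustrate the output shape
    const exp = db.orders.find({ status: "shipped" }).explain("executionStats");

    // these counters describe a real execution of the winning plan
    printjson({
      nReturned: exp.executionStats.nReturned,
      totalKeysExamined: exp.executionStats.totalKeysExamined,
      totalDocsExamined: exp.executionStats.totalDocsExamined,
      executionTimeMillis: exp.executionStats.executionTimeMillis
    });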
null
[ "aggregation" ]
[ { "code": "...\n$map: {\n input: \"$x\",\n as: \"map1\",\n in: {\n key1: \"$$map1.somevalue\",\n mapper: {\n $map: {\n input: \"$y\",\n as: \"map2\",\n in: { key2: \"$$key1\" // access key from above }\n }\n }\n }\n}\n[1,2,3,4,5].map(ele => ({ \n a: ele, \n b: new Array(ele).fill(0).map(item => ({ c: a })) // doesn't work here like this\n}))\n", "text": "Hi, is it possible to access variables created in $map as such:something similar in JS:", "username": "Sambhav_Jain3" }, { "code": "$let[\n { x: [ 1, 2, 3, 4, 5 ] },\n { x: [ 1, 2, 3, 4, 5 ], y: [ 10, 20, 30, 40, 50 ] }\n]\n$letvar b = \n{\n '$addFields': {\n test: {\n '$map': {\n input: '$x',\n as: 'map1',\n in: {\n '$let': {\n vars: { key1: '$$map1' },\n in: {\n mapper: {\n '$map': { input: '$y', as: 'map2', in: { key2: '$$key1' } }\n }\n }\n }\n }\n }\n }\n }\n}\ndb.array.aggregate(b)\n[\n {\n _id: ObjectId(\"646433adc39a2fadd17b3963\"),\n x: [ 1, 2, 3, 4, 5 ],\n test: [\n { mapper: null },\n { mapper: null },\n { mapper: null },\n { mapper: null },\n { mapper: null }\n ]\n },\n {\n _id: ObjectId(\"64643544c39a2fadd17b3964\"),\n x: [ 1, 2, 3, 4, 5 ],\n y: [ 10, 20, 30, 40, 50 ],\n test: [\n {\n mapper: [\n { key2: 1 },\n { key2: 1 },\n { key2: 1 },\n { key2: 1 },\n { key2: 1 }\n ]\n },\n {\n mapper: [\n { key2: 2 },\n { key2: 2 },\n { key2: 2 },\n { key2: 2 },\n { key2: 2 }\n ]\n },\n {\n mapper: [\n { key2: 3 },\n { key2: 3 },\n { key2: 3 },\n { key2: 3 },\n { key2: 3 }\n ]\n },\n {\n mapper: [\n { key2: 4 },\n { key2: 4 },\n { key2: 4 },\n { key2: 4 },\n { key2: 4 }\n ]\n },\n {\n mapper: [\n { key2: 5 },\n { key2: 5 },\n { key2: 5 },\n { key2: 5 },\n { key2: 5 }\n ]\n }\n ]\n }\n]\n$let$let", "text": "Hi @Sambhav_Jain3,Have you attempted using $let?I have the following sample documents:Example aggregation with $let and the output:The above demonstration is just an example of using $let - I do not know what your expected output is or what your document structure is like. Hopefully use of $let works for your environment / use case.Regards,\nJason", "username": "Jason_Tran" } ]
Access custom variable created in nested maps
2023-05-15T08:25:40.428Z
Access custom variable created in nested maps
409
null
[ "schema-validation" ]
[ { "code": "db.createCollection(\"orders\", {\n validator: {\n $jsonSchema: {\n bsonType: \"object\",\n required: [ \"orderType\" ],\n properties: {\n orderType: {\n enum: [ \"online\", \"in-store\" ],\n description: \"must be either 'online' or 'in-store'\"\n },\n shippingAddress: {\n $cond: {\n if: { $eq: [ \"$orderType\", \"online\" ] },\n then: { bsonType: \"object\", required: [ \"street\", \"city\", \"state\", \"zip\" ] },\n else: { bsonType: \"object\" }\n }\n },\n storeLocation: {\n $cond: {\n if: { $eq: [ \"$orderType\", \"in-store\" ] },\n then: { bsonType: \"object\", required: [ \"storeId\", \"storeName\" ] },\n else: { bsonType: \"object\" }\n }\n }\n }\n }\n }\n})\n", "text": "Hi, how can I achieve following?", "username": "cloudsurfer22_N_A" }, { "code": "", "text": "Can you make the question you are asking clearer?", "username": "Jack_Woehr" }, { "code": "", "text": "I want to do conditional validation using my schema. So if a field has certain value (say A) then I need validator to ensure certain fields (say 1,2,3 ) are present. If the value is B then I need the validator to ensure fields 4,5,6 are present - 1,2,3 re optional.So, in the example I provided earlier, if the orderType has a value online, the the shippingAddress object needs to have required fields street, city, state and zip. If the orderType has some other value, then shippingAddress can be an empty object.", "username": "cloudsurfer22_N_A" }, { "code": "oneOfjsonSchema", "text": "Yes, you are now clear, @cloudsurfer22_N_A … An interesting requirement!Maybe you can do this using the oneOf keyword where each supplied jsonSchema object in the array is an arbitrarily deeply nested schema.", "username": "Jack_Woehr" } ]
How to do conditional schema validation?
2023-05-16T02:56:33.253Z
How to do conditional schema validation?
921
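One way to spell out the oneOf suggestion from the schema-validation thread above, as an untested sketch built from the field names in the question: each branch pins orderType to a single value and lists the fields that must accompany it, so an "online" order without a complete shippingAddress (or an "in-store" order without storeLocation details) fails validation.

    db.createCollection("orders", {
      validator: {
        $jsonSchema: {
          bsonType: "object",
          required: ["orderType"],
          oneOf: [
            {
              // online orders must carry a complete shipping address
              required: ["orderType", "shippingAddress"],
              properties: {
                orderType: { enum: ["online"] },
                shippingAddress: {
                  bsonType: "object",
                  required: ["street", "city", "state", "zip"]
                }
              }
            },
            {
              // in-store orders must identify the store instead
              required: ["orderType", "storeLocation"],
              properties: {
                orderType: { enum: ["in-store"] },
                storeLocation: {
                  bsonType: "object",
                  required: ["storeId", "storeName"]
                }
              }
            }
          ]
        }
      }
    })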
null
[ "node-js" ]
[ { "code": "", "text": "Hello,I am trying to change string value to object Id. I tried several ways, but couldn’t get what I want.const { ObjectId } = require(‘mongodb’);const stringValue = ‘999’; // Your string valuetry {\nconst objectId = ObjectId(stringValue);\nconsole.log(objectId); // The converted ObjectId\n} catch (error) {\nconsole.error(‘Invalid string value:’, error);\n}Receiving an error as - MongoDB Node.js Driver is not supported.\nI added mongodb dependency to my project.Please do let me know solution to fix this issue.\nThanks in advance.", "username": "sunita_kodali" }, { "code": "", "text": "Hello @sunita_kodali ,Can you please share a few more relevant details for me to understand your issue better?Kindly refer below links for more information on MongoDB Node.js driverLearn how to use MongoDB’s Node.js driver with npm to connect with the database and perform CRUD operations.Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "Will go over the articles. Thank you.", "username": "sunita_kodali" } ]
String to object id
2023-05-11T14:50:13.764Z
String to object id
453
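A small Node.js sketch related to the string-to-ObjectId thread above (the hex value is only an example): an ObjectId can only be constructed from a 24-character hexadecimal string, so a value like '999' is rejected regardless of driver version, and ObjectId.isValid() gives a safe way to guard the conversion.

    const { ObjectId } = require("mongodb");

    const candidate = "64626b267d66abce46242ac0"; // must be 24 hex characters

    if (ObjectId.isValid(candidate)) {
      const objectId = new ObjectId(candidate);
      console.log("Converted:", objectId);
    } else {
      console.log("Not a valid ObjectId string:", candidate);
    }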
null
[ "kafka-connector" ]
[ { "code": "", "text": "I want to know if there is a way to change the default delivery semantic of mongo source kafka connector which by default is At-least once, I want to use exactly-once delivery semantic.", "username": "Yash_Pradhan" }, { "code": "", "text": "Exactly once is not possible today.", "username": "Robert_Walters" }, { "code": "", "text": "Exactly once is not possible todayWhy? because even before your application can receive the data, tcp may have already re-transmit the data behind the scenes for multiple times.That being said, there can only be idempotent processing.", "username": "Kobe_W" }, { "code": "", "text": "Okay…Got it…thanks for the explanation…", "username": "Yash_Pradhan" }, { "code": "", "text": "My above comment is from low level. From high level, say your app does following:No.3 is needed for obvious reason, but no.2 and no.3 is not atomic operation. So what if your app crashes in between? in that case the event has to be resent.So no exactly once delivery from both low level and high level.", "username": "Kobe_W" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Can I use exactly-once delivery semantic for mongo source kafka connector?
2023-05-15T07:10:52.815Z
Can I use exactly-once delivery semantic for mongo source kafka connector?
803
null
[]
[ { "code": "", "text": "Connection pool for ServerId{clusterId=ClusterId{value=‘6294f04f10ec570412083368’, description=‘null’}, address=10.8.0.1:19018} is paused because another operation failed; nested exception is com.mongodb.MongoConnectionPoolClearedException: Connection pool for ServerId{clusterId=ClusterId{value=‘6294f04f10ec570412083368’, description=‘null’}, address=10.8.0.1:19018} is paused because another operation failed", "username": "Guang_Sun" }, { "code": "", "text": "I also have the same error. What is the reason or how to solve it?", "username": "chch213_chch213" }, { "code": "", "text": "Me too BRO, anyone solved it??", "username": "Simone_Mastella" } ]
Connection Pool Cleared Exception
2022-05-31T06:35:26.783Z
Connection Pool Cleared Exception
3,691
null
[ "database-tools", "backup" ]
[ { "code": "", "text": "Hello everyone,We have encountered a vulnerability during our deployment. This GO vulnerability affects the components listed below.Environment: Ubuntu 22.04.2 LTS (Jammy)\nPackage: MongoDB 6.0.4 MongoDB RepositoriesPackages that have the vulnerability:bsondump|1.19.3\nmongodump|1.19.3\nmongoexport|1.19.3\nmongofiles|1.19.3\nmongoimport|1.19.3\nmongorestore|1.19.3\nmongostat|1.19.3\nmongotop|1.19.3Upgrading these components to use GO 1.19.8 or higher should eliminate the vulnerability.Does anyone know when the official Ubuntu repo will add a new stable release or a fix for this?orIs there any workaround that can disable the installation of these tools?Thank you in advance.", "username": "Batu" }, { "code": "", "text": "Hello, and welcome!Do you have documentation of this vulnerability?", "username": "Felipe_Gasper" } ]
Go 1.19.3 Vulnerability in Ubuntu
2023-05-09T09:10:05.772Z
Go 1.19.3 Vulnerability in Ubuntu
836
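One possible workaround for the question above about keeping the affected tools off the host, assuming the usual mongodb-org packaging for Ubuntu (package names should be double-checked against the repository actually configured): install the individual server packages rather than the mongodb-org meta-package, so mongodb-database-tools (which ships mongodump, mongoexport, bsondump, etc.) is never pulled in.

    # install only the server and the shell, skipping the meta-package that depends on the tools
    sudo apt-get install -y mongodb-org-server mongodb-mongosh

    # if the tools are already installed, they can be removed on their own
    # (apt will also drop the meta-packages that depend on them; the server package is unaffected)
    sudo apt-get remove -y mongodb-database-tools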
null
[ "queries" ]
[ { "code": "db.getCollection(\"task-templates\").insert({\"taskTemplateId\" : \"788281802383956355\", \"playbookTemplateId\" : \"661636468\", \"subject\" : \"reteyyyyyyyyyyyyyy\", \"taskTypeId\" : \"702875519296739235\", \"taskTypeName\" : \"Compliance\", \"priority\" : \"MEDIUM\", \"ownerType\" : \"CUSTOM\", \"owners\" : [ \"1\" ], \"franchiseeAccess\" : \"NONE\", \"creationDate\" : ISODate(\"2023-05-16T05:50:27Z\"), \"templateTimeline\" : { \"relativeTimeline\" : { \"start\" : { \"days\" : 0, \"hours\" : 0, \"scheduleType\" : \"AFTER\" } }, \"timeless\" : true }, \"tenantId\" : \"nocustomerdb.franconnect.net\", \"taskEmailReminder\" : false, \"deleted\" : false, \"archived\" : false, \"creatorUserNo\" : \"1\", \"moduleId\" : \"32\", \"templateType\" : \"PLAYBOOK_TASK_TEMPLATE\", \"externalFileUploadAllowed\" : false, \"completeTaskOnUpload\" : false, \"ownersPlaceHolders\" : [ ], \"_class\" : \"com.franconnect.taskmanagement.domain.TaskTemplate\"});\nWriteResult({\n \"nInserted\" : 0,\n \"writeError\" : {\n \"code\" : 121,\n \"errmsg\" : \"Document failed validation\"\n }\n", "text": "Hi Team,\nI am facing the error below while inserting one documentCan someone help with this please find the validation rules?", "username": "ankur_arya" }, { "code": "", "text": "If you download and use MongoDB Compass, it can display the validation rules for a database.", "username": "Jack_Woehr" } ]
Mongo DB document validation failed
2023-05-16T06:04:54.706Z
Mongo DB document validation failed
562
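In addition to Compass, the validator that is rejecting the insert in the thread above can be read directly from mongosh; the collection name follows the thread, and the collMod line is an optional way to relax enforcement while investigating.

    // print the validation rules attached to the collection
    const info = db.getCollectionInfos({ name: "task-templates" })[0];
    printjson(info.options.validator);

    // optionally log violations instead of rejecting writes while debugging
    // db.runCommand({ collMod: "task-templates", validationAction: "warn" })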
null
[ "atlas-cluster", "php" ]
[ { "code": "<?php\nuse MongoDB\\Client;\nuse MongoDB\\Driver\\ServerApi;\necho extension_loaded(\"mongodb\") ? \"loaded\\n\" : \"not loaded\\n\";\n$uri = 'mongodb+srv://user:[email protected]/?retryWrites=true&w=majority';\n$manager = new MongoDB\\Driver\\Manager($uri);\nprint_r($manager);\n$bulk = new MongoDB\\Driver\\BulkWrite();\n$bulk->insert(['x' => 1]);\n$bulk->insert(['x' => 2]);\n$bulk->insert(['x' => 3]);\n$manager->executeBulkWrite('TestyDB.TestyCollection', $bulk);\n\n$filter = ['x' => ['$gt' => 1]];\n$options = [\n 'projection' => ['_id' => 0],\n 'sort' => ['x' => -1],\n];\n\n$query = new MongoDB\\Driver\\Query($filter, $options);\n$cursor = $manager->executeQuery('TestyDB.TestyCollection', $query);\n\nforeach ($cursor as $document) {\n var_dump($document);\n}\n\n?>\n", "text": "I had an issue which I maanged to eventually solve myself.More info at: mongodb - PHP Mongo Bulk Write - Stack OverflowIn short PHP 8.0 removes the +srv from the connection string.\nWhere as, PHP 7.4 keeps it.Why is the PHP 8.0 driver doing this? I assume because I found the wrong syntax or something and that it’s not easy to find the write usage with PHP due to the new nature of PHP 8.0. I really would like to know how do do this in PHP 8.0.The script to save you going to the above link is", "username": "RussellSmithers" }, { "code": "mongodb+srv://", "text": "In short PHP 8.0 removes the +srv from the connection string. Where as, PHP 7.4 keeps it.See my response in the Stack Overflow thread. I addressed various lines from your original question in detail. I think you were just misunderstanding how a driver processes a mongodb+srv:// URI.It looks like you edited your Stack Overflow answer shortly after creating this thread to note:I contacted my hosting company support about this. They opened the port on 27017 and this script now works with PHP 8.0 and 7.4That would have explained the original connection error you were seeing. I found no evidence of a driver issue or any behavioral differences between PHP 7.4 and 8.0.", "username": "jmikola" } ]
Why does PHP 8.0 remove the +srv?
2023-04-29T05:51:24.422Z
Why does PHP 8.0 remove the +srv?
884
null
[]
[ { "code": "", "text": "Hi Team,We need automation mongo script for MongoDB Slow running /Long running queries and all Databases all operations such as command , getmore, insert, delete , update\nand also output export into emails as a table format", "username": "hari_dba" }, { "code": "", "text": "Any body know shell script slow running /long running queries alert emailswithout use automation tools .", "username": "hari_dba" } ]
Identify slow running queries automated mongo script and output generate email as table format
2020-04-01T17:32:08.472Z
Identify slow running queries automated mongo script and output generate email as table format
2,125
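A rough starting point for the monitoring request above, offered as a sketch rather than a finished alerting solution: the $currentOp stage on the admin database lists in-flight operations, which can be filtered by running time (the 5-second threshold below is only an example) and then formatted into a table or an email by whatever surrounding script is preferred.

    // operations that have been running for more than 5 seconds, across all databases
    const slowOps = db.getSiblingDB("admin").aggregate([
      { $currentOp: { allUsers: true, idleConnections: false } },
      { $match: { secs_running: { $gte: 5 } } },
      { $project: { opid: 1, op: 1, ns: 1, secs_running: 1, command: 1 } }
    ]).toArray();

    printjson(slowOps);

For queries that have already finished, enabling the database profiler or parsing the slow-query lines in the mongod log are the usual complements to this.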
null
[ "queries", "node-js", "mongoose-odm" ]
[ { "code": "\nUser.findOne({ 'local.email' : email })\n .then((user)=>{\n if (!user){\n return done(null, false, req.flash('loginMessage', 'No user found.')); // req.flash is the way to set flashdata using connect-flash\n\n }\n if (!user.validPassword(password)){\n return done(null, false, req.flash('loginMessage', 'Oops! Wrong password.')); // create the loginMessage and save it to session as flashdata\n }\n\n // return user\n req.user= user;\n done(null, user);\n \n })\n", "text": "I’m trying to set up passport-local as described here: passport-localHowever, as you can see, this uses findOne with a callback, which is no longer supported by mongoose.However, I’m running into difficulties because while I can find a user, it isn’t getting passed back to my authentication route. Here is the code I have now:", "username": "Veye_Roll" }, { "code": "User.findOne({ 'local.email': email })\n .then((user) => {\n if (!user) {\n return done(null, false, req.flash('loginMessage', 'No user found.'));\n }\n if (!user.validPassword(password)) {\n return done(null, false, req.flash('loginMessage', 'Oops! Wrong password.'));\n }\n\n // Assign user to req.user\n req.user = user;\n return done(null, user);\n })\n .catch((err) => {\n return done(err);\n });\n", "text": "Hey @Veye_Roll,Welcome to the MongoDB Community forums I’m running into difficulties because while I can find a user, it isn’t getting passed back to my authentication routeI modified the code little bit and it worked fine for me:Let us know if it helps. Otherwise, could you please share the error message you are receiving?Best,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "routes.js\n// =====================================\n// PROFILE SECTION =====================\n// =====================================\n// we will want this protected so you have to be logged in to visit\n// we will use route middleware to verify this (the isLoggedIn function)\napp.get('/profile', isLoggedIn, function(req, res) {\n res.render('pages/profile.ejs', {\n user : req.user // get the user out of session and pass to template\n });\n});\n\n\n// route middleware to make sure a user is logged in\nfunction isLoggedIn(req, res, next) {\n console.log(\"Upon successful login, this should print a user but prints 'undefined'\");\n console.log(req.user);\n // if user is authenticated in the session, carry on \n if (req.isAuthenticated()){\n return next();\n }\n // if they aren't redirect them to the home page\n res.redirect('/');\n}\n", "text": "I am still seeing an error. For example, upon correct username/password login, it tries to redirect to ‘/profile’ and here is the relevant code from routes.js but you can see that the isLoggedIn fails:", "username": "Veye_Roll" }, { "code": "", "text": "Ah, I think I figured out the problem here. I have been testing locally and the cookies saved by passport.js weren’t working as a result. I changed the passport section as mentioned here and now it works: express-session - npm", "username": "Veye_Roll" } ]
Alternatives to findOne() callbacks for passport.js with mongoose
2023-05-15T23:01:35.213Z
Alternatives to findOne() callbacks for passport.js with mongoose
1,210
null
[ "aggregation" ]
[ { "code": "- loads (collection)\n - orders (collection)\n - costs (sub document)[\n {\n cost_centre: ( object Id of costcentres collection)\n }\n ]\ndb.loads.aggregate([\n {\n $lookup: {\n from: \"orders\",\n localField: \"order\",\n foreignField: \"_id\", \n as: \"load_order\",\n pipeline:[{\n\n $lookup: {\n from: \"costcentres\",\n localField: \"load_order.costs.cost_centre\",\n foreignField: \"_id\",\n as: \"_cost_centre\",\n }\n }]\n \n }\n },\n])\n", "text": "I have the nested array object and I need to lookup the field inside the array object.Data structure:I tried this queryQuery:But this is not populating ‘cost_centre’, can someone help me ?", "username": "gowri_sankar" }, { "code": "db.loads.aggregate( [\n { $lookup: {\n from: \"orders\",\n localField: \"order\",\n foreignField: \"_id\", \n as: \"load_order\" \n } } ,\n { $lookup: {\n from: \"costcentres\",\n localField: \"load_order.costs.cost_centre\",\n foreignField: \"_id\",\n as: \"_cost_centre\",\n } }\n] )\n", "text": "I am pretty sure that you cannot refer to load_order inside the inner $lookup because the field is being created by the outer one. Technically, it does not exist yet.Try to remove pipeline: from the outer $lookup and move the inner one as another stage of the main pipeline. Something like the following untested code:In principal, you do not need to $unwind before using $lookup on an array.Note that there will be no duplicate in _cost_center even if some order refers to the same cost_center.The array _cost_center will be a top level field just like load_order, so cost_center documents will not be a sub-document of load_order. You might want to use $addField with $map if you want to embed cost_center into load_order. Personally, I prefer to perform this kind of data cosmetic on the application. Doing the $addField/$map will increased the data transmitted if some load_order refers to the same cost_center.", "username": "steevej" }, { "code": "{\n '$lookup': {\n 'from': 'comments', // this should be your collection name for candidates.\n 'let': {'id_field': {$toObjectId: \"$_id\"}},\n pipeline: [\n {\n $match: {\n $expr: {\n $and: [\n {$eq: [\"$post\", \"$id_field\"]},\n ]\n }\n }\n },\n {$sort: {createdAt: -1}},\n {$limit: 5},\n {\n '$lookup': {\n 'from': 'likes', // this should be your collection name for candidates.\n 'let': {'id_field': {$toObjectId: \"$_id\"}},\n pipeline: [\n {\n $match: {\n $expr: {\n $and: [\n {$eq: [\"$liker\", mongoose.Types.ObjectId(userId)]},\n {$eq: [\"$comment\", \"$id_field\"]},\n ]\n }\n }\n },\n ],\n 'as': 'likes_u'\n }\n },\n ],\n 'as': 'comments'\n }\n}\n", "text": "Hi @steevej,I have the other problem.this is the nested lookup.\nBut comments have the value, likes_us is always empty.", "username": "Bale_Garez" }, { "code": "", "text": "If you read the documentation about let in lookup you will see that your variable id_field should use $$id_field with 2 dollar signs rather than $id_field with a single $ sign.Having to continuously convert data with $toObjectId is detrimental to performance. ObjectId should be stored as ObjectId rather than the string representation. ObjectId is smaller and faster and way faster if you do not need to convert each and every one when you are querying.This thread is 7 months old. You should start a new thread with more details about your issue. You should include sample documents from your source collections. Expected results based on your sample documents are also needed.", "username": "steevej" } ]
Lookup inside nested array object
2022-10-04T08:47:41.162Z
Lookup inside nested array object
3,581
null
[ "aggregation", "queries", "crud" ]
[ { "code": "db.inventory.insertMany( [\n { item: \"journal\", instock: [ { warehouse: \"A\", qty: 5 }, { warehouse: \"C\", qty: 15 } ] },\n { item: \"notebook\", instock: [ { warehouse: \"C\", qty: 5 } ] },\n { item: \"paper\", instock: [ { warehouse: \"A\", qty: 60 }, { warehouse: \"B\", qty: 15 } ] },\n { item: \"planner\", instock: [ { warehouse: \"A\", qty: 40 }, { warehouse: \"B\", qty: 5 } ] },\n { item: \"postcard\", instock: [ { warehouse: \"B\", qty: 15 }, { warehouse: \"C\", qty: 35 } ] }\n]);\n", "text": "This help document shows how to query a nested document. https://www.mongodb.com/docs/manual/tutorial/query-array-of-documents/Taking the same example as in the above link, I have a requirement where I need to find what is the ‘qty’ value for item = “journal” and instock- warehouse = “A”. Please share your ideas on how this can be done efficiently.", "username": "Sai_Deeksh" }, { "code": "qtyinstock.warehousedb.inventory.find(\n {\n item: \"journal\",\n \"instock.warehouse\": \"A\"\n },\n {\n \"instock.$\": 1\n }\n)\n$\"instock.$\": 1instock{\n _id: ObjectId(\"64626b267d66abce46242ac0\"),\n instock: [\n {\n warehouse: 'A',\n qty: 5\n }\n ]\n}\n", "text": "Hello @Sai_Deeksh ,Welcome to The MongoDB Community Forums! To find the value of qty for an item equal to “journal” and instock.warehouse equal to “A”, you can try below queryThis query uses the dot notation to access the nested fields. The $ positional operator in the projection (\"instock.$\": 1) ensures that only the matching element of the instock array is returned in the result.OutputNote: Please test as per ypur use case and update it as per the requirements.Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "Thanks Gaurav. The find command extracts the required embedded document.I also found another thread where an aggregation solution was provided and that worked fine too\nedt-confluence.nam.nsroot.net/confluence/display/C153250A/Important+Links", "username": "Sai_Deeksh" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Find a value within embedded document based on another value from the embedded document
2023-05-12T04:41:17.552Z
Find a value within embedded document based on another value from the embedded document
650
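The aggregation alternative referred to above points at an internal page, so here is a reconstruction against the same sample data (it should return the same single quantity as the find() projection): unwind the instock array, keep only the matching warehouse, and project the quantity.

    db.inventory.aggregate([
      { $match: { item: "journal", "instock.warehouse": "A" } },
      { $unwind: "$instock" },
      { $match: { "instock.warehouse": "A" } },
      { $project: { _id: 0, qty: "$instock.qty" } }
    ])
    // expected output for the sample data: [ { qty: 5 } ]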
null
[ "compass" ]
[ { "code": "", "text": "Is it possible to restart device sync without termination - after the problem with unsynced documents?if 100k items are unsynced then we get a notification:\n‘A Sync process has failed and cannot be restarted’Then, through mongodb compass, we remove documents from the database that were created with a incorrect data structure (we remove them from normal collections, not from the unsynced_documents collection).We then try to restart the sync, but it fails. We have to do a terminate and enable device sync from scratch - then we lose the data stored locally on the phones while device sync was not working.Are we able to restart sync without terminate so that we don’t lose locally saved data?", "username": "Mateusz_Piwowarski" }, { "code": "", "text": "I have the same issue", "username": "Jakub_Kowalski" }, { "code": "", "text": "Hi, when you click “Restart Sync” does it fail again with an error about hitting the unsyncable document count? The issue is that it is still processing through the oplog to get to the events generated by you making them syncable (or deleting them in this case).If this is the case, you can give me your application ID and I can artificially increase the maximum number of unsyncable documents and you should be able to continue syncing without restarting sync.The app ID is just the ObjectId hex in the URL when you are in the Realm/AppServices UI /groups/${group_id}/apps/${app_id}. This is safe to send here but you can also DM me on the forums if that is preferred.", "username": "Tyler_Kaye" }, { "code": "", "text": "Yes, it fails again. app_id: 63a5cdc25c766c7a43776b1c", "username": "Mateusz_Piwowarski" }, { "code": "", "text": "I have the same issueplease let me know if you find any solution", "username": "Mateusz_Piwowarski" }, { "code": "", "text": "Hi, sync is enabled and running on that application. Did you terminate and re-enable sync?Like I said I can allow you to move past this issue but I am not seeing evidence that the application you mentioned is running into this.", "username": "Tyler_Kaye" }, { "code": "", "text": "Yes, such a problem occurred a few days ago and we wanted to look for solutions for the future and protect ourselves from similar situations.", "username": "Mateusz_Piwowarski" }, { "code": "", "text": "Got it. Yeah, we can always raise these limits for individual customers. We added the hard limit because at a certain point, it is good to fail loudly and let the customer know that a large percentage of their documents are not being synced due to the fact that they do not adhere to the schema.Could I ask what the issue was that led to so many documents being unsyncable? We have had some customers run into this when adding a new required field. We have tried to make this clear in the docs but are thinking through some other ways of preventing this in that case. If you are curious and this is what you ran into here are the docs: https://www.mongodb.com/docs/atlas/app-services/sync/data-model/update-schema/#add-a-required-property", "username": "Tyler_Kaye" }, { "code": "", "text": "There were documents in the database whose _id fields were of type ObjectId instead of string. Unfortunately, there was a vulnerability in our code that created documents without _id.", "username": "Mateusz_Piwowarski" } ]
Is it possible to restart device sync without termination - after the problem with unsynced documents?
2023-05-12T11:43:24.847Z
Is it possible to restart device sync without termination - after the problem with unsynced documents?
785
https://www.mongodb.com/…3_2_1024x729.png
[ "queries", "node-js" ]
[ { "code": "", "text": "I am new to MongoD trying to learn realm. I just tried to create an HTTP Endpoint through this function (I’ve attached the screenshot below), I can’t understant the error when I have already defined it.\nimage1201×856 48.4 KB\nThanks in advance.", "username": "Booby_Trap" }, { "code": "", "text": "did you get an answer to this question I am getting the same error, following along with a tutorial and no idea what I need to do.", "username": "Jordan_Enwright" }, { "code": "// This function is the endpoint's request handler.\nexports = async function(request, response) {\n // Data can be extracted from the request as follows:\n\n // Query params, e.g. '?arg1=hello&arg2=world' => {arg1: \"hello\", arg2: \"world\"}\n const {resultsPerPage = 20, page = 0} = request.query;\n\n // Headers, e.g. {\"Content-Type\": [\"application/json\"]}\n const contentTypes = request.headers[\"Content-Type\"];\n\n // Raw request body (if the client sent one).\n // This is a binary object that can be accessed as a string using .text()\n const reqBody = request.body;\n\n console.log(`resultsPerPage => ${resultsPerPage}, page => ${page}`);\n console.log(\"Content-Type:\", JSON.stringify(contentTypes));\n console.log(\"Request body:\", reqBody);\n \n return {success: true};\n};\n", "text": "Hi @Booby_Trap & @Jordan_Enwright,Apologies for the late reply: we’ve been trying a similar function, as below:and that works perfectly well, so it’s difficult to understand what may be wrong in your implementations without more context (for example, HTTP endpoint settings, Authorization, etc.).Can you please post, in textual format (screenshots don’t help much here), the function that fails in as few steps as possible, including the additional information about the endpoint configuration?", "username": "Paolo_Manna" }, { "code": "// This function is the webhook's request handler.\nexports = async function(payload, response) {\n \n const id = payload.query.id || \"\"\n\n const restaurants = context.services.get(\"mongodb-atlas\").db(\"sample_restaurants\").collection(\"restaurants\");\n\n const pipeline = [\n {\n $match: {\n _id: BSON.ObjectId(id),\n },\n },\n {\n $lookup: {\n from: \"reviews\",\n let: {\n id: \"$_id\",\n },\n pipeline: [\n {\n $match: {\n $expr: {\n $eq: [\"$restaurant_id\", \"$$id\"],\n },\n },\n },\n {\n $sort: {\n date: -1,\n },\n },\n ],\n as: \"reviews\",\n },\n },\n {\n $addFields: {\n reviews: \"$reviews\",\n },\n },\n ]\n \n restaurant = await restaurants.aggregate(pipeline).next()\n restaurant._id = restaurant._id.toString()\n \n restaurant.reviews.forEach(review => {\n review.date = new Date(review.date).toString()\n review._id = review._id.toString();\n });\n return restaurant\n};\n\n error: \n{\"message\":\"Cannot access member 'id' of undefined\",\"name\":\"TypeError\"}\n", "text": "I am following a “FreeCodeCamp” tutorial but am paying for mongodb and want to turn this into a final project for schooli get this error:", "username": "Jordan_Enwright" }, { "code": "exports = async function (payload, response) {\n const reqId = payload.query.id || \"\"\n \n console.log(`Request: ${EJSON.stringify(payload)}`);\n console.log(`Requested id: ${reqId}`);\n \n const restaurants = context.services.get(\"mongodb-atlas\").db(\"sample_restaurants\").collection(\"restaurants\");\n\n const pipeline = [\n {\n $match: {\n _id: BSON.ObjectId(reqId),\n },\n },\n {\n $lookup: {\n from: \"reviews\",\n let: {\n id: \"$_id\"\n },\n pipeline: [\n {\n $match: {\n $expr: {\n $eq: [\"$restaurant_id\", 
\"$$id\"]\n },\n },\n },\n {\n $sort: {\n date: -1\n },\n },\n ],\n as: \"reviews\"\n },\n },\n {\n $addFields: {\n reviews: \"$reviews\"\n },\n },\n ]\n\n restaurant = await restaurants.aggregate(pipeline).next();\n \n console.log(`Restaurant: ${EJSON.stringify(restaurant)}`);\n \n restaurant._id = restaurant._id.toString()\n\n restaurant.reviews.forEach(review => {\n review.date = new Date(review.date).toString()\n review._id = review._id.toString();\n });\n \n return restaurant\n};\nsample_restaurants", "text": "Hi @Jordan_Enwright,That doesn’t look like is the same as the OP issue: the following runs without errors:(assuming the sample_restaurants database is around, of course)Can you please test it in your environment?", "username": "Paolo_Manna" }, { "code": "", "text": "\nScreenshot from 2023-05-09 11-13-281629×787 60.2 KB\n\nFacing similar error please help with this.Thanks in advance.", "username": "Vipul_S_Patil" }, { "code": "AtlaspublickeyAtlasprivatekeycontext.values.get('Atlaspublickey')context.values.get('Atlasprivatekey')", "text": "Hi @Vipul_S_Patil,This is certainly an issue in your setup: while I can’t refer directly to your app, as this is a public forum, I’ll try to explain a couple of things that needs to be corrected.Once you’ve done that, we can diagnose if and what else would need corrections.", "username": "Paolo_Manna" }, { "code": "", "text": "Thanks… Let me do the Required setup and check.", "username": "Vipul_S_Patil" } ]
"message":"Cannot access member 'resultsPerPage' of undefined","name":"TypeError"
2022-04-18T04:29:08.913Z
&ldquo;message&rdquo;:&rdquo;Cannot access member &lsquo;resultsPerPage&rsquo; of undefined&rdquo;,&rdquo;name&rdquo;:&rdquo;TypeError&rdquo;
3,550
null
[ "aggregation", "queries", "indexes" ]
[ { "code": "db.user.aggregate([\n {\n $lookup: {\n from: \"user\",\n as: \"user\",\n localField: \"_id\",\n foreignField: \"_id\",\n },\n },\n {\n $project: { ... }\n }\n])\nuser_idexplain$lookup$project$lookup$project", "text": "The user collection has 100,000 documents. Without the $lookup stage, it takes 3 seconds. With it, it takes 15 seconds.Is this expected? What can explain this? The _id field is indexed and unique. I know that the index is used due to the explain. And if I were to join on a column that is not index, this query will timeout with an error.So why is that even though an index is used, it still takes 5x longer?EDIT:There is actually a $project stage that comes after the $lookup. However, if I move the $project stage to before the $lookup or actually remove it entirely, the query returns to 3 seconds again.So now the question becomes, what explains the $project stage in all of this?", "username": "Big_Cat_Public_Safety_Act" }, { "code": "", "text": "Hi @Big_Cat_Public_Safety_Act and welcome to MongoDB community forums!!Could you provide the output for explain() of the test aggregations performed on the sample data mentioned earlier to enhance my understanding?\nAdditionally, could you kindly share sample documents from all the necessary collections, which would help me to replicate the issue in my local environment?\nLastly, we would greatly appreciate it if you could inform us of the MongoDB version you are currently using.Regards\nAasawari", "username": "Aasawari" } ]
$lookup stage increased query time by 5x despite joining on _id on the same collection
2023-05-11T03:20:05.576Z
$lookup stage increased query time by 5x despite joining on _id on the same collection
716
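For anyone producing the explain output requested in the $lookup thread above, the aggregation can be explained as a whole from mongosh (the pipeline is abbreviated to the stages quoted in the question):

    db.user.explain("executionStats").aggregate([
      {
        $lookup: {
          from: "user",
          localField: "_id",
          foreignField: "_id",
          as: "user"
        }
      }
      // ...the trailing $project stage from the question goes here
    ])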
null
[ "aggregation", "queries", "crud" ]
[ { "code": " const reAdjustTargetDateOnDrop = async (\n targetStartDate: Date,\n track: string,\n studentId: string\n ) => {\n const result = await PICollection.updateMany(\n {\n trackNo: track,\n startDate: { $gte: targetStartDate },\n studentId: new Realm.BSON.ObjectID(studentId),\n },\n\n [\n {\n $set: {\n startDate: {\n $dateAdd: {\n startDate: \"$startDate\",\n unit: \"day\",\n amount: 5,\n },\n },\n },\n },\n ]\n );\n return result;\n };\n", "text": "Hello, I’m facing a problem with my query. I was trying to update some documents on MongoDB realm. I have a field ‘startDate’ and what I want is to add 5 more days with the startDate of the document. But its not working. I tried this:I’m getting this error: “cannot transform type primitive.D to a BSON Document: WriteArray can only write a Array while positioned on a Element or Value but is positioned on a TopLevel”but when I try this :startDate: new Date(“2023-05-21”), this is working", "username": "Tanjil_Hossain" }, { "code": "date> db.sampleTest.insertOne( { \"title\": \"Sample Document\", \"content\": \"This is a sample document with a Date field.\", \"createdAt\": ISODate(\"2022-05-27T12:30:00Z\")})\n{\n acknowledged: true,\n insertedId: ObjectId(\"64622cf15d279eac2cd4f3a4\")\n}\ndate> db.sampleTest.aggregate( [ { $addFields: { NewDate: { $add: [ \"$createdAt\", 3*24*60*60000 ] } } }])\n[\n {\n _id: ObjectId(\"64622cf15d279eac2cd4f3a4\"),\n title: 'Sample Document',\n content: 'This is a sample document with a Date field.',\n createdAt: ISODate(\"2022-05-27T12:30:00.000Z\"),\n NewDate: ISODate(\"2022-05-30T12:30:00.000Z\")\n }\n]\ndate>\n", "text": "Hi @Tanjil_Hossain and welcome to MongoDB community forums!!and what I want is to add 5 more days with the startDate of the document.If I understand the above statement correctly, are you trying to add days to the “starttDate” field of the document.\nThis would be possible using the aggregation using the $add operator by adding the specific milliseconds to the createdAt fields.Based on the sample data provided:Let us know if you have any further questions.Regards\nAasawari", "username": "Aasawari" } ]
How to add days with dates while using updateMany?
2023-05-11T06:56:42.691Z
How to add days with dates while using updateMany?
759
null
[]
[ { "code": "finOprtn.pfmCtgyLst\nUpdate update = new Update()\n .addToSet(\"finOprtn.pfmCtgyLst.$[pfmCtgy]\", new PfmCty())\n update.filterArray(Criteria.where(\"pfmCtgy.pfmSrc\").ne(\"A\"));\n", "text": "Hello, I’m struggling long time with case to create or update nested object in below structure:A want to create new object pfmCtgy in list pfmCtgyLst only if in list doesn’t exist object with pfmCtgy.pfmSrc=‘A’. If object with pfmSrc exists I want to update its field. I have below code, but doesn’t work:I handled update object in structure, but I can not achieve create object if not exist in array.\nThanks in advance for any help.\nRegards", "username": "12345alcatraz_N_A" }, { "code": "", "text": "Hello @12345alcatraz_N_A ,Welcome to The MongoDB Community Forums! To understand your use-case better, can you please share more details such as:Regards,\nTarun", "username": "Tarun_Gaur" } ]
How to handle case: create or update nested object in Mongo within one upsert
2023-05-11T14:27:55.305Z
How to handle case: create or update nested object in Mongo within one upsert
626
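The upsert-into-array thread above ends without a posted solution, so the following is only one common way to express the requirement, sketched as a mongosh update with an aggregation pipeline (MongoDB 4.2 or newer; the collection name, filter and the pfmAmt field are placeholders, and a Spring Data equivalent would wrap the same stages in an aggregation update): if no element with pfmSrc "A" exists the new object is appended, otherwise the existing element is merged with the new values.

    db.finOps.updateOne(
      { }, // placeholder filter – match the real target document here (e.g. by _id)
      [
        {
          $set: {
            "finOprtn.pfmCtgyLst": {
              $cond: [
                // does an element with pfmSrc "A" already exist?
                { $in: ["A", { $ifNull: ["$finOprtn.pfmCtgyLst.pfmSrc", []] }] },
                // yes: merge the new fields into that element only
                {
                  $map: {
                    input: "$finOprtn.pfmCtgyLst",
                    as: "c",
                    in: {
                      $cond: [
                        { $eq: ["$$c.pfmSrc", "A"] },
                        { $mergeObjects: ["$$c", { pfmAmt: 100 }] },
                        "$$c"
                      ]
                    }
                  }
                },
                // no: append a new element
                {
                  $concatArrays: [
                    { $ifNull: ["$finOprtn.pfmCtgyLst", []] },
                    [{ pfmSrc: "A", pfmAmt: 100 }]
                  ]
                }
              ]
            }
          }
        }
      ]
    )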
null
[]
[ { "code": "", "text": "Can we pass indexId in the payload while creating a search index through API?", "username": "Shabir_Hamid" }, { "code": "indexIdindexId", "text": "Hi @Shabir_Hamid - Welcome to the community Can we pass indexId in the payload while creating a search index through API?I assume you’re referring to the Create One Atlas Search Index API but please correct me if I am wrong here. Looking through the request body, I am unable to locate any indexId but do see it in the response (when analyzing the response sample on the linked documentation) - In saying so, could you please further expand on the use case here and further clarify the indexId details you’re referring to?Regards,\nJason", "username": "Jason_Tran" } ]
Can we pass indexId while creating an Atlas Search Index
2023-05-15T22:10:18.571Z
Can we pass indexId while creating an Atlas Search Index
433
null
[ "aggregation", "atlas-search" ]
[ { "code": "def test_list_obj(self, test_profile) -> None:\n response = self.endpoint.get()\n assert response.status_code == 200\n assert len(response.json()) == 3\n for received in response.json():\n assert_list_obj_fields(\n received,\n ObjModel.objects.get(id=received[\"id\"]),\n )\n search_query = {\n \"compound\": {\n \"must\": [\n {\"text\": {\"query\": OPEN_OBJ_CONSTANT, \"path\": PATH0}},\n ]\n },\n }\n\n if search_title is not None:\n search_query[\"compound\"][\"should\"] = [\n {\n \"autocomplete\": {\n \"query\": search_title,\n \"path\": PATH1,\n \"fuzzy\": {\"maxEdits\": 1}\n }\n },\n {\n \"autocomplete\": {\n \"query\": search_title,\n \"path\": PATH2,\n \"fuzzy\": {\"maxEdits\": 1}\n }\n },\n ]\n search_query[\"compound\"][\"minimumShouldMatch\"] = 1\n\n \n obj_search = obj_search.aggregate(\n [\n {\"$search\": search_query},\n {\"$skip\": start},\n {\"$limit\": size},\n {\n \"$project\": {\n \"_id\": 0,\n }\n },\n ],\n )\n\n", "text": "Hey guys,So, I use mongo atlas search.\nI have pytest cases all over the place to make sure that things are working as expected.Here is a sample endpoint test caseHere is the sample atlas search query we do in the api:I refactored the code and removed sensitive data, meaning I may have made punctuation errors in doing so.Either way, when I run tests locally, everything and I mean every 120 endpoint test cases related to atlas search work well.However, when these tests are run from bitbucket pipelines, things are uncertain.Bitbucket IPs are added to Mongo’s network connection access point.\nBitbucket devs have gotten back to me showing that there has been no network drops between test cases or during test cases that might have caused the issue.This issue is becoming very annoying. Recently I saw that usually things begin to behave differently after there is a new mongo update.Has anyone ever faced this issue?\nWhat is the resolution? How can I achieve consistency here?", "username": "Aleksandre_Bregadze" }, { "code": "", "text": "Hello @Aleksandre_Bregadze ,I notice you haven’t had a response to this topic yet - were you able to find a solution?It seems that you are experiencing inconsistency in the results of your Atlas Search tests when running them from Bitbucket Pipelines compared to running them locally. While the tests consistently pass on your local environment, they intermittently fail on Bitbucket Pipelines.To better understand the issue, it would be helpful to gather more information:Note: Since the tests consistently pass locally, the possibilities for failure lie within the Bitbucket Pipelines environment itself. It could be related to Bitbucket infrastructure, test setup, or other factors specific to that environment.Best Regards,\nTarun", "username": "Tarun_Gaur" } ]
Unexpected and occasional behavior: bitbucket pipelines + atlas search
2023-05-02T16:18:49.098Z
Unexpected and occasional behavior: bitbucket pipelines + atlas search
714
null
[ "python", "atlas-cluster" ]
[ { "code": "[ERROR] ServerSelectionTimeoutError: cluster0-shard-00-00-....mongodb.net:27017: timed out,cluster0-shard-00-01-....mongodb.net:27017: timed out,cluster0-shard-00-02-....mongodb.net:27017: timed out, Timeout: 30s, Topology Description: <TopologyDescription id: 645dcca1e91881ffaf45360b, topology_type: ReplicaSetNoPrimary, servers: [<ServerDescription ('cluster0-shard-00-00-....mongodb.net', 27017) server_type: Unknown, rtt: None, error=NetworkTimeout('cluster0-shard-00-00-....mongodb.net:27017: timed out')>, <ServerDescription ('cluster0-shard-00-01-....mongodb.net', 27017) server_type: Unknown, rtt: None, error=NetworkTimeout('cluster0-shard-00-01-....mongodb.net:27017: timed out')>, <ServerDescription ('cluster0-shard-00-02-....mongodb.net', 27017) server_type: Unknown, rtt: None, error=NetworkTimeout('cluster0-shard-00-02-....mongodb.net:27017: timed out')>]>\nclient = MongoClient(uri, readPreference='secondaryPreferred')\n\nmongodb+srv://<user>:<pass>@cluster0-....mongodb.net/db\n \"createdAt\": {\n \"$gte\": datetime(year=yesterday.year, month=yesterday.month, day=yesterday.day),\n \"$lt\": datetime(year=today.year, month=today.month, day=today.day)\n },\n", "text": "Hello,I have created an AWS Lambda function in Python 3.9 and pymongo 4.3.3. It should count some documents in two collections and send the counts to slack. I’m facing an issue where at times the first count_documents function call causes the function to hang for 30 seconds and timeout with:Im settingoutside the handler function. This uri is of the format(the db is probably unnecessary, but it is what I use in another app and get it through param store)And running client.db.collection.count_documents() with:to get all documents for yesterday.The lambda function gets as far as the first count_documents call and hangs for the default server selection timeout of 30 seconds and times out and exits the function via error. At times, the function works correctly.I’ve set the function to be invoked via CRON and AWS Lambda retries twice by default, but even that isn’t always enough. It failed last night and this morning invoking manually it worked on the 19th try. Then sometimes it works on the first try without a problem. This makes me believe it is not a networking issue. The Lambda resides in a VPC and the NAT GW ip is allowed in Mongo Atlas.", "username": "Riku" }, { "code": "error=NetworkTimeout('cluster0-shard-00-02-....mongodb.net:27017: timed out')", "text": "error=NetworkTimeout('cluster0-shard-00-02-....mongodb.net:27017: timed out')Hi, this error means the the client was unable to connect to the remote server(s). Is it possible that some of the Lambda machines are assigned IPs that are not allowed to connect to your Atlas cluster? Could you verify this hypothesis by temporarily allowing all IPs in your cluster to see if these connection errors go away?", "username": "Shane" }, { "code": "", "text": "Hey,Thank you for replying. I don’t think I can open the database to the whole world, but it does sound like a reasonable cause. I have tried explicitly setting the Lambda to all existing security groups one by one, but that didn’t seem to have an effect. The Lambda should also have internet access only through NAT gateway which has two associated IP addresses. These are allowed in Atlas.Edit: I realized we have VPC Peering in place, so the NAT GW shouldn’t be in use any way. 
I will check VPC Flow logs next.", "username": "Riku" }, { "code": "", "text": "Every time the error happens, the call to pymongo.MongoClient is successful as well as “initializing” the collections to collection = client[db_name].collection. The time out happens when trying to actually count the documents. I have a feeling the error message might be a bit misleading here. I’m trying to count over 13 million documents with a filter for just the previous day and getting the error. I’ll see if adding an index helps…", "username": "Riku" }, { "code": "", "text": "From VPC Flow logs I can see, that PyMongo sends a variety of requests and the source ports vary. Source IP is an IP that is accepted on the Atlas side for both a request that succeeds and fails.", "username": "Riku" }, { "code": "", "text": "Not surprisingly (in hindsight) - I was incorrect. I placed my lambda in multiple subnets and didn’t realize some of them didn’t have a route in their routetable to the VPC Peering (which - in addition - I forgot about initially). Specifying the correct subnets fixed my problem. It was indeed a connection issue. Thank you for your help Shane!", "username": "Riku" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Pymongo AWS Lambda ServerSelectionTimeoutError
2023-05-12T06:28:07.561Z
Pymongo AWS Lambda ServerSelectionTimeoutError
909
https://www.mongodb.com/…8_2_1024x201.png
[ "compass", "connecting", "mongodb-shell", "containers" ]
[ { "code": "docker run -d -P --name some-mongo \\\n\t-e MONGO_INITDB_ROOT_USERNAME=mongoadmin \\\n\t-e MONGO_INITDB_ROOT_PASSWORD=secret \\\n\tmongo\n", "text": "Hi everyone\nI’m trying to start a new simple MongoDB instance with Docker. I followed the guide from Docker Official to start a new instance likeI thought for any connections, client must provide a set of username and password. And then I tried to connect MongoDB with DataGrip 2023.1.1 without any username and password, it makes me surprise because the tool still can connect with MongoDB. Did something go wrong? I tried with MongoDB Compass then I can’t access like what I expected. Last try, I connect with mongosh, the shell still make connection successfully.\n\nScreenshot from 2023-05-15 17-16-381191×234 24.6 KB\nCan any explain that for me, what should I do to trully disable connection from annonymous users.\nThank you.", "username": "Tuyen_Nguyen" }, { "code": "hellomongoshdb.hello()mongoshshow collectionsMongoServerError: Command listDatabases requires authentication\nhostInfo", "text": "If you just connect, it will work because there are some basic operations that are unauthenticated, e.g. the hello command that you can try in mongosh with the db.hello() helper.However, as soon as you start doing something that requires auth, you will get an error. For example, in mongosh something as simple as show collections will fail with an error:When Compass connects it does a bunch of thing, e.g. calling hostInfo, listing databases and collections, etc. that require auth and that is the reason why it fails.I don’t know exactly you DataGrip works, but it’s possible that it just establishes the connection and waits idly until there is some user action that needs to trigger an authenticated command.", "username": "Massimiliano_Marcon" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MongoDB on docker still can access anonymous users with initialed username and password option via DataGrip
2023-05-15T10:15:04.281Z
MongoDB on docker still can access anonymous users with initialed username and password option via DataGrip
1,533
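To see the distinction described in the Docker/DataGrip thread above in practice, a short mongosh session against such a container could look like the following; since the container was started with -P, first check which host port Docker mapped (docker port some-mongo 27017), and note that the exact error wording can vary between server versions.

    // connect with no credentials, e.g.  mongosh "mongodb://localhost:<mapped-port>"
    db.hello()                              // succeeds: hello is allowed before authentication
    db.adminCommand({ listDatabases: 1 })   // MongoServerError: requires authentication

    // reconnecting with the root user created by the container works for everything:
    //   mongosh "mongodb://mongoadmin:secret@localhost:<mapped-port>/?authSource=admin"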
null
[ "mongodb-shell", "atlas-cluster" ]
[ { "code": "", "text": "Hi Team,I am getting error while logging in Atlas Mongo DB “MongoAPIError: URI cannot contain options with no value”Below is the command for which i used to connect DB through Mongoshell1.mongosh “mongodb+srv://hostname.mongodb.net/test?authMechanism=MONGODB-X509&authSource=$external&tls”2.mongosh --tls --tlsCertificateFile <path_to_cert.pem> --tlsCertificateKeyFile <path_to_key.pem> --host .mongodb.netKindly some one help me to overcome this issue.Thanks,\nKrishnakumar K", "username": "KRISHNAKUMAR_K" }, { "code": "", "text": "Hi Team,Now i am able to connect the MongoDB using below command, But i am unable to take dump database getting error can someone share me the mongodump command to take backup the database.mongosh “mongodb+srv://hostname.mongodb.net/?authSource=%24external&authMechanism=MONGODB-X509&retryWrites=true&w=majority” --tls --tlsCertificateKeyFile x5091248.pem", "username": "KRISHNAKUMAR_K" }, { "code": "", "text": "Hi Team,Now i am able to connect the MongoDB using below command, But i am unable to take dump database getting error can someone share me the mongodump command to take backup the database.mongosh “mongodb+srv://hostname.mongodb.net/?authSource=%24external&authMechanism=MONGODB-X509&retryWrites=true&w=majority” --tls --tlsCertificateKeyFile x5091248.pem", "username": "KRISHNAKUMAR_K" }, { "code": "", "text": "Hi @KRISHNAKUMAR_K,But i am unable to take dump database getting errorWhat is the error?can someone share me the mongodump command to take backup the database.Check out the following Connect to a Cluster using Command Line Tools documentation.Regards,\nJason", "username": "Jason_Tran" } ]
Getting Error While connecting MongoDB through x509 Authentication Method
2023-05-12T06:41:13.217Z
Getting Error While connecting MongoDB through x509 Authentication Method
704
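The X.509 thread above ends before the requested command is shared, so the following mongodump invocation is only a sketch built from the mongosh command that did work there (the database tools still use the ssl-prefixed flag names, and the output directory is arbitrary):

    mongodump \
      --uri="mongodb+srv://hostname.mongodb.net/?authSource=%24external&authMechanism=MONGODB-X509" \
      --ssl \
      --sslPEMKeyFile=x5091248.pem \
      --out=./dump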
https://www.mongodb.com/…_2_1024x576.jpeg
[ "charts" ]
[ { "code": "", "text": "I’m learning about MongoDB charts and I came across the geospatial map for visualizing data. however the world map isn’t being displayed on the geospatial map like in the tutorial video on my chart dashboard. Instead, my data is displayed on a white screen as opposed to it being on a map. see in the image below\nimage1920×1080 180 KB\n", "username": "David_Adeyemi" }, { "code": "", "text": "Thanks for the note, turns out there was a configuration issue on the map tile server. This should be fixed now.", "username": "tomhollander" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MongoDB charts world map not being displayed at the back of geospatial map
2023-05-14T13:55:02.135Z
MongoDB charts world map not being displayed at the back of geospatial map
762
null
[ "compass" ]
[ { "code": "2023-05-13T16:40:14.778+0000 restoring indexes for collection memaback.issues from metadata\n2023-05-13T16:40:14.778+0000 index: &idx.IndexDocument{Options:primitive.M{\"name\":\"dateIssISO_1\", \"unique\":true, \"v\":2}, Key:primitive.D{primitive.E{Key:\"dateIssISO\", Value:1}}, PartialFilterExpression:primitive.D(nil)}\n2023-05-13T16:40:14.778+0000 index: &idx.IndexDocument{Options:primitive.M{\"name\":\"issueid_-1\", \"unique\":true, \"v\":2}, Key:primitive.D{primitive.E{Key:\"issueid\", Value:-1}}, PartialFilterExpression:primitive.D(nil)}\n2023-05-13T16:40:14.778+0000 run create Index command for indexes: dateIssISO_1, issueid_-1\n", "text": "Dear friends,\nI have installed a new 6.0.6 MongoDB on Debian 11 on prem.\nWhile restoring a db/collection, the documents get restored but I also get an error regarding the indexes of the original collection:what is this saying exactly? I do remember about the existance of an index for those two keys but don’t remember the index type/details (and am bedridden with COVID without an access to a Compass client).\nThanks a lot", "username": "Robert_Alexander" }, { "code": "", "text": "Hi @Robert_AlexanderThe dump/restore should be completed on matching major versions of MongoDB, this could be why you are seeing this issue creating indexes.You could:", "username": "chris" }, { "code": "", "text": "Well actually it seems I just needed to recreate those indexes manually and all is fine. Thanks a lot", "username": "Robert_Alexander" } ]
Slight problem restoring a V4 collection to a V6 server
2023-05-13T16:49:15.926Z
Slight problem restoring a V4 collection to a V6 server
533
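For completeness, the two indexes listed in the mongorestore metadata above can be recreated by hand from mongosh; the names, key patterns and unique option are taken directly from the log lines, so this should match what the version-4 dump described.

    const issues = db.getSiblingDB("memaback").issues;
    issues.createIndex({ dateIssISO: 1 }, { name: "dateIssISO_1", unique: true });
    issues.createIndex({ issueid: -1 }, { name: "issueid_-1", unique: true });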
https://www.mongodb.com/…_2_1024x576.jpeg
[ "aggregation", "queries", "data-modeling", "hyderabad-mug" ]
[ { "code": "", "text": "\nMUG Hyderabad Event 2 (2)1920×1080 126 KB\nHyderabad MongoDB User Group is organizing a power packed event on Sunday, May 14, 2023, at 11:00 AM IST at Thoughtworks Hyderabad!The theme of the event is to discuss some advanced features of MongoDB​:rocket: such as queryable encryption, aggregation framework, and querying along with some general insights on fundamentals of data models and query languages.We have identified some top notch speakers with vast experience and knowledge, and we can’t tell you how excited we are for this one!To RSVP - Please click on the “ ✓ RSVP ” link at the top of this event page if you plan to attend. The link should change to a green button if you RSVPed correctly. You need to be signed in to access the button.Event Type: In-Person\nLocation: Thoughtworks Hyderabad\nIMG_20230421_195823 (1)1800×1800 274 KB\n\nakhil1920×1920 182 KB\n\n1655742220279800×800 156 KB\n", "username": "Yashraj_Kakkad" }, { "code": "", "text": "Nice.Thanks for the opportunity", "username": "Sridhar_R" }, { "code": "", "text": "Hey @MUG-APAC-Hyderabad\nOnce I RVSP to event to be held on 14 May after 2 days(RVSP) I got an email for confirmation which I missed so is there a chance to get in", "username": "bala_naga_vamsi" }, { "code": "", "text": "Hi All,Please find the presentation used in this Meet-Up for MongoDB Deep Dive.This Slide is privateRegards,\nSrinivas\nhttps://www.linkedin.com/in/mutyalasrinivas/", "username": "Srinivas_Mutyala" }, { "code": "", "text": "Thanks for sharing Srinivas.", "username": "Adi_Seshu" } ]
Hyderabad MUG: Data Modelling and MongoDB Deep Dive
2023-04-24T15:51:58.775Z
Hyderabad MUG: Data Modelling and MongoDB Deep Dive
3,717
null
[ "node-js", "mongoose-odm", "database-tools", "backup" ]
[ { "code": "", "text": "Hi,I am trying to come up with a NodeJS script to migrate data from one collection to another collection in a different database using mongoose. The schema of the target collection maybe different but the collection names are the same.But the issue I am facing is we cannot connect to 2 different DB’s at the same time from a single script and also we cannot create 2 different models with the same name.I have tried “mongodump” and “mongorestore”, but the issue if for some reason a document cannot be migrated to target collection I need the process to stop and show error and rollback. Also I need the flexibility to manipulate data before migrating to target collectionCan anyone please suggest a solution?I found this link online which mentions the above limitationsRegards,\nHarsha", "username": "Harsha_Buggi" }, { "code": "", "text": "Hello @Harsha_Buggi, Welcome to the MongoDB community forum,There are lots of ways to do your requirement in the link that you shared.It will be helpful if you post your try and where you fail, so I can help you with that details.", "username": "turivishal" }, { "code": "const mongoose = require('mongoose');\nconst { Schema } = mongoose;\n\nasync function connectToDb() {\n try {\n await mongoose.connect(\"mongodb+srv://<URL>/<DB_name>?retryWrites=true&w=majority\", {\n useNewUrlParser: true,\n useUnifiedTopology: true\n });\n console.log('DB connection successfull');\n\n const Users = mongoose.model('user', Schema({\n ...\n ...\n }));\n\n users_list = await Users.find()\n console.log(users_list)\n mongoose.connection.close()\n await mongoose.connect(\"mongodb+srv://<URL>/<second_ DB_name>?retryWrites=true&w=majority\", {\n useNewUrlParser: true,\n useUnifiedTopology: true\n });\n console.log('DB 2 connection successfull');\n const Users_DB2 = mongoose.model('user', Schema({\n ...\n ...\n }));\n users_second_list = (await Users_DB2.find()).forEach(function (x) {\n console.log(x)\n console.log('insert to collection in second DB')\n });\n mongoose.connection.close()\n } catch (err) {\n console.log(err);\n }\n}\nconnectToDb();\nOverwriteModelError: Cannot overwrite `user` model once compiled.\n", "text": "Hi Vishal,I couldnt connect to 2 different data bases ( 2 different ATLAS accounts ) in the same script. I tried to do the followingBut I found we cannot compile 2 models with same name. This is the code.This is the error I am gettingThe model for the second DB may or may not have the same schema as first DB. Also if I am able to get past this error I would like to get the code to insert data to second DB where the line I have mentioned as “console.log(‘insert to collection in second DB’)”. 
If the insert operation fails I need to be able to detect it and take action like roll back the copy operation.Also it would be great if you can tell me how to be connected with both DB when I am migrating data instead of having to close connection to DB.Regards,\nHarsha", "username": "Harsha_Buggi" }, { "code": "const firstConn = mongoose.connect(\"first connection url\");\nconst firstSchema = firstConn.model(\"user\", schema);\n\nconst secondConn = mongoose.connect(\"second connection url\");\nconst secondSchema = secondConn.model(\"user\", schema);\n", "text": "Hello Harsha,You need to store the specific connection in a variable and create a model using that variable, means should be 2 separate variables for both connections.Ex:Well I have created a quick demo in GitHub and it is working, It will just connect 2 separate servers connection and insert a document, you can modify it as per your use, this is just to clear the things how you can implement it.Connect multiple atlas/server connections. Contribute to turivishal/mongodb-multiple-server-connections development by creating an account on GitHub.", "username": "turivishal" }, { "code": "let conn = await mongoose.connect(\"mongodb+srv://<URL_1>/<DB_name>?retryWrites=true&w=majority\", {\n useNewUrlParser: true,\n useUnifiedTopology: true\n });\nconst Users = conn.model('user', Schema({\n ...\n ...\n }));\n\nlet conn_2 = await mongoose.connect(\"mongodb+srv://<URL_2>/<second_ DB_name>?retryWrites=true&w=majority\", {\n useNewUrlParser: true,\n useUnifiedTopology: true\n });\nconst Users_DB2 = conn_2.model('user', Schema({\n ...\n ...\n }));\nOverwriteModelError: Cannot overwrite `user` model once compiled.\n", "text": "Hi Vishal,I tried what you suggested and storing the connection to a variable in my script. But I still get the same error. What am I missing ?Error isI am going through your code in GitHub. But I would appreciate it if you could tell me what is wrong with my script.In my scenario the 2 different DB’s and collections already exist. I am currently unable to read from both using a single script.Regards,\nHarsha", "username": "Harsha_Buggi" }, { "code": "__v:0__v:1", "text": "If mongoose stops you from doing exactly what you want to do you may try connecting to the read-from server using pure MongoDB API and you mongoose for the write-to server.If I understand @turivishal’s example, the schema is the same for both models, while yoursmay not have the same schema as first DBIf you must absolutely read using the mongoose model API, you may try to temporarily rename your write-to model to something like newuser just for the purpose of the migration. Once the migration is done you may use aggregation $out to move the newuser collection to user.Another avenue you may look at is to use mongoose’s schema versioning. I really do not know how this work, but may be you can read with __v:0 and write with __v:1.Issues like this is one reason why I avoid abstraction layers like mongoose. 
It is so much easier in pure mongo.", "username": "steevej" }, { "code": "createConnectionconnect.jsconst mongoose = require(\"mongoose\");\nmodule.exports = (name, connString) => {\n const db = mongoose.createConnection(connString);\n db.on('connected', () => {\n console.info(`${name} MongoDB connection succeeded!`);\n });\n db.on('error', (err) => {\n console.error(`${name} MongoDB connection failed, ` + err);\n db.close();\n });\n db.on('disconnected', () => {\n console.info(`${name} MongoDB connection disconnected!`);\n });\n process.on('SIGINT', () => {\n mongoose.connection.close().then(() => {\n winston.info(`${name} Mongoose connection disconnected through app termination!`);\n process.exit(0);\n });\n });\n // EXPORT DB OBJECT\n return db;\n}\nindex.js// UPDATE YOUR DEFAULTS\nconst firstDB = {\n name: \"First\", // whatever for display purpose in console\n connStr: \"your first db connection url\",\n db: \"mydb\",\n coll: \"mycoll\"\n};\n// CONNECT DB\nconst firstConn = require(\"./connect\")(firstDB.name, firstDB.connStr);\n{}strict: falseuseDB// CREATE MODEL & SCHEMA\nconst FirstSchema = firstConn.useDb(firstDB.db).model(\n firstDB.coll, \n new mongoose.Schema({}, { strict: false, collection: firstDB.coll })\n);\n// DO TRANSACTIONS WITH SCHEMA\nasync function firstInsert() {\n let doc = await FirstSchema.create({ name: \"test\", calledAt: new Date() });\n console.log(doc);\n}\nfirstInsert();\n// UPDATE YOUR DEFAULTS\nconst secondDB = {\n name: \"Second\", // for demo\n connStr: \"your second db connection url\",\n db: \"mydb\",\n coll: \"mycoll\"\n};\n// CONNECT DB\nconst secondConn = require(\"./connect\")(secondDB.name, secondDB.connStr);\n// CREATE MODEL & SCHEMA\nconst SecondSchema = secondConn.useDb(secondDB.db).model(\n secondDB.coll, \n new mongoose.Schema({}, { strict: false, collection: secondDB.coll })\n);\n// DO TRANSACTIONS WITH SCHEMA\nasync function secondInsert() {\n let doc = await SecondSchema.create({ name: \"test\", calledAt: new Date() });\n console.log(doc);\n}\nsecondInsert();\n", "text": "Hello @Harsha_Buggi,Can you please try the demo that I have provided on GitHub, Sorry I did not explain the code here, let me do that first,You have to use the createConnection method to connect with the specific MongoDB server, where I have created a common connect.js file to reuse this code for the second MongoDB server connection.This will return a connection object when you import it in the main file.Let’s move to the main index.js file, Update you constantsLet’s import connect file and connect your first db,Create model and schema, as you said you have different schemas for both collections so this will create a nonstrict schema, by providing empty {} schema property and strict: false in schema options. and the useDB method will select the database for real-time operationsNow you can do any operation by the above schema object, as I did insert the operation,You can repeat the same steps that we did above for the second MongoDB server connection, you can see my code in GitHub that I have already done, and also I am adding here by combining them all together,", "username": "turivishal" }, { "code": "let doc = await FirstSchema.find();\nlet doc = await SecondSchema.find();\nnew mongoose.Schema({}, { strict: false, collection: firstDB.coll })\n", "text": "HI @turivishal ,Thank you so much !!!I tried your code and it works. 
I am able to connnect to 2 different DB’s in 2 different ATLAS accounts and read “users” collection from both and display data.I just replaced the connection strings and DB and collection names in your code and instead of writing I did a .find() and its working.I also tried to add an actual schema object where you are passing an empty one in this line. I will try to check if it works if I pass different schemas here. Would you be able to tell me if it will work ?I just tried adding the property “{ strict: false, collection: ‘users’ }” to the “Schema” in my script but it didnt work.I still have a long way to go (if you see my requirement from my first post) and I am going over your code to understand all the parts.@steevej\nThank you so much for your suggestions !!!\nI am sure I will use them at some point …\nIs there any readymade script I can use using MongoDB API ? I am just trying to assemble something that works and meets my requirement.Regards,\nHarsha", "username": "Harsha_Buggi" }, { "code": ".Schema({},strict: false.Schema({ // your schema properties }, { collection: \"collection name\" })", "text": "Hello @Harsha_Buggi,Glad you solved the connection issues, I think now it is easy, you just need to put your logic of migration and insert/update it into a new collection or log the error.I also tried to add an actual schema object where you are passing an empty one in this line. I will try to check if it works if I pass different schemas here. Would you be able to tell me if it will work ?It should work, make sure you are using the latest version of Mongoose npm, the purpose of .Schema({}, and strict: false is to make schema flexible, there is no restriction to insert new properties in the collection, but it is okay if you do fix schema properties, it will work. like this .Schema({ // your schema properties }, { collection: \"collection name\" })Is there any readymade script I can use using MongoDB API ? I am just trying to assemble something that works and meets my requirement.I am not sure if is it available or not…", "username": "turivishal" }, { "code": "async function secondInsert() {\n source_data = await firstInsert();\n source_data.forEach(async element => {\n let result = await SecondSchema.collection.insertOne(element);\n console.log(result)\n });\n}\n", "text": "@turivishal, I am still understanding the parts. But this is exactly what I needed and works like a charm. This is how I am currently using it to copy data from one collection to another (I am using the function names you gave - but you get the idea … )Thank you.I will try to keep you posted on its development.Regards,\nHarsha", "username": "Harsha_Buggi" }, { "code": "", "text": "@turivishal could you tell me how to get a list of all collections in the DB’s in your script ? 
I am unable to find how to do it.I searched online and got this page but couldnt get anything that worked.Regards,\nHarsha", "username": "Harsha_Buggi" }, { "code": "listCollectionsconst firstUseDB = firstConn.useDb(firstDB.db);\nfirstConn.on('open', () => {\n firstUseDB.db.listCollections().toArray().then((names) => {\n console.log(names)\n })\n .catch((err)=>{\n return err;\n });\n});\n", "text": "Hello Harsha,You can use listCollections method to get collections, before that you need to update the below line because I have done it directly with a model in my script,Now check is connection open then get the list of collections,", "username": "turivishal" }, { "code": "async function firstInsert() {\n my_db = firstConn.useDb(firstDB.db)\n console.log(my_db.listCollections())\n let doc = await FirstSchema.find();\n //console.log(doc);\n return doc\n}\n", "text": "@turivishal thanks again !!! This works. I was earlier tried to do something like this… but it wouldn’t work and I get error “UnhandledPromiseRejectionWarning: TypeError: my_db.listCollections is not a function”. I thought it’s enought to be connected to DB. It appears we need to wait for “open” event too. I am trying to look it up online.", "username": "Harsha_Buggi" } ]
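Editor's note on the thread above: the discussion converges on one createConnection per cluster with non-strict schemas. As a summary only, not the posters' exact script, the sketch below copies one collection between two clusters and stops at the first failed insert. The connection strings, database and collection names are placeholders, and the ordered insertMany is just one way to satisfy the "stop on error" requirement; a true rollback would still need the target cleaned up manually or a transaction where available.

  // Minimal sketch, assuming a recent Mongoose; URIs, db and collection names are placeholders.
  const mongoose = require("mongoose");

  async function migrateUsers() {
    // One independent connection per cluster (createConnection, not connect)
    const srcConn = mongoose.createConnection("mongodb+srv://user:[email protected]/");
    const dstConn = mongoose.createConnection("mongodb+srv://user:[email protected]/");

    // Empty, non-strict schemas let documents of any shape pass through unchanged
    const flexible = new mongoose.Schema({}, { strict: false, collection: "users" });
    const SrcUsers = srcConn.useDb("sourceDb").model("users", flexible);
    const DstUsers = dstConn.useDb("targetDb").model("users", flexible);

    try {
      const docs = await SrcUsers.find().lean(); // read everything from the source collection
      if (docs.length > 0) {
        // ordered: true makes the insert stop at the first failing document
        await DstUsers.collection.insertMany(docs, { ordered: true });
      }
      console.log(`Copied ${docs.length} documents`);
    } catch (err) {
      console.error("Migration failed; the target collection may need manual cleanup", err);
    } finally {
      await srcConn.close();
      await dstConn.close();
    }
  }

  migrateUsers();

For very large collections, iterating a cursor and inserting in batches would be preferable to loading everything with a single find().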
MongoDB migration
2023-05-03T17:04:31.768Z
MongoDB migration
2,890
null
[]
[ { "code": "", "text": "Due to failed payment attempts my account is suspended and this has affected all users in production.\nNow i have registered a new credit card. But still the account is suspended. There is no option to manually retry the failed payments.\nPlease help me getting my account.", "username": "Developer_Team" }, { "code": "", "text": "Looking at the in-app support there is a soltion for viewing and paying existing invoices.In-app support when logged into cloud.mongodb.com\n\nimage390×677 39.2 KB\n\nimage654×797 117 KB\n", "username": "chris" } ]
URGENT: My Production account is suspended
2023-05-13T05:34:59.487Z
URGENT: My Production account is suspended
784
null
[ "spark-connector" ]
[ { "code": "# Create a SparkSession\nspark = SparkSession.builder.appName(\"SparkSQL\").getOrCreate()\n\nspark = SparkSession \\\n .builder \\\n .appName(\"SparkSQL\") \\\n .config(\"spark.mongodb.input.uri\", \"mongodb://127.0.0.1/client.coll\") \\\n .config(\"spark.mongodb.output.uri\", \"mongodb://127.0.0.1/test.coll\") \\\n .getOrCreate()\n\ndf = spark.read.format(\"mongo\").load()\njava.lang.NoClassDefFoundError: org/bson/conversions/Bson", "text": "I am trying to write a basic pyspark script to connect to MongoDB. I am using Spark 3.1.2 and MongoDb driver 3.2.2.My code is:\nfrom pyspark.sql import SparkSessionWhen I execute in Pyspark with /usr/local/spark/bin/pyspark --packages org.mongodb.spark:mongo-spark-connector_2.12:3.0.1 I get:java.lang.NoClassDefFoundError: org/bson/conversions/BsonI am very new to Spark. Could someone please help me understand how to install the missing Bson reference? I couldn’t see this in the sample code or MongoDB PySpark documentation.Thanks in advance,Ben.", "username": "Ben_Halicki" }, { "code": "", "text": "Looks like you don’t have all the dependencies installed for the MongoDB Spark Connector.I do have a docker environment that will spin up spark, mongodb and a jypter notebook. This will get you up and running quickly.Docker environment that spins up MongoDB replica set, Spark, and Jupyter Lab. Example code uses PySpark and the MongoDB Spark Connector. - GitHub - RWaltersMA/mongo-spark-jupyter: Docker environme...", "username": "Robert_Walters" }, { "code": "", "text": "Hi Robert, thank you for your reply. My apologies for not getting back to you earlier, I had forgotten about this post.Thanks for the link to your Docker image, i’ll take a look. Do you have any instructions on how to setup all the dependencies? I have been through the MongoDB Spark documentation and couldn’t find a workable solution.Thanks in advance,Ben.", "username": "Ben_Halicki" }, { "code": "", "text": "Hi\nAre you able to resolve this issue.\nI am also facing the same issue. Not finding any suitable solution yet\nThanks\nSaswata Dutta", "username": "Saswata_Dutta" }, { "code": "", "text": "Hi Saswata,I don’t remember exactly what the solution was, but I think it might have been an issue with my environment. I would try a clean installation if you can. If you are still having issues, contact me back and i’ll share some pyspark with a mongodb connection and commands for how I submit to the cluster.Kind regards,Ben.", "username": "Ben_Halicki" }, { "code": "", "text": "Hi Ben\nI am using AWS EMR instance where i installed mongodb 6.\nI am using spark 3 up. I have used mongodb-spark connectors as provided by mongodb.\nI tried all different option that is availabel in documents. But not luck.\nI am trying to connect from notebook\nCan you please help\nThanks\nSaswata", "username": "Saswata_Dutta" }, { "code": "", "text": "Hi Saswata,I’m not familiar with AWS EMR so probably not much help to you. The only thing I can think of, is when I submit a job to the cluster I have to specify what packages to load. For example, this is the command I execute:\nspark-submit --packages org.mongodb.spark:mongo-spark-connector_2.12:3.0.1 --driver-memory 6G --master spark://192.168.1.13:7077 ./some_pyspark.pyIs it possible that when you execute the notebook, it isn’t including the mongodb packages? 
Are you able to validate your solution outside of AWS (ie a locally installed cluster & mongodb instance)?Cheers,Ben.", "username": "Ben_Halicki" }, { "code": "", "text": "Dear nawaz_nawaz, your post looks a lot like a ChatGPT text.Could you please clarify the pertinence of your answer?It is clear from the previous posts on this thread that the people involved know what is PySpark.", "username": "steevej" }, { "code": "", "text": "Hi Nawaz, using ChatGPT to reply to legitimate questions is inappropriate IMO. I have flagged this response to admins.Thanks,Ben.", "username": "Ben_Halicki" } ]
PySpark MongoDb Connector
2021-09-17T02:07:34.183Z
PySpark MongoDb Connector
5,540
null
[ "node-js", "crud" ]
[ { "code": "const cartSchema = new Schema<ICart>(\n {\n orders: {\n type: [\n {\n hotelId: {\n type: SchemaTypes.ObjectId,\n required: true,\n ref: 'hotels',\n },\n startDate: { type: Date, required: true },\n endDate: { type: Date, required: true },\n rooms: {\n type: [\n {\n roomTypeId: {\n type: SchemaTypes.ObjectId,\n required: true,\n ref: 'roomTypes',\n },\n quantity: {\n type: Number,\n min: 1,\n required: true,\n },\n },\n ],\n required: true,\n min: 1,\n },\n },\n ],\n required: true,\n max: 20,\n min: 1,\n },\n userId: {\n type: SchemaTypes.ObjectId,\n index: true,\n unique: true,\n required: true,\n ref: 'users',\n },\n isActive: { type: Boolean, default: true, required: true },\n },\n { timestamps: true, collection: 'carts' },\n);\n\nconst Cart = model<ICart>('carts', cartSchema);\n\nCart .findOneAndUpdate(\n {\n userId,\n orders: {\n $elemMatch: {\n hotelId: newOrder.hotelId,\n startDate: newOrder.startDate,\n endDate: newOrder.endDate,\n },\n },\n },\n {\n $set: { 'orders.$': newOrder },\n },\n { new: true },\n );\n\n", "text": "i have modeli want if orders have hoteiId startDate, endDate same value in newOrder it will update, if not , it insert new newOrder , but not work", "username": "Anh_Tu_n_Hu_nh_Van" }, { "code": "[\n {\n _id: ObjectId(\"645a057f469b1090934a6747\"),\n orders: [\n {\n hotelId: ObjectId(\"61a3a731c0a774594ee276d5\"),\n startDate: ISODate(\"2023-05-14T00:00:00.000Z\"),\n endDate: ISODate(\"2023-05-15T00:00:00.000Z\"),\n rooms: [\n {\n roomTypeId: ObjectId(\"61a3a731c0a774594ee276d4\"),\n quantity: 2\n }\n ]\n },\n {\n hotelId: ObjectId(\"61a3a731c0a774594ee276d6\"),\n startDate: ISODate(\"2023-05-15T00:00:00.000Z\"),\n endDate: ISODate(\"2023-05-16T00:00:00.000Z\"),\n rooms: [\n {\n roomTypeId: ObjectId(\"61a3a731c0a774594ee276d7\"),\n quantity: 1\n }\n ]\n }\n ],\n userId: ObjectId(\"61a3a731c0a774594ee276d8\"),\n isActive: true,\n createdAt: ISODate(\"2023-05-09T14:30:00.000Z\"),\n updatedAt: ISODate(\"2023-05-09T14:30:00.000Z\")\n }\n]\nconst newOrder = {\n hotelId: ObjectId('61a3a731c0a774594ee276dA'), // the ID of the new hotel\n startDate: ISODate('2023-05-17T00:00:00.000Z'),\n endDate: ISODate('2023-05-18T00:00:00.000Z'),\n rooms: [\n {\n roomTypeId: ObjectId('61a3a731c0a774594ee276dB'),\n quantity: 1,\n },\n ],\n};\nconst filter = {\n 'orders.hotelId': { $ne: newOrder.hotelId },\n 'orders.startDate': { $ne: newOrder.startDate },\n 'orders.endDate': { $ne: newOrder.endDate },\n};\nconst update = {\n $push: {\n orders: newOrder,\n },\n};\ndb.post225342_02.updateOne( filter, update)", "text": "Hi @Anh_Tu_n_Hu_nh_Van and welcome to MongoDB community forums!!Based on the schema design, I tried to create the sample document in my local environment:\nThe sample document looks like the following:i want if orders have hoteiId startDate, endDate same value in newOrder it will update, if not , it insert new newOrder , but not workand if I understand the above statement correctly, you wish to add a new order to the order array if the hotelID, startDate and EndDate not have the match.Based on my understanding, here is how the update can be performed.db.post225342_02.updateOne( filter, update)Let us know if the above query does not work for the requirements.Regards\nAasawari", "username": "Aasawari" }, { "code": " // update order if order exist\n const cartDb = await cartService.findOne({ userId: userId });\n\n const updateCartOfUser = await cartService.findOneAndUpdate(\n {\n userId,\n 'orders.hotelId': newOrder.hotelId,\n 'orders.startDate': 
newOrder.startDate,\n 'orders.endDate': newOrder.endDate,\n },\n {\n $set: { 'orders.$': newOrder, isActive: true },\n }\n );\n\n // if not add order\n if (!updateCartOfUser) {\n cartDb.orders.push(newOrder);\n\n await cartDb.save();\n return oke(newOrder);\n }\n", "text": "\nimage1557×310 36.3 KB\n\nit can not update element 8 , even though it’s the same hotelId startDate endDatemy solutionbut i want a method that can just update or add a new element", "username": "Anh_Tu_n_Hu_nh_Van" } ]
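A possible way to collapse the two-step "update if present, otherwise push" into a single round trip is a bulkWrite with two conditional updateOne operations. This is only a sketch: it reuses the Cart model and newOrder shape from the posts above, and it assumes the user's cart document already exists (it does not upsert the cart itself).

  // Sketch: one bulkWrite instead of findOneAndUpdate followed by push/save
  async function upsertOrder(Cart, userId, newOrder) {
    const match = {
      hotelId: newOrder.hotelId,
      startDate: newOrder.startDate,
      endDate: newOrder.endDate,
    };
    return Cart.bulkWrite(
      [
        {
          // 1) Replace a matching order in place, if one exists
          updateOne: {
            filter: { userId, orders: { $elemMatch: match } },
            update: { $set: { "orders.$": newOrder, isActive: true } },
          },
        },
        {
          // 2) Otherwise push it; the $not/$elemMatch guard makes this a no-op when 1) matched
          updateOne: {
            filter: { userId, orders: { $not: { $elemMatch: match } } },
            update: { $push: { orders: newOrder } },
          },
        },
      ],
      { ordered: true }
    );
  }

Exactly one of the two filters can match a given cart, so only one of the writes is applied per call.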
How update object in array or add new value in array
2023-05-08T10:44:45.261Z
How update object in array or add new value in array
711
https://www.mongodb.com/…69a6a61658f6.png
[ "aggregation", "database-tools", "backup", "views" ]
[ { "code": "", "text": "Hello,There is a collection ABC with ~350 Millions documents.I want to partition ABC based on one key that has 7 unique values (say - v1, v2, v3, v4, v5, v6, v7) throughout the collection.\nI.e. Want to partition ABC into 7 different collections (abc_v1, abc_v2, abc_v3, abc_v4, abc_v5, abc_v6, abc_v7)I don’t want to go for sharding as it will double the cost & we have not reached to that level yet.\nI can easily sustain for next 2 years if we are partitioning the ABC into 7 different collections.I have completed a POC on taking query based mongodump & restore it into the separate collection.\ni.e. I took a query based mongodump (for all the docs of that has v1) & restored it into abc_v1.\nI did the same 7 times. I have attached the image of POC result.I tried mongodump, mongoexport, $out/$merge - out of all mongodump is giving me better result but that is not enough as even mongodump/restore is taking huge time.am I doing something wrong or what is the optimised way to partition such huge collection?", "username": "Ashish_Zanwar" }, { "code": "", "text": "Why do you want to partition your collection?Each collection needs at least 2 files. So for this single collection will go from 2 files to 14 files for the same data. Your code will not be more complicated because it will have to determine which collection to query. You gonna use more resources for the same amount of data. What is your goal?", "username": "steevej" }, { "code": "*Why do you want to partition your collection?*[{\n \"field_1\" : 111,\n \"field_2\" : 222,\n \"field_3\" : \"v1\", .......\n},\n{\n \"field_1\" : 333,\n \"field_2\" : 444,\n \"field_3\" : \"v2\", .......\n},\n{\n \"field_1\" : 555,\n \"field_2\" : 666,\n \"field_3\" : \"v3\", ......\n} ....... 350 M]\n", "text": "*Why do you want to partition your collection?*As I have mentioned it has 350 Millions documents & it’s increasing day by day.\nSample documents of ABC collectionWe have 13 indexes as we can’t fire a single query without index so I can’t reduce the indexes.\nThat it affecting on writes (insert/update) a lot.As of now all the insert, updates & reads are happening based on 7 unique values only so instead of firing query on ABC with {“field_3”: v1} we can fire it on abc_v1 collectionSee we have very clear sight onI am looking for optimised way to split 1 collection into 7 collections.", "username": "Ashish_Zanwar" }, { "code": "", "text": "We have 13 indexes as we can’t fire a single query without index so I can’t reduce the indexes.Will the 13 indexes still exist in the 7 partitioned collections? If the 13 indexes still exist in each collection, then you end up with 91 indexes and files. Updating an index is O(log n) so if you still update 13 indexes per collection you are roughly looking at the same amount of work.Your splitting has some potential performance improvement if and only if you can eliminate some indexes for some of the collections. For example, abc_v4 might not need all the 13 original indexes, so an update on abc_v4 will touch less indexes. This is assuming that some indexes are not needed for abc_v4.If you think that updating indexes is the culprit, then optimizing the indexes might be a better avenue. Since all your queries including field_3 with a specific value (v1 to v7), field_3 is probably (should be) present in all indexes. One way to reduce the number of indexes to update is to have more indexes but partial indexes. This is with, again, assuming that some indexes are not needed for some collections. 
The partialFilterExpression will be on field_3. Basically, you would partition the indexes without partitioning the data.I am surprised thatmongodump is giving me better resultcompared to $out/$merge as there is extra disk I/O involve with mongodump/restore and potentially network I/O if mongodump/restore is performed from another machine. I think $merge will be a very bad choice for your use-case since it compares existing documents, while $out does not perform this check. I really cannot understand how dump/restore could be faster than $out. So if your performance numbers are based on $merge rather than $out, then you should revise them. What ever path you choose create your indexes after the partitioning.As I wrote disk I/O above, I thought that partitioning per database rather than per collection could be much better as you could leverage the directoryPerDB option where you make each database directory be a symlink to 7 different disks.", "username": "steevej" }, { "code": "Partial indexes - \n", "text": "7 partitioned collections will have 2-4 indexes/collection & there will be a huge performance improvement.We have already created 7 collections & added 1 document in it with required indexes such that when we insert documents into the collection, mongodb will take care of updating/sorting the indexes as per the newly inserted documents.Earlier I thought to create partial indexes only but you can consider -We have deployed ATLAS Cluster on AWS & we have created a large EC2 instance (to mongo dump/restore) in the same region.\nI think that’s the reason network latency is not there in case of mongo dump/restore.I didn’t understand how mongo dump/restore will take more IOPS as compare to $out?\nI mean in both the cases (mongo dump/restore & $out) we are reading the same data & writing into the separate collections.That should take equal number of IOPS right?", "username": "Ashish_Zanwar" }, { "code": "", "text": "7 partitioned collections will have 2-4 indexes/collectionThis is indeed the key.I didn’t understand how mongo dump/restore will take more IOPS as compare to $out?I mean in both the cases (mongo dump/restore & $out) we are reading the same data & writing into the separate collections.I might have wrongly assume that your mongodump is written to disk and mongorestore read from disk, hence more disk I/O.But still, with $out you only have mongod doing all the work. With mongodump/restore, you will have 3 processes so 3 times the amount of context switch. With $out the data stays on the server, with dump/restore data is exchanged from mongod to mongodump, then from mongodump to mongorestore and finally from mongorestore to mongod. That is a lot of extra IPC compared to $out. So I am really surprised. But that is the thing about performances, our intuition is often wrong.", "username": "steevej" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
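For reference, the $out route discussed above can be scripted in a few lines of mongosh. This is a sketch using the collection and field names from the thread (ABC, field_3, values v1 to v7); it rewrites the whole collection once per value, so it should run in a quiet window, and the per-partition indexes are created afterwards as suggested.

  // mongosh sketch: split ABC into abc_v1 ... abc_v7 by field_3
  const values = ["v1", "v2", "v3", "v4", "v5", "v6", "v7"];

  for (const v of values) {
    db.ABC.aggregate(
      [
        { $match: { field_3: v } },   // assumes an index on field_3 exists
        { $out: "abc_" + v }          // writes into the same database
      ],
      { allowDiskUse: true }
    );
    print("wrote abc_" + v);
  }

  // Afterwards, create only the 2-4 indexes each partition actually needs, e.g.
  // db.abc_v1.createIndex({ field_1: 1 });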
Partition a collection into multiple collections
2023-05-11T07:38:56.686Z
Partition a collection into multiple collections
1,044
null
[ "connector-for-bi" ]
[ { "code": "", "text": "Hi everyone,I’m currently working on a project that involves connecting MongoDB to a BI tool using the MongoDB BI Connector and the MongoDB ODBC Driver. I’m wondering if anyone has any information on the compatibility of these tools, or has encountered any issues when using them together.Here are the versions of the tools I used and it worked:I followed this tutorial and it works but I want to know more about the compatibilities.\n( Connecting MongoDB hosted in Mongo Atlas with PowerBI (including M0 clusters) | by Luis Mego | Qimi TechBlog | MediumBut I’m wondering why it is not working with newer versionsI’ve done some research on the compatibility of these tools, but I haven’t been able to find any clear answers. I’m wondering if anyone has successfully used these tools together, and could share their experience with me.Alternatively, if anyone has any information on the compatibility of these tools, or has encountered any issues when using them together, I would greatly appreciate any guidance or advice.Thank you in advance for your help!", "username": "Liber_Darius" }, { "code": "", "text": "Welcome @Liber_Darius and thanks for posting. My name is Alexi and I am the Product Manager for the BI Connector and the new Atlas SQL Interface. What BI tool are you trying to use with the BI Connector/ODBC Driver? And you running an Atlas instance or on-prem?Ideally we would have you connecting with the current versions of the BI Connector and ODBC Driver. Alternatively, we are in public preview with the Atlas SQL Interface where we have a JDBC Driver, Tableau Custom Connector and we will be releasing an ODBC Driver and Power BI Connector soon.If you can share the BI Tool that you are using, I can guide you on compatibility and the options you may have going forward.Best,\nAlexi\[email protected]", "username": "Alexi_Antonino" }, { "code": "", "text": "Hello, the BI tool that I am using is PowerBI and I’m running an atlas instance.", "username": "Liber_Darius" }, { "code": "", "text": "ok great! The current BI Connector + ODBC Driver does indeed work with Power BI Desktop and Gateway to refresh in the Power BI Cloud/web. Also, we are currently testing a new MongoDB Atlas Power BI custom connector - this is something you might be able to take advantage of as well.Next steps would be to share the error you are receiving or email me and we setup a call to figure out what is going on.\[email protected]", "username": "Alexi_Antonino" }, { "code": "", "text": "Thank you very much for your replies. I’m not facing any errors, I just want to know more about the compatibility of the versions of the BI Connector with the ODBC driver because I could not find that in the documentation.", "username": "Liber_Darius" }, { "code": "", "text": "MongoDB BI Connector ODBC Driver — MongoDB Connector for BI they added a compatibility section on the documentation.", "username": "Liber_Darius" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Compatibility between MongoDB BI Connector and ODBC Driver
2023-04-06T13:57:18.180Z
Compatibility between MongoDB BI Connector and ODBC Driver
1,161
null
[ "queries", "data-modeling" ]
[ { "code": "", "text": "While trying to create charts unable to access the fields of the reference field. For example in collection A i have a reference field collection B. And I am unable to create a chart which has field A1, A2, A3(from collection A ) and B1, B2, B3 from collection B in a single chart.", "username": "Shankar_Reddy" }, { "code": "", "text": "Hi @Shankar_Reddy , please see this page for details on how to do this:\nAdd a Lookup Field — MongoDB Charts", "username": "tomhollander" }, { "code": "", "text": "I tried this as its a reference field when adding the id from the other collection it returns null for all rows.", "username": "Shankar_Reddy" }, { "code": "", "text": "@tomhollander On add a added field to get the id from the reference field it does not show the lookup field option.\n\nimage611×909 67.9 KB\n", "username": "Shankar_Reddy" }, { "code": "", "text": "This is not how you add lookup fields. You need to use the … menu on the field you want to use to look up matching values in the other collection. On that menu you will see a Lookup Fields option.", "username": "tomhollander" }, { "code": "", "text": "lookup field works if id is stored and not the ref field of the collection\n\nimage1811×787 147 KB\n", "username": "Shankar_Reddy" }, { "code": "", "text": "@tomhollander I think the issue might be that for the reference field in the fields it should show it as a embedded field with $id as the field.\nNow it just shows the name and on accessing gives object object.\n\nimage1811×787 147 KB\n", "username": "Shankar_Reddy" } ]
How to get access to the other fields of the reference field in the mongo charts?
2023-05-10T06:17:36.687Z
How to get access to the other fields of the reference field in the mongo charts?
919
null
[ "database-tools", "backup" ]
[ { "code": "", "text": "Hello,\nI am trying to use the command line cmd mongorestore to restore a backup of a mongoDB database I have. Will this restore not work with the Atlas Cluster? The original DB was just a normal localized server database.The error I am getting is:\ndon’t know what to do with file “/dev/channels.bson”, skipping…\ndon’t know what to do with file “/dev/channels.metadata.json”, skipping…Any help would help! Thanks", "username": "Joe" }, { "code": "mongorestoremongodumpmongodump", "text": "It would help us help you if you provide the following information:Version of the Atlas cluster you are restoring to\nVersion of mongorestore you are using\nWas mongodump the same version?\nExact command you are running when you get this error.Usually this happens when you point mongodump at a subdirectory inside the full dump without specifying which namespace you want it to restore to, but I’m just guessing - if you provide full details, it may be more clear what the issue is.Asya", "username": "Asya_Kamsky" }, { "code": "", "text": "Use -db flag in your mongorestore command", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Thanks, it worked for me as well. But we need to pass command with -d insteed of -db.", "username": "Chetan_Patil" }, { "code": "", "text": "-d databaseName, now data will be restored in databaseName database.", "username": "Chetan_Patil" }, { "code": "", "text": "@Ramachandra_Tummala I am facing same issue, i have used below command to restoremongorestore --nsInclude dbname --ssl --host=“hostname:27017” --usernam\ne= --password= --authenticationDatabase=admin --sslCAFile rds-combined-ca-bundle.pem --gzip /path/Can you please help me on this issue?", "username": "KRISHNAKUMAR_K" }, { "code": "", "text": "just dbname will not work for nsInclude\n–nsInclude=Try nsInclude=“dbname.*”", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Hi @Ramachandra_TummalaI have tried as you said the above command i am getting same error, Please find the below restore command for your reference.mongorestore --nsInclude=\"dbname\" --ssl --host=“hostname:27017” --username=username --password=password --authenticationDatabase=admin --sslCAFile rds-combined-ca-bundle.pem /root/livedb/*", "username": "KRISHNAKUMAR_K" }, { "code": "", "text": "Where is dbname.* in your commandIf your are restoring one collection\nnsInclude=dbname.collection_name\nIf it is full DB you have to use wildcard (*)\nPlease check this link having some examples", "username": "Ramachandra_Tummala" }, { "code": "", "text": "mongorestore --nsInclude=“dbname.*” --ssl --host=“hostname:27017” --username=username --password=password --authenticationDatabase=admin --sslCAFile rds-combined-ca-bundle.pem /root/livedb/I have used above command, and getting same thing, may be version problem?", "username": "KRISHNAKUMAR_K" } ]
Mongorestore error "don't know what to do with file... .bjson and .json"
2022-02-14T18:25:36.138Z
Mongorestore error “don’t know what to do with file… .bjson and .json”
22,316
null
[]
[ { "code": "atlas auth registerSuccessfully logged in as <my email>\nYou don't seem to have access to any project\n? Do you want to enter the Project ID manually? (y/N)\natlas setup --clusterName myAtlasClusterEDU --provider AWS --currentIp --skipSampleData --username myAtlasDBUser --password myatlas-001 | tee atlas_cluster_details.txt\nYou are already authenticated with an account (<my email>).\nRun \"atlas auth setup --profile <profile_name>\" to create a new Atlas account on a new Atlas CLI profile.\nError: please make sure to select or add an organization and project to the profile: default\n", "text": "This lab is divided into 3 parts.\nIn the first part, I started with the CLI by running:atlas auth registerI created a new account, I used the verification code, and everything went well.Then in the second part, I had to use their query to create the project MDB_EDU. Everything goes well again and it created 2 projects there in my organization. I know the IDE has a timer, however, I was unaware that this timer was also running between the steps. So before starting the third part of the lab, I left the pc to do something and when I came back it said my IDE timer ran out and I would have to start everything again.Now, after the CLI part, I get the message:I ignore it (since there’s no existing project) and I click on Check. Everything goes well and I move to the second step.Then in the second part, I had to use their query to create the project. But when I try to execute it, I got an error. So I go back to my org, I terminated the clusters, and then deleted the projects as I believe it was conflicting since it was trying to create a project that already exists. However, now when I run:I’m getting the following message:I already tried “atlas auth setup --profile <profile_name>”, I don’t know what is this default but I also tried to select my organization via its ID, I tried many things, I asked chatgpt and tried every single option it gave me and I’m out of options. I don’t have remaining emails to create another account and I can’t even delete my account to create another one because MongoDB doesn’t allow me to create another account with the email of the deleted one. I don’t know why this is not working since I have no projects there, I don’t know what is happening. This is so frustrating and I already lost so much time in here and I can’t move on with the course.Does anyone know how to solve this issue?", "username": "dani_costa" }, { "code": "", "text": "You have to logout (atlas logout) and then paste this command once again.", "username": "Piotr_Komisarski" }, { "code": "", "text": "This topic was automatically closed 60 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
LESSON 2: CREATING AND DEPLOYING AT ATLAS CLUSTER - doesn't create the project nor the cluster
2023-05-11T14:32:55.226Z
LESSON 2: CREATING AND DEPLOYING AT ATLAS CLUSTER - doesn’t create the project nor the cluster
1,208
https://www.mongodb.com/…5_2_1024x321.png
[ "java" ]
[ { "code": "", "text": "I am using mongodb with java and when I open the server I get several mongo logs spammed by the console. I want to hide them because it is not very readable but I don’t know how and I have tried things but nothing. Any solution?\n\nimage1905×598 54 KB\n", "username": "Onova" }, { "code": "grep -v '\\[INFO\\]'|less", "text": "well, if you don’t generally care about [INFO] messages you can trygrep -v '\\[INFO\\]' somelogfile.log |less", "username": "Jack_Woehr" } ]
Problems hiding logs in console
2023-05-12T16:24:40.621Z
Problems hiding logs in console
455
null
[ "aggregation", "queries", "node-js", "data-modeling" ]
[ { "code": "", "text": "I am building an APP where users generate content, which is fed to other users. Users also have the option to block users and / or content.I need to know how to model this system that can scale.Let’s suppose an extreme example where a user has blocked over 1 million other users.Each time my server has to generate the list of content to return to this user, the query would have to check with this user’s array of 1 million items to check if the recommended content is blocked or not.How do modern web APPs solve this problem? I cannot think of any way to scale when we consider these extreme examples.", "username": "Big_Cat_Public_Safety_Act" }, { "code": "", "text": "this userwhich user?with this user’s array of 1 million itemsWhy such a big array? why not use 1 million rows for it?You know the viewer id (or current user id) and the poster id, so you can just do a query to check (viewer id, poster id) in the database, which is supposed to have an index.", "username": "Kobe_W" }, { "code": "", "text": "That is my current solution. It is one block per row. This means that I have to do a $lookup to check for the blocked users.", "username": "Big_Cat_Public_Safety_Act" } ]
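To make the "one block per row" variant concrete: with a compound index on the blocks collection, the per-post check the replies describe can be folded into the feed pipeline itself. The sketch below is illustrative only; the posts/blocks collection names and the authorId/blockerId/blockedId/createdAt field names are assumptions, as is the placeholder viewer id.

  // mongosh sketch: build a feed page while excluding authors the viewer has blocked
  db.blocks.createIndex({ blockerId: 1, blockedId: 1 }, { unique: true });

  const viewerId = ObjectId("000000000000000000000000"); // placeholder for the current user

  db.posts.aggregate([
    { $sort: { createdAt: -1 } },
    { $limit: 200 }, // bound the candidate page before the per-author lookup
    {
      $lookup: {
        from: "blocks",
        let: { author: "$authorId" },
        pipeline: [
          { $match: { $expr: { $and: [
            { $eq: ["$blockerId", viewerId] },
            { $eq: ["$blockedId", "$$author"] }
          ] } } },
          { $limit: 1 }
        ],
        as: "block"
      }
    },
    { $match: { block: { $size: 0 } } }, // keep posts whose author is not blocked
    { $limit: 50 }
  ]);

Because the lookup runs once per candidate post rather than once per blocked user, the cost tracks the page size, not the size of the block list.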
How to model blocked users?
2023-05-12T08:18:28.054Z
How to model blocked users?
927
https://www.mongodb.com/…4_2_1023x535.png
[ "node-js", "mongoose-odm", "connecting" ]
[ { "code": "", "text": "error1841×962 85 KB", "username": "VEDANT_VYAS" }, { "code": "", "text": "There should be more information further up the error trace.Are you sure mongod is running ?", "username": "chris" } ]
I am unable to connect mongodb with node js
2023-05-12T08:40:10.246Z
I am unable to connect mongodb with node js
676
null
[ "database-tools", "backup" ]
[ { "code": "", "text": "Hi Team,I have taken a backup with mongodump from Atlas MongoDB, and while I restore the database to AWS DocumentDB I am getting the below error.don’t know what to do with file “/root/path/filename.metadata.json.gz”, skippingI have also added the --db and --nsInclude options but am getting the same issue. How can I solve this issue? Kindly can someone help me overcome it.Thanks,\nKrishnakumar K", "username": "KRISHNAKUMAR_K" }, { "code": "", "text": "Atlas MongoDB Version is 6.0\nAWS DocumentDB Version is 5.0I have used the below command for the restore:mongorestore --ssl --host=“:27017” --username= --password= --authenticationDatabase=admin --sslCAFile rds-combined-ca-bundle.pem /home/ubuntu/I have added -d, --db and --nsInclude but am getting the same error", "username": "KRISHNAKUMAR_K" }, { "code": "", "text": "Documentdb is not MongoDB. mongodb-database-tools are developed and tested for use with mongodb, I would not expect interoperability.", "username": "chris" } ]
Don't know what to do with file "/root/path/filename.metadata.json.gz", skipping
2023-05-12T11:01:18.871Z
Don’t know what to do with file “/root/path/filename.metadata.json.gz”, skipping
1,112
null
[]
[ { "code": "", "text": "The MongoDB server is continuously utilizing 15-16 GB of RAM out of 32 GB total in a Windows environment.\nThere are continuous select and insert operations running on a MongoDB collection which contains more than 200 million documents.\nSo how can we reduce the RAM utilization of the MongoDB server?", "username": "Nikhil_Shinde1" }, { "code": "", "text": "A fully utilised WiredTiger cache would run to that.Default cache size is (System RAM - 1GB) / 2", "username": "chris" } ]
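To see whether that default is what is actually in effect, the configured ceiling can be read from serverStatus, and it can be lowered either in mongod.conf (storage.wiredTiger.engineConfig.cacheSizeGB) or at runtime. The 8 GB figure below is only an example value, not a recommendation.

  // mongosh sketch: inspect and (optionally) cap the WiredTiger cache
  const cache = db.serverStatus().wiredTiger.cache;
  print("maximum bytes configured:", cache["maximum bytes configured"]);
  print("bytes currently in the cache:", cache["bytes currently in the cache"]);

  // Runtime adjustment without a restart; make it permanent via
  // storage.wiredTiger.engineConfig.cacheSizeGB in mongod.conf
  db.adminCommand({ setParameter: 1, wiredTigerEngineRuntimeConfig: "cache_size=8G" });

Note that a smaller cache usually trades RAM for more read I/O, so the working set should still fit comfortably.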
Mongo DB Utilizing too much ram
2023-05-10T05:02:52.625Z
Mongo DB Utilizing too much ram
441
null
[ "atlas-cluster", "ruby", "mongoid-odm" ]
[ { "code": "", "text": "I am using Rails 6, Ruby 2.7.3 along with\nmongo (2.17.1)\nmongo_auto_increment (0.1.3)\nmongoid (7.0.13)\nmongoid-compatibility (0.6.0)cluster version - 6.0.5I have setup cluster on mongo cloud but when our rails server trying to connect to mongo cluster we are getting issue, passing url like this to our applicationMONGO_URL = mongodb+srv://username:[email protected]/db_name?retryWrite=true&w=majorityError: ArgumentError: Host should not contain protocol. Did you mean to not use an array?\"Can anyone please guide how to fix this issue", "username": "Anand_Kumar_Tripathi" }, { "code": "", "text": "ArgumentError: Host should not contain protocolThis error is coming from the client validating an array of addresses and one contains what it thinks is the protocol. Can you share the actual connection string (without the username/passowrd) you’re using?", "username": "alexbevi" }, { "code": "", "text": "Thanks @alexbevi for your reply\nI am using below url string only which i am passing to rails application using environment variable MONGO_URLmongodb+srv://username:[email protected]/?retryWrites=true&w=majoritySame string i got from cluster config", "username": "Anand_Kumar_Tripathi" }, { "code": "[1] pry(main)> require 'bundler/inline'\n=> true\n[2] pry(main)> gemfile do\n[2] pry(main)* gem \"mongo\", \"2.17.1\"\n[2] pry(main)* end\n=> \"\"\n[3] pry(main)> Mongo::Client.new('mongodb+srv://username:[email protected]/?retryWrites=true&w=majority')\n=> #<Mongo::Client:0x1900 cluster=#<Cluster topology=ReplicaSetWithPrimary[yabx-mtn-zm-zedfin-shard-00-00.ru84g.mongodb.net:27017,yabx-mtn-zm-zedfin-shard-00-01.ru84g.mongodb.net:27017,yabx-mtn-zm-zedfin-shard-00-02.ru84g.mongodb.net:27017,name=atlas-quqaof-shard-0,v=1,e=7fffffff0000000000000002] servers=[#<Server address=yabx-mtn-zm-zedfin-shard-00-00.ru84g.mongodb.net:27017 SECONDARY replica_set=atlas-quqaof-shard-0 pool=#<ConnectionPool size=0 (0-5) used=0 avail=0 pending=0>>,#<Server address=yabx-mtn-zm-zedfin-shard-00-01.ru84g.mongodb.net:27017 SECONDARY replica_set=atlas-quqaof-shard-0 pool=#<ConnectionPool size=0 (0-5) used=0 avail=0 pending=0>>,#<Server address=yabx-mtn-zm-zedfin-shard-00-02.ru84g.mongodb.net:27017 PRIMARY replica_set=atlas-quqaof-shard-0 pool=#<ConnectionPool size=0 (0-5) used=0 avail=0 pending=0>>]>>", "text": "@Anand_Kumar_Tripathi the issue doesn’t appear to be with your connection string - at least not directly:=> #<Mongo::Client:0x1900 cluster=#<Cluster topology=ReplicaSetWithPrimary[yabx-mtn-zm-zedfin-shard-00-00.ru84g.mongodb.net:27017,yabx-mtn-zm-zedfin-shard-00-01.ru84g.mongodb.net:27017,yabx-mtn-zm-zedfin-shard-00-02.ru84g.mongodb.net:27017,name=atlas-quqaof-shard-0,v=1,e=7fffffff0000000000000002] servers=[#<Server address=yabx-mtn-zm-zedfin-shard-00-00.ru84g.mongodb.net:27017 SECONDARY replica_set=atlas-quqaof-shard-0 pool=#<ConnectionPool size=0 (0-5) used=0 avail=0 pending=0>>,#<Server address=yabx-mtn-zm-zedfin-shard-00-01.ru84g.mongodb.net:27017 SECONDARY replica_set=atlas-quqaof-shard-0 pool=#<ConnectionPool size=0 (0-5) used=0 avail=0 pending=0>>,#<Server address=yabx-mtn-zm-zedfin-shard-00-02.ru84g.mongodb.net:27017 PRIMARY replica_set=atlas-quqaof-shard-0 pool=#<ConnectionPool size=0 (0-5) used=0 avail=0 pending=0>>]>>Note that the driver here is able to parse the connection string, parse the host list (from the SRV) record and create a topology (replica set with 3 members). 
If you can create a self-contained reproduction using mongoid that you could share that demonstrates the issue I’d be happy to review further.", "username": "alexbevi" } ]
Cluster Url connection not happening from rails server
2023-05-12T14:45:58.066Z
Cluster Url connection not happening from rails server
776
null
[ "containers" ]
[ { "code": "", "text": "The default syslog logging is to localhost UDP 514.\nIs there an option to send syslog to a different port in the configuration file?\nThis is for a workaround on a Docker container running as a non-root user.", "username": "Andrew_Hsia" }, { "code": "", "text": "I don’t think this is configurable for mongod.Assuming you’re running the image from Docker Hub, in Docker you can set a different log driver and have Docker take care of the syslog.", "username": "chris" }, { "code": "", "text": "Thanks for the reply. Unfortunately, we are running in a k8s env without any elevated permissions, so the logging options that require a log collector running as a DaemonSet will not be possible.", "username": "Andrew_Hsia" } ]
Syslog on different UDP port
2023-05-10T06:37:08.929Z
Syslog on different UDP port
803
null
[ "aggregation", "node-js" ]
[ { "code": "$lookuplet is not supported$lookupletpipeline", "text": "I am working on Node.js and when I connect node.js with mongo database which is on Azure cosmos. it show an error “MongoServerError: let not supported”.\nI am using Mongodb version 4.4. how to fix this issue\nWhen i read microsoft document i show“The $lookup aggregation does not yet support the uncorrelated subqueries feature introduced in server version 3.6. You will receive an error with a message containing let is not supported if you attempt to use the $lookup operator with let and pipeline fields”", "username": "Avish_Pratap_Singh" }, { "code": "", "text": "Hi @Avish_Pratap_Singh and welcome in the MongoDB Community !Cosmos is Cosmos and MongoDB is MongoDB. Cosmos tries to imitates some features of MongoDB but is severely lagging behind as you can see (we are about to release MongoDB 6.0 in the next few weeks).If you want the real MongoDB with ALL the features fully implemented and supported, take a real MongoDB cluster on MongoDB Atlas.You can also use Atlas directly from the Azure Marketplace.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "This is the same thread as MongoServerError: let not supported database is on Azure - #2 by steevej.", "username": "steevej" }, { "code": "\"from\":\"<collection>.find({'field':'<value>})\",", "text": "I have never seen the following syntax.\"from\":\"<collection>.find({'field':'<value>})\",Could you provide some working examples including sample documents?", "username": "steevej" } ]
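When the join condition is a plain equality, one workaround on Cosmos's emulation is to rewrite the let/pipeline form into the classic localField/foreignField form, which the Microsoft note quoted above does not exclude. The orders/customers names below are made up for illustration; on an actual MongoDB deployment (including Atlas) both forms work.

  // Form that trips the "let not supported" error on Cosmos:
  db.orders.aggregate([
    { $lookup: {
        from: "customers",
        let: { cid: "$customerId" },
        pipeline: [ { $match: { $expr: { $eq: ["$_id", "$$cid"] } } } ],
        as: "customer"
    } }
  ]);

  // Equivalent simple-equality join without let/pipeline:
  db.orders.aggregate([
    { $lookup: {
        from: "customers",
        localField: "customerId",
        foreignField: "_id",
        as: "customer"
    } }
  ]);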
$lookup and let and pipeline
2022-06-08T11:49:41.255Z
$lookup and let and pipeline
2,724
https://www.mongodb.com/…4_2_1024x512.png
[]
[ { "code": "", "text": "I am trying to implement sharding using mongodb. I found the below doc but I think it doesn’t mention how to enable the query router and how to use it for sharding purpose.Can anyone please tell me how to enable mongodb query router?thx", "username": "KK_Leung" }, { "code": "", "text": "", "username": "Kobe_W" } ]
How to enable mongodb query router for sharding
2023-05-12T08:18:44.091Z
How to enable mongodb query router for sharding
730
null
[]
[ { "code": "", "text": "I followed the guide here Install & Configure MongoDB on the Raspberry Pi | MongoDB but it doesn’t seem to work any longer - I get “Unable to locate package mongodb-org” error…What can I do?", "username": "Sasha_Sirotkin" }, { "code": "mongodb-orgsudo apt-get install -y mongodb-org\nUnable to locate package mongodb-orgmongodb-org", "text": "Hello @Sasha_Sirotkin,Welcome to the MongoDB Community forums Can you confirm that you have followed the tutorial steps and have all the prerequisites, such as “Ubuntu Server 20.04 (64-bit) for Raspberry Pi 3/4”, not the 32-bit version as mentioned in the tutorial?Can you also please confirm if it was working previously? If so, what has changed?I believe you are executing the following command to install the mongodb-org package:And, if you encounter the Unable to locate package mongodb-org error, then it might be possible that the latest version of MongoDB** downloaded by this command is not supported by the Ubuntu OS version you are using. Could you please provide your Ubuntu version, or refer to the Platform Support Notes for recommended MongoDB version for your operating system?**Note, the MongoDB documentation states that:“The official mongodb-org package always contains the latest version of MongoDB and is available from its dedicated repository.”Sharing some similar threads below which might be useful for you:I hope it helps!Regards,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "", "text": "You can sidestep this error, which is relevant if you are trying to run MongoDB 4.4. If you would like to run the latest versions of MongoDB on your Pi, see this thread.", "username": "Matt_Kneiser" } ]
Raspberry pi (again)
2023-04-18T20:06:52.034Z
Raspberry pi (again)
885
null
[ "installation" ]
[ { "code": "", "text": "Getting the below error while installing Mongo 6.0\nProblem: package mongodb-org-6.0.5-1.el8.x86_64 requires mongodb-org-database, but none of the providers can be installed - package mongodb-org-database-6.0.0-1.el8.x86_64 requires mongodb-org-database-tools-extra, but none of the providers can be installed - package mongodb-org-database-6.0.1-1.el8.x86_64 requires mongodb-org-database-tools-extra, but none of the providers can be installed - package mongodb-org-database-6.0.2-1.el8.x86_64 requires mongodb-org-database-tools-extra, but none of the providers can be installed - package mongodb-org-database-6.0.3-1.el8.x86_64 requires mongodb-org-database-tools-extra, but none of the providers can be installed - package mongodb-org-database-6.0.4-1.el8.x86_64 requires mongodb-org-database-tools-extra, but none of the providers can be installed - package mongodb-org-database-6.0.5-1.el8.x86_64 requires mongodb-org-database-tools-extra, but none of the providers can be installed - cannot install the best candidate for the job - nothing provides python3 needed by mongodb-org-database-tools-extra-6.0.0-1.el8.x86_64 - nothing provides python3 needed by mongodb-org-database-tools-extra-6.0.1-1.el8.x86_64 - nothing provides python3 needed by mongodb-org-database-tools-extra-6.0.2-1.el8.x86_64 - nothing provides python3 needed by mongodb-org-database-tools-extra-6.0.3-1.el8.x86_64 - nothing provides python3 needed by mongodb-org-database-tools-extra-6.0.4-1.el8.x86_64 - nothing provides python3 needed by mongodb-org-database-tools-extra-6.0.5-1.el8.x86_64My Linux platform architecture is\nOperating System: CentOS Linux 8 (Core) CPE OS Name: cpe:/o:centos:centos:8 Kernel: Linux 4.18.0-193.el8.x86_64 Architecture: x86-64", "username": "Shilpi_Priya_Sen" }, { "code": "yum install python3", "text": "What happens when you yum install python3 as that is the missing dependency.", "username": "chris" } ]
Problem during installation of Mongo 6.0: package mongodb-org-6.0.5-1.el8.x86_64 requires mongodb-org-database, but none of the providers can be installed
2023-05-11T06:01:15.261Z
Problem during installation of Mongo 6.0: package mongodb-org-6.0.5-1.el8.x86_64 requires mongodb-org-database, but none of the providers can be installed
1,110
null
[ "atlas-device-sync", "realm-web" ]
[ { "code": "", "text": "Howdy folks,Product for Realm here, we are interested in doing some user research on where we could take our Realm-Web SDK. Are you building a web app using MongoDB Realm? Today, the Realm-Web SDK provides a bunch of convenience APIs for accessing data stored on MongoDB Atlas and proxied through the MongoDB Realm platform which provides authentication and authorization as well as static hosting and severless functions which abstract away a bunch of the middleware logic that a web developer would normally need to write and deliver. But the actual fetching of data must still be performed by the developer with a traditional request/response model. Wouldn’t it be cool if you could leverage the same Realm Sync primitives that the mobile Realm SDKs leverage? We are even kicking around ideas of persisting the data in the browser which would enable developers to write fully offline PWAs with Realm Javascript!If you could help us answer some questions then we can look to add this to the product roadmap. If you could please email me at [email protected] we can set up a time to quickly chat about your use case.-Ian", "username": "Ian_Ward" }, { "code": "", "text": "The major point currently is the absence of Realm Sync in Realm WebHaving full browser-side Realm support is a MUST. Not only for PWAs with service worker and other features but also for SPAs, web pages. In short, everything that runs in the browser and must have the same functionality as the mobile or desktop version.", "username": "Michel_Chouinard" } ]
The Evolution of Realm-Web with Sync?
2020-11-21T09:05:50.425Z
The Evolution of Realm-Web with Sync?
2,923
null
[ "atlas-cluster", "golang", "serverless" ]
[ { "code": "", "text": "We are frequently experiencing broken pipe errors with our Serverless MongoDB Atlas instance. We have a golang application hosted in GCP (Cloud Run container) and connect from there to the MongoDB instance. It’s working fine in one environment with out any issues, but in another environment we are getting a few broken pipe errors a day (maybe 0.1 - 1 % of requests). Usually a retry succeeds directly after.Any idea what the reason might be? I don’t think there is a way to access the logs on an Atlas serverless instance, right? Wondering if we need to set specific connection parameters to prevent these issues.", "username": "Jan-Gerrit_Harms" }, { "code": "", "text": "Hey Jan! How is it going?I’m having the same issue I don’t have a solution for this apart of changing the database type or applying try and catches everywhere the database client fires a query - sad -. However, I wanted to share my thoughts in case somebody else faces the same issue.In my case, our team is running a NodeJs container + Prisma.io ORM to handle the database work over a GCP Cloud Run instance too.This issue arises very consistently when multiple queries hit the db serverless instance - around 3 read consecutive/parallel operations would trigger it fairly easily and consistently -, causing the same Broken pipe error you have found.My hypothesis is that the issue lies on how serverless instances are handling connections and its consequent scaling/provisioning whenever a query tries to hit the db. Looking at the charts mongodb atlas provides, I found that that error matches a connection drop on the database side (see the snapshot attached).\nimage3130×754 154 KB\nThis connection, of course, would have been closed by the db itself. Open thoughts:1.- GCP Cloud Run runs containers, which means that connections pools would be set and used/reused as long as the container is active and running while using MongoClient. Not knowing what specifically triggers a mongodb serverless instantiation, leaves me thinking if those reused connections could be causing this if, for example, mongodb serverless expect 1 query = 1 connection.2.- In my case, 3 queries are being fired, where the third one more often is the one that gets this Broken Pipe error. I wonder if queries 1 and 2 gets its own connection while the latter gets, let’s say, connection 1 reused and that could cause mongodb serverless to close the connection (e.g. 1 connection = 1 response and then closes).3.- I haven’t found documentation on any of this nor options to configure the scaling behaviour of mongodb serverless instances. I’ve read that min connections could be set at the client side on this doc https://www.mongodb.com/docs/manual/reference/connection-string/#mongodb-urioption-urioption.minPoolSize4.- Nevertheless, that’s an option for the client side and I don’t know if that could impact somehow the behaviour of mongodb serverless scaling (one way i think it could affect tho, is if the connections on the pool gets to 0 on the client for X ms while opening a new one and that somehow indicates mongodb serverless instance to scale down, dropping the connections and causing the consequent error)It’d be nice to have someone form mongodb to give some light on these issues. 
I know that serverless instances are now in preview, so we’re aware stuff like this can happen.In the meantime, to stays in the safe side, we’ve moved prod to a shared cluster, and it’s working like a charm.Hope this helps get the discussion moving ", "username": "Ian_Sebastian" }, { "code": "", "text": "MongoDB Go Driver engineer here:\nIt’s expected that the Atlas Serverless infrastructure will sometimes close in-use network sockets. We typically expect that the retryable reads and retryable writes behaviors of MongoDB drivers should allow any in-progress operations to be retried on another connection automatically.@Jan-Gerrit_Harms or @Ian_Sebastian, do you know if retryable reads and writes are enabled in your MongoDB driver configurations? Note that retryable reads/writes are enabled by default in all recent versions of the MongoDB Go Driver (and should be in all official MongoDB drivers).", "username": "Matt_Dale" }, { "code": "\tserverAPIOptions := options.ServerAPI(options.ServerAPIVersion1)\n\tclientOptions := options.Client().\n\t\tApplyURI(viper.GetString(config.MongoDBUrl)).\n\t\tSetServerAPIOptions(serverAPIOptions).\n SetMinPoolSize(viper.GetUint64(config.MongoDBMinPoolSize))\n\n\tclient, err := mongo.Connect(context.Background(), clientOptions)\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"unable to connect to mongodb\")\n\t}\n\terr = client.Ping(context.Background(), nil)\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"not connected to mongo\")\n\t}\nretryWrites=true&w=majorityretryReads", "text": "Thanks for you replies, both of you!@Matt_Dale We don’t explicitly disable retryable reads and write as far as I know. This is how we establish a connection:We even try to enable it for the writes. Our connection string has the following parameters:\nretryWrites=true&w=majority.Maybe we should also explicitly enable the retryReads, although if I understand you correct, it should be enabled by default.", "username": "Jan-Gerrit_Harms" }, { "code": "", "text": "@Jan-Gerrit_Harms @Ian_SebastianI would also advise bringing this up with the Atlas chat support team. They may be able to check if anything else on the Atlas side could have possibly caused this broken pipe message. In saying so, if a chat support is raised, please provide them with the following:Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "Hey @Jason_Tran @Matt_Dale !First of all, thanks for your answers . I haven’t had time to come back to this lately, so has been a while here.Coming back to this, in my case we are using prisma.io ORM to handle the database connections, which uses latest RUST native mongo driver to handle the connections in the back.As per this tread, it seems that everything prisma does it does it using transactions on aggregation pipelines, which could explain the lack of retries - and writeConflicts we’ve been seen around it -.One question I’d like to ask, is if the behavior of retries indeed changes when using transactions. As per the documentation shared by @Matt_Dale, it might seems so. Would that be correct ? does also RUST driver differ in this behavior in some way or another that could be affecting the retry behavior ?Thanks a lot!", "username": "Ian_Sebastian" }, { "code": "", "text": "Hi,@Ian_Sebastian Did you got the solution?, I am also facing this issue of broken-pipe using mongo-go driver. As suggested I have enabled retryReads and retryWrites, also have a minPoolSize of 20. But after this also, I am facing broken-pipe issue. 
I am not sure what to do to fix this.", "username": "Abhinav_Singh2" }, { "code": "go.mongodb.org/mongo-driver v1.11.3", "text": "I’m running into the same issue. I’m using the Go driver, go.mongodb.org/mongo-driver v1.11.3. It seems to happen after my server is inactive for a while. The first request will return this error, but subsequent requests are fine.", "username": "Sean_Johnson" } ]
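For readers hitting the same errors, Matt's point about retryable operations can be made concrete. Below is a minimal, hypothetical Go sketch (the URI, database, and collection names are placeholders, not taken from this thread): retryable reads and writes are spelled out explicitly on the connection string, and one extra application-level retry wraps the read for the rare case where the driver's own retry is also interrupted by a dropped socket.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// retryReads/retryWrites default to true in recent drivers; stating them makes the intent explicit.
	uri := "mongodb+srv://user:pass@example.mongodb.net/?retryWrites=true&retryReads=true&w=majority"
	opts := options.Client().
		ApplyURI(uri).
		SetServerAPIOptions(options.ServerAPI(options.ServerAPIVersion1))

	client, err := mongo.Connect(context.Background(), opts)
	if err != nil {
		panic(err)
	}
	defer client.Disconnect(context.Background())

	coll := client.Database("app").Collection("items")

	// One extra application-level retry on top of the driver's built-in retryable reads.
	var doc bson.M
	for attempt := 1; attempt <= 2; attempt++ {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		err = coll.FindOne(ctx, bson.M{}).Decode(&doc)
		cancel()
		if err == nil || errors.Is(err, mongo.ErrNoDocuments) {
			break
		}
		fmt.Printf("attempt %d failed: %v\n", attempt, err)
	}
	fmt.Println("result:", doc, "err:", err)
}
```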
Connection(<project-id>.mongodb.net:27017) unable to write wire message to network: write tcp <some-ip> -> <mongo atlas ip>: write: broken pipe
2022-11-30T15:03:35.128Z
Connection(<project-id>.mongodb.net:27017) unable to write wire message to network: write tcp <some-ip> -> <mongo atlas ip>: write: broken pipe
5,270
null
[ "change-streams" ]
[ { "code": "var cursor = rulesCollection.Watch(options).limit(2)\n", "text": "Hi Team,I am trying to add limit for changestream watch, however it is throwing below error.\nkindly help me to fix this issue.My code:Error:\nCS1016: IChangeStreamCursor<ChangeStreamDocument> does not contain a definition for limit and no accessible extension method. Limit accepting a first argument of type IChangeStreamCursor<ChangeStreamDocument> could be found (are you missing a using directive or an assembly reference?)Thanks,\nLalitha.C", "username": "Lalitha_Chevuru" }, { "code": "limit{\n _id: ObjectId(\"645cbb9f0119476b03b2ba0e\"),\n DaysSinceLastSale: 81,\n IsResidential: true,\n Owner1NameFull: 'Henry Hunter',\n AddressCity: 'Newport News',\n AddressState: 'Hawaii',\n AddressZIP: 264142,\n position: [ -50.32564, 69.42805 ],\n year: '2020'\n }\nfrom pymongo import MongoClient\n import itertools\n \n client = MongoClient('mongodb://localhost:27017')\n db = client['test']\n collection = db['sample']\n \n pipeline = [ { '$match': { 'year': '2020' } }, { '$project': { '_id': 0 } }]\n \n change_stream = collection.watch()\n \n for change in itertools.islice(change_stream, 7):\n print(change)\nreplset [direct: primary] test> db.sample.updateMany( { DaysSinceLastSale: { $gt: 40}}, { $set: { year: '2027'}})\n{\n acknowledged: true,\n insertedId: null,\n matchedCount: 12002,\n modifiedCount: 12002,\n upsertedCount: 0\n}\nLimit()", "text": "Hi @Lalitha_Chevuru and welcome to MongoDB community forums!!Although I attempted to use the limit function while watching the change stream, it appears that the current implementation does not support limiting the display of updated fields.To further explore this issue, I loaded a sample document in my local environment.and tried the following python code:with the update command as:The change stream retuned exactly 7 documents.Therefore, as Limit() is not a built-in feature of change streams, you can implement it at the application code level to achieve the desired functionality.Let us know if you have further questions.Regards\nAasawari", "username": "Aasawari" } ]
Limit option is not working for changestream.watch
2023-05-09T16:50:44.675Z
Limit option is not working for changestream.watch
785
null
[ "upgrading" ]
[ { "code": "", "text": "Hi Team,\nWhen i upgrade existing mongodb 4.0.25 to 6.0.4, i get below errorThis version of MongoDB is too recent to start up on the existing data files. Try MongoDB 4.2 or earlier.\nis there any better way to do upgrade from 4.0.25 to 6.0.4", "username": "Tirumala_Mannaru" }, { "code": "", "text": "You can’t skip major version in MongoDB. The upgrade path would be.\n4.0.25 → 4.2.x → 4.4.x → 5.0.x → 6.0.x", "username": "tapiocaPENGUIN" }, { "code": "", "text": "@tapiocaPENGUIN , Thanks for your answer.\nWe actually missed this path 4.0.25 → 4.2.x → 4.4.x → 5.0.x → 6.0.x.\nWe are running mongodb community version in kubernetes environment\nWhat we plan to do is,", "username": "Tirumala_Mannaru" }, { "code": "", "text": "It is not a supported or tested upgrade method. The documented one is.Some have had luck (or at least not come back to the forum) with export/import as your are planning.", "username": "chris" } ]
Cannot upgrade mongodb community version from 4.0.25 to 6.0.4
2023-05-10T09:02:48.163Z
Cannot upgrade mongodb community version from 4.0.25 to 6.0.4
1,010
null
[ "aggregation", "atlas-search" ]
[ { "code": "[\n {\n $search: {\n index: \"datasets\",\n compound: {\n must: [\n {\n text: {\n query: \"subscription\",\n path: \"name\",\n fuzzy: {\n maxEdits: 1,\n prefixLength: 3,\n },\n },\n },\n ],\n should: [\n {\n wildcard: {\n path: \"tags\",\n query: \"*\",\n allowAnalyzedField: true,\n score: {\n constant: { value: 100 },\n },\n },\n },\n ],\n },\n highlight: {\n path: [\"name\"],\n },\n },\n },\n {\n $addFields: {\n highlight: {\n $meta: \"searchHighlights\",\n },\n },\n },\n]\nsubscriptionnametagsstringnamecs_subscription_v2- value: \"cs\", type: \"hit\"\n- value: \"_\", type: \"text\"\n- value: \"subscription\", type: \"hit\"\n- value: \"_\", type: \"text\"\n- value: \"v2\", type: \"hit\"\nshould", "text": "We are using a search query:This query finds subscription in entity name, and also boosts the score if the entity has any tag. Both name and tags are string fields. The highlight should return matches in name.\nHowever, for name cs_subscription_v2 the highlight response isThere are 3 hits, while it should only be 1.We also notice that if we remove the should section using wildcard match in the query, then the highlight works correctly.Anyone knows why this happens, and how to walk around it? Is this a bug? Thanks!", "username": "Yi_Wang" }, { "code": "{ 'name' : 'cs_subscription_v2' }", "text": "Hi @Yi_Wang,Thanks for providing those details. Can you also share the following:Regards,\nJason", "username": "Jason_Tran" }, { "code": "[{\n \"entityId\": \"DATASET~17127EC430CE8D0D89D0EEDB808B2A40\",\n \"name\": \"metaphor-data.test.k_20220112\",\n \"description\": \"gator baits\",\n \"tags\": null\n},\n{\n \"entityId\": \"DATASET~17127EC430CE8D0D89D0EEDB808B2A40\",\n \"name\": \"metaphor-data.test.cs_subscription_v2\",\n \"description\": \"subscriptions v2\",\n \"tags\": [\"customer\"]\n},\n{\n \"entityId\": \"DATASET~17127EC430CE8D0D89D0EEDB808B2A40\",\n \"name\": \"metaphor-data.prod.subscription_replacement\",\n \"description\": \"subscription replacement\",\n \"tags\": [\"customer\", \"GOLD\"]\n}]\n{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"description\": {\n \"type\": \"string\"\n },\n \"tags\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"stringFacet\"\n }\n ],\n \"name\": [\n {\n \"analyzer\": \"delimiter_pattern\",\n \"multi\": {\n \"keyword\": {\n \"analyzer\": \"keyword_lowercase\",\n \"type\": \"string\"\n }\n },\n \"type\": \"string\"\n }\n ]\n }\n },\n \"analyzers\": [\n {\n \"name\": \"delimiter_pattern\",\n \"tokenFilters\": [\n {\n \"type\": \"lowercase\"\n }\n ],\n \"tokenizer\": {\n \"pattern\": \"[ \\\\.\\\\/\\\\-_,;:]+\",\n \"type\": \"regexSplit\"\n }\n },\n {\n \"name\": \"keyword_lowercase\",\n \"tokenFilters\": [\n {\n \"type\": \"lowercase\"\n }\n ],\n \"tokenizer\": {\n \"type\": \"keyword\"\n }\n }\n ]\n}\n", "text": "Hi Jason, thanks for looking into this issue!Some sample documents:index spec", "username": "Yi_Wang" }, { "code": "cs_subscription_v2- value: \"cs\", type: \"hit\"\n- value: \"_\", type: \"text\"\n- value: \"subscription\", type: \"hit\"\n- value: \"_\", type: \"text\"\n- value: \"v2\", type: \"hit\"\n\"text\": {\n \"path\": \"description\",\n \"query\": \"varieties\"\n }\n\"varierties\"\"description\"highlight\"description\"\"summary\"\"highlights\" : [\n {\n \"path\" : \"summary\", /// <--- summary path\n \"texts\" : [\n {\n \"value\" : \"Pear \",\n \"type\" : \"text\"\n },\n {\n \"value\" : \"varieties\",\n \"type\" : \"hit\" /// <--- hit on summary path even though it is not part of the `text` operator 
path\n }\n ],\n \"score\" : 1.3891443014144897 },\n {\n \"path\" : \"description\",\n \"texts\" : [\n {\n \"value\" : \"Bosc and Bartlett are the most common \",\n \"type\" : \"text\"\n },\n {\n \"value\" : \"varieties\",\n \"type\" : \"hit\"\n },\n {\n \"value\" : \" of pears.\",\n \"type\" : \"text\"\n }\n ],\n \"score\" : 1.2691514492034912\n }\nshould\"*\"\"hit\"\"name\"\"c*\"[\n {\n _id: ObjectId(\"645c6c375d5c23c575e620b8\"),\n entityId: 'DATASET~17127EC430CE8D0D89D0EEDB808B2A40',\n name: 'metaphor-data.test.cs_subscription_v2',\n description: 'subscriptions v2',\n tags: [ 'customer' ],\n highlight: [\n {\n score: 1.321254014968872,\n path: 'name',\n texts: [\n { value: 'metaphor-data.test.', type: 'text' },\n { value: 'cs', type: 'hit' },\n { value: '_subscription_v2', type: 'text' }\n ]\n }\n ]\n },\n {\n _id: ObjectId(\"645c6c375d5c23c575e620b9\"),\n entityId: 'DATASET~17127EC430CE8D0D89D0EEDB808B2A40',\n name: 'metaphor-data.prod.subscription_replacement',\n description: 'subscription replacement',\n tags: [ 'customer', 'GOLD' ],\n highlight: []\n }\n]\n", "text": "Hi @Yi_Wang,Thanks for providing those details and sample documents.However, for name cs_subscription_v2 the highlight response isThere are 3 hits, while it should only be 1.I believe the behaviour you’re experiencing here can also be shown in the following example highlighted in our documentation. More specifically related to the example, the query itself only matches for:i.e. Documents that match the text \"varierties\" for the path \"description\".Yet, since the highlight is on both \"description\" and \"summary\", the example shows hits for both:In short, the search highlighting metadata feature by design will return a \"hit \" for any of the above terms occurring in ANY of the below highlight paths from the matching result set.We also notice that if we remove the should section using wildcard match in the query, then the highlight works correctly.As for your particular example, the should with the wildcard query value \"*\" is a \"hit\" on the values you provided in your highlight response relating to the path \"name\". If I were to change it to \"c*\" then the highlight response would be (for the sample documents you provided):Hope the above helps. If it’s not the desired behaviour then you can raise a feedback post in regards to your use case.Regards,\nJason", "username": "Jason_Tran" } ]
Search highlight is incorrect when using wildcard query
2023-05-09T20:24:18.410Z
Search highlight is incorrect when using wildcard query
703
null
[ "charts" ]
[ { "code": "", "text": "Hi,Is there a way to resize the charts on the dashboard to any size that I like? currently it seems there are predefined sizes the chart snapped to when I resize.\nThx.", "username": "YuvalW" }, { "code": "", "text": "Not on dashboards. The grid is there to make it easy to align charts without resorting to pixel-pushing. When you embed charts you can decide the exact dimensions you want.", "username": "tomhollander" }, { "code": "", "text": "Thank you. It would be nice to have smaller increments for the snapping.\n@tomhollander is there a place to write feature requests? besides this it would also be nice to have a better “cross” when hovering chart data, currently it is really difficult to see the data points using a mouse on the chartHere is an example from another charting system, no matter where my cursor is I can see the data point. With mongodb charts my cursor has to be exactly on the chart itself, very difficult…\n", "username": "YuvalW" }, { "code": "", "text": "Thanks @YuvalW. We have a site feedback.mongodb.com for feature requests. The suggestion you made is a good one and we’ll see if we can get this added in.", "username": "tomhollander" } ]
Disable chart snapping when resizing in dashboard
2023-05-10T14:17:18.504Z
Disable chart snapping when resizing in dashboard
830
https://www.mongodb.com/…4_2_1024x512.png
[ "queries", "compass", "mongodb-shell", "time-series" ]
[ { "code": "{\n cursor: {\n id: Long(\"0\"),\n ns: 'aether.$cmd.listCollections',\n firstBatch: [\n [Object], [Object],\n [Object], [Object],\n [Object], [Object],\n [Object]\n ]\n },\n ok: 1\n}\n", "text": "Hi All,listCollections doesn’t appear to give the correct documented output in mongosh (used in Compass).So I have 3 time series collections in a database XXXI’ve switched to XXX with this command. use XXXThen tried: db.runCommand( { listCollections: 1.0 } )and got:I’m expecting an output like the one documented here:Anyone know what’s going on here?Chris", "username": "Chris_Swainson" }, { "code": "mongoshinspectDepthmongoshinspectDepth", "text": "Hey @Chris_Swainson listCollections doesn’t appear to give the correct documented output in mongosh (used in Compass).So it’s showing correctly in Compass but not via the mongosh? (Or at least it’s not displaying as you are expecting?)I haven’t tested this yet but it might be possibly due to the inspectDepth setting on mongosh. Can you try configuring the inspectDepth to a higher value and test again?Let me know how it goes.Regards,\nJason", "username": "Jason_Tran" }, { "code": "inspectDepth2db> db.runCommand({listCollections:1.0})\n{\n cursor: {\n id: Long(\"0\"),\n ns: 'db.$cmd.listCollections',\n firstBatch: [ [Object], [Object] ]\n },\n ok: 1,\n...\ninspectDepthdb> config.set('inspectDepth',10)\nSetting \"inspectDepth\" has been changed\n\ndb> db.runCommand({listCollections:1.0})\n{\n cursor: {\n id: Long(\"0\"),\n ns: 'db.$cmd.listCollections',\n firstBatch: [\n {\n name: 'weather',\n type: 'timeseries',\n options: {\n timeseries: {\n timeField: 'timestamp',\n metaField: 'metadata',\n granularity: 'hours',\n bucketMaxSpanSeconds: 2592000\n }\n },\n info: { readOnly: false }\n },\n {\n name: 'system.buckets.weather',\n type: 'collection',\n options: {\n validator: {\n '$jsonSchema': {\n bsonType: 'object',\n required: [ '_id', 'control', 'data' ],\n properties: {\n _id: { bsonType: 'objectId' },\n control: {\n bsonType: 'object',\n required: [ 'version', 'min', 'max' ],\n properties: {\n version: { bsonType: 'number' },\n min: {\n bsonType: 'object',\n required: [Array],\n properties: [Object]\n },\n max: {\n bsonType: 'object',\n required: [Array],\n properties: [Object]\n },\n closed: { bsonType: 'bool' }\n }\n },\n data: { bsonType: 'object' },\n meta: {}\n },\n additionalProperties: false\n }\n },\n clusteredIndex: true,\n timeseries: {\n timeField: 'timestamp',\n metaField: 'metadata',\n granularity: 'hours',\n bucketMaxSpanSeconds: 2592000\n }\n },\n info: {\n readOnly: false,\n uuid: new UUID(\"eacd1dcd-6891-4679-9178-5103060e1ae1\")\n }\n }\n ]\n },\n ok: 1\n", "text": "Briefly tested using the example timeseries data from the documentation link you provided and I got the following with an inspectDepth of 2:Changed inspectDepth value to 10 and got the following output:Hopefully this is what you were after or helps you out.", "username": "Jason_Tran" }, { "code": ">use xxx\n<'switched to db xxx'\n\n>config.set('inspectDepth',10)\n<'Option \"inspectDepth\" is not available in this environment'\n", "text": "Thanks for your quick reply Jason.Unfortunately I get:Chris", "username": "Chris_Swainson" }, { "code": "mongoshconfigdb>config\nMap(12) {\n 'displayBatchSize' => 20,\n 'maxTimeMS' => null,\n 'enableTelemetry' => true,\n 'editor' => null,\n 'snippetIndexSourceURLs' => 'https://compass.mongodb.com/mongosh/snippets-index.bson.br',\n 'snippetRegistryURL' => 'https://registry.npmjs.org',\n 'snippetAutoload' => true,\n 
'inspectCompact' => 3,\n 'inspectDepth' => 10,\n 'historyLength' => 1000,\n 'showStackTraces' => false,\n 'redactHistory' => 'remove'\n}\n", "text": "What version of mongosh are you currently using? I'm currently using 1.6.0 on my test environment (for reference).Additionally, do you get any output when running config? e.g.:", "username": "Jason_Tran" }, { "code": "> config\n< {}\n> version()\n< '1.8.0'\n", "text": "I'm using the _MONGOSH terminal at the bottom of Compass.Chris", "username": "Chris_Swainson" }, { "code": "", "text": "I'm using the _MONGOSH terminal at the bottom of Compass.Ah gotcha - makes sense now.Going to check if inspectDepth can be configured here…Regards,\nJason", "username": "Jason_Tran" }, { "code": "EJSON.stringify(\n db.runCommand( {\n listCollections: 1,\n filter: {}\n })\n)\n", "text": "Does running:work for you?Regards,\nJason", "username": "Jason_Tran" }, { "code": "inspectDepthmongoshmongosh", "text": "Currently it's not possible to set the inspectDepth in the embedded mongosh shell in MongoDB Compass, but hopefully the above workaround suits you for now.Alternatively, you can download the standalone mongosh shell and configure it from there using the commands I had posted previously.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "Thanks for your help Jason,The workaround worked.It's not great that for whatever reason the mongosh shell in Compass won't handle this. Nothing really indicates it's 'light' or 'minimal' or 'beta' etc.I'll get the standalone version.Thanks for your time. Very prompt responses and much appreciated.Chris", "username": "Chris_Swainson" }, { "code": "", "text": "Glad the workaround worked and thanks for updating the post. I understand that it's not ideal that the embedded mongosh shell isn't able to configure those particular settings at this stage, so I've raised this internally as a form of feedback.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "", "username": "Jason_Tran" } ]
listCollections not giving documented output
2023-05-10T21:31:25.839Z
listCollections not giving documented output
825
null
[ "python", "transactions" ]
[ { "code": "pymongo.errors.WriteConcernError: operation exceeded time limit, full error: {'code': 50, 'codeName': 'MaxTimeMSExpired', 'errmsg': 'operation exceeded time limit', 'errInfo': {'writeConcern': {'w': 1, 'wtimeout': 0}}}pymongo.errors.ExecutionTimeout: operation exceeded time limit, full error: {'operationTime': Timestamp(1683746467, 1), 'ok': 0.0, 'errmsg': 'operation exceeded time limit', 'code': 50, 'codeName': 'MaxTimeMSExpired', '$clusterTime': {'clusterTime': Timestamp(1683746469, 999), 'signature': {'hash': b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', 'keyId': 0}}}", "text": "I’m using pymongo and mongodb 4.4\nwhen im start transaction as:\nstart_transaction(ReadConcern(“majority”),WriteConcern(“majority”), ReadPreference.PRIMARY, max_commit_time_ms=1)Using this transaction i bulk_insert 1000 document to db, however pymongo gives 2 different exception time to time.In this error even tho it gives error and transaction abort, the data still written on database.In this error transaction aborts and no data written on disk.My question is why i take different exceptions time to time even tho i do same task. And why 1. one do not do rollback?", "username": "ismail_kahraman" }, { "code": "", "text": "This behavior is expected. WriteConcernError means that the commit succeeded on the primary but failed to replicate the commit with the configured writeConcern within the 1ms timeout. In this case your app can recover the state of the transaction by running session.commit_transaction() again.The ExecutionTimeout error means that the server rejected the commit because it failed to succeed before the 1ms timeout. I believe the app can retry the session.commit_transaction() operation in this case as well but it’s possible the server automatically aborts the transaction in this case like you say (I’ll need to test this out to confirm the behavior here).2 different exceptions are possible because the 1ms timeout is non-deterministic, in some cases the server may be able to commit the transaction in time, in other cases there might be resource contention which makes the commit take longer than 1ms.The driver does not automatically retry the commit in either of these cases because these errors mean the configured timeout was exceeded.Is this helpful?", "username": "Shane" }, { "code": "db.adminCommand( { setParameter: 1, transactionLifetimeLimitSeconds: x } )\n", "text": "Thanks for the answer Shane,When i encounter with a error, i abort the transaction via pymongo. In WriteConcernError case even i abort the transaction, the data written into mongodb. Is this thing a case, because i was thinking transaction guarantees when an error occurs database will rollback the previous state.Secondly, when i set transactionLifetimeLimitSeconds in mongo shell, it seems like its guarantees all the time when life time exceeds database rollback to previous state. Is there any mechanism difference between pymongo’s transaction.abort() and this admincommand?", "username": "ismail_kahraman" }, { "code": "", "text": "Transaction is rolled back only when unexpected things happen within a transaction context.transactionLifetimeLimitSeconds controls the max time allowed for a transaction to process. So it is within a transaction context.Write concern error means transaction it self succeeds, but replication fails to finish within the set timeout. 
So it’s replication context, and has nothing to do with your transaction operations.", "username": "Kobe_W" } ]
Mongo Transaction doesnt rollback on WriteConcernError
2023-05-10T19:55:33.030Z
Mongo Transaction doesnt rollback on WriteConcernError
939
null
[ "node-js", "dot-net", "containers", "field-encryption" ]
[ { "code": "RUN apt-get update\nRUN apt-get install libc6-dev -y\nMongoDB.Driver.Encryption.MongoEncryptionException: Encryption related exception: Exception starting mongocryptd process. Is mongocryptd on the system path?.\n ---> MongoDB.Driver.MongoClientException: Exception starting mongocryptd process. Is mongocryptd on the system path?\n ---> System.ComponentModel.Win32Exception (2): An error occurred trying to start process 'mongocryptd' with working directory '/app'. No such file or directory\nFROM mcr.microsoft.com/dotnet/aspnet:6.0 AS base\nRUN apt-get update\nRUN apt-get install libc6-dev -y\n\nWORKDIR /app\nEXPOSE 80\nEXPOSE 443\n\nFROM mcr.microsoft.com/dotnet/sdk:6.0 AS build\nWORKDIR /src\nCOPY [\"UbkDockerMongoAzSQL.csproj\", \".\"]\nRUN dotnet restore \"./UbkDockerMongoAzSQL.csproj\"\nCOPY . .\nWORKDIR \"/src/.\"\nRUN dotnet build \"UbkDockerMongoAzSQL.csproj\" -c Release -o /app/build\n\nFROM build AS publish\nRUN dotnet publish \"UbkDockerMongoAzSQL.csproj\" -c Release -o /app/publish /p:UseAppHost=false\n\nFROM base AS final\nWORKDIR /app\nCOPY --from=publish /app/publish .\nENTRYPOINT [\"dotnet\", \"UbkDockerMongoAzSQL.dll\"]\n", "text": "Hi All,I have been trying to setup a demo project, with the hope of using CSFLE feature in a production application running in MongoDB Atlas 6.0 version, using .NET 6 C# language.I have seen the installation documentation and various other topics which are available through Google search and MongoDb community forums, however, I am currently stuck with an error which suggests that the mongocryptd is not found in path.\nI am using aspnet:6.0 base image and the demo code found at Microsoft site : Create a web API with ASP.NET Core and MongoDB | Microsoft Learn\nFor the testing I have created a docker container from mongodb/mongodb-enterprise-server:latest image.\nReading and writing data from my POC is working well, without the CSFLE feature.When I tried to add the feature using the code present here: Client-Side Encryption, I found the first problem with libdl library, which I solved by adding following commands in my docker file:Now, that error is gone, but I have the error sayingI understand the issue is with the mongodb-enterprise-cryptd is not installed within my web api container and I need to install it somehow. Checked the example from this page: field-level-encryption-docker/Dockerfile at main · sindbach/field-level-encryption-docker · GitHub, which gave a sample for nodejs, but I am not able to do the same thing in this Microsoft base image. As you can understand I am new to CSFLE, any help will be greatly appreciated. Following is the docker file that came with the demo:", "username": "UB_K" }, { "code": "mongocryptdlibmongocryptmongocryptd", "text": "Hi, @UB_K,Welcome to the MongoDB Community Forums. I understand that you’re having trouble getting FLE configured with mongocryptd in a Docker container. I would suggest that you try using the libmongocrypt shared library instead. The shared library has all the same features as mongocryptd, but is much more Docker-friendly. 
You can find out more in Automatic Encryption Shared Library for Queryable Encryption and Install libmongocrypt.Sincerely,\nJames", "username": "James_Kovacs" }, { "code": "1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #6 [base 3/7] RUN curl -k -fsSL https://pgp.mongodb.com/server-6.0.asc | sudo gpg -o /usr/share/keyrings/mongodb-server-6.0.gpg --dearmor\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #6 sha256:66bf736adf962edc07bee3ef1e7bdf08f31ebe43385b6e603fd9c6e9a8066df6\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #6 DONE 1.4s\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: \n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #7 [base 4/7] RUN echo \"deb [ signed-by=/usr/share/keyrings/mongodb-server-6.0.gpg ] https://repo.mongodb.com/apt/debian bullseye/mongodb-enterprise/6.0 main\" | sudo tee /etc/apt/sources.list.d/mongodb-enterprise.list\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #7 sha256:3df55a3077719f2335a31cd2405ef634a198012e1bea65ca4d09d59f80f774de\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #7 0.488 deb [ signed-by=/usr/share/keyrings/mongodb-server-6.0.gpg ] https://repo.mongodb.com/apt/debian bullseye/mongodb-enterprise/6.0 main\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #7 DONE 0.5s\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: \n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 [base 5/7] RUN sudo apt-get update\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 sha256:af7698bc3db5636411ee7ab24086559536b79bb69ac00d4474fcbf679a030931\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 0.609 Hit:1 http://deb.debian.org/debian bullseye InRelease\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 0.625 Hit:2 http://deb.debian.org/debian-security bullseye-security InRelease\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 0.650 Hit:3 http://deb.debian.org/debian bullseye-updates InRelease\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 1.077 Err:4 https://repo.mongodb.com/apt/debian bullseye/mongodb-enterprise/6.0 InRelease\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 1.077 Certificate verification failed: The certificate is NOT trusted. The certificate issuer is unknown. Could not handshake: Error in the certificate verification. [IP: 54.X.X.X 443]\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 1.079 Reading package lists...\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 1.451 W: Failed to fetch https://repo.mongodb.com/apt/debian/dists/bullseye/mongodb-enterprise/6.0/InRelease Certificate verification failed: The certificate is NOT trusted. The certificate issuer is unknown. Could not handshake: Error in the certificate verification. [IP: 54.X.X.X 443]\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 1.451 W: Some index files failed to download. 
They have been ignored, or old ones used instead.\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #8 DONE 1.5s\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: \n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #9 [base 6/7] RUN sudo apt-get install -y mongodb-enterprise-cryptd\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #9 sha256:6adb10ab657da49ae3f4e6d54ccd1ef20583bf4a41733ffd27f7d5d8f27b27e0\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #9 0.493 Reading package lists...\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #9 0.862 Building dependency tree...\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #9 0.980 Reading state information...\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #9 1.069 E: Unable to locate package mongodb-enterprise-cryptd\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: #9 ERROR: executor failed running [/bin/sh -c sudo apt-get install -y mongodb-enterprise-cryptd]: exit code: 100\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: ------\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: > [base 6/7] RUN sudo apt-get install -y mongodb-enterprise-cryptd:\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: ------\n1>C:\\source\\repos\\UbkDockerMongoAzSQL\\Dockerfile : error CTC1014: executor failed running [/bin/sh -c sudo apt-get install -y mongodb-enterprise-cryptd]: exit code: 100\n1>Done building project \"UbkDockerMongoAzSQL.csproj\" -- FAILED.\n# sudo sh -c 'curl -s --location https://www.mongodb.org/static/pgp/libmongocrypt.asc | gpg --dearmor >/etc/apt/trusted.gpg.d/libmongocrypt.gpg'\ngpg: no valid OpenPGP data found.\n# echo \"deb https://libmongocrypt.s3.amazonaws.com/apt/debian buster/libmongocrypt/1.7 main\" | sudo tee /etc/apt/sources.list.d/libmongocrypt.list\ndeb https://libmongocrypt.s3.amazonaws.com/apt/debian buster/libmongocrypt/1.7 main\n# sudo apt-get update\nHit:1 http://deb.debian.org/debian bullseye InRelease\nErr:2 https://repo.mongodb.com/apt/debian bullseye/mongodb-enterprise/6.0 InRelease\n Certificate verification failed: The certificate is NOT trusted. The certificate issuer is unknown. Could not handshake: Error in the certificate verification. [IP: 54.X.X.X 443]\nHit:3 http://deb.debian.org/debian-security bullseye-security InRelease\nErr:4 https://libmongocrypt.s3.amazonaws.com/apt/debian buster/libmongocrypt/1.7 InRelease\n Certificate verification failed: The certificate is NOT trusted. The certificate issuer is unknown. Could not handshake: Error in the certificate verification. [IP: 3.X.X.X 443]\nHit:5 http://deb.debian.org/debian bullseye-updates InRelease\nReading package lists... Done\nW: Failed to fetch https://libmongocrypt.s3.amazonaws.com/apt/debian/dists/buster/libmongocrypt/1.7/InRelease Certificate verification failed: The certificate is NOT trusted. The certificate issuer is unknown. Could not handshake: Error in the certificate verification. [IP: 3.X.X.X 443]\nW: Failed to fetch https://repo.mongodb.com/apt/debian/dists/bullseye/mongodb-enterprise/6.0/InRelease Certificate verification failed: The certificate is NOT trusted. The certificate issuer is unknown. Could not handshake: Error in the certificate verification. [IP: 54.X.X.X 443]\nW: Some index files failed to download. 
They have been ignored, or old ones used instead.\n# sudo apt-get install -y libmongocrypt\nReading package lists... Done\nBuilding dependency tree... Done\nReading state information... Done\nE: Unable to locate package libmongocrypt\n#\n", "text": "Thank you @James_Kovacs for your quick response. I have seen the new shared library, but as per the installation document it's still in public preview, whereas we need to deploy the CSFLE solution in production as soon as the POC succeeds.I feel the issue is with Docker not trusting the MongoDB repo certificate.The same issue occurs when I try to get the libmongocrypt library using the knowledge pages you referred to; I get the same problem.", "username": "UB_K" }, { "code": "Dockerfile\n \n RUN wget -qO - https://www.mongodb.org/static/pgp/server-6.0.asc | sudo apt-key add - \n\n RUN echo \"deb [ arch=amd64,arm64 ] http://repo.mongodb.com/apt/ubuntu focal/mongodb-enterprise/6.0 multiverse\" | sudo tee /etc/apt/sources.list.d/mongodb-enterprise.list\n\n RUN apt-get update && apt-get install -y mongodb-enterprise mongodb-enterprise-cryptd\n\n \n RUN wget -qO - https://www.mongodb.org/static/pgp/server-6.0.asc | sudo apt-key add - \nRUN echo \"deb [ arch=amd64,arm64 ] http://repo.mongodb.com/apt/ubuntu focal/mongodb-enterprise/6.0 multiverse\" | sudo tee /etc/apt/sources.list.d/mongodb-enterprise.list\nRUN apt-get update && apt-get install -y mongodb-enterprise mongodb-enterprise-cryptd\n", "text": "You are correct. Your Dockerfile must add MongoDB's public key to its list of trusted sources. Here is a Java example by @wan:The critical lines are:Hopefully this helps get you up and running.Sincerely,\nJames", "username": "James_Kovacs" } ]
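For completeness, here is a hypothetical Debian bullseye flavoured variant of those lines, matching the aspnet:6.0 base image used earlier in this thread (untested; package availability and repo paths should be checked against the current install docs). Making sure gnupg and ca-certificates are installed before adding the repo is worth doing, and if the build runs behind a TLS-intercepting proxy its CA certificate has to be trusted as well, which is one possible cause of the 'certificate is NOT trusted' messages above.

```dockerfile
FROM mcr.microsoft.com/dotnet/aspnet:6.0 AS base

# Tools needed to fetch, verify, and trust the MongoDB apt repository.
RUN apt-get update && apt-get install -y wget gnupg ca-certificates

# Import MongoDB's public key and register the enterprise repo for Debian bullseye.
RUN wget -qO - https://pgp.mongodb.com/server-6.0.asc \
      | gpg --dearmor -o /usr/share/keyrings/mongodb-server-6.0.gpg
RUN echo "deb [ signed-by=/usr/share/keyrings/mongodb-server-6.0.gpg ] https://repo.mongodb.com/apt/debian bullseye/mongodb-enterprise/6.0 main" \
      > /etc/apt/sources.list.d/mongodb-enterprise.list

# mongocryptd ships in the mongodb-enterprise-cryptd package.
RUN apt-get update && apt-get install -y mongodb-enterprise-cryptd
```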
MongoDB Client-Side Field Level Encryption with ASP.NET core C# in docker
2023-05-10T20:41:14.719Z
MongoDB Client-Side Field Level Encryption with ASP.NET core C# in docker
984
null
[]
[ { "code": "M10M0│ Error: error creating MongoDB ClusterAdvanced: POST https://cloud.mongodb.com/api/atlas/v1.5/groups/6449771ea35c5c54dad100a1/clusters: 400 (request \"INVALID_ENUM_VALUE\") An invalid enumeration value M0 was specified.\n│\n│ with module.mongodb.mongodbatlas_advanced_cluster.atlas-cluster,\n│ on ../../modules/mongodb/deployment.tf line 58, in resource \"mongodbatlas_advanced_cluster\" \"atlas-cluster\":\n│ 58: resource \"mongodbatlas_advanced_cluster\" \"atlas-cluster\" {\n", "text": "Hi all, I was following this tutorial How to Deploy MongoDB Atlas with Terraform on AWS | MongoDB to set up mongoDB via Terraform, the only change I’ve made was changing the instance type from M10 to M0 and I got the following errorare those shared tier types not supported for this cluster configuration?", "username": "Steven_Leng" }, { "code": "M10M0M10+M0M2/M5provider_nameTENANTbacking_provider_name", "text": "Hi @Steven_Leng - Welcome to the community the only change I’ve made was changing the instance type from M10 to M0 and I got the following errorIf you’re attempting to downgrade an M10 to an M0 then it won’t be possible as per the Free Cluster and Shared Cluster Considerations documentation:In saying the above, I assume since you are following the tutorial, the cluster was not even created to begin with? Please clarify.Please note that you might also need to change the provider_name value to TENANT and set the backing_provider_name as per the terraform docs for Example Tenant Cluster.Look forward to hearing from you.Regards,\nJason", "username": "Jason_Tran" }, { "code": "mongodbatlas_advanced_cluster", "text": "If you’re still encountering issues, would you be able to send a code snippet of the mongodbatlas_advanced_cluster resource from the terraform file you have? Please redact any sensitive information before posting here.", "username": "Jason_Tran" }, { "code": "", "text": "A post was split to a new topic: M0 creation issue", "username": "Jason_Tran" } ]
400 (request "INVALID_ENUM_VALUE") An invalid enumeration value M0 was specified
2023-04-26T21:09:15.996Z
400 (request "INVALID_ENUM_VALUE") An invalid enumeration value M0 was specified
1,081