Dataset columns (name, type, observed min/max):

  image_url    string  (113-131 chars)
  tags         list
  discussion   list
  title        string  (8-254 chars)
  created_at   string  (24 chars)
  fancy_title  string  (8-396 chars)
  views        int64   (73-422k)
null
[ "mdbw22-communitycafe" ]
[ { "code": "", "text": "Discussions on the AMA for MongoDB Champions", "username": "TimSantos" }, { "code": "", "text": "Say Hi to our MongoDB Champions!@Leandro_Domingues , @michael_hoeller , @hpgrahsl , @Arkadiusz_Borucki , @Nuri_Halperin + @Stennie_X", "username": "TimSantos" } ]
AMA: Stump the Champions!
2022-06-06T14:05:43.090Z
AMA: Stump the Champions!
2,677
null
[ "queries" ]
[ { "code": "", "text": "Hey guys, I have a schema that has two fields: score & rank.\nBy default, I insert the documents only with the score, so rank is null.After all items inserted, I pick 200 by 200 items to update the rank based on the score, but I dont know how I can update all the 200 items at once.I mean, i will pick the 200 items from the database sorted by the score, so I will know whats the top 1, top 2… But how can I build a query to update all the items at once providing the rank field to each one?", "username": "foco_radiante" }, { "code": "", "text": "First please followup on", "username": "steevej" } ]
How to build this query to update all items at once?
2022-06-07T18:51:28.424Z
How to build this query to update all items at once?
1,314
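For the ranking question above, one common approach (not spelled out in the thread itself) is to walk the collection sorted by score and issue the updates in batches with bulkWrite, so each group of 200 documents costs a single round trip. A minimal sketch for the Node.js driver; the collection and field names are assumptions based on the description:

```javascript
const { MongoClient } = require("mongodb");

async function assignRanks(uri) {
  const client = new MongoClient(uri);
  await client.connect();
  const items = client.db("test").collection("items"); // assumed names

  const cursor = items
    .find({}, { projection: { _id: 1 } })
    .sort({ score: -1 }); // highest score first => rank 1

  let rank = 1;
  let ops = [];
  for await (const doc of cursor) {
    ops.push({
      updateOne: { filter: { _id: doc._id }, update: { $set: { rank: rank++ } } },
    });
    if (ops.length === 200) {
      await items.bulkWrite(ops); // one round trip per 200 documents
      ops = [];
    }
  }
  if (ops.length > 0) await items.bulkWrite(ops);
  await client.close();
}
```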
null
[ "mdbw22-communitycafe" ]
[ { "code": "", "text": "Join MongoDB Champions and MongoDB User Groups organizers in a discussion about what community means to them, what their journey into the community was like, and what keeps them coming back.", "username": "TimSantos" }, { "code": "", "text": "@Nuri_Halperin @michael_hoeller @hpgrahsl @Arkadiusz_Borucki @Leandro_Domingues + (@Michael_Grayson)\nTalking about the value of community!", "username": "wan" } ]
Champions Chat: What is the value of community?
2022-06-06T14:05:19.905Z
Champions Chat: What is the value of community?
2,608
null
[ "node-js", "mongoose-odm", "connecting" ]
[ { "code": "const mongoose = require(\"mongoose\");\n//const mongoDB = \"mongodb://admin:admin@localhost:27017/ais_mlm?authSource=admin\";\nconst chalk = require('chalk');\nconst path = require('path');\nconst connected = chalk.bold.cyan;\nconst error = chalk.bold.yellow;\nconst disconnected = chalk.bold.red;\nconst termination = chalk.bold.magenta;\nrequire('dotenv').config({ path: path.resolve(__dirname, '../.env') });\nconsole.log('here...', path.resolve(__dirname, '../.env'),process.env.DATA);\n\n mongoose.connect(process.env.DATA,{ useNewUrlParser: true, useUnifiedTopology: true } ,async(err)=>{\n if(err) throw err;\n console.log(\"conncted to db\")\n }\n );\n\nmongoose.connection.on('connected', function(){\n console.log(connected(\"Mongoose default connection is open to \", process.env.DATA));\n});\n\nmongoose.connection.on('error', function(err){\n console.log(error(\"Mongoose default connection has occured \"+err+\" error\"));\n});\n\nmongoose.connection.on('disconnected', function(){\n console.log(disconnected(\"Mongoose default connection is disconnected\"));\n});\n\nprocess.on('SIGINT', function(){\n mongoose.connection.close(function(){\n console.log(termination(\"Mongoose default connection is disconnected due to application termination\"));\n process.exit(0)\n });\n});\n//module.exports.mongoDB = mongoose;\n", "text": "I want to join mongodb by node.js\nlocalhost with authenticated user\nI made this steps as follow\n1- run mongo from bin2- use ais_mlm db3-createUser admin with password admin4- edit config file to authorization enable5- restart mongodb server from task manager\n6- go to node.js project folder and run nodemon\nshow error cannot connect to mongodb\nhere is string url for thisDATA = “mongodb://admin:admin@localhost:27017/ais_mlm?authSource=admin”here is db.js fileplease show me what is wrong and how can connect this\nthanks.\nI wasted so many hour in this error\nI need help.", "username": "kyaw_swar_lin" }, { "code": "", "text": "lol ,noone answer, what kind of community.", "username": "kyaw_swar_lin" }, { "code": "adminais_mlmadminauthSourceadminais_mlmauthSource", "text": "Welcome to the MongoDB Community Forums @kyaw_swar_lin !I suspect the issue is that according to your steps you have created an admin user in the ais_mlm database:2- use ais_mlm db3-createUser admin with password admin… but are using the admin database as your authentication source via the authSource parameter in your connection string:DATA = “mongodb://admin:admin@localhost:27017/ais_mlm?authSource=admin”Creating the user in the admin database or using ais_mlm as the authSource value should resolve this problem.If you are still having trouble, it would be helpful to provide more information:Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Sorry bro, I would have helped you but am new to the MongoDB technology", "username": "kabonge_muhamadi" }, { "code": "mongoose.connect(\"mongodb://rootadmin:'root@123'@localhost:27017/TASK-MANAGER?authSource=admin\", {\n useNewUrlParser: true,\n useUnifiedTopology: true\n })\nmongoose.connect(\"mongodb://rootadmin:root@123@localhost:27017/TASK-MANAGER?authSource=admin\", {\n useNewUrlParser: true,\n useUnifiedTopology: true\n })\n", "text": "I’m facing the same kind of error. 
My condition is as follows:I create a user in the user database with\nname: “rootadmin”\npwd: “root@123”\nroles roles: [ { role: ‘userAdminAnyDatabase’, db: ‘admin’ } ],I have already created the TASK-MANAGER database.And I’m trying to connect from express js with the following setup:option 1:option 2:I tried both options to connect to DB but it threw an error:TypeError [ERR_INVALID_URL]: Invalid URL\nat new NodeError (node:internal/errors:372:5)\nat URL.onParseError (node:internal/url:553:9)\nat new URL (node:internal/url:629:5)\nat isAtlas (G:\\My Projects\\express_js\\task_manager\\node_modules\\mongoose\\lib\\helpers\\topology\\isAtlas.js:17:17)\nat MongooseServerSelectionError.assimilateError (G:\\My Projects\\express_js\\task_manager\\node_modules\\mongoose\\lib\\error\\serverSelection.js:35:35)\nat G:\\My Projects\\express_js\\task_manager\\node_modules\\mongoose\\lib\\connection.js:813:36\nat processTicksAndRejections (node:internal/process/task_queues:96:5) {\ninput: ‘123:27017’,\ncode: ‘ERR_INVALID_URL’\n}Please help me to fixed it\nThank You.", "username": "Nawaraj_Jaishi" }, { "code": "", "text": "Your password root@123 completely screw up the URI parser because the @ is used as a separator between the credentials and the host part. So it tries with the password root and the host [email protected] not put @ in your passwords.", "username": "steevej" } ]
How can connect to mongo db authentication by node.js
2021-09-17T13:21:00.091Z
How can connect to mongo db authentication by node.js
16,918
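Two details from this thread trip people up regularly: the user must exist in the database named by authSource, and any special character in the password (such as "@") must be percent-encoded before it goes into the connection string. A minimal sketch, reusing the credentials quoted above purely as placeholders:

```javascript
const mongoose = require("mongoose");

// "root@123" contains "@", which the URI parser treats as the end of the
// credentials, so it must be encoded as root%40123.
const user = encodeURIComponent("rootadmin");
const pass = encodeURIComponent("root@123");

// authSource must name the database the user was actually created in.
const uri = `mongodb://${user}:${pass}@localhost:27017/TASK-MANAGER?authSource=admin`;

mongoose
  .connect(uri)
  .then(() => console.log("connected"))
  .catch((err) => console.error("connection failed:", err));
```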
null
[ "queries", "replication", "sharding", "containers", "installation" ]
[ { "code": "", "text": "HI allNeed some help ! I am trying to install Mongodb on aws nodes (its a 4 node cluster Rhel 8.5 OS) Using K8s 1.23 Installing Mongodb is faling.\nhelm repo add bitnami from charts bitnami\nhelm install my-release bitnami/mongodb-sharded --namespace=mongodb --set global.storageClass=vxflexos-xfs --set mongodbRootPassword=mongodb123,mongodbUsername=mongodb,mongodbPassword=mongodb123,mongodbDatabase=ycsb --set volumePermissions.enabled=true --set replicaSet.enabled=true --set shards=2 --set configsvr.replicas=1 --set configsvr.persistence.size=100Gi --set mongos.replicas=1 --set shardsvr.dataNode.replicas=1 --set shardsvr.persistence.size=500Gi --set shardsvr.dataNode.resources.requests.memory=64Gi --set shardsvr.dataNode.resources.requests.cpu=16 --set configsvr.resources.requests.memory=16Gi --set mongos.resources.requests.memory=16GiUsing this command however Kubectl logs says the following :[root@Mongo-K8Master ec2-user]# kubectl logs -n mongodb my-release-mongodb-sharded-shard1-data-0\n13:22:29.35 INFO ==> Setting node as primary\nmongodb 13:22:29.38\nmongodb 13:22:29.38 Welcome to the Bitnami mongodb-sharded container\nmongodb 13:22:29.38 Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-mongodb-sharded\nmongodb 13:22:29.38 Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-mongodb-sharded/issues\nmongodb 13:22:29.38\nmongodb 13:22:29.38 INFO ==> ** Starting MongoDB Sharded setup **\nmongodb 13:22:29.42 INFO ==> Validating settings in MONGODB_* env vars…\nmkdir: cannot create directory ‘/bitnami/mongodb’: Permission denied[root@Mongo-K8Master mongobackup]# kubectl exec my-release-mongodb-sharded-shard0-data-0 -n mongodb -it – bash\nDefaulted container “mongodb” out of: mongodb, volume-permissions (init)\nI have no name!@my-release-mongodb-sharded-shard0-data-0:/$\nI have no name!@my-release-mongodb-sharded-shard0-data-0:/$ mongo admin --username root -p mongodb123 --host 10.244.3.36 --port 27017\nMongoDB shell version v5.0.8\nconnecting to: mongodb://10.244.3.36:27017/admin?compressors=disabled&gssapiServiceName=mongodb\nError: couldn’t connect to server 10.244.3.36:27017, connection attempt failed: SocketException: Error connecting to 10.244.3.36:27017 :: caused by :: Connection refused :\nconnect@src/mongo/shell/mongo.js:372:17\n@(connect):2:6\nexception: connect failed\nexiting with code 1\nI have no name!@my-release-mongodb-sharded-shard0-data-0:/$Please help me overcome this", "username": "Shilpa_Agrawal" }, { "code": "", "text": "/bitmani vs /bitnami\nIs that a typo? or that’s what you used while chown?", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Its a type my bad ! but i used the right command and it was out of the container like from the K8s root prompt to be precise", "username": "Shilpa_Agrawal" }, { "code": "", "text": "@Jason_Tran Could you help me here ? 
or tag someone who could I am stuck with this error badly !", "username": "Shilpa_Agrawal" }, { "code": "", "text": "Also chrisChris DellawayMongoDB Champion may be able to help you\nHe resolved many installation related issues", "username": "Ramachandra_Tummala" }, { "code": "", "text": "@chris Can you please share some inputs on the issue i am facing stuck with this from a few days now", "username": "Shilpa_Agrawal" }, { "code": "", "text": "@Ramachandra_Tummala I am looking for creating a dbuser who can have full access to a database named “ycsb”\nto do so i am using\nuse ycsb\ndb.getSiblingDB(‘admin’).createUser({ user: ‘mongodb’, pwd: ‘mongodb123’, roles: [{role: ‘userAdminAnyDatabase’, db: ‘ycsb’}] });Can you please help me do so something is wrong in above command\nuncaught exception: Error: couldn’t add user: No role named userAdminAnyDatabase@ycsb :All i need is all possible role permission for a user who is non-root and access to that databse fully.", "username": "Shilpa_Agrawal" }, { "code": "", "text": "The role userAdminAnyDatabase is in contraction with db:ycsb. Because userAdminAnyDatabase is for userAdmin on any database. While db:ycsb restrict to a single database.It look like what you want is https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-userAdmin.", "username": "steevej" }, { "code": "", "text": "Thanks @steevej This is helpful but i am new to this so still cant figure out for my use case\nWhat exact command should i use of creating a dbuser (say “john”) who has full access to a database name (say “HRteam” or “test” )so that when i run the test using that dbuser on that database i do not face any issues…", "username": "Shilpa_Agrawal" }, { "code": "", "text": "I usually see a click count that indicates how often a link I shared is followed by a peer user. I do not see one in my previous post. So I will have to assume you did not go read the documentation I supplied.Like I wrote userAdminAnyDatabase is wrong. What you need is userAdmin.And since you arenew to thisI recommend M103 and M150 from MongoDB Courses and Trainings | MongoDB University", "username": "steevej" }, { "code": "", "text": "@steevej I will somehow figure out, I am not sure why you dont see the click count however I did check the link, and even before posting it here I had read those options roles and permissions but for my use case I was facing issues.Anyway Thanks for the revert!", "username": "Shilpa_Agrawal" }, { "code": "", "text": "Does it work now with userAdmin rather than userAdminAnyDatabase?", "username": "steevej" } ]
Unable to install Mongodb 5.0.8 on K8s mkdir: cannot create directory '/bitnami/mongodb': Permission denied
2022-05-16T09:27:04.578Z
Unable to install Mongodb 5.0.8 on K8s mkdir: cannot create directory ‘/bitnami/mongodb’: Permission denied
7,504
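For the user-creation part of this thread: userAdminAnyDatabase cannot be scoped to a single database, but a built-in role such as dbOwner (or readWrite plus dbAdmin) can. A minimal mongosh sketch using the names from the thread:

```javascript
// Run while authenticated as the root user.
use admin
db.createUser({
  user: "mongodb",
  pwd: "mongodb123",
  roles: [
    { role: "dbOwner", db: "ycsb" } // full read/write/admin on ycsb only
  ]
})

// Connect afterwards with:
//   mongo ycsb -u mongodb -p mongodb123 --authenticationDatabase admin
```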
https://www.mongodb.com/…971a300b2773.png
[ "aggregation", "queries", "node-js", "mongoose-odm", "compass" ]
[ { "code": "const containers = await Container.aggregate([\n {\n $match: {\n $or: [\n {\n manufacturingDate: {\n $gte: new Date(startDate),\n $lt: new Date(endDate),\n },\n },\n ],\n },\n },\n]);\nres.status(200).json(containers);\ntype or paste code here\nimport * as mongoose from \"mongoose\";\nconst ContainerType = require(\"./ContainerType\");\n\nconst Schema = mongoose.Schema;\n\nconst ContainerSchema = new mongoose.Schema({\n owner: { type: String, required: true },\n manufacturingNo: { type: String, required: true },\n IdentificationNo: { type: String, required: true },\n manufacturingDate: { type: Date, required: true },\n nextInspectionDate: { type: Date, required: true },\n});\n\nmodule.exports = mongoose.model(\"Containers\", ContainerSchema);\n", "text": "0I want to make searchDate API (I am using Express, Mongoose and Angular). I need to return list of data between two dates.Somehow it is working only if I hardcode date in $gte and $ltMy api code::router.get(“/searchDate”, async (req, res) => {\ntry {\nconst { startDate, endDate } = req.query;} catch (err) {\nres.status(404).json({ success: false, msg: “Container not found” });\n}\n});And my model is:This is how data looks like in MongoDB Compass:\n“manufacturingDate”: “2020-10-14T07:00:00.000Z”,And error in my VS Code compiler\nimage991×646 77.8 KB", "username": "ziga_setar" }, { "code": "module.exports = mongoose.model(\"Containers\", ContainerSchema);await Container.aggregate(...)", "text": "I do not know mongoose very because I stir away from such obstruction abstraction layer.You name your model Containers withmodule.exports = mongoose.model(\"Containers\", ContainerSchema);so I think you need to use Containers rather than Container inawait Container.aggregate(...)", "username": "steevej" }, { "code": "", "text": "I don’t think this has to do anything with my error. My code works fine if I write date by hand and not use $gte: startDate,", "username": "ziga_setar" }, { "code": "const gte_Date = new Date( startDate ) ;\nconst lt_Date = new Date( endDate ) ;\nconst containers = await Container.aggregate([ $gte: new Date(startDate),\n $lt: new Date(endDate),\n$gte : gte_Date ,\n$lt : lt_Date ,\n", "text": "Can you add the linesjust beforeconst containers = await Container.aggregate([and replacewithIf that still fails, I suspect you need to import or require so that you refer to same Date type as the one used by mongoose schema.By the way your $or is useless since you have a single object/condition inside its array.", "username": "steevej" } ]
Issue with querying dates with $gte and $lt
2022-06-07T16:53:22.735Z
Issue with querying dates with $gte and $lt
8,707
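Putting steevej's suggestion together, the whole route can be written without $or or an aggregation at all; a simple find with Date objects built from the query-string values is enough. A minimal sketch, assuming the Container model imported as in the thread:

```javascript
router.get("/searchDate", async (req, res) => {
  try {
    const { startDate, endDate } = req.query; // arrive as strings
    const containers = await Container.find({
      manufacturingDate: {
        $gte: new Date(startDate), // convert before querying
        $lt: new Date(endDate),
      },
    });
    res.status(200).json(containers);
  } catch (err) {
    res.status(404).json({ success: false, msg: "Container not found" });
  }
});
```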
null
[ "mdbw22-communitycafe" ]
[ { "code": "", "text": "Discussions about Featured Artist: Dueling Pianos!", "username": "TimSantos" }, { "code": "", "text": "\nimage1920×1440 220 KB\n", "username": "TimSantos" }, { "code": "", "text": "\nimage1920×1440 313 KB\n", "username": "TimSantos" } ]
Featured Artist: Dueling Pianos!
2022-06-06T14:05:02.841Z
Featured Artist: Dueling Pianos!
2,857
null
[ "mdbw22-communitycafe" ]
[ { "code": "", "text": "An interactive Q&A with Rachelle Palmer, Lead Product Manager for Developer Experience, who will provide an overview of the many programming language communities that MongoDB works with.", "username": "TimSantos" }, { "code": "", "text": "\nimage1920×1440 167 KB\nJoin @AngieB and @Rachelle - “State of the MongoDB Developer Community” starting soon!", "username": "Jason_Tran" } ]
State of the MongoDB Developer Community
2022-06-06T14:03:06.231Z
State of the MongoDB Developer Community
2,708
null
[ "mdbw22-communitycafe" ]
[ { "code": "", "text": "AMA with John Page. Who thinks a full stack engineer starts with finding rocks and ends with human psychology experiments.", "username": "TimSantos" }, { "code": "", "text": "Getting started now! \nimage1920×1440 236 KB\n", "username": "TimSantos" }, { "code": "", "text": "All the interesting government classif*** database stories with the mad resident @John_Page\nImage from iOS (5)1920×1440 171 KB\n–\nImage from iOS (6)1920×1440 234 KB\n", "username": "Harshit" } ]
AMA with John Page, Resident Mad Professor
2022-06-06T14:02:41.781Z
AMA with John Page, Resident Mad Professor
2,781
null
[ "node-js", "crud", "typescript" ]
[ { "code": "const list = numberJoined < capacity ? \"joined\" : \"waitingList\";\nconst counterList = list === \"joined\" ? \"waitingList\" : \"joined\";\n\nconst event = await db?.collection(\"events-jordan\").findOneAndUpdate(\n { _id: new ObjectId(eventId) },\n {\n $addToSet: {\n [list]: userId,\n },\n $pull: {\n [counterList]: userId,\n },\n },\n { returnDocument: \"after\" }\n )\n", "text": "Dear Mongoers,\nI need help with some weird TS error I get only when using findOneAndUpdate in Node JS. This sometimes works, sometimes not! The situation is about event management. If a user joins the event, they can either book a spot, or join the waiting list:I am using Node JS with TS. TS sometimes complains about this:I get the following error from TS which I am unable to solve No overload matches this call.\nOverload 2 of 4, ‘(filter: Filter, update: UpdateFilter, options: FindOneAndUpdateOptions): Promise<…> | undefined’, gave the following error.\nType ‘{ [x: string]: string; }’ is not assignable to type ‘SetFields’.\nType ‘{ [x: string]: string; }’ is not assignable to type ‘NotAcceptedFields<Document, readonly any[] | undefined>’.\n‘string’ index signatures are incompatible.\nType ‘string’ is not assignable to type ‘undefined’.\nOverload 2 of 4, ‘(filter: Filter, update: UpdateFilter, options: FindOneAndUpdateOptions): Promise<…> | undefined’, gave the following error.\nType ‘{ [x: string]: string; }’ is not assignable to type ‘PullOperator’.\nType ‘{ [x: string]: string; }’ is not assignable to type ‘{ readonly [x: string]: Partial | { [x: string]: FilterOperators | undefined; } | FilterOperators | undefined; }’.\n‘string’ index signatures are incompatible.\nType ‘string’ is not assignable to type ‘Partial | { [x: string]: FilterOperators | undefined; } | FilterOperators | undefined’.ts(2769)\nmongodb.d.ts(5770, 5): The expected type comes from property ‘$addToSet’ which is declared here on type ‘UpdateFilter’\nmongodb.d.ts(5772, 5): The expected type comes from property ‘$pull’ which is declared here on type ‘UpdateFilter’Any help is very much appreciated.\nThank you ", "username": "Osama_Jamal" }, { "code": "", "text": "Type ‘{ [x: string]: string; }’ is not assignable to type ‘{ readonly [x: string]: Partial | { [x: string]: FilterOperators | undefined; } | FilterOperators | undefined; }’.The above seems to indicate that your variable userId is the culprit. How is it declared?", "username": "steevej" } ]
Typescript error is driving me crazy!
2022-06-04T11:43:43.000Z
Typescript error is driving me crazy!
11,051
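One workaround that usually satisfies the compiler here is to avoid computed property names altogether: `{ [list]: userId }` is widened to `{ [x: string]: string }`, which is exactly the type the error message complains about. Building the two concrete update shapes keeps the keys literal; a minimal sketch, assuming the variables from the post:

```javascript
const update =
  numberJoined < capacity
    ? { $addToSet: { joined: userId }, $pull: { waitingList: userId } }
    : { $addToSet: { waitingList: userId }, $pull: { joined: userId } };

const event = await db?.collection("events-jordan").findOneAndUpdate(
  { _id: new ObjectId(eventId) },
  update,
  { returnDocument: "after" }
);
```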
null
[ "mdbw22-communitycafe" ]
[ { "code": "", "text": "In today’s highly connected world, more data is being generated at the edge than on-premises and cloud combined. How do we securely manage this data? Industrial companies are struggling to get value out of their IT & OT data leaving unmeasurable value on the table while walking into compliance nightmares. Join this session with MongoDB and Hitachi experts to learn how to create secured industrial data operations using Lumada DataOps and MongoDB Atlas.", "username": "TimSantos" }, { "code": "", "text": "Starting in a few!\nimage1920×1440 138 KB\n", "username": "TimSantos" }, { "code": "", "text": "\nimage1920×1440 269 KB\n", "username": "Jason_Tran" }, { "code": "", "text": "\nimage1920×1440 365 KB\n\n\nimage1920×1440 187 KB\n", "username": "TimSantos" }, { "code": "", "text": "\nHitachi DataOps3463×1713 461 KB\n", "username": "wan" } ]
Partner Showcase: Secure your IT/OT data with Industrial DataOps
2022-06-06T14:01:45.952Z
Partner Showcase: Secure your IT/OT data with Industrial DataOps
3,140
null
[ "aggregation", "node-js", "production", "change-streams", "field-encryption" ]
[ { "code": "npm install –save @mongodb-js/zstdmongodb://host:port/db?compressors=zstdmaxConnectingmaxConnectingconst client = new MongoClient('MONGODB_URL', { maxConnecting: 5 });\nshowExpandedEventsshowExpandedEventscreateIndexesdropIndexesmodifycreateshardCollectionshowExpandedEventsreshardCollectionrefineCollectionShardKeyconst client = new MongoClient('MONGODB_URL');\nawait client.connect();\n\nconst collection = client.db('example-db').collection('example-collection');\nconst changeStream = collection.watch([], { showExpandedEvents: true });\nchangeStreamPreAndPostImagesconst collection = await db.createCollection(‘collectionName’, { changeStreamPreAndPostImages: { enabled: true }} )\nconst changeStream = collection.watch([], { fullDocumentBeforeChange: ‘required’ })\nestimatedDocumentCountcountaggregatecollStatscountinitializeOrderedBulkOpinitializeUnorderedBulkOpconst client = new MongoClient('MONGODB_URL');\n// No need to connect anymore! (see above)\nconst collection = await client.db(‘example-db’).createCollection(‘example-collection’, { \n key: _id,\n unique: true\n});\nMongoClientconst client = new MongoClient(uri, {\n autoEncryption: {\n keyVaultNamespace: 'encryption.__keyVault',\n kmsProviders: {\n local: { key: 'localKey' }\n },\n extraOptions: {\n cryptSharedLibPath: \"/path/to/mongo_crypt_v1.dylib\",\n },\n encryptedFieldsMap: {\n \"default.secretCollection\": {\n [\n {\n keyId: '_id',\n \tpath: 'ssn',\n \tbsonType: 'string',\n \tqueries: { queryType: 'equality' }\n }\n ]\n },\n },\n },\n})\n\n\n\n@expiremental", "text": "The MongoDB Node.js team is pleased to announce version 4.7.0 of the mongodb package! Happy MongoDB World Day!zstd compression is now supported by the NodeJS driver. To enable zstd compression, add it as a dependency in your project: npm install –save @mongodb-js/zstd. The add the option to your URI options: mongodb://host:port/db?compressors=zstd.The Node driver has improved connection storm avoidance by limiting the number of connections that the driver will attempt to open to each server at a time. The number of concurrent connection attempts can be configured with a new MongoClient argument, maxConnecting. The following code example creates a new MongoClient that configures maxConnecting to 5.The collection.watch function now supports a new option, showExpandedEvents. When showExpandedEvents is enabled, change streams will report the following events on servers 6.0 and later:On servers 6.1.0 and later, showExpandedEvents will also show change stream events for the following commands:As an example, the following code creates a change stream that has expanded events enabled on a collection:Change streams now support pre and post images for update events. To enable pre and post images, the collection must be created with the changeStreamPreAndPostImages option enabled:Pre and post images can then be enabled on the change stream when the change stream is created:See the documentation on pre and post images for more information: https://www.mongodb.com/docs/v6.0/changeStreams/#change-streams-with-document-pre--and-post-images.The driver now only processes the most recent server monitoring event if multiple heartbeat events are recorded in sequence before any can be processed. 
In serverless environments, this results in increased performance when a function is invoked after a period of inactivity as well as lower resource consumption.The 5.0 server compatible release unintentionally broke the estimatedDocumentCount command on views by changing the implementation from the count command to aggregate and a collStats stage. This release fixes estimatedDocumentCount on views by reverting the implementation to use count.Due to an oversight, the count command was omitted from the Stable API in server versions 5.0.0 - 5.0.8 and 5.1.0 - 5.3.1, so users of the Stable API with estimatedDocumentCount are recommended to upgrade their MongoDB clusters to 5.0.9 or 5.3.2 (if on Atlas) or set apiStrict: false when constructing their MongoClients.If an operation is run before MongoClient.connect is called by the client, the driver will now automatically connect along with that first operation. This makes the repl experience much more streamlined, going right from client construction to your first insert or find. However, MongoClient.connect can still be called manually and remains useful for learning about misconfiguration (auth, server not started, connection string correctness) early in your application’s startup.Note: It’s a known limitation that explicit sessions (client.startSession) and initializeOrderedBulkOp, initializeUnorderedBulkOp cannot be used until MongoClient.connect is first called. Look forward to a future patch release that will correct these inconsistencies.Clustered Collections can now be created when creating a collection in the Node driver:More information about clustered indexes can be found on the official documentation page. https://www.mongodb.com/docs/upcoming/core/clustered-collections/To enable the driver to use the new Automatic Encryption Shared Library instead of using mongocryptd, pass the location of the library in the auto-encryption extra options to the MongoClient. Example:Queryable Encryption is a beta feature that enables you to encrypt data in your application before you send it over the network to MongoDB while still maintaining the ability to query the encrypted data. With Queryable Encryption enabled, no MongoDB-managed service has access to your data in an unencrypted form.Checkout the documentation: https://www.mongodb.com/docs/upcoming/core/queryable-encryption/queryable-encryption/ATTENTION: This feature is included in this release as a beta preview. All related APIs marked with @expiremental in the documentation. There are no guarantees that the APIs will not undergo breaking changes without prior notice.Features:Bug FixesWe invite you to try the mongodb library immediately, and report any issues to the NODE project.", "username": "neal" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB Node.js Driver v4.7.0 Released
2022-06-07T14:13:40.386Z
MongoDB Node.js Driver v4.7.0 Released
3,381
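As a small end-to-end illustration of the zstd compression feature described in these release notes (the connection string and host are placeholders):

```javascript
// npm install mongodb @mongodb-js/zstd
const { MongoClient } = require("mongodb");

// Either put compressors=zstd in the URI...
const client = new MongoClient("mongodb://localhost:27017/test?compressors=zstd");
// ...or pass it as an option:
// const client = new MongoClient("mongodb://localhost:27017/test", { compressors: ["zstd"] });

async function run() {
  await client.connect();
  console.log(await client.db("test").command({ ping: 1 }));
  await client.close();
}
run();
```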
null
[ "connecting", "containers", "devops" ]
[ { "code": "mongo --ssl --sslCAFile /etc/mongodb/ssl/mongoCA.crt --host rs0/mongo1:27017,mongo2:27017,mongo3:27017 --sslPEMKeyFile /etc/mongodb/ssl/mongo_client.pem\n\nversion: '3'\nnetworks:\n netBackEnd:\n ipam:\n driver: default\n config:\n - subnet: 192.168.0.0/24\nservices:\n api:\n hostname: api\n build: .\n ports:\n - 8000:8000\n - 8001:8001\n - 8500:8500\n depends_on:\n - mongo1\n - mongo2\n - mongo3\n volumes:\n - \"/etc/mongodb/ssl/client_ip.pem:/data/client_ip.pem:ro\"\n - \"/etc/mongodb/ssl/mongoCA.crt:/data/mongoCA.crt:ro\"\n networks:\n netBackEnd:\n mongo1:\n hostname: mongo1\n container_name: mongo1\n image: mongo:4.2-bionic\n expose:\n - 27017\n ports:\n - 27011:27017\n restart: always\n volumes:\n - \"/etc/mongodb/ssl/mongo1.pem:/data/mongo1.pem:ro\"\n - \"/etc/mongodb/ssl/mongoCA.crt:/data/mongoCA.crt:ro\"\n - \"/usr/local/mongo-volume1:/data/db\"\n entrypoint: ['/usr/bin/mongod', '--replSet', 'rs0', '--sslMode', 'requireSSL', '--clusterAuthMode', 'x509', '--sslClusterFile', '/data/mongo1.pem', '--sslPEMKeyFile', '/data/mongo1.pem', '--sslCAFile', '/data/mongoCA.crt', '--bind_ip', '0.0.0.0']\n networks:\n netBackEnd:\n ipv4_address: 192.168.0.2\n\n mongo2:\n hostname: mongo2\n container_name: mongo2\n image: mongo:4.2-bionic\n expose:\n - 27017\n ports:\n - 27012:27017\n restart: always\n volumes:\n - \"/etc/mongodb/ssl/mongo2.pem:/data/mongo2.pem:ro\"\n - \"/etc/mongodb/ssl/mongoCA.crt:/data/mongoCA.crt:ro\"\n - \"/usr/local/mongo-volume2:/data/db\"\n entrypoint: ['/usr/bin/mongod', '--replSet', 'rs0', '--sslMode', 'requireSSL', '--clusterAuthMode', 'x509', '--sslClusterFile', '/data/mongo2.pem', '--sslPEMKeyFile', '/data/mongo2.pem', '--sslCAFile', '/data/mongoCA.crt', '--bind_ip', '0.0.0.0']\n networks:\n netBackEnd:\n ipv4_address: 192.168.0.3\n\n mongo3:\n hostname: mongo3\n container_name: mongo3\n image: mongo:4.2-bionic\n expose:\n - 27017\n ports:\n - 27013:27017\n restart: always\n volumes:\n - \"/etc/mongodb/ssl/mongo3.pem:/data/mongo3.pem:ro\"\n - \"/etc/mongodb/ssl/mongoCA.crt:/data/mongoCA.crt:ro\"\n - \"/usr/local/mongo-volume3:/data/db\"\n entrypoint: ['/usr/bin/mongod', '--replSet', 'rs0', '--sslMode', 'requireSSL', '--clusterAuthMode', 'x509', '--sslClusterFile', '/data/mongo3.pem', '--sslPEMKeyFile', '/data/mongo3.pem', '--sslCAFile', '/data/mongoCA.crt', '--bind_ip', '0.0.0.0']\n networks:\n netBackEnd:\n ipv4_address: 192.168.0.5\n\n37075e728a2f mongo:4.2-bionic \"/usr/bin/mongod --r…\" 2 hours ago Up About an hour 0.0.0.0:27012->27017/tcp mongo2\n45a84da16c56 mongo:4.2-bionic \"/usr/bin/mongod --r…\" 2 hours ago Up About an hour 0.0.0.0:27011->27017/tcp mongo1\n3615e7b08bf7 mongo:4.2-bionic \"/usr/bin/mongod --r…\" 2 hours ago Up About an hour 0.0.0.0:27013->27017/tcp mongo3\n", "text": "I am running my mongod replica set instances on docker with ssl/tsl security enabled.\nI can connect from mongo sheel and mongocxx on the machine where my docker is running using below connection string.the problem is I couldn’t connect to mongod running on docker from anothe machine.\nBoth of my machines are on same network.I tried to bind_ip to 0.0.0.0 in my docker file but didn’t work for me.\nI also tried this link from mongodb for configuring firewall but still I couldn’t establish connection from my remote machine.does any one what am i doing wrong?\nBelow is my docker filebelow is the docker ps outputThank you", "username": "Anusha_Reddy" }, { "code": "mongodrs0/127.0.0.1:27012,127.0.0.1:27013,127.0.0.1:27014", "text": "Hi @Anusha_Reddy and welcome in the 
MongoBD Community !I think you have a port issue here.\nInside your containers, mongod runs on port 27017 and you are mapping these ports on your host to 27012, 27013 and 27014 which is fine because you can’t have 3 services running on the same port.\nSo you can’t expose 27017 for each containers.\nWhen you connect to this cluster, you should use the exposed ports so 27012, 27013 and 27014 with the IP address of your host.\nI’m already surprised your host can resolve “mongo1”.\nFrom your host, you should use rs0/127.0.0.1:27012,127.0.0.1:27013,127.0.0.1:27014 I think.I hope this helps.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "With replicaset discovery this will not work as the hosts and ports will not match the replicaset configuration.You will be able to connect to an individual node. Each member and client must be able to resolve and connect to the members as defined in the replicaset configuration.The way I do it is:\nPlease note this is only applicable for testing/development as running 3 replicas on the same host is of no production benefit(ha,redendancy) you may as well run one mongod.", "username": "chris" }, { "code": "127.0.0.1\tlocalhost\n127.0.1.1\txxxxxx-ThinkPad-X270\n192.168.0.2\tmongo1\n192.168.0.3\tmongo2\n192.168.0.5\tmongo3 \n2021-01-10T14:24:08.424+0800 E NETWORK [ReplicaSetMonitor-TaskExecutor] The server certificate does not match the host name. Hostname: 127.0.0.1 does not match CN: mongo1\n\nrs0/127.0.0.1:27012,127.0.0.1:27013,127.0.0.1:27014 connecting to: mongodb://127.0.0.1:27011,127.0.0.1:27012,127.0.0.1:27013/?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0\n{\"t\":{\"$date\":\"2021-01-10T06:37:01.902Z\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4333208, \"ctx\":\"ReplicaSetMonitor-TaskExecutor\",\"msg\":\"RSM host selection timeout\",\"attr\":{\"replicaSet\":\"rs0\",\"error\":\"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \\\"nearest\\\" } for set rs0\"}}\nError: connect failed to replica set rs0/127.0.0.1:27011,127.0.0.1:27012,127.0.0.1:27013 :\n\n", "text": "Hi @MaBeuLux88, thanks for the reply.\nI’m able to connect to ‘mongo1’ or ‘mongo2’ because i added them as hosts in /etc/hosts/ file like belowone more problem with connecting using localhost is ssl/tls certificates, I can not connect using localhost as I needed to sign my certificates with hostnames like ‘mongo1’. I get the below error.without using ssl/tls certificate, I can connect using rs0/127.0.0.1:27012,127.0.0.1:27013,127.0.0.1:27014 from the machine where my docker is running but not from remote client.When I connect from remote client, I get the below errorI am not sure, what went wrong? 
from the host where my docker is running, it’s working fine.Thank you", "username": "Anusha_Reddy" }, { "code": "config={\"_id\":\"rs0\",\"members\":[{\"_id\":0,\"host\":\"mongo1:27017\",\"priority\":2},{\"_id\":1,\"host\":\"mongo2:27017\"},{\"_id\":2,\"host\":\"mongo3:27017\"}]}\n\nconnecting to: mongodb://127.0.10.1:27017/?compressors=disabled&gssapiServiceName=mongodb&replicaSet=rs0\n{\"t\":{\"$date\":\"2021-01-10T07:24:31.441Z\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4333208, \"ctx\":\"ReplicaSetMonitor-TaskExecutor\",\"msg\":\"RSM host selection timeout\",\"attr\":{\"replicaSet\":\"rs0\",\"error\":\"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \\\"nearest\\\" } for set rs0\"}}\nError: connect failed to replica set rs0/127.0.10.1:27017 :\nconnect@src/mongo/shell/mongo.js:374:17\n\n", "text": "Hi Chris, Thanks for the reply.May i see you replica set initialization? Do we need to update /etc/hosts file for each client where we try to connect to replica set? I did update my hosts file but still I couldn’t connect from remote client.In my case, I do the followingI followed your compose file, everything is working fine from the machine where my docker is running. I still get the same error from remote client.I see that you didn’t use any ip_binding in your compose file, how does it work for you from remote clients? according MongoDB documentation we should either use 0.0.0.0 or ip_address or host-names of client right?Please correct me if i am wrong.I get the following error when connecting from remote clientI completely started with new configuration, was deleted previous /data/db folder and my mongo volumes just to make sure if it wasn’t related to any configuration problem.Thanks", "username": "Anusha_Reddy" }, { "code": "", "text": "Hi Anusha Reddy,Can you share the result of below query.\n1.) 
rs.conf()\n2.)rs.status()Thanks", "username": "BM_Sharma" }, { "code": "{\n\t\"_id\" : \"rs0\",\n\t\"version\" : 1,\n\t\"protocolVersion\" : NumberLong(1),\n\t\"writeConcernMajorityJournalDefault\" : true,\n\t\"members\" : [\n\t\t{\n\t\t\t\"_id\" : 0,\n\t\t\t\"host\" : \"mongo1:27017\",\n\t\t\t\"arbiterOnly\" : false,\n\t\t\t\"buildIndexes\" : true,\n\t\t\t\"hidden\" : false,\n\t\t\t\"priority\" : 2,\n\t\t\t\"tags\" : {\n\t\t\t\t\n\t\t\t},\n\t\t\t\"slaveDelay\" : NumberLong(0),\n\t\t\t\"votes\" : 1\n\t\t},\n\t\t{\n\t\t\t\"_id\" : 1,\n\t\t\t\"host\" : \"mongo2:27017\",\n\t\t\t\"arbiterOnly\" : false,\n\t\t\t\"buildIndexes\" : true,\n\t\t\t\"hidden\" : false,\n\t\t\t\"priority\" : 1,\n\t\t\t\"tags\" : {\n\t\t\t\t\n\t\t\t},\n\t\t\t\"slaveDelay\" : NumberLong(0),\n\t\t\t\"votes\" : 1\n\t\t},\n\t\t{\n\t\t\t\"_id\" : 2,\n\t\t\t\"host\" : \"mongo3:27017\",\n\t\t\t\"arbiterOnly\" : false,\n\t\t\t\"buildIndexes\" : true,\n\t\t\t\"hidden\" : false,\n\t\t\t\"priority\" : 1,\n\t\t\t\"tags\" : {\n\t\t\t\t\n\t\t\t},\n\t\t\t\"slaveDelay\" : NumberLong(0),\n\t\t\t\"votes\" : 1\n\t\t}\n\t],\n\t\"settings\" : {\n\t\t\"chainingAllowed\" : true,\n\t\t\"heartbeatIntervalMillis\" : 2000,\n\t\t\"heartbeatTimeoutSecs\" : 10,\n\t\t\"electionTimeoutMillis\" : 10000,\n\t\t\"catchUpTimeoutMillis\" : -1,\n\t\t\"catchUpTakeoverDelayMillis\" : 30000,\n\t\t\"getLastErrorModes\" : {\n\t\t\t\n\t\t},\n\t\t\"getLastErrorDefaults\" : {\n\t\t\t\"w\" : 1,\n\t\t\t\"wtimeout\" : 0\n\t\t},\n\t\t\"replicaSetId\" : ObjectId(\"5ffbb4ad5ae5b9142aa2a80c\")\n\t}\n}\n{\n\t\"set\" : \"rs0\",\n\t\"date\" : ISODate(\"2021-01-11T05:31:45.411Z\"),\n\t\"myState\" : 1,\n\t\"term\" : NumberLong(9),\n\t\"syncingTo\" : \"\",\n\t\"syncSourceHost\" : \"\",\n\t\"syncSourceId\" : -1,\n\t\"heartbeatIntervalMillis\" : NumberLong(2000),\n\t\"majorityVoteCount\" : 2,\n\t\"writeMajorityCount\" : 2,\n\t\"optimes\" : {\n\t\t\"lastCommittedOpTime\" : {\n\t\t\t\"ts\" : Timestamp(1610343104, 1),\n\t\t\t\"t\" : NumberLong(9)\n\t\t},\n\t\t\"lastCommittedWallTime\" : ISODate(\"2021-01-11T05:31:44.731Z\"),\n\t\t\"readConcernMajorityOpTime\" : {\n\t\t\t\"ts\" : Timestamp(1610343104, 1),\n\t\t\t\"t\" : NumberLong(9)\n\t\t},\n\t\t\"readConcernMajorityWallTime\" : ISODate(\"2021-01-11T05:31:44.731Z\"),\n\t\t\"appliedOpTime\" : {\n\t\t\t\"ts\" : Timestamp(1610343104, 1),\n\t\t\t\"t\" : NumberLong(9)\n\t\t},\n\t\t\"durableOpTime\" : {\n\t\t\t\"ts\" : Timestamp(1610343104, 1),\n\t\t\t\"t\" : NumberLong(9)\n\t\t},\n\t\t\"lastAppliedWallTime\" : ISODate(\"2021-01-11T05:31:44.731Z\"),\n\t\t\"lastDurableWallTime\" : ISODate(\"2021-01-11T05:31:44.731Z\")\n\t},\n\t\"lastStableRecoveryTimestamp\" : Timestamp(1610343054, 1),\n\t\"lastStableCheckpointTimestamp\" : Timestamp(1610343054, 1),\n\t\"electionCandidateMetrics\" : {\n\t\t\"lastElectionReason\" : \"priorityTakeover\",\n\t\t\"lastElectionDate\" : ISODate(\"2021-01-11T05:30:24.715Z\"),\n\t\t\"electionTerm\" : NumberLong(9),\n\t\t\"lastCommittedOpTimeAtElection\" : {\n\t\t\t\"ts\" : Timestamp(1610343023, 1),\n\t\t\t\"t\" : NumberLong(8)\n\t\t},\n\t\t\"lastSeenOpTimeAtElection\" : {\n\t\t\t\"ts\" : Timestamp(1610343023, 1),\n\t\t\t\"t\" : NumberLong(8)\n\t\t},\n\t\t\"numVotesNeeded\" : 2,\n\t\t\"priorityAtElection\" : 2,\n\t\t\"electionTimeoutMillis\" : NumberLong(10000),\n\t\t\"priorPrimaryMemberId\" : 2,\n\t\t\"numCatchUpOps\" : NumberLong(0),\n\t\t\"newTermStartDate\" : ISODate(\"2021-01-11T05:30:24.727Z\"),\n\t\t\"wMajorityWriteAvailabilityDate\" : 
ISODate(\"2021-01-11T05:30:25.727Z\")\n\t},\n\t\"electionParticipantMetrics\" : {\n\t\t\"votedForCandidate\" : true,\n\t\t\"electionTerm\" : NumberLong(8),\n\t\t\"lastVoteDate\" : ISODate(\"2021-01-11T05:30:13.358Z\"),\n\t\t\"electionCandidateMemberId\" : 2,\n\t\t\"voteReason\" : \"\",\n\t\t\"lastAppliedOpTimeAtElection\" : {\n\t\t\t\"ts\" : Timestamp(1610342959, 1),\n\t\t\t\"t\" : NumberLong(7)\n\t\t},\n\t\t\"maxAppliedOpTimeInSet\" : {\n\t\t\t\"ts\" : Timestamp(1610342959, 1),\n\t\t\t\"t\" : NumberLong(7)\n\t\t},\n\t\t\"priorityAtElection\" : 2\n\t},\n\t\"members\" : [\n\t\t{\n\t\t\t\"_id\" : 0,\n\t\t\t\"name\" : \"mongo1:27017\",\n\t\t\t\"health\" : 1,\n\t\t\t\"state\" : 1,\n\t\t\t\"stateStr\" : \"PRIMARY\",\n\t\t\t\"uptime\" : 103,\n\t\t\t\"optime\" : {\n\t\t\t\t\"ts\" : Timestamp(1610343104, 1),\n\t\t\t\t\"t\" : NumberLong(9)\n\t\t\t},\n\t\t\t\"optimeDate\" : ISODate(\"2021-01-11T05:31:44Z\"),\n\t\t\t\"syncingTo\" : \"\",\n\t\t\t\"syncSourceHost\" : \"\",\n\t\t\t\"syncSourceId\" : -1,\n\t\t\t\"infoMessage\" : \"\",\n\t\t\t\"electionTime\" : Timestamp(1610343024, 1),\n\t\t\t\"electionDate\" : ISODate(\"2021-01-11T05:30:24Z\"),\n\t\t\t\"configVersion\" : 1,\n\t\t\t\"self\" : true,\n\t\t\t\"lastHeartbeatMessage\" : \"\"\n\t\t},\n\t\t{\n\t\t\t\"_id\" : 1,\n\t\t\t\"name\" : \"mongo2:27017\",\n\t\t\t\"health\" : 1,\n\t\t\t\"state\" : 2,\n\t\t\t\"stateStr\" : \"SECONDARY\",\n\t\t\t\"uptime\" : 101,\n\t\t\t\"optime\" : {\n\t\t\t\t\"ts\" : Timestamp(1610343104, 1),\n\t\t\t\t\"t\" : NumberLong(9)\n\t\t\t},\n\t\t\t\"optimeDurable\" : {\n\t\t\t\t\"ts\" : Timestamp(1610343094, 1),\n\t\t\t\t\"t\" : NumberLong(9)\n\t\t\t},\n\t\t\t\"optimeDate\" : ISODate(\"2021-01-11T05:31:44Z\"),\n\t\t\t\"optimeDurableDate\" : ISODate(\"2021-01-11T05:31:34Z\"),\n\t\t\t\"lastHeartbeat\" : ISODate(\"2021-01-11T05:31:44.747Z\"),\n\t\t\t\"lastHeartbeatRecv\" : ISODate(\"2021-01-11T05:31:44.777Z\"),\n\t\t\t\"pingMs\" : NumberLong(0),\n\t\t\t\"lastHeartbeatMessage\" : \"\",\n\t\t\t\"syncingTo\" : \"mongo3:27017\",\n\t\t\t\"syncSourceHost\" : \"mongo3:27017\",\n\t\t\t\"syncSourceId\" : 2,\n\t\t\t\"infoMessage\" : \"\",\n\t\t\t\"configVersion\" : 1\n\t\t},\n\t\t{\n\t\t\t\"_id\" : 2,\n\t\t\t\"name\" : \"mongo3:27017\",\n\t\t\t\"health\" : 1,\n\t\t\t\"state\" : 2,\n\t\t\t\"stateStr\" : \"SECONDARY\",\n\t\t\t\"uptime\" : 101,\n\t\t\t\"optime\" : {\n\t\t\t\t\"ts\" : Timestamp(1610343104, 1),\n\t\t\t\t\"t\" : NumberLong(9)\n\t\t\t},\n\t\t\t\"optimeDurable\" : {\n\t\t\t\t\"ts\" : Timestamp(1610343104, 1),\n\t\t\t\t\"t\" : NumberLong(9)\n\t\t\t},\n\t\t\t\"optimeDate\" : ISODate(\"2021-01-11T05:31:44Z\"),\n\t\t\t\"optimeDurableDate\" : ISODate(\"2021-01-11T05:31:44Z\"),\n\t\t\t\"lastHeartbeat\" : ISODate(\"2021-01-11T05:31:44.747Z\"),\n\t\t\t\"lastHeartbeatRecv\" : ISODate(\"2021-01-11T05:31:43.886Z\"),\n\t\t\t\"pingMs\" : NumberLong(0),\n\t\t\t\"lastHeartbeatMessage\" : \"\",\n\t\t\t\"syncingTo\" : \"mongo1:27017\",\n\t\t\t\"syncSourceHost\" : \"mongo1:27017\",\n\t\t\t\"syncSourceId\" : 0,\n\t\t\t\"infoMessage\" : \"\",\n\t\t\t\"configVersion\" : 1\n\t\t}\n\t],\n\t\"ok\" : 1,\n\t\"$clusterTime\" : {\n\t\t\"clusterTime\" : Timestamp(1610343104, 1),\n\t\t\"signature\" : {\n\t\t\t\"hash\" : BinData(0,\"AAAAAAAAAAAAAAAAAAAAAAAAAAA=\"),\n\t\t\t\"keyId\" : NumberLong(0)\n\t\t}\n\t},\n\t\"operationTime\" : Timestamp(1610343104, 1)\n}\n\n", "text": "Hi @BM_Sharma ,", "username": "Anusha_Reddy" }, { "code": "version: '3.9'\n\nservices:\n mongo-0-a:\n image: mongo:4.4\n ports:\n - 27017:27017\n volumes:\n - mongo-0-a:/data/db\n restart: 
unless-stopped\n command: \"--wiredTigerCacheSizeGB 0.25 --replSet rs0\"\n\n mongo-0-b:\n image: mongo:4.4\n ports:\n - 27117:27017\n volumes:\n - mongo-0-b:/data/db\n restart: unless-stopped\n command: \"--wiredTigerCacheSizeGB 0.25 --replSet rs0\"\n\n mongo-0-c:\n image: mongo:4.4\n ports:\n - 27217:27017\n volumes:\n - mongo-0-c:/data/db\n restart: unless-stopped\n command: \"--wiredTigerCacheSizeGB 0.25 --replSet rs0\"\nvolumes:\n mongo-0-a:\n mongo-0-b:\n mongo-0-c:\nrs.initiate(\n { \n \"_id\" : \"rs0\",\n \"members\" : [\n { \n \"_id\" : 0,\n \"host\" : \"this-laptop.domain:27017\"\n },\n { \n \"_id\" : 1,\n \"host\" : \"this-laptop.domain:27117\"\n },\n { \n \"_id\" : 2,\n \"host\" : \"this-laptop.domain:27217\"\n }\n ]\n }\n)\n", "text": "@Anusha_Reddy\nSorry I set you wrong. I read you were trying to connect to 127.0.0.1 and did the mental leap that was all you were trying to do… despite the topic.With that in mind and you still want to run a replicaset on one host…Note: You will need to layer TLS and Authentication in.\nI would mount another volume on each container for a configuration mount for Keys and a mongod.confmongo “mongodb://this-laptop.domain:27217/admin?replicaSet=rs0” --quiet\nWelcome to the MongoDB shell.\nFor interactive help, type “help”.\nFor more comprehensive documentation, see\nhttps://docs.mongodb.com/\nQuestions? Try the MongoDB Developer Community Forums\nMongoDB Developer Community Forums - A place to discover, learn, and grow with MongoDB technologies\nrs0:PRIMARY> db.isMaster().hosts\n[\n“this-laptop.domain:27017”,\n“this-laptop.domain:27117”,\n“this-laptop.domain:27217”", "username": "chris" }, { "code": " extra_hosts:\n - \"test-domain:mongo1\"\n127.0.0.1\tlocalhost\n127.0.1.1\txxxx-ThinkPad-X270\n#127.0.0.1 mongo1 mongo2 mongo3\n192.168.0.2\tmongo1\n192.168.0.3\tmongo2\n192.168.0.4\tmongo3\n{\n\t\"operationTime\" : Timestamp(0, 0),\n\t\"ok\" : 0,\n\t\"errmsg\" : \"No host described in new configuration 1 for replica set rs0 maps to this node\",\n\t\"code\" : 93,\n\t\"codeName\" : \"InvalidReplicaSetConfig\",\n\t\"$clusterTime\" : {\n\t\t\"clusterTime\" : Timestamp(0, 0),\n\t\t\"signature\" : {\n\t\t\t\"hash\" : BinData(0,\"AAAAAAAAAAAAAAAAAAAAAAAAAAA=\"),\n\t\t\t\"keyId\" : NumberLong(0)\n\t\t}\n\t}\n}\n", "text": "Hi Chris,Thanks for the reply.can you let me know bit more about alias/cname to the docker host ?as I already have host-name in my docker file ‘mongo1’, I have tried adding one more entry to mongo1 in my /etc/hosts file and extra_hosts in my docker file and using aliases in networks.with the above syntax i get error during initialization.sorry, this is bit confusing for me. It would be great if you have docker file or an etc/hosts file showing how to add an alias name.Thank you", "username": "Anusha_Reddy" }, { "code": "", "text": "Hi chris,thanks for pointing out the most important factor of replica-set redundancy.We are planning to use this for production. 
But how do i run mongod on different docker containers on different machines and join them as replica set.I know this is bit out of context to my question, but do you have any starting point where i can know about replica set initialization when running mongod on different hosts.I can only find on internet mostly the way I do it.Thank you.", "username": "Anusha_Reddy" }, { "code": "", "text": "I know this is bit out of context to my question, but do you have any starting point where i can know about replica set initialization when running mongod on different hosts.M103: Basic Cluster Administration is a great place to start.We are planning to use this for production. But how do i run mongod on different docker containers on different machines and join them as replica set.Discover our MongoDB Database Management courses and begin improving your CV with MongoDB certificates. Start training with MongoDB University for free today.", "username": "chris" }, { "code": "", "text": "Thanks, I will look into it.Can you let me know what i was doing wrong for the alias name for the docker host in other reply? I couldn’t get that right.Thank you .", "username": "Anusha_Reddy" }, { "code": "mongod.confreplication:\n replSetName: prod\nnet:\n bindIp: 127.0.0.1\nbindIpmkdir -p ~/mongodb-prod/{config,db}\ncd ~/mongodb-prod\nvim config/mongod.conf # hack your config file\ndocker run -d -p 27017:27017 -h $(hostname) --name mongo -v /home/polux/mongodb-prod/config/:/etc/mongo -v /home/polux/mongodb-prod/db/:/data/db mongo:4.4.3 --config /etc/mongo/mongod.conf\nrs.initiate({your-conf})# connect with\ndocker exec -it mongo mongo\n\n# initiate with\nrs.initiate({\n _id: \"prod\",\n members: [\n { _id: 0, host: \"host1:27017\" },\n { _id: 1, host: \"host2:27017\" },\n { _id: 2, host: \"host3:27017\" }]});\n", "text": "I gave it some thoughts and here is something to get you started.Here is my mongod.conf file which you will definitely want to expend on:You will most probably do something for the logs, add some authentification mechanism and you will also need to bindIp all the IP addresses that will be able to access this cluster, starting with the other nodes in that same cluster.Once you have done the same thing on your X servers with X>=3 for a production environment, you can then connect to one of the node and rs.initiate({your-conf}) with something like:Then your cluster should be ready to work.\nLike I said, don’t follow what I just said to the letter ─ but I think it’s a good starting point at least.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "mongo --ssl --sslCAFile /etc/mongodb/ssl/testing.ca.crt --host rs0/<host-ip-address>:27011,<host-ip-address>:27012,<host-ip-address>:27013 --sslPEMKeyFile /etc/mongodb/ssl/client.pem --authenticationDatabase '$external' --authenticationMechanism 'MONGODB-X509'\n\nmongo --ssl --sslCAFile /etc/mongodb/ssl/testing.ca.crt --host rs0/<host-ip-address>:27011,<host-ip-address>:27012,<host-ip-address>:27013 --sslPEMKeyFile /etc/mongodb/ssl/remote_client.pem --authenticationDatabase '$external' --authenticationMechanism 'MONGODB-X509'\n\n{\"t\":{\"$date\":\"2021-01-19T08:14:08.803Z\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4333208, \"ctx\":\"ReplicaSetMonitor-TaskExecutor\",\"msg\":\"RSM host selection timeout\",\"attr\":{\"replicaSet\":\"rs0\",\"error\":\"FailedToSatisfyReadPreference: Could not find host matching read preference { mode: \\\"nearest\\\" } for set rs0\"}}\nError: connect failed to replica set 
rs0/<host-ip-address>:27011,<host-ip-address>:27012,<host-ip-address>:27013 :\nconnect@src/mongo/shell/mongo.js:374:17\n@(connect):3:6\nmongo --ssl --sslCAFile /etc/mongodb/ssl/testing.ca.crt --host <host-ip-address>:27011 --sslPEMKeyFile /etc/mongodb/ssl/remote_client.pem --authenticationDatabase '$external' --authenticationMechanism 'MONGODB-X509'\n", "text": "Hi Chris,After spending some time and trying different methods for connecting remote mongodb instance using nginx reverse proxy mechanism or setting bind_ip to 0.0.0.0 and enabling ssl/tls protection. I found out the exact problem.Below command running on the host where all mongod running on docker, which is working fine.Below command is from remote client, which doesn’t workBut surprisingly, I can connect to individual host from remote client but not to replica set. Below command works.I see that, in your previous answer, you also connect to single host. but why is it like that? if i connect to single host, if that host fails, I wouldn’t get replica set benefits right?Do you have any idea why is this happening.", "username": "Anusha_Reddy" }, { "code": "", "text": "I think it was already mentioned by someone above: Because MongoDB is configured in replica set mode it has different communication, replica set uses discovery mechanism for the set, the\nmember name in the configuration of the set and the host in the\nconnection must match. You need to do some kind of tricks with DNS to make this, e.g. host files or split-horizon DNS.", "username": "Yuki_N_A" }, { "code": "", "text": "", "username": "Stennie_X" } ]
Configure remote access to mongodb running on docker with replica set on ubuntu
2021-01-08T10:27:50.359Z
Configure remote access to mongodb running on docker with replica set on ubuntu
52,119
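To summarize the resolution of this thread: with a replica set, clients discover the members from the host:port pairs stored in the replica set configuration, so those pairs must be resolvable and reachable from every client, not only from the Docker host. A hedged sketch of a configuration that matches ports published as 27011-27013 (the host name is a placeholder that every client and every container must be able to resolve, and TLS certificates would need to be issued for it):

```javascript
// mongosh / mongo shell, run once against one member
rs.initiate({
  _id: "rs0",
  members: [
    { _id: 0, host: "docker-host.example:27011", priority: 2 },
    { _id: 1, host: "docker-host.example:27012" },
    { _id: 2, host: "docker-host.example:27013" },
  ],
});

// Remote clients then connect with exactly the same names and ports:
// mongo "mongodb://docker-host.example:27011,docker-host.example:27012,docker-host.example:27013/?replicaSet=rs0" --ssl ...
```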
null
[ "mdbw22-communitycafe" ]
[ { "code": "", "text": "Come test your smarts with general MongoDB and #LifeAtMongoDB trivia for bragging rights!", "username": "TimSantos" }, { "code": "", "text": "Ready to start !\nCommunity Cafe starting up1920×1440 171 KB\n", "username": "wan" }, { "code": "", "text": "\nimage1920×1440 257 KB\n\n\nimage1920×2560 508 KB\n\n\nimage1920×2560 401 KB\n", "username": "TimSantos" }, { "code": "", "text": "Congrats to Adrian Kurt for winning a 20 min virtual chat with Mark Porter!\nimage1920×2560 356 KB\n", "username": "TimSantos" } ]
#LifeAtMongoDB Trivia
2022-06-06T13:59:14.923Z
#LifeAtMongoDB Trivia
3,186
null
[ "aggregation", "queries" ]
[ { "code": "", "text": "Hello.I have this type of objects in my collection and I need to be able to group them by attributes. (which is nested array).\nThe problem is the attributes array when unwinded I loose sight of what are neighbouring attributes. So I atm don’t have a good ideas on how to count totals. Any ideas or suggestions how I should transform collection in the first place.and I need to be able to get this output from agregation pipeline.", "username": "kscerbiakas" }, { "code": "", "text": "I am pretty sure you may apply the same solution as", "username": "steevej" }, { "code": "", "text": "You might be able to do something similar to the other question, but what’s not clear to me is where are the numbers in the final document you want going to come from? What is the total of?", "username": "Asya_Kamsky" }, { "code": "", "text": "Hey, thanks for reply. The numbers are totals of values present in the collection across the arrays", "username": "kscerbiakas" }, { "code": "", "text": "There are a few things not clear in your requirement.", "username": "steevej" } ]
How to query nested array of objects and group & count by one of its properties? Tips?
2022-05-29T07:41:50.005Z
How to query nested array of objects and group & count by one of its properties? Tips?
5,372
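Since the sample documents in this thread were attached as images and are not visible here, only a generic sketch is possible. Assuming a shape along the lines of { attributes: [ { name, value }, ... ] }, the usual pattern is to $unwind the array and $group on the attribute pair:

```javascript
db.collection.aggregate([
  { $unwind: "$attributes" },
  {
    $group: {
      _id: { name: "$attributes.name", value: "$attributes.value" },
      total: { $sum: 1 }, // how many documents carry this attribute value
    },
  },
  { $sort: { total: -1 } },
]);
```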
null
[]
[ { "code": "", "text": "Hi we have a very large collection ~900m documents with an average size of 13.6k. Documents live for around a week then need to be deleted, initially we had a capped collection but we didn’t get good enough performance so we tried having a daemon to monitor the number of records in the DB and delete any over a threshold value. This worked for a year or so but the steadily increasing volume we are receiving means this approach is now failing. Investigating the deletes I was surprised to see how slow they are. While inserts are maxing about 15k/s deletes are only 2k/s and often less.Some typical values from the logs are\nRemoved 79073933 documents in 794.03105mins\nRemoved 92086070 documents in 662.8366666666667minsFor context we receive about 300m documents/day that are stored in mongo.The collection has only 4 single-field indexes in addition to id and the actual delete is performed by javascript loaded into the Mongo shell. The delete logic finds the oldest timestamp in the collection then calculates timestamp2 by stepping forward in time by x minutes until there are enough records where timestamp <= timestamp2 to delete. The actual deletion was by timestamp (indexed field) but we got better performance by using the ID of the latest record we want to delete then executing\ncollection.deleteMany({ “_id”: { $lte: id } });There is only the one server, no replica sets. Thankfully the data is not mission-critical and only stored on a ‘best effort’ basis, but lately that best effort is not good enough.Any thoughts gratefully received.", "username": "RSommer" }, { "code": "", "text": "Hi @RSommer and welcome in the MongoDB Community !So just to be clear, you are running a 900m*13.6k = 12,24 TB collection on a single standalone node with about 300m/24/60/60 = 3472 writes per seconds in average. Is that correct?If that’s correct, 12,24 TB is HUGE. Usually we recommend to start Sharding around 2 TB of uncompressed data. We are 6 times over that value.Can you share more information about the server specs that you are using to support this? I think you just reached the limits of physic and it’s time to shard and distribute the data on multiple nodes.Also, I guess you have tried and this approach and it’s probably not working for you, but did you try the TTL indexes?Oh and also, is this a single node Replica Set or just a stand alone node without an oplog?Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "Hi @MaBeuLux88\nYes correct on the storage side but the writes are very variable much higher during market hours and tailing off steeply at night, it peaks at ~20K inserts and is rarely under 7k during the day.I did set up a sharded cluster a couple of years ago but throughput was disappointing, I can revisit it, I may have done something wrong. I wasn’t aware of the 2TB guideline.TTL indexes we didn’t try yet I thought that might be too hard to manage being time based so a flood of data on a particularly busy day would not get deleted promptly.Standalone machine, no oplog, when I joined the project we had dial node replication and capped collections but we had to remove replication as the volume grew. 
If we are at the limits of vertical scaling then it’s probably time to revisit shards but should we really expect such a huge discrepancy between insert and delete?the machine spec is\n$lscpu\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nCPU(s): 80\nOn-line CPU(s) list: 0-79\nThread(s) per core: 2\nCore(s) per socket: 20\nSocket(s): 2\nNUMA node(s): 2\nVendor ID: GenuineIntel\nCPU family: 6\nModel: 79\nModel name: Intel(R) Xeon(R) CPU E5-2698 v4 @ 2.20GHz\nStepping: 1\nCPU MHz: 2200.000\nCPU max MHz: 2200.0000\nCPU min MHz: 1200.0000\nBogoMIPS: 4394.81\nVirtualization: VT-x\nL1d cache: 32K\nL1i cache: 32K\nL2 cache: 256K\nL3 cache: 51200K\nNUMA node0 CPU(s): 0-19,40-59\nNUMA node1 CPU(s): 20-39,60-79with 64Gb installedCheers", "username": "RSommer" }, { "code": "", "text": "I suspect a lot of my answer is what you expect to hear from a MongoDB employee - but it’s also what you are going to hear from any expert.(a) Depending on the write concern you are using running a Standalone is not only incredibly dangerous but also potentially a lot slower then running a replica set (A single node replica set only has to journal the oplog not the data) - a 3 node (The minimum required) replica set keeps your data safe.\n(b) 12TB is far too large for a single instance , when you are inserting your writes are batched and written to the disk during a checkpoint mostly sequentially. When deleting it’s quite possible there is a bunch of random access, seeks and the need to read those blocks from disk back into cache to delete them - all of which will be much much slower than inserting.If you want to speed things up here are some options.Make your data a lot smaller (Have you optimised the schema and usage well?)\nEnsure your indexes and ideally all data you edit (including deletes) fits in RAM - this is hard but you need either sharding or much larger servers. 64GB isn’t a very large server these days.Consider not deleting and inserting (depending how you are using the data) but instead overwriting old documents with new ones retaining only _id , you woudl do this based on date and add upsert in case you have to create a new document.Partition your collections by age - drop old whole collections to get rid of older documents - use $unionWith when querying to query multiple collections at once (or move to cloud and use the datalake)This si the sort of thing professional services sort out for companies but there is no getting away from the fact hosting 12+TB of data isn’t cheap.", "username": "John_Page" }, { "code": "", "text": "Thanks John, yes using a standalone is not ideal, we used to have replication but the performance lagged behind what we needed, cutting back to the bare minimum has been the only way we’ve been able to keep up (until now) and as we are only storing data on a best effort basis we’ve been able to put up with the inconvenience. 
Thanks for the info on the deletes, that could well be what we see: the deletes happen at the start of the collection and the inserts at the end, so I can imagine there's a lot of paging from disk happening. There's not really an option to reduce the data, we are at the mercy of what is being sent to us, we can't control the JSON. $unionWith sounds very interesting, I wondered about keeping a collection a day and just dropping a day at a time but I thought my queries would suffer, I'll certainly look into this. Looks like it's time to shard, I'll order more memory for the servers and resurrect my sharding experiments.", "username": "RSommer" } ]
Document deletes very slow, much slower than inserts
2022-05-25T15:06:30.474Z
Document deletes very slow, much slower than inserts
7,888
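One way to make the "partition your collections by age" idea from the thread above concrete is a small mongosh sketch along the following lines. It is only an illustration, not code from the thread: the ticks_* collection names, the 7-day retention window and the field names are invented, and $unionWith needs MongoDB 4.4 or newer.

// Writers insert into a collection named after the current UTC day.
const day = new Date().toISOString().slice(0, 10).replace(/-/g, "_");
db.getCollection(`ticks_${day}`).insertOne({ ts: new Date(), payload: "..." });

// A nightly job drops whole collections older than the retention window;
// dropping a collection is far cheaper than deleting millions of documents.
const keep = 7; // days to retain
db.getCollectionNames()
  .filter(name => name.startsWith("ticks_"))
  .sort()
  .slice(0, -keep)
  .forEach(name => db.getCollection(name).drop());

// Queries that span several days stitch the partitions back together.
const since = new Date(Date.now() - 36 * 60 * 60 * 1000);
db.getCollection(`ticks_${day}`).aggregate([
  { $match: { ts: { $gte: since } } },
  { $unionWith: { coll: "ticks_2022_05_24", // previous day's partition, hard-coded here only for illustration
                  pipeline: [ { $match: { ts: { $gte: since } } } ] } }
]);

A TTL index on the timestamp field is the simpler alternative mentioned in the thread, but it removes documents one by one in the background, so it runs into the same delete-throughput limits.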
null
[ "compass" ]
[ { "code": "{\n \"bands\": [{\n \"activity\": \"Split-up\",\n \"bandMembers\": [\"https://www.metal-archives.com/artists/Jon_Powlowski/760544\", \"https://www.metal-archives.com/artists/Ruben_Martinez/760545\", \"https://www.metal-archives.com/artists/Greg_Eickmier/416646\", \"https://www.metal-archives.com/artists/Nedwob/471955\"],\n \"bandName\": \"A // Solution\",\n \"country\": \"United States\",\n \"dateAdded\": \"2018-08-04\",\n \"genre\": \"Crust Punk/Thrash Metal\",\n \"label\": {\n \"labelName\": \"Voltic Records\",\n \"labelUrl\": \"https://www.metal-archives.com/labels/Voltic_Records/47794\"\n },\n \"location\": \"California\",\n \"lyricalThemes\": \"N/A\",\n \"releases\": [{\n \"numReviews\": 0,\n \"releaseName\": \"Butterfly\",\n \"reviewAverage\": null,\n \"type\": \"EP\",\n \"url\": \"https://www.metal-archives.com/albums/A_--_Solution/Butterfly/723154\",\n \"year\": \"1989\"\n }, {\n \"numReviews\": 0,\n \"releaseName\": \"Things to Come\",\n \"reviewAverage\": null,\n \"type\": \"EP\",\n \"url\": \"https://www.metal-archives.com/albums/A_--_Solution/Things_to_Come/723155\",\n \"year\": \"1995\"\n }\n ],\n \"similarArtists\": null,\n \"url\": \"https://www.metal-archives.com/bands/A_--_Solution/3540442600\",\n \"yearFormed\": \"N/A\",\n \"yearsActive\": \"N/A\"\n }, {\n \"activity\": \"Active\",\nUnexpected token : in JSON at position 0 while parsing near ': null,\n", "text": "I am on Windows 10. I recently obtained a large JSON file (200 MB) via webscraping, and I am now trying to import the file to MongoDB using Compass Community via the import data button. However, whenever I try to import the file, I get the following error:Unexpected token l in JSON at position 0 while parsing near 'lHere are the first few lines of the JSON file I am trying to import:Does anyone have an idea on how I can fix this error?EDIT: I just got this error after restarting compass:Is this error related at all to the other one?", "username": "Guy_Berreby" }, { "code": "", "text": "Can you post the whole JSON file here or a link to it so we can download it and test it?", "username": "Massimiliano_Marcon" }, { "code": "", "text": "Here is a link to the dropbox", "username": "Guy_Berreby" }, { "code": "", "text": "Hello,\nI am having the similar problem. I try to import Json file using MongoDB Compass, and getting this error:\nimage734×827 29.9 KB\nI tried several json files, but it never worked out. Also, importing file through Studio 3T does not work.Any idea or solution for this issue?Json files are here:\nforge-rcdb.nodejs/resources/db at master · Autodesk-Forge/forge-rcdb.nodejs · GitHub", "username": "Mete_Boncukcu" }, { "code": " \"_id\": {\n \"$oid\": \"626c18a3d880647a888888ff\"\n }\n\"_id\" : ObjectId( \"626c18a3d880647a888888ff\" )\n", "text": "Object(…) is not pure JSON.Who ever produced that JSON file did not do it correctly. If you try to export a collection, from Compass for example, you will see that the object id is coded asrather thanYou would need to modify you files to use the correct format.", "username": "steevej" } ]
I am getting an error when trying to import a JSON file to MongoDB via Compass
2020-05-29T01:49:13.976Z
I am getting an error when trying to import a JSON file to MongoDB via Compass
10,550
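Following on from steevej's point above, a rough Node.js one-off script can rewrite the shell-style ObjectId(...) syntax into the Extended JSON form that Compass expects before importing. This is only a sketch, not part of the thread: the file names are placeholders, it assumes the file is small enough to read into memory, and other shell-only constructs (dates, NumberLong, etc.) would need similar treatment.

const fs = require("fs");

// Read the broken export, rewrite ObjectId("...") into { "$oid": "..." },
// and write the result next to the original file.
const input = fs.readFileSync("rcdb.json", "utf8");
const fixed = input.replace(
  /ObjectId\(\s*"([0-9a-fA-F]{24})"\s*\)/g,
  (match, hex) => `{ "$oid": "${hex}" }`
);
fs.writeFileSync("rcdb.fixed.json", fixed);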
null
[ "connecting", "golang" ]
[ { "code": "", "text": "I’m using golang and mongo-go-driver\nI’m pretty confident that the code has no problem for it is pretty simple test example.\nbut suspecting something could be done with connection url here:client, err := mongo.NewClient(options.Client().ApplyURI(“mongodb+srv://myid:[email protected]/Databases?retryWrites=true&w=majority”))I’ve got peer connection between GCP VPC to Atlas.\nhave 0.0.0.0/0 for IP access list.\nbut I’m gettingserver selection error: server selection timeout, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: firstcluster-shard-00-00-pri.ubx7y.mongodb.net:27017, Type: Unknown, Average RTT: 0, Last error: connection() error occured during connection handshake: dial tcp 192.168.208.2:27017: i/o timeout }, { Addr: firstcluster-shard-00-01-pri.ubx7y.mongodb.net:27017, Type: Unknown, Average RTT: 0, Last error: connection() error occured during connection handshake: dial tcp 192.168.208.3:27017: i/o timeout }, { Addr: firstcluster-shard-00-02-pri.ubx7y.mongodb.net:27017, Type: Unknown, Average RTT: 0, Last error: connection() error occured during connection handshake: dial tcp 192.168.208.4:27017: i/o timeout }, ] }I have no idea what causes the problem…", "username": "_Toby_Kim" }, { "code": "", "text": "Hi @_Toby_Kim,Welcome to MongoDB community!The connection string you are using is for Private network peeringHave you established one with your gcp host who runs and use it? The 0.0.0.0 whitelist won’t work with this connection string and you need to use a standard connection from connect tab.Best\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "After many hours of breaking my head, I found the solution. Thank you.", "username": "Shrimanikandan_Ananthavaidhyanathan" }, { "code": "", "text": "Hi,What was the solution for you?", "username": "Marcel_Heijmans" } ]
GCP connection error
2020-11-13T10:41:57.757Z
GCP connection error
8,875
https://www.mongodb.com/…4_2_1024x512.png
[ "dot-net", "production" ]
[ { "code": "Support for MongoDB server version 6.0.0\n[BETA] Support for Queryable Encryption\nSupport for creating collections with clustered indexes\nUse count command for estimatedDocumentCount\nLINQ3 bug fixes and improvements\nSupport for carry forward ($locf) and numerical/date densification ($densify)\nSupport for limit in $filter expressions\nSupport point-in-time pre-/post-images in change streams\nPreemptively cancel in-progress operations when SDAM heartbeats timeout\n", "text": "This is the general availability release for the 2.16.0 version of the driver.The main new features in 2.16.0 include:EstimatedDocumentCount and Stable APIEstimatedDocumentCount is implemented using the count server command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB, the count command, which EstimatedDocumentCount uses in its implementation, was not included in v1 of the Stable API. If you are using the Stable API with EstimatedDocumentCount, you must upgrade to server version 5.0.9+ or set strict: false when configuring ServerApi to avoid encountering errors.For more information about the Stable API see:https://mongodb.github.io/mongo-csharp-driver/2.16/reference/driver/stable_api/The full list of JIRA issues resolved in this release is available at:https://jira.mongodb.org/issues/?jql=project%20%3D%20CSHARP%20AND%20fixVersion%20%3D%202.16.0%20ORDER%20BY%20key%20ASCDocumentation on the .NET driver can be found at:", "username": "Dmitry_Lukyanov" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
.NET Driver 2.16.0 Released
2022-06-07T00:05:33.694Z
.NET Driver 2.16.0 Released
2,007
https://www.mongodb.com/…4_2_1024x595.png
[]
[ { "code": "WARNING in ./node_modules/bson/dist/bson.browser.esm.js 2196:26-55\nModule not found: Error: Can't resolve 'crypto' in '/Users/sourabh/Work/expengo/node_modules/bson/dist'\n\nBREAKING CHANGE: webpack < 5 used to include polyfills for node.js core modules by default.\nThis is no longer the case. Verify if you need this module and configure a polyfill for it.\n\nIf you want to include a polyfill, you need to:\n - add a fallback 'resolve.fallback: { \"crypto\": require.resolve(\"crypto-browserify\") }'\n - install 'crypto-browserify'\nIf you don't want to include a polyfill, you can use an empty module like this:\n resolve.fallback: { \"crypto\": false }\n", "text": "While following the steps mentioned in the tutorial:Learn how to perform Authentication, Authorization, CRUD operations, Analytics & Web App Deployment by building an Expense Manager in…\nReading time: 6 min read\nFaced the following issue when trying to start the React.js Development server:\nScreenshot 2022-06-05 at 18.20.531280×800 116 KB\n", "username": "ayomide_wilfred" }, { "code": "", "text": "Hi @ayomide_wilfred, welcome to the community. \nThank you for pointing this out, since that console message is a warning and NOT an error it won’t break your React app, therefore you should be able to continue with the rest of the tutorial without any issues.\nJust open the browser and visit localhost:3000 and you will be able to see all your components as expected.\nHowever, if you want to resolve the issue stated in the warning, please follow the instructions mentioned in the following post:If you have any doubts, please feel free to reach out to us.Thanks and Regards.\nSourabh Bagrecha,\nMongoDB", "username": "SourabhBagrecha" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Realm-Web warning: Include polyfill for crypto-browserify
2022-06-05T18:24:53.761Z
Realm-Web warning: Include polyfill for crypto-browserify
4,230
https://www.mongodb.com/…9_2_953x1024.png
[ "node-js", "transactions", "typescript" ]
[ { "code": "await session.endSession();endSession()withTransactiontransactionResults withTransactionwithTransactioncatch(e)iftransactionResults", "text": "I’m trying to implement the solution described here, in Node.js typescript.There are 2 things that I would like to have clarified:Thanks so much if you can help explain the above, and then we can maybe talk about fixing the types I inserted a refrence screenshot here:\nScreen Shot 2021-04-21 at 09.50.471150×1235 155 KB\n", "username": "Alex_Bjorlig" }, { "code": "", "text": "Hi @Lauren_Schaefer - sorry for using the mention feature - but would love your answers on the above questions ", "username": "Alex_Bjorlig" }, { "code": " if (transactionResults) {\n console.log(\"The reservation was successfully created.\");\n // This code will be reached if the transaction succeeded\n } else {\n console.log(\"The transaction was intentionally aborted.\");\n // This code will be reached if session.abortTransaction() was called in the transaction\n }\n} catch (e) {\n console.log(\"The transaction was aborted due to an unexpected error: \" + e);\n // This code will be reached if something besides an intentional session.abortTransaction() causes the transaction to be aborted\n", "text": "Hey Alex,Happy to hear you’re trying out the tutorial, but it’s a bummer it’s not working for you! I don’t know Typescript, so, unfortunately, I can’t personally offer much help there. Let me reach out to the Node drivers team, and see if we can get some answers on your first two questions.Here are the docs for endSession: Class: ClientSessionHere are the docs for withTransaction: Class: ClientSessionFor question 3 - Let me see if I can describe it in a different way:", "username": "Lauren_Schaefer" }, { "code": "withTransactionthrow new Errror('I was aborted')", "text": "Ok, it might be a typings issue, because according to typescript withTransaction is void . I actually found this commit in the types repo, where there seems to be a types fix for both the return value and await (question 1 + 2).Thanks for explaining question 3. So if we do throw new Errror('I was aborted') on the else case in your reply → all errors will then be handled in the catch?This is pretty important to me, because when we utilize transactions, I’m mostly interested in knowing if it suceeded or not ", "username": "Alex_Bjorlig" }, { "code": "", "text": "Yes, if you throw an Error in the else { }, the catch() { } will catch it. So then any transaction that was intentionally aborted using abortTransaction() or aborted due to an unexpected error will be caught by the catch() {}.", "username": "Lauren_Schaefer" }, { "code": "withTransactionundefined", "text": "Hi Alex,Thanks for taking a close look at our transaction tutorial. I see there’s a bit of confusion between the community types and our actual functionality. Luckily the examples from the article are accurate as Lauren has clarified withTransaction returns a command result if the transaction was successful and returns nothing or undefined if the transaction was intentionally aborted.I’ve created a ticket for us (NODE-3278) to clarify the return types and add some more descriptive notes to these transaction functions in our documentation.Something to look forward to is our next major release, coming soon, is written in Typescript and we hope that it will offer high quality and accurate type information. 
It’s not yet production ready yet but you can check out the beta here if that is of interest.", "username": "neal" }, { "code": "", "text": "Hi @neal.I can’t emphasize how important the change to typescript is. We are using a bunch of libraries in our application, and it’s always a big relief when typings are officially supported. It’s the first entry-point of documentation Thanks for sharing the links, and our team is looking very much forward to the release written in Typescript with accurate type information ", "username": "Alex_Bjorlig" }, { "code": "session.withTransaction", "text": "Just updated to v4 → but I think the typings are still off?If I don’t give any type arguments to session.withTransaction it still shows void as returnType, but that is not correct(!)The function does actually return a document with ok, clusterTime etc.", "username": "Alex_Bjorlig" }, { "code": "@types/mongodbwithTransaction<T = void>(fn: WithTransactionCallback<T>, options?: TransactionOptions): ReturnType<typeof fn>;\n// and the callback type is:\ntype WithTransactionCallback<T = void> = (session: ClientSession) => Promise<T>;\nReturnType<typeof fn>withTransactionwithTransaction", "text": "Hey @Alex_Bjorlig, what is the exact version number of the driver you have installed? And have you removed the @types/mongodb package from your project?In 4.0.0 the type definition was updated to be:So your callback should be forced to return a promise and then the ReturnType<typeof fn> should extract the return type of withTransaction from the function you pass in to withTransaction. Is there an issue with typescript knowing the type that your callback returns potentially?", "username": "neal" }, { "code": "4.1.0@types/mongodbwithTransactionwithTransactionwithTransactionwithTransaction", "text": "I use 4.1.0.\nI also removed the @types/mongodb.I think the types still need some work for the withTransaction;", "username": "Alex_Bjorlig" }, { "code": "", "text": "Hi @neal - let’s get this thing demystified. Do you have any answers for the 3 points - or do you wan’t me to help do some digging?/Alex", "username": "Alex_Bjorlig" }, { "code": "withTransation{\n ok: 1,\n '$clusterTime': {\n clusterTime: new Timestamp(117, 1630709522),\n signature: {\n hash: new Binary(Buffer.from(\"0000000000000000000000000000000000000000\", \"hex\"), 0),\n keyId: 0\n }\n },\n operationTime: new Timestamp(116, 1630709522)\n}\n", "text": "So currently, the return type of withTransation is something like the following:", "username": "Alex_Bjorlig" }, { "code": "", "text": "Heyyy… was there any clarification given to this? Did this ever get updated/changed?", "username": "Maxwell_Krause" }, { "code": "", "text": "According to this JIRA ticket, the issue is fixed in 4.7.0", "username": "Alex_Bjorlig" } ]
Transaction blog-post mistakes - in Node.js
2021-04-21T07:52:15.308Z
Transaction blog-post mistakes - in Node.js
7,783
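For readers landing on this thread, here is a minimal sketch of the control flow being discussed, written against the Node.js driver. It is not the tutorial code itself: the connection string, database, collection and field names are placeholders, and it assumes a replica set, since transactions do not work on a standalone mongod.

const { MongoClient } = require("mongodb");

async function run() {
  const client = new MongoClient("mongodb://localhost:27017");
  await client.connect();
  const session = client.startSession();
  try {
    await session.withTransaction(async () => {
      const reservations = client.db("test").collection("reservations");
      const existing = await reservations.findOne({ room: 101 }, { session });
      if (existing) {
        // Intentional abort: no writes from this transaction are applied.
        await session.abortTransaction();
        return;
      }
      await reservations.insertOne({ room: 101, guest: "Alex" }, { session });
    });
    console.log("Transaction finished (committed or intentionally aborted).");
  } catch (err) {
    // Unexpected failures (network errors, exhausted retries, ...) end up here.
    console.error("Transaction aborted due to an unexpected error:", err);
  } finally {
    await session.endSession();
    await client.close();
  }
}

run();

As the thread notes, the exact return value of withTransaction has shifted between driver versions, so it is safer to branch on your own state (or a thrown error) than on the returned document.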
null
[ "database-tools", "backup" ]
[ { "code": "", "text": "Hi, I have this problem. I need to migrate an old database that was running in an old computer. The computer is broken, but I have a backup of it. I need to migrate this database to a new computer. The thing is I can’t do mongodump because the old computer is broken and the old database is not anymore active there. Is there any other way that I can migrate this old datenbank from the back up that I have to a new computer. Thank You !!", "username": "Marvi_Karroci" }, { "code": "", "text": "Hi @Marvi_Karroci welcome to the community!I think as long as you can salvage the old database’s dbpath, you might be able to run it on a new server using the same MongoDB version as the one you’re using in the old server.Having said that, I don’t think this is a question specific to MongoDB, but rather, it’s a general question about restoring from a backup. In order to obtain a copy of the dbpath from the backup, please refer to the backup software’s instructions.Best regards\nKevin", "username": "kevinadi" } ]
Migrate database
2022-06-05T23:59:17.961Z
Migrate database
1,507
null
[ "crud" ]
[ { "code": "ElectricBilll.updateMany(\n {\n company:{$eq:idCompany}\n }, \n {\n $set:\n {\n required_kvar : \"$kw_base\" // this not work\n }\n });\n", "text": "Hello mongodb teamI need to do a bulk update, I need to pass the value of one field to another and multiply it by a factor.I have read the other questions but I did not see an answer to the problem that I expose.Is this possible?This is a very simple code:Thanks", "username": "Victor_Faudoa" }, { "code": "", "text": "The update operator $set is to set the value of a field. If you want tomultiplyyou need $mul.", "username": "steevej" } ]
Update field using value of another field
2022-06-06T22:08:17.829Z
Update field using value of another field
2,295
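A note for readers of this thread: $mul multiplies a field by a constant in place, but copying another field's value times a factor needs an update with an aggregation pipeline (MongoDB 4.2+). A rough sketch, reusing the names from the question with an invented factor of 0.62, could look like the following; whether your ODM passes the pipeline through unchanged is worth verifying.

ElectricBilll.updateMany(
  { company: { $eq: idCompany } },
  [
    // Pipeline form of the update, so the right-hand side may reference "$kw_base".
    { $set: { required_kvar: { $multiply: ["$kw_base", 0.62] } } }
  ]
);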
null
[ "c-driver", "beta" ]
[ { "code": "", "text": "Announcing 1.22.0-beta0 of libbson and libmongoc, the libraries constituting the MongoDB C Driver.Bug fixes:Bug fixes:Improvements:Features:Notes:Thanks to everyone who contributed to this release.", "username": "Kevin_Albertson" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB C Driver 1.22.0-beta0 Released
2022-06-06T13:46:43.995Z
MongoDB C Driver 1.22.0-beta0 Released
2,336
null
[ "node-js", "atlas-device-sync", "react-js" ]
[ { "code": "", "text": "I am working on MERN stack project. Right now I am getting data from one game and storing that data into Mongodb. also displaying that data onto React page. But i need to display realtime data when player join to game data will be stored in database. to see that data refresh page in React. I need to show player real time data when joins game how can i do that?", "username": "Chirag_Patel" }, { "code": "", "text": "Hello @Chirag_Patel ,Welcome to the community!! If I understand the question correctly, you need a way to show realtime data when someone joins the game. Is this correct? When a player joins, where is the data being pushed into? Is it in database?Change streams allow applications to access real-time data changes without the complexity and risk of tailing the oplog. Applications can use change streams to subscribe to all data changes on a single collection, a database, or an entire deployment, and immediately react to them. Because change streams use the aggregation framework, applications can also filter for specific changes or transform the notifications at will.To achieve this, you can take a look at Change Streams Tutorial.Thanks,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
I have one query regarding real time data update
2022-05-13T05:03:42.635Z
I have one query regarding real time data update
4,783
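To make the change streams suggestion above a little more tangible, here is a minimal Node.js sketch. The database and collection names are invented, the delivery to React (WebSocket, Server-Sent Events, etc.) is left out, and change streams require a replica set or an Atlas cluster.

const { MongoClient } = require("mongodb");

async function watchPlayers() {
  const client = new MongoClient("mongodb://localhost:27017");
  await client.connect();
  const players = client.db("game").collection("players");

  // Only react to newly inserted player documents.
  const changeStream = players.watch([{ $match: { operationType: "insert" } }]);

  changeStream.on("change", (change) => {
    // Forward change.fullDocument to the browser here, e.g. over a WebSocket,
    // so the React page updates without a manual refresh.
    console.log("New player joined:", change.fullDocument);
  });
}

watchPlayers().catch(console.error);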
https://www.mongodb.com/…6bacd457692a.png
[ "aggregation", "queries", "atlas-search" ]
[ { "code": "await Collection.aggregate([\n {\n \"$search\": {\n \"index\": \"search\",\n compound: {\n should: [\n {\n autocomplete: {\n query: `${req.query.query}`,\n path: 'name',\n \"fuzzy\": {\n \"maxEdits\": 2,\n \"prefixLength\": 3\n }\n },\n },\n {\n autocomplete: {\n query: `${req.query.query}`,\n path: 'contract',\n \"fuzzy\": {\n \"maxEdits\": 2,\n \"prefixLength\": 3\n }\n },\n },\n ],\n },\n },\n },\n {\n $limit: 15\n }\n ])\n", "text": "Hello, I have the following query while using the search index:The problem is that the results are not very accurate, for example:\n\nCaptura de Tela 2022-06-06 às 12.43.52842×718 73.1 KB\nWhat I wrote is almost exactly the collection name of “BoredGoats” that is showing me as the second result.What can I do to solve this problem? To have more accurate results.", "username": "foco_radiante" }, { "code": "", "text": "Hey there, @foco_radiante – can you share your index definition?One idea is to boost exact matches – there is an explanation of this here and example here.", "username": "Elle_Shwer" }, { "code": "{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"contract\": [\n {\n \"foldDiacritics\": false,\n \"maxGrams\": 7,\n \"minGrams\": 3,\n \"tokenization\": \"edgeGram\",\n \"type\": \"autocomplete\"\n }\n ],\n \"name\": [\n {\n \"foldDiacritics\": false,\n \"maxGrams\": 7,\n \"minGrams\": 3,\n \"tokenization\": \"edgeGram\",\n \"type\": \"autocomplete\"\n }\n ]\n }\n }\n}\n", "text": "", "username": "foco_radiante" } ]
I need more accurate search index results
2022-06-06T15:45:12.655Z
I need more accurate search index results
2,252
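One way to apply the "boost exact matches" advice above is to add a plain text clause with a score boost alongside the autocomplete clauses, roughly as sketched below. This assumes the index also maps name and contract as ordinary string fields in addition to the autocomplete mapping shown, and the boost value of 3 is arbitrary.

await Collection.aggregate([
  {
    $search: {
      index: "search",
      compound: {
        should: [
          {
            // Standard-analyzed matches score higher than fuzzy prefix matches.
            text: {
              query: req.query.query,
              path: ["name", "contract"],
              score: { boost: { value: 3 } }
            }
          },
          { autocomplete: { query: req.query.query, path: "name" } },
          { autocomplete: { query: req.query.query, path: "contract" } }
        ]
      }
    }
  },
  { $limit: 15 }
]);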
null
[ "swift" ]
[ { "code": "", "text": "Hello everyone,\nI am working on a iOS application where i am loading data from mongodb server using @ObservedObject in swift language.Issue is when i delete my app and run again, mostly times it get no data from mongodb server and it started to get after some time.Can anyone tell me why this happening and how it can be solved", "username": "Apeksha_Saraf" }, { "code": "", "text": "@Apeksha_Saraf It would be best to post your question in the Realm and Device Sync forums for a better response -Discussions about developing applications with MongoDB Atlas App Services and Realm, including Realm SDKs, Atlas Device Sync, Atlas Data API, Atlas GraphQL API, and Atlas Triggers.Likely if you check the logs, both on the client and the server there will be some hint as to what is happening", "username": "Ian_Ward" } ]
MongoDB Data load issue in Application
2022-06-06T10:47:15.656Z
MongoDB Data load issue in Application
1,521
null
[ "aggregation", "data-modeling", "charts" ]
[ { "code": "name:\ndata: [\n {date: (ISO)\n numberCorrect: 8\n numberTotal: 10}\n {date: (ISO)\n numberCorrect: 9\n numberTotal: 10}\n {date: (ISO)\n numberCorrect: 4\n numberTotal: 5}\n]\n", "text": "I have a document set up like this:I want to create a chart that will show changes over time to the numberCorrect/numberTotal field. So, in this example, a line graph where the first entry is at 80% for that date, then the next date is 90%, then the next date is 80% again.Can MongoDB charts do this or do I need to structure the data in a different way? I need to keep the # correct and # total, I can’t store as a straight percentage because sometimes totals over a period of time will also need to be calculated (in this example the actual average is 84%, but if I stored as percentages the average would calculate to 90%.)How can I display the data in the way? Thank you!", "username": "Amber_Butler" }, { "code": "[ { $unwind: '$data' } ]\nnumberCorrect / numberTotal", "text": "Hi @Amber_Butler -You should be able to do this. To get the percentage you need to use a calculated field, but the challenge here is that the values are within an array. Rather than use the array reductions on the encoding cards, you probably need to unwind the array in the query bar, i.e. using a query like:This will result in one document per array element, so you should then be able to add a calculated field for numberCorrect / numberTotal and have it work as expected.Tom", "username": "tomhollander" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Integrating charts with data inside an array
2022-06-06T13:44:03.648Z
Integrating charts with data inside an array
2,807
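The same pipeline the chart ends up running can be tried in mongosh to sanity-check the numbers before wiring it into Charts. This is only a sketch; the quizzes collection name is a placeholder.

db.quizzes.aggregate([
  { $unwind: "$data" },                      // one document per array element
  {
    $project: {
      name: 1,
      date: "$data.date",
      pctCorrect: {                          // 8/10 -> 80, 9/10 -> 90, ...
        $multiply: [{ $divide: ["$data.numberCorrect", "$data.numberTotal"] }, 100]
      }
    }
  }
]);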
null
[ "replication" ]
[ { "code": "", "text": "Hi! I’m plannig to deploy a replica set of 3 nodes on different machines (different ips)It’s necesary to open (expose) the mongodb port on the firewall before connecting the nodes? anyone knows how to do that correctly?thanks\nAndres", "username": "andres_cozme" }, { "code": "", "text": "Hi @andres_cozme,Members of your replica set will need to be able to communicate with each other on the configured hostname/IPs and ports.Firewall configuration will depend on your O/S and network. There are some example configurations for Linux and Windows in the MongoDB manual: Network Hardening - Firewalls.I recommend reviewing the MongoDB Security Checklist for more available security measures.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Hi Stennie, thankks for que quick reply.The installer opens the default port(27017) on the firewall when it install for the first time? Or it’s neccesary to open it mannually?thanks", "username": "andres_cozme" } ]
Ports on replca set
2022-06-06T12:27:17.415Z
Ports on replca set
3,241
null
[ "installation" ]
[ { "code": "", "text": "I have been having trouble installing MongoDB on Ubuntu 20.04 (x86_64). When I run “sudo nano /var/lib/dpkg/status” , the MongoDB database tools package states:Package: mongodb-database-tools\nStatus: install ok not-installed\nPriority: optional\nSection: database\nArchitecture: amd64Is there any way I can correct this error and also how do i correctly install and utilize MongoDB on my OS after I have fixed the problem. All articles online lead me to the same errors that forced me to uninstall MongoDB several times after multiple installations. Thanks.", "username": "I-Ben" }, { "code": "", "text": "Hi @I-Ben and welcome to the forums,All articles online lead me to the same errors that forced me to uninstall MongoDB several times after multiple installations.Would you be able to share which installation instructions did you follow ?how do i correctly install and utilize MongoDB on my OSPlease see Install MongoDB on Ubuntu to install the current stable version of MongoDB 4.4 Community Edition.If you’re still encountering an issue please provide the installation steps that you tried and any error messages while performing the instructions.Regards,\nWan.", "username": "wan" }, { "code": "sudo apt-get install -y mongodb-org\nReading package lists... Done\nBuilding dependency tree... Done\nReading state information... Done\nSome packages could not be installed. This may mean that you have\nrequested an impossible situation or if you are using the unstable\ndistribution that some required packages have not yet been created\nor been moved out of Incoming.\nThe following information may help to resolve the situation:\n\nThe following packages have unmet dependencies:\n mongodb-org-mongos : Depends: libssl1.1 (>= 1.1.1) but it is not installable\n mongodb-org-server : Depends: libssl1.1 (>= 1.1.1) but it is not installable\n mongodb-org-shell : Depends: libssl1.1 (>= 1.1.1) but it is not installable\nE: Unable to correct problems, you have held broken packages.\n\n", "text": "Hello everyone,\nI’m facing some similar issues while installing MongoDB in Ubuntu 20.04, the path I’m following to install it is according to the official docs.After running the above command I’m facing the following errors:I found an probable solution for this, but don’t know if it is Safe or not.\naskubuntuThanks!", "username": "Raven_Soni" }, { "code": "", "text": "Hi @Raven_Soni.Looks like you have a similar error messages mentioned on: Installing mongodb over Ubuntu 22.04 - #6 by Jack_WoehrRegards,\nWan.", "username": "wan" } ]
MongoDB Installation Difficulties - Ubuntu 20.04
2021-06-28T04:08:29.586Z
MongoDB Installation Difficulties - Ubuntu 20.04
7,418
null
[ "database-tools" ]
[ { "code": "", "text": "Hello! I want to load a CSV file to MongoDB Atlas.How to do this?", "username": "Tudor_Ursu" }, { "code": "", "text": "I would use the command line utility mongimport (connection example. available in the Atlas UI for each under the Command Line Tools tab under the cluster). More detail here https://docs.mongodb.com/manual/reference/program/mongoimport/", "username": "Andrew_Davidson" }, { "code": "mongoimport --uri mongodb+srv://tudor_15:<PASSWORD>@cluster0.acapz.mongodb.net/<DATABASE> --collection <COLLECTION> --type <FILETYPE> --file <FILENAME> \n", "text": "I use this import string from Command Line Tools bar:Here I put my password, database name, collection name, CSV file type and my file name.I run the string in command line, and it throws me the next error:The syntax of the command is incorrect.How can I get rid of this error and import the CSV?", "username": "Tudor_Ursu" }, { "code": "", "text": "Can you paste in your exact command line you are trying to use (except the password)?", "username": "Robert_Walters" }, { "code": "mongoimport --uri mongodb+srv://tudor_15:<PASSWORD>@cluster0.acapz.mongodb.net/<Database1> --collection <Invoice_Train> --type <CSV> --file <invoice_train.csv>", "text": "Sure, this is the line I’m trying to use:", "username": "Tudor_Ursu" }, { "code": "mongoimport --uri mongodb+srv://tudor_15:[email protected]/Database1 --collection Invoice_Train --type CSV --file invoice_train.csv\n", "text": "remove the < > bracketssomething like:", "username": "Robert_Walters" }, { "code": "", "text": "I ran this line of code, without the < > brackets, but I got an error again:The system cannot find the file specified.This is strange, because in command line I am in the file’s folder. I also tried to use the absolute path, but nothing changed.How could I get rid of this error?", "username": "Tudor_Ursu" }, { "code": "", "text": "What is your OS?\nIf it is Windows check how the file was saved\nIt may be having different extension", "username": "Ramachandra_Tummala" }, { "code": "", "text": "It’s Windows 10. 
The file is saved “invoice_train.csv”.These are the files I use:Client’s billing history", "username": "Tudor_Ursu" }, { "code": "", "text": "Please show the exact file you are trying to load from your system by dir command and show few lines from your fileThe sample .csv files from your link appear to be in excel formatcsv is comma separatedI tried to load a sample .csv file with your command on my Windows machine.It worked fine", "username": "Ramachandra_Tummala" }, { "code": "05/11/2021 01:03 PM <DIR> .\n05/11/2021 01:03 PM <DIR> ..\n08/24/2020 12:30 PM 2,253,632 client_test.csv\n08/24/2020 12:30 PM 5,986,133 client_train.csv\n05/30/2021 12:35 PM 146,577,388 invoice_test.csv\n05/24/2021 08:29 PM 344,346,841 invoice_train.csv\n05/11/2021 01:04 PM 1,105,521,210 invoice_train.json\n08/24/2020 12:30 PM 2,153,008 SampleSubmission (2).csv\n 6 File(s) 1,606,838,212 bytes\n 2 Dir(s) 224,879,640,576 bytes free\nclient_id,invoice_date,tarif_type,counter_number,counter_status,counter_code,reading_remarque,counter_coefficient,consommation_level_1,consommation_level_2,consommation_level_3,consommation_level_4,old_index,new_index,months_number,counter_type\ntrain_Client_0,2014-03-24,11,1335667,0,203,8,1,82,0,0,0,14302,14384,4,ELEC\ntrain_Client_0,2013-03-29,11,1335667,0,203,6,1,1200,184,0,0,12294,13678,4,ELEC\ntrain_Client_0,2015-03-23,11,1335667,0,203,8,1,123,0,0,0,14624,14747,4,ELEC\ntrain_Client_0,2015-07-13,11,1335667,0,207,8,1,102,0,0,0,14747,14849,4,ELEC\ntrain_Client_0,2016-11-17,11,1335667,0,207,9,1,572,0,0,0,15066,15638,12,ELEC\ntrain_Client_0,2017-07-17,11,1335667,0,207,9,1,314,0,0,0,15638,15952,8,ELEC\ntrain_Client_0,2018-12-07,11,1335667,0,207,9,1,541,0,0,0,15952,16493,12,ELEC\ntrain_Client_0,2019-03-19,11,1335667,0,207,9,1,585,0,0,0,16493,17078,8,ELEC\ntrain_Client_0,2011-07-22,11,1335667,0,203,9,1,1200,186,0,0,7770,9156,4,ELEC\ntrain_Client_0,2011-11-22,11,1335667,0,203,6,1,1082,0,0,0,9156,10238,4,ELEC\n", "text": "Here is the output of the “dir” command, run on the folder with the files I’m working with:A few lines from “invoice.csv” file:The sample .csv files from your link appear to be in excel formatYes, my computer also detected this file as suitable for excel format, but since it’s too big for the Excel, I open it with Notepad. 
Otherwise, the Excel will keep 1 million rows and delete the rest (the file has 4 million rows).", "username": "Tudor_Ursu" }, { "code": "", "text": "Are you sure you are in the correct directory?I downloaded client_test dump and successfully loaded into my clusterThe original file is a zipped file.When you unzip/extract the file it creates a directory client_test.csv\nUnder this you will see the file client_test.csv", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Are you sure you are in the correct directory?Yes, absolutely.Can you show me the command line which you use?", "username": "Tudor_Ursu" }, { "code": "", "text": "You can try yourselves.Create a simple csv file using those 10-15 records from your file and try mongoimport again.It will workIssue could be your path or file itselfThis is what i usedmongoimport --uri mongodb+srv://m001-student:[email protected]/client --collection clienttest --type CSV --file client_test.csv --headerline…\n2021-06-05T19:32:36.025+0530 [########################] client.clienttest 2.15MB/2.15MB (100.0%)\n2021-06-05T19:32:36.026+0530 imported 58069 documents", "username": "Ramachandra_Tummala" }, { "code": "2021-06-06T21:51:05.980+0300 error connecting to host: could not connect to server: server selection error: server selection timeout, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: cluster0-shard-00-02.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-02.acapz.mongodb.net:27017[-190]) incomplete read of message header: EOF }, { Addr: cluster0-shard-00-00.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-00.acapz.mongodb.net:27017[-189]) incomplete read of message header: EOF }, { Addr: cluster0-shard-00-01.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-01.acapz.mongodb.net:27017[-188]) incomplete read of message header: EOF }, ] }mongoimport --uri mongodb+srv://tudor_15:[email protected]/Database1 --collection Invoice_Train --type CSV --file invoice_train.csv --headerline", "text": "mongoimport --uri mongodb+srv://m001-student:[email protected]/client --collection clienttest --type CSV --file client_test.csv --headerlineI ran this command with my identification data and got another error:2021-06-06T21:51:05.980+0300 error connecting to host: could not connect to server: server selection error: server selection timeout, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: cluster0-shard-00-02.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-02.acapz.mongodb.net:27017[-190]) incomplete read of message header: EOF }, { Addr: cluster0-shard-00-00.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-00.acapz.mongodb.net:27017[-189]) incomplete read of message header: EOF }, { Addr: cluster0-shard-00-01.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-01.acapz.mongodb.net:27017[-188]) incomplete read of message header: EOF }, ] }More precisely, this is the command line I ran:", "username": "Tudor_Ursu" }, { "code": "", "text": ":Were you able to connect to your cluster before?\nWhat is the status of your cluster in Atlas.Any 
error/alerts?\nHave you whitelisted your IP?\nCould be temporary N/W issue or SSL issue\nShare your id/pwd for us to check or create another user and share the creds", "username": "Ramachandra_Tummala" }, { "code": "2021-06-06T21:51:05.980+0300 error connecting to host: could not connect to server: server selection error: server selection timeout, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: cluster0-shard-00-02.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-02.acapz.mongodb.net:27017[-190]) incomplete read of message header: EOF }, { Addr: cluster0-shard-00-00.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-00.acapz.mongodb.net:27017[-189]) incomplete read of message header: EOF }, { Addr: cluster0-shard-00-01.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-01.acapz.mongodb.net:27017[-188]) incomplete read of message header: EOF }, ] }2021-06-07T12:16:27.432+0300 error connecting to host: could not connect to server: connection() : auth error: sasl conversation error: unable to authenticate using mechanism \"SCRAM-SHA-1\": (AtlasError) bad auth : Authentication failed.", "text": "Were you able to connect to your cluster before?Actually, I haven’t tried yet. I intended to upload the date to MongoDB Atlas, and then to connect from Google Colaboratory.mongoimport --uri mongodb+srv://m001-student:[email protected]/client --collection clienttest --type CSV --file client_test.csv --headerlineI ran this command with my identification data and got another error:2021-06-06T21:51:05.980+0300 error connecting to host: could not connect to server: server selection error: server selection timeout, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: cluster0-shard-00-02.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-02.acapz.mongodb.net:27017[-190]) incomplete read of message header: EOF }, { Addr: cluster0-shard-00-00.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-00.acapz.mongodb.net:27017[-189]) incomplete read of message header: EOF }, { Addr: cluster0-shard-00-01.acapz.mongodb.net:27017, Type: Unknown, State: Connected, Average RTT: 0, Last error: connection() : connection(cluster0-shard-00-01.acapz.mongodb.net:27017[-188]) incomplete read of message header: EOF }, ] }I realized recently that I ran this command when I was in another wifi network, and when my IP adress was different. 
The fact that I didn’t change the IP adress in my cluster could have generated this error.But, when I put my current IP adress and ran the command for uploading the data and the command for checking the cluster status, I got the error:2021-06-07T12:16:27.432+0300 error connecting to host: could not connect to server: connection() : auth error: sasl conversation error: unable to authenticate using mechanism \"SCRAM-SHA-1\": (AtlasError) bad auth : Authentication failed.And when I ran these commands with 0.0.0.0/0 IP adress, I got the same error.My id: tudor_15\nMy password: Centurion15", "username": "Tudor_Ursu" }, { "code": "", "text": "Bad auth means wrong combination of userid & pwd\nThe userid/pwd we are passing in the connect string are database user\nDid you create the user tudor_15 in the database?You should first check your connectivity to cluster then do the load\nHow did you get your connect string?\nFrom Atlas or framed it based on another sample string?Please login to your Atlas and check status of your cluster\nIs it up and running?Is the clusterid given correct?Please check again your steps", "username": "Ramachandra_Tummala" }, { "code": "", "text": "In the ‘Cluster’ tab, there is ‘Command Line Tools’ subtab. There are the prototypes for the main commands. You have to take the command, paste in command prompt, and replace the PASSWORD word with your own password, and in case you have to import data, you replace the other parameters in the string with yours.The user id ‘tudor_15’ was already put in the command line strings in the ‘Command Line Tools’.How to check the status of the cluster if the command line gives me an error?", "username": "Tudor_Ursu" }, { "code": "", "text": "I understood yoru problem.From shell you cannot check\nAsking you to check cluster status from Atlas\nWhen you login to your Atlas account you can see your cluster status.There should be an alerts tab/button\nAfter you setup your Cluster you would have created a user\nWhat is your cluster type Free Tier or paid and what steps you followed\nFor mongo University students there are steps on how to setup Sandbox cluster and load dataI repeat unless you have a database user you cannot connect to your cluster/db from shell nor you can perform other tasks like mongoimport etc", "username": "Ramachandra_Tummala" } ]
How to load a CSV file to MongoDB Atlas?
2021-05-11T19:21:03.470Z
How to load a CSV file to MongoDB Atlas?
23,430
null
[ "swift" ]
[ { "code": "\"user=\\(app.currentUser!.id)\"\"publicRealm=\\(someUID)\".environment(\\.realmConfiguration, app.currentUser!.configuration(partitionValue: \"publicRealm=\\(someUID)\"))", "text": "I have an app entirely in SwiftUI with Realm and Realm Sync.There are 2 ways that users interact with Realm partitions, one just for their user\"user=\\(app.currentUser!.id)\"and one for the public realms that they are accessing\"publicRealm=\\(someUID)\"As my app has become more complex, I have to continually switch back and forth, so I have these environment variables being passed in all over the place:.environment(\\.realmConfiguration, app.currentUser!.configuration(partitionValue: \"publicRealm=\\(someUID)\"))In one LazyVStack there may be a few hundred of these and it’s causing the view to take up to 20 seconds to load, even when nothing has changed and from what I assume, all of the data should be loading from the local realm at that point (for instance, viewing the list and then switching to another view and switching back).Is there a better best practice that doesn’t cause the app to come to a crawl when we get beyond the simple examples?Hoping this isn’t another issue of “Realm is great, just won’t scale well” and just a poor implementation on my part.", "username": "Kurt_Libby1" }, { "code": "", "text": "Hi @Kurt_Libby1 looking at your use case, each realm (user id and public realm) should be downloaded the first time you are syncing and then syncing updates should be effortless (unless there is a big amount of data syncing). As you correctly assume data is loading from the local realm and should be quick to load the data to the view.\nCan you share the code of the view you are trying to show?, maybe I can reproduce your use case and find a solution to your issue.", "username": "Diana_Maria_Perez_Af" }, { "code": "", "text": "Hey @Diana_Maria_Perez_Af.It would be way too complex.I was trying to figure out what I added that made it go so slow, but I couldn’t find anything.SO… what I did was roll back from Realm Swift 10.22.0 back to 10.21.0 and that fixed it. I did some experimenting and 10.21.1 is the first version that introduces the incredibly slow load time for these views.Not sure what I’m doing was either deprecated/introduced/broken in the updates to Realm, but just going back to 10.19.0 made all the view loading instantaneous again.Hope this helps anyone else that runs into this issue.", "username": "Kurt_Libby1" }, { "code": "dp/fix_swiftui_performance_issues", "text": "Hey @Kurt_Libby1 we’ve been checking SwiftUI performance and created a branch with changes that may help to get better UI experience using Realm+SwiftUI.\nIf you don’t mind, would you test this branch (dp/fix_swiftui_performance_issues) from our repository with your project and check if you still have slow loading views?", "username": "Diana_Maria_Perez_Af" }, { "code": "", "text": "Hey Diana,I checked and it’s still loading super slow. ", "username": "Kurt_Libby1" }, { "code": "", "text": "Thanks for the help, I’ll let you know if we make any progress on this.", "username": "Diana_Maria_Perez_Af" }, { "code": "", "text": "Are there any updates already on this? 
In our app we use SwiftUI and have noticed very slow loading times when accessing a view and passing .environment(…) and receiving with @ObservedResults(Example.self) var examplesThe loading time is proportional to how many elements there are in examples, so for just a few the loading time is almost instant, but at about 20 it’s already a few full seconds", "username": "David_Kessler" }, { "code": "", "text": "I’m continuing to use 10.21.0 and lower.The major issue is not being able to try out Flexible Sync, but I’m waiting until they fix the loading time before using any newer versions.I’m not great with github issues so I didn’t create one. I’ve searched in the issues for anything mentioning this and haven’t seen one. Maybe adding an issue will help get more eyes on it?", "username": "Kurt_Libby1" }, { "code": "", "text": "Rolling back to 10.21.0 completely fixed the issue for us as well. Thanks!! Just created an issue here", "username": "David_Kessler" }, { "code": "", "text": "@David_Kessler @Kurt_Libby1 we’ve been able to identify the issue and we are working and trying to find a fix for it.", "username": "Diana_Maria_Perez_Af" }, { "code": "", "text": "I have the same problem and can confirm that rolling back to 10.21.0 fixes it. In my case, slow performance occurs when presenting and closing a SwiftUI sheet view (for adding or editing a realm object).I just updated to 10.23.0 but the issue is still there.", "username": "horatiu_anghel" }, { "code": "", "text": "Hi @horatiu_anghel, I have a branch with a possible fix for this GitHub - realm/realm-swift at dp/fix_swiftui_performance_issues, if you can tests if this branch fix the issue, it will help us to make sure this works for your particular use case.", "username": "Diana_Maria_Perez_Af" }, { "code": "", "text": "@Kurt_Libby1 I know this is the second time I ask, but can you test this branch GitHub - realm/realm-swift at dp/fix_swiftui_performance_issues and let me know if you still have performance issues?", "username": "Diana_Maria_Perez_Af" }, { "code": "", "text": "Hi Diana,I tested the provided branch but, in my case, the issue is still present.Horatiu", "username": "horatiu_anghel" }, { "code": "", "text": "I just raised and issue on Github about this as we have also run into major performance issues when we upgraded from 10.29.0 to 10.24.2.Some fairly complex reports we generate usually take between 110 seconds and 300 seconds but with 10.24.2 they are taking hours to run, many hours. But they do run.One thing to note here is that when using 10.24.2 in local mode with the exact same realm we don’t see the performance problem. This only occurs with Synced realms.Happy to help debug this if necessary.", "username": "Duncan_Groenewald" }, { "code": "", "text": "I tried using 10.21.0 but I was unable to compile with Xcode 13.3 using SPM.Can you confirm you are able to build with Xcode 13.3 and the 10.21.0 SPM package ?Thanks", "username": "Duncan_Groenewald" }, { "code": "", "text": "Hi @Duncan_Groenewald I saw your post on GitHub and thanks for sharing the info which will really help us to diagnose this, I opened a new issue with a compilation of all SwiftUI performance related issues (SwiftUI Performance issues · Issue #7740 · realm/realm-swift · GitHub), which we are working on. 
I’ll post any update to this there.", "username": "Diana_Maria_Perez_Af" }, { "code": "", "text": "dp/fix_swiftui_performance_issuesI tried this branch and have the same performance issue", "username": "Duncan_Groenewald" }, { "code": "", "text": "@Diana_Maria_Perez_AfIt looks to me like this performance issue might’ve been introduced in 10.21.1. There are several issues where that seems to be the case. See Slow Performance in Writing after updated to 10.24.2 · Issue #7734 · realm/realm-swift · GitHubAnd as mentioned in this conversation:I have the same problem and can confirm that rolling back to 10.21.0 fixes it.", "username": "Adam_Gerthel" }, { "code": "", "text": "Can you confirm there is still working being done to address this issue ? I got a note from @Thomas_Goyne re 10.27.0 being a partial fix but it is not really addressing the query performance hit which appears to be related to the fact that running multiple queries on separate threads causes severe blocking of the separate threads. This is not happening on the older version (10.21.0 or earlier) and nor does it happen with a non-synced realm.\nI am attaching the CPU performance graphs showing the stark difference between them.\n\nScreen Shot 2022-06-06 at 4.20.06 pm1328×1228 156 KB\n\nScreen Shot 2022-06-06 at 4.15.54 pm1328×1228 132 KB\n", "username": "Duncan_Groenewald" } ]
SwiftUI Slow Loading Views
2022-02-14T13:22:16.716Z
SwiftUI Slow Loading Views
6,539
null
[ "node-js" ]
[ { "code": "\nconst options = {ordered: false}\n\ncsvtojson()\n.fromFile(\"sample.csv\")\n.then(csvData => {\n mongodb.connect(\n url,\n { useNewUrlParser: true, useUnifiedTopology: true },\n (err, client) => {\n if (err) throw err;\n client\n .db(\"databaseName\")\n .collection(\"collectionName\")\n .insertMany(csvData, options);\n }\n );\n});\n", "text": "Hello! I currently have an application running using Node.js and Express that takes data from ParseHub and feeds it into a MongoDB database at certain intervals. I also have a unique index set up with ordered: false so that no duplicate data from different runs will feed into the database. When the application runs I get error code 11000, which is what I want it to do as I want it to catch and not add duplicate entries. However, the server shuts down because of the errors. How can I either keep the server running or restart it after encountering errors? My code is below. I have tried forever and pm2 and didn’t have any luck, the server didn’t restart.", "username": "HD_Roofers" }, { "code": "insertMany()", "text": "Hi @HD_RoofersWhat you need is a try catch statement to catch the error that insertMany() throw, and handle that error accordingly. Please see the insertMany() example, where it shows the use of try catch in the Javascript example.In addition, you might find the page Error handling in Node.js useful.Best regards\nKevin", "username": "kevinadi" } ]
MongoDB Error Code 11000, how to prevent shutdown
2022-06-01T14:52:02.568Z
MongoDB Error Code 11000, how to prevent shutdown
2,446
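A rough sketch of the try/catch approach from the answer above, adapted to the code in the question. The connection string, database and collection names are placeholders, and the exact shape of the bulk-write error object (code, writeErrors) can vary between driver versions, so treat the duplicate check as an assumption to verify.

const { MongoClient } = require("mongodb");
const csvtojson = require("csvtojson");

async function importCsv() {
  const client = new MongoClient("mongodb://localhost:27017");
  await client.connect();
  const coll = client.db("databaseName").collection("collectionName");
  const csvData = await csvtojson().fromFile("sample.csv");
  try {
    const result = await coll.insertMany(csvData, { ordered: false });
    console.log(`Inserted ${result.insertedCount} new documents`);
  } catch (err) {
    if (err.code === 11000 || Array.isArray(err.writeErrors)) {
      // Duplicates were skipped; the rest of the batch was still inserted.
      console.log("Duplicate documents skipped:", err.writeErrors ? err.writeErrors.length : "unknown");
    } else {
      throw err; // anything else is a real failure
    }
  } finally {
    await client.close();
  }
}

importCsv().catch(console.error);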
null
[]
[ { "code": "$set: { \n 'doctors.$[a].patients.$[b].medication.$[c].scriptNotes.1': scriptNotesDetail,\n}\nconst indexValue = 3indexValue", "text": "Hi there,I am using the $set update operator with $[] positional operator to target a nested array item. In the following -… can the index value ‘1’ (for field scriptNotes.1) be a dynamic variable? For example, if const indexValue = 3, can I somehow reference the variable indexValue in place of the hardcoded ‘1’? I would like to use an index value not a field value.Hope that makes sense. Many thanks for your help.", "username": "Caruso_Siza" }, { "code": "", "text": "This is standard javascript. Search for javascript dynamic key name in google.", "username": "steevej" }, { "code": "$set: { \n [`doctors.$[a].patients.$[b].medication.$[c].scriptNotes.${dynamicIndexVariable}`]: scriptNotesDetail,\n}\n", "text": "Perfect! Didn’t even think to acknowledge I was simply working with a JS object. Below syntax worked.", "username": "Caruso_Siza" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Positional operators and dynamic index values
2022-06-05T09:37:03.125Z
Positional operators and dynamic index values
1,990
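For completeness, the computed key from this thread usually appears together with the arrayFilters that the $[a]/$[b]/$[c] placeholders refer to. The sketch below is illustrative only: the Practice model and the *_id filter values are invented, since the thread does not show them.

const dynamicIndexVariable = 3;

async function updateScriptNote() {
  await Practice.updateOne(
    { _id: practiceId },
    {
      $set: {
        // Computed property name, as in the accepted answer above.
        [`doctors.$[a].patients.$[b].medication.$[c].scriptNotes.${dynamicIndexVariable}`]:
          scriptNotesDetail,
      },
    },
    {
      // Each filter binds one of the positional placeholders to a concrete array element.
      arrayFilters: [
        { "a._id": doctorId },
        { "b._id": patientId },
        { "c._id": medicationId },
      ],
    }
  );
}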
https://www.mongodb.com/…27ea91bf456.jpeg
[ "node-js", "realm-web" ]
[ { "code": "", "text": "I’m using realm-web on my react app to build a simple task reminder app. But, when I start/build the app it is giving me a warning which says, can’t resolve ‘crypto’ in node_modules/bson/dist react. I’m attaching a screen snip for more info. The app is running fine but I’m having a problem when I deploy the app to Netlify. I have to configure the deploy settings to ignore warnings for now. I just want to know if it is possible not to get this warning in the first place and how to do it? Thanks in advance.\n\ncryptto-error967×361 76 KB\n", "username": "Subhash_Malireddy" }, { "code": "realm-webfallback: { \"crypto\": require.resolve(\"crypto-browserify\") },./node_modules/react-scripts/config/webpack.config.jsreact-scripts v5.0.0...\nmodule.exports = function (webpackEnv) {\n ...\n return {\n ...\n resolve: {\n fallback: { \"crypto\": require.resolve(\"crypto-browserify\") }\n ...\n }\n }\n }\n}\n...\nnpm install crypto-browserify\nnpm install stream\n./fix_crypto_dependency.sh#!/bin/bash\nwebpack_config=\"./node_modules/react-scripts/config/webpack.config.js\"\nwebpack_config_backup=\"./node_modules/react-scripts/config/webpack.config.js.bckp\"\n\nline_number=304\nline_to_add='fallback: { \"crypto\": require.resolve(\"crypto-browserify\") }, // Patch realm-web crypto dependency'\n\necho \"Fixing realm-web crypto dependency...\"\n\n\nif grep -q \"$line_to_add\" $webpack_config\nthen\n echo \"Crypto fallback already added into the file $webpack_config\"\nelse\n echo \"Adding Crypto fallback into the file $webpack_config\"\n cp $webpack_config $webpack_config_backup\n # The '\\' character are for adding the indentation in the new line:\n sed -i \"$line_number i \\ \\ \\ \\ \\ \\ $line_to_add\" $webpack_config\nfi\necho \"Done! realm-web crypto dependency fixed. You can now run 'npm run build' without warnings :)\"\npackage.json...\n \"scripts\": {\n \"start\": \"react-scripts start\",\n \"prebuild\": \"chmod +x ./fix_crypto_dependency.sh; ./fix_crypto_dependency.sh\",\n \"build\": \"react-scripts build\",\n \"test\": \"react-scripts test\",\n \"eject\": \"react-scripts eject\"\n },\n...\n", "text": "Hi @Subhash_Malireddy, I had the same issue and Iwas able to fix it.I was using realm-web 1.6, I updated to 1.7 and the issue still appearing.I basically did what the error message suggest, it doesn’t specify the file that you have to modify but looking around I’ve figured out that you need to add this line:\nfallback: { \"crypto\": require.resolve(\"crypto-browserify\") },Into the file ./node_modules/react-scripts/config/webpack.config.js (line 304 to be precise in the react-scripts v5.0.0) so it looks something like this:You also need to install the required modules:After this it built without warnings in my computer. To make it work in Netlify I made a little bash script that edit the file and run it as a prebuild script.I hope it help you solve the issue.Cheers!", "username": "Emiliano_Tortorella" }, { "code": "", "text": "Thanks for this fix Emiliano_Tortorella", "username": "Chamu_Mutezva" }, { "code": "", "text": "Thanks very much! I’ve been procrastinating to fix this problem for a few weeks now and it’s now fixed thanks to your solution!", "username": "Maxime_Manseau" }, { "code": "resolve: {\n fallback: { \"crypto\": require.resolve(\"crypto-browserify\") }\n", "text": "please have not been able to resolve this issue. 
please I need a guide, the line in mine is different from the one you referenced", "username": "ayomide_wilfred" }, { "code": "", "text": "Hi @ayomide_wilfred, that’s exactly the issue, the line I’m referencing is what you need to put to make it work, is not existing by default.As you can see from previous comments it worked for other people so the options are:If you follow the steps I’ve posted it should fix your problem. If it doesn’t, you need to provide more information to get help, like what version are you using, what error you get, etc.Cheers!", "username": "Emiliano_Tortorella" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Can't resolve 'crypto' in node_modules/bson/dist react
2022-01-22T08:09:54.419Z
Can&rsquo;t resolve &lsquo;crypto&rsquo; in node_modules/bson/dist react
23,094
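Webpack 5 (used by react-scripts 5) no longer ships Node core-module polyfills, which is why the warning above appears; the fix in the thread patches webpack.config.js inside node_modules. A minimal alternative sketch, assuming @craco/craco, crypto-browserify and stream-browserify are installed and the package.json scripts call craco instead of react-scripts (this file is an illustration, not part of the original thread):

```js
// craco.config.js -- a sketch that adds the same fallback without editing node_modules.
module.exports = {
  webpack: {
    configure: (webpackConfig) => {
      webpackConfig.resolve.fallback = {
        ...(webpackConfig.resolve.fallback || {}),
        crypto: require.resolve("crypto-browserify"),
        stream: require.resolve("stream-browserify"),
      };
      return webpackConfig;
    },
  },
};
```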
null
[]
[ { "code": "{\"t\":{\"$date\":\"2022-06-05T02:10:24.563+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":20698, \"ctx\":\"-\",\"msg\":\"***** SERVER RESTARTED *****\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.563+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4915701, \"ctx\":\"-\",\"msg\":\"Initialized wire specification\",\"attr\":{\"spec\":{\"incomingExternalClient\":{\"minWireVersion\":0,\"maxWireVersion\":13},\"incomingInternalClient\":{\"minWireVersion\":0,\"maxWireVersion\":13},\"outgoing\":{\"minWireVersion\":0,\"maxWireVersion\":13},\"isInternalClient\":true}}}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.565+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23285, \"ctx\":\"-\",\"msg\":\"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.567+00:00\"},\"s\":\"W\", \"c\":\"ASIO\", \"id\":22601, \"ctx\":\"main\",\"msg\":\"No TransportLayer configured during NetworkInterface startup\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.567+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4648601, \"ctx\":\"main\",\"msg\":\"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize.\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.580+00:00\"},\"s\":\"W\", \"c\":\"ASIO\", \"id\":22601, \"ctx\":\"main\",\"msg\":\"No TransportLayer configured during NetworkInterface startup\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.580+00:00\"},\"s\":\"I\", \"c\":\"REPL\", \"id\":5123008, \"ctx\":\"main\",\"msg\":\"Successfully registered PrimaryOnlyService\",\"attr\":{\"service\":\"TenantMigrationDonorService\",\"ns\":\"config.tenantMigrationDonors\"}}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.580+00:00\"},\"s\":\"I\", \"c\":\"REPL\", \"id\":5123008, \"ctx\":\"main\",\"msg\":\"Successfully registered PrimaryOnlyService\",\"attr\":{\"service\":\"TenantMigrationRecipientService\",\"ns\":\"config.tenantMigrationRecipients\"}}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.580+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":5945603, \"ctx\":\"main\",\"msg\":\"Multi threading initialized\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.581+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":4615611, \"ctx\":\"initandlisten\",\"msg\":\"MongoDB starting\",\"attr\":{\"pid\":2175,\"port\":27017,\"dbPath\":\"/var/lib/mongodb\",\"architecture\":\"64-bit\",\"host\":\"localhost\"}}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.581+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23403, \"ctx\":\"initandlisten\",\"msg\":\"Build Info\",\"attr\":{\"buildInfo\":{\"version\":\"5.0.9\",\"gitVersion\":\"6f7dae919422dcd7f4892c10ff20cdc721ad00e6\",\"openSSLVersion\":\"OpenSSL 1.1.1l 24 Aug 2021\",\"modules\":[],\"allocator\":\"tcmalloc\",\"environment\":{\"distmod\":\"ubuntu2004\",\"distarch\":\"x86_64\",\"target_arch\":\"x86_64\"}}}}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.581+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":51765, \"ctx\":\"initandlisten\",\"msg\":\"Operating System\",\"attr\":{\"os\":{\"name\":\"Ubuntu\",\"version\":\"22.04\"}}}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.581+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":21951, \"ctx\":\"initandlisten\",\"msg\":\"Options set by command 
line\",\"attr\":{\"options\":{\"config\":\"/etc/mongod.conf\",\"net\":{\"bindIp\":\"72.140.8.31\",\"port\":27017},\"processManagement\":{\"timeZoneInfo\":\"/usr/share/zoneinfo\"},\"storage\":{\"dbPath\":\"/var/lib/mongodb\",\"journal\":{\"enabled\":true}},\"systemLog\":{\"destination\":\"file\",\"logAppend\":true,\"path\":\"/var/log/mongodb/mongod.log\"}}}}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.582+00:00\"},\"s\":\"E\", \"c\":\"CONTROL\", \"id\":20568, \"ctx\":\"initandlisten\",\"msg\":\"Error setting up listener\",\"attr\":{\"error\":{\"code\":9001,\"codeName\":\"SocketException\",\"errmsg\":\"Cannot assign requested address\"}}}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.582+00:00\"},\"s\":\"I\", \"c\":\"REPL\", \"id\":4784900, \"ctx\":\"initandlisten\",\"msg\":\"Stepping down the ReplicationCoordinator for shutdown\",\"attr\":{\"waitTimeMillis\":15000}}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":4784901, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the MirrorMaestro\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"SHARDING\", \"id\":4784902, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the WaitForMajorityService\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4784905, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the global connection pool\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4784918, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the ReplicaSetMonitor\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"SHARDING\", \"id\":4784921, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the MigrationUtilExecutor\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"ASIO\", \"id\":22582, \"ctx\":\"MigrationUtil-TaskExecutor\",\"msg\":\"Killing all outstanding egress activity.\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":4784923, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the ServiceEntryPoint\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":4784925, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down free monitoring\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":4784927, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the HealthLog\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":4784928, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the TTL monitor\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":4784929, \"ctx\":\"initandlisten\",\"msg\":\"Acquiring the global lock for shutdown\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"-\", \"id\":4784931, \"ctx\":\"initandlisten\",\"msg\":\"Dropping the scope cache for shutdown\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"FTDC\", \"id\":4784926, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down full-time data capture\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":20565, \"ctx\":\"initandlisten\",\"msg\":\"Now exiting\"}\n{\"t\":{\"$date\":\"2022-06-05T02:10:24.583+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23138, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down\",\"attr\":{\"exitCode\":48}}\n", "text": "Logs:", "username": "aaron_huang" }, 
{ "code": "\"s\":\"E\",", "text": "The line with\"s\":\"E\",is the error. It tells you thar the address you use is not valid.", "username": "steevej" }, { "code": "", "text": "It is valid its my public ip adress", "username": "aaron_huang" }, { "code": "# mongod.conf\n\n# for documentation of all options, see:\n# http://docs.mongodb.org/manual/reference/configuration-options/\n\n# Where and how to store data.\nstorage:\n dbPath: /var/lib/mongodb\n journal:\n enabled: true\n# engine:\n# wiredTiger:\n\n# where to write logging data.\nsystemLog:\n destination: file\n logAppend: true\n path: /var/log/mongodb/mongod.log\n\n# network interfaces\nnet:\n port: 27017\n bindIp: xxx.xxx.xxx.xxx\n\n\n# how the process runs\nprocessManagement:\n timeZoneInfo: /usr/share/zoneinfo\n\n#security:\n\n#operationProfiling:\n\n#replication:\n\n#sharding:\n\n", "text": "", "username": "aaron_huang" }, { "code": "", "text": "IP is censored I don’t want to share it", "username": "aaron_huang" }, { "code": "", "text": "It is valid its my public ip adressI did not write with enough details. I trust the error message produce my mongod. If mongod cannot listen to the IP address you specify then the address is not valid for the purpose of having mongod to listen to. It might be a valid address, it might be a public address.What I suspect, is that you are behind a NAT router or VPN and you try to setup a server that you can access elsewhere, hence the use of the public address. The public address of your NATr or VPN, which you obtain with something like whatismyip, is not a valid IP address for you computer to listen to.If that is the case, then what you want to do instead is have mongod to listen on a valid address of your private network and then have your NATr or VPN do a public address port redirection to your private address used by your computer.", "username": "steevej" }, { "code": "", "text": "I have Nginx thank you!", "username": "aaron_huang" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Code=exited, status=48 after changing ip bind to a public ip
2022-06-05T02:22:25.090Z
Code=exited, status=48 after changing ip bind to a public ip
2,622
null
[ "sharding", "transactions" ]
[ { "code": "[root@cs-c2-db-01 yum.repos.d]# yum install -y mongodb-org\nUpdating Subscription Management repositories.\nLast metadata expiration check: 0:00:40 ago on Sat 04 Jun 2022 12:44:18 PM BST.\nDependencies resolved.\n===============================================================================================================================================================================================================================================\n Package Architecture Version Repository Size\n===============================================================================================================================================================================================================================================\nInstalling:\n mongodb-org x86_64 4.4.14-1.el8 mongodb-org-4 11 k\nInstalling dependencies:\n compat-openssl11 x86_64 1:1.1.1k-4.el9_0 rhel-9-for-x86_64-appstream-rpms 1.5 M\n cyrus-sasl x86_64 2.1.27-20.el9 rhel-9-for-x86_64-baseos-rpms 77 k\n cyrus-sasl-gssapi x86_64 2.1.27-20.el9 rhel-9-for-x86_64-baseos-rpms 29 k\n cyrus-sasl-plain x86_64 2.1.27-20.el9 rhel-9-for-x86_64-baseos-rpms 27 k\n mongodb-database-tools x86_64 100.5.2-1 mongodb-org-4 47 M\n mongodb-org-database-tools-extra x86_64 4.4.14-1.el8 mongodb-org-4 23 k\n mongodb-org-mongos x86_64 4.4.14-1.el8 mongodb-org-4 17 M\n mongodb-org-server x86_64 4.4.14-1.el8 mongodb-org-4 22 M\n mongodb-org-shell x86_64 4.4.14-1.el8 mongodb-org-4 14 M\n mongodb-org-tools x86_64 4.4.14-1.el8 mongodb-org-4 11 k\n\nTransaction Summary\n===============================================================================================================================================================================================================================================\nInstall 11 Packages\n\nTotal download size: 102 M\nInstalled size: 322 M\nDownloading Packages:\n(1/11): mongodb-org-database-tools-extra-4.4.14-1.el8.x86_64.rpm 65 kB/s | 23 kB 00:00 \n(2/11): mongodb-org-4.4.14-1.el8.x86_64.rpm 23 kB/s | 11 kB 00:00 \n(3/11): mongodb-org-mongos-4.4.14-1.el8.x86_64.rpm 18 MB/s | 17 MB 00:00 \n(4/11): mongodb-org-server-4.4.14-1.el8.x86_64.rpm 12 MB/s | 22 MB 00:01 \n(5/11): mongodb-org-tools-4.4.14-1.el8.x86_64.rpm 41 kB/s | 11 kB 00:00 \n(6/11): mongodb-org-shell-4.4.14-1.el8.x86_64.rpm 9.7 MB/s | 14 MB 00:01 \n(7/11): mongodb-database-tools-100.5.2.x86_64.rpm 13 MB/s | 47 MB 00:03 \n(8/11): cyrus-sasl-gssapi-2.1.27-20.el9.x86_64.rpm 29 kB/s | 29 kB 00:00 \n(9/11): compat-openssl11-1.1.1k-4.el9_0.x86_64.rpm 1.1 MB/s | 1.5 MB 00:01 \n(10/11): cyrus-sasl-2.1.27-20.el9.x86_64.rpm 496 kB/s | 77 kB 00:00 \n(11/11): cyrus-sasl-plain-2.1.27-20.el9.x86_64.rpm 135 kB/s | 27 kB 00:00 \n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nTotal 26 MB/s | 102 MB 00:03 \nProblem opening package mongodb-database-tools-100.5.2.x86_64.rpm\nProblem opening package mongodb-org-4.4.14-1.el8.x86_64.rpm\nProblem opening package mongodb-org-database-tools-extra-4.4.14-1.el8.x86_64.rpm\nProblem opening package mongodb-org-mongos-4.4.14-1.el8.x86_64.rpm\nProblem opening package mongodb-org-server-4.4.14-1.el8.x86_64.rpm\nProblem opening package mongodb-org-shell-4.4.14-1.el8.x86_64.rpm\nProblem opening package mongodb-org-tools-4.4.14-1.el8.x86_64.rpm\nThe downloaded packages were saved in cache until the next successful transaction.\nYou can 
remove cached packages by executing 'yum clean packages'.\nError: GPG check FAILED\n", "text": "Hi,I’m trying to install Mongo DB 4.4 on a Red Hat Linux 9. Is this alreay supported?\nAfter having configured the yum repo as from the official documentation and run the installation command, I’m facing this error:Can anyone help me please?\nRegards,\nSamuel", "username": "Samuel_Rabini" }, { "code": "yum clean all", "text": "Try yum clean all (which will toss all your cache and a bunch of other bookkeeping) and then try again.", "username": "Jack_Woehr" }, { "code": "uname -acat /etc/os-release", "text": "Also show us the output of the following 2 commands, please:", "username": "Jack_Woehr" }, { "code": "yum clean allyum install mongodb-org[root@cs-c2-db-01 yum.repos.d]# yum install mongodb-org\nUpdating Subscription Management repositories.\nMongoDB Repository 973 B/s | 390 B 00:00 \nErrors during downloading metadata for repository 'mongodb-org-4.4':\n - Status code: 404 for https://repo.mongodb.org/yum/redhat/9/mongodb-org/4.4/x86_64/repodata/repomd.xml (IP: 13.226.171.50)\nError: Failed to download metadata for repo 'mongodb-org-4.4': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried\n[root@cs-c2-db-01 yum.repos.d]# uname -a\nLinux cs-c2-db-01 5.14.0-70.13.1.el9_0.x86_64 #1 SMP PREEMPT Thu Apr 14 12:42:38 EDT 2022 x86_64 x86_64 x86_64 GNU/Linux\n[root@cs-c2-db-01 yum.repos.d]# cat /etc/os-release\nNAME=\"Red Hat Enterprise Linux\"\nVERSION=\"9.0 (Plow)\"\nID=\"rhel\"\nID_LIKE=\"fedora\"\nVERSION_ID=\"9.0\"\nPLATFORM_ID=\"platform:el9\"\nPRETTY_NAME=\"Red Hat Enterprise Linux 9.0 (Plow)\"\nANSI_COLOR=\"0;31\"\nLOGO=\"fedora-logo-icon\"\nCPE_NAME=\"cpe:/o:redhat:enterprise_linux:9::baseos\"\nHOME_URL=\"https://www.redhat.com/\"\nDOCUMENTATION_URL=\"https://access.redhat.com/documentation/red_hat_enterprise_linux/9/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\n\nREDHAT_BUGZILLA_PRODUCT=\"Red Hat Enterprise Linux 9\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=9.0\nREDHAT_SUPPORT_PRODUCT=\"Red Hat Enterprise Linux\"\nREDHAT_SUPPORT_PRODUCT_VERSION=\"9.0\"\n", "text": "Hi Jack,thanks for the replay.\nThe command yum clean all didn’t do the trick.\nHere the new feedback from the yum install mongodb-org and the output of the 2 suggested commands:", "username": "Samuel_Rabini" }, { "code": "404 Not Found\n\n Code: NoSuchKey\n Message: The specified key does not exist.\n Key: yum/redhat/9/mongodb-org/4.4/x86_64/repodata/repomd.xml\n RequestId: CA9M6WCGC930ZH8V\n HostId: ULD4o7XGDoRwppqUBecJB1JkhX4dZGoK6OSOwToxOD9QOAWfcA99xrR7BEr1TXoXTJ8kEzzPvNw=\n", "text": "hi @Samuel_Rabini … I just fed that repository url into my browser and I get:So the yum repo file you’re using is incorrect for some reason. Check the docs for the install and make sure it’s correct, or try the another version of MongoDB or something …", "username": "Jack_Woehr" }, { "code": "", "text": "I grab the repo configuration from the official MongoDB WebSite guide. So I think that’s too early for RHEL 9…", "username": "Samuel_Rabini" }, { "code": "", "text": "I think you’re right. If we go one directory up to MongoDB Repositories we see:Parent Directory\n5\n5Server\n6\n6Server\n7\n7Server\n8\n8Server\nmongodb-org-3.0.repo\nmongodb-org-3.1.repo\nmongodb-org-testing.repo", "username": "Jack_Woehr" } ]
Install MongoDB 4.4 on Red Hat Linux 9
2022-06-04T12:05:54.893Z
Install MongoDB 4.4 on Red Hat Linux 9
5,921
null
[ "queries" ]
[ { "code": "docs = collection.find({\"_id\": _id, \"editors\": \"someguy\"})", "text": "Hi all,Is it possible to determine which filter field did not match any documents? For example, for a simple get query like the one below, can I find out which of the two fields are not a match?docs = collection.find({\"_id\": _id, \"editors\": \"someguy\"})In this case, I would like to know if it’s the _id field or the editors field that did not match the document found. When I run docs.explain() I can see that it examines one document as the _id field matches, but it doesn’t specify that the editors field did not match.I can easily do this in two calls, but I would like to keep the number of calls to a minimum.", "username": "Ozy" }, { "code": "", "text": "The only way I can see is you find with _id only and test your other condition in your code.If _id matches you get a document to test the second condition otherwise you know _id does not match.If you want to know if some other _id has editors:someguy, that is another story.", "username": "steevej" }, { "code": "", "text": "Thanks. I think this is the best way.", "username": "Ozy" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Can you determine what filter condition did not match?
2022-06-05T14:38:57.520Z
Can you determine what filter condition did not match?
1,723
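A minimal sketch of the two-step check suggested in the thread above, in Node.js driver syntax (collection and field names follow the question; the variable names are illustrative):

```js
// Find by _id alone, then test the second condition in application code.
const doc = await collection.findOne({ _id });

if (doc === null) {
  console.log("_id did not match any document");
} else {
  const editorsMatch = Array.isArray(doc.editors)
    ? doc.editors.includes("someguy")
    : doc.editors === "someguy";
  console.log(editorsMatch ? "both conditions matched"
                           : "_id matched, but 'editors' did not");
}
```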
null
[ "crud" ]
[ { "code": "\"voteTwentyHourCalcul\": {\n \"twentyHourCalcul\": [\n {\n \"day\": \"2022-05-18\",\n \"hour\": 3,\n \"value\": 1\n },\n {\n \"day\": \"2022-05-19\",\n \"hour\": 3,\n \"value\": 2\n },\n {\n \"day\": \"2022-05-28\",\n \"hour\": 10,\n \"value\": 2\n },\n {\n \"day\": \"2022-05-28\",\n \"hour\": 23,\n \"value\": 5\n },\n {\n \"day\": \"2022-05-29\",\n \"hour\": 4,\n \"value\": 2\n },\n {\n \"day\": \"2022-05-29\",\n \"hour\": 12,\n \"value\": 8\n },\n {\n \"day\": \"2022-05-29\",\n \"hour\": 22,\n \"value\": 5\n },\n {\n \"day\": \"2022-05-29\",\n \"hour\": 23,\n \"value\": 10\n },\n {\n \"day\": \"2022-05-30\",\n \"hour\": 9,\n \"value\": 5\n },\n {\n \"day\": \"2022-06-1\",\n \"hour\": 14,\n \"value\": 2\n },\n {\n \"day\": \"2022-06-2\",\n \"hour\": 5,\n \"value\": 8\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n }\n ]\n },\n \"voteTwentyHour\": 50,\nexports.global = (req, res) => {\n \n let yesterday = new Date();\n yesterday.setDate(yesterday.getDate() - 1);\n \n //Yesterday\n let mondayYesterdayUtc = (yesterday.getUTCMonth() + 1)\n mondayYesterdayUtc = parseInt(mondayYesterdayUtc);\n let dayYesterdayUtc = yesterday.getUTCDate()\n dayYesterdayUtc = parseInt(dayYesterdayUtc);\n if (mondayYesterdayUtc < 10) {\n mondayYesterdayUtc = '0' + mondayYesterdayUtc.toString()\n }\n \n if (dayYesterdayUtc < 10) {\n dayYesterdayUtc = '0' + dayYesterdayUtc.toString()\n }\n \n \n let dateYesterdayUtc = yesterday.getFullYear() + '-' + mondayYesterdayUtc + '-' + dayYesterdayUtc;\n \n \n Project.updateMany(\n {},\n {\n $set: {\n \"voteTwentyHourCalcul.twentyHourCalcul.$[elem].value\": 0\n },\n },\n {\n \n arrayFilters: [\n \n {\n $or: [\n { \"elem.day\": { $lt: dateYesterdayUtc } },\n { $and: [{ \"elem.day\": dateYesterdayUtc }, { \"elem.hour\": { $gte: yesterday.getUTCHours() } }] }\n ]\n }]\n },\n \n (err, response) => {\n if (err) return res.status(500).json({ msg: 'update failed', error: err });\n res.status(200).json({ msg: `document updated`, response: response });\n });\n \n};\n\"voteTwentyHourCalcul\": {\n \"twentyHourCalcul\": [\n {\n \"day\": \"2022-05-18\",\n \"hour\": 3,\n \"value\": 0\n },\n {\n \"day\": \"2022-05-19\",\n \"hour\": 3,\n \"value\": 0\n },\n {\n \"day\": \"2022-05-28\",\n \"hour\": 10,\n \"value\": 0\n },\n {\n \"day\": \"2022-05-28\",\n \"hour\": 23,\n \"value\": 0\n },\n {\n \"day\": \"2022-05-29\",\n \"hour\": 4,\n \"value\": 0\n },\n {\n \"day\": \"2022-05-29\",\n \"hour\": 12,\n \"value\": 0\n },\n {\n \"day\": \"2022-05-29\",\n \"hour\": 22,\n \"value\": 0\n },\n {\n \"day\": \"2022-05-29\",\n \"hour\": 23,\n \"value\": 0\n },\n {\n \"day\": \"2022-05-30\",\n \"hour\": 9,\n \"value\": 0\n },\n {\n \"day\": \"2022-06-1\",\n \"hour\": 14,\n \"value\": 0\n },\n {\n \"day\": \"2022-06-2\",\n \"hour\": 5,\n \"value\": 8\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n 
{\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n },\n {\n \"day\": 0,\n \"hour\": 0,\n \"value\": 0\n }\n ]\n },\n \"voteTwentyHour\": 50,\n", "text": "Hello, I start in mongo DbHere’s what I’m trying to achieveI have a voteTwentyHourCalcul object which contains the current date (Day) , a time (Hour) and a value (Value)and voteTwentyHour which is equal to the sum of all the valuesI am looking to transform the value to 0 for all those who have a date 24 less than the current dateI think I have already succeeded in this step with the following codeI get the following resultRequest made on time at 11:34 UTC 2022-06-2We obtain the value 0 for all the dates older than 24 hours compared to the current dateNow what I would like to do is recalculate “voteTwentyHour” which is equal to the sum of all the values\nAfter the query it should be equal to 8How should I go about this?Thanks to everyone who tries to help me", "username": "Mielpops" }, { "code": "coll{\n _id: ObjectId(\"629919a4c1da581a878e5db3\"),\n voteTwentyHourCalcul: {\n twentyHourCalcul: [\n { day: '2022-05-18', hour: 3, value: 0 },\n { day: '2022-05-19', hour: 3, value: 0 },\n { day: '2022-05-28', hour: 10, value: 0 },\n { day: '2022-05-28', hour: 23, value: 0 },\n { day: '2022-05-29', hour: 4, value: 0 },\n { day: '2022-05-29', hour: 12, value: 0 },\n { day: '2022-05-29', hour: 22, value: 0 },\n { day: '2022-05-29', hour: 23, value: 0 },\n { day: '2022-05-30', hour: 9, value: 0 },\n { day: '2022-06-1', hour: 14, value: 0 },\n { day: '2022-06-2', hour: 5, value: 8 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 }\n ]\n },\n voteTwentyHour: 50\n}\n[\n {\n '$unwind': {\n 'path': '$voteTwentyHourCalcul.twentyHourCalcul'\n }\n }, {\n '$group': {\n '_id': '$_id', \n 'voteTwentyHour': {\n '$sum': '$voteTwentyHourCalcul.twentyHourCalcul.value'\n }\n }\n }, {\n '$merge': {\n 'into': 'coll', \n 'on': '_id', \n 'whenMatched': 'merge', \n 'whenNotMatched': 'fail'\n }\n }\n]\n{\n _id: ObjectId(\"629919a4c1da581a878e5db3\"),\n voteTwentyHourCalcul: {\n twentyHourCalcul: [\n { day: '2022-05-18', hour: 3, value: 0 },\n { day: '2022-05-19', hour: 3, value: 0 },\n { day: '2022-05-28', hour: 10, value: 0 },\n { day: '2022-05-28', hour: 23, value: 0 },\n { day: '2022-05-29', hour: 4, value: 0 },\n { day: '2022-05-29', hour: 12, value: 0 },\n { day: '2022-05-29', hour: 22, value: 0 },\n { day: '2022-05-29', hour: 23, value: 0 },\n { day: '2022-05-30', hour: 9, value: 0 },\n { day: '2022-06-1', hour: 14, value: 0 },\n { day: '2022-06-2', hour: 5, value: 8 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 
0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 }\n ]\n },\n voteTwentyHour: 8\n}\nvoteTwentyHour_id$match", "text": "Hi @Mielpops,I inserted this in my collection called coll:Then I execute this aggregation pipeline on the entire collection:Result in my collection:As you can see, my voteTwentyHour has been updated correctly.I only have a single doc here but because I’m grouping on _id, this would update the entire collection correctly. But feel free to add a $match at the beginning of the pipeline to only update a subset of docs.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "{\n _id: ObjectId(\"629919a4c1da581a878e5db3\"),\n voteTwentyHourCalcul: {\n twentyHourCalcul: [\n { day: '2022-05-18', hour: 3, value: 0 },\n { day: '2022-05-19', hour: 3, value: 0 },\n { day: '2022-05-28', hour: 10, value: 0 },\n { day: '2022-05-28', hour: 23, value: 0 },\n { day: '2022-05-29', hour: 4, value: 0 },\n { day: '2022-05-29', hour: 12, value: 0 },\n { day: '2022-05-29', hour: 22, value: 0 },\n { day: '2022-05-29', hour: 23, value: 0 },\n { day: '2022-05-30', hour: 9, value: 0 },\n { day: '2022-06-1', hour: 14, value: 0 },\n { day: '2022-06-2', hour: 5, value: 8 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 },\n { day: 0, hour: 0, value: 0 }\n ]\n },\n voteTwentyHour: 50\n}\n", "text": "Hello, thank you for your answerHow to do if I have several collections, I would like to use your solution for all of my collections with update Many", "username": "Mielpops" }, { "code": "", "text": "You will have to do a FOR loop over your list of collections. 
You can’t $merge into several collections.\nThe good news is that these can run in parallel if they are running in a multi-threaded program (one thread per collection).", "username": "MaBeuLux88" }, { "code": "db.coll.insertMany\n([\n\n\n\n\nfor (var i = 0; i <= 2; i=i++){\n db.coll.aggregate([\n {\n '$unwind': {\n 'path': '$voteTwentyHourCalcul.twentyHourCalcul'\n }\n }, {\n '$group': {\n '_id': '$_id', \n 'voteTwentyHour': {\n '$sum': '$voteTwentyHourCalcul.twentyHourCalcul.value'\n }\n }\n }, {\n '$merge': {\n 'into': 'coll', \n 'on': '_id', \n 'whenMatched': 'merge', \n 'whenNotMatched': 'fail'\n }\n }\n]\n )\n}\n", "text": "Here’s what I tried, but it doesn’t seem to work\nI must be doing it wrong[image][image]{\n_id: “1”,\nvoteTwentyHourCalcul: {\ntwentyHourCalcul: [\n{ day: ‘2022-05-18’, hour: 3, value: 0 },\n{ day: ‘2022-05-19’, hour: 3, value: 0 },\n{ day: ‘2022-05-28’, hour: 10, value: 0 },\n{ day: ‘2022-05-28’, hour: 23, value: 0 },\n{ day: ‘2022-05-29’, hour: 4, value: 0 },\n{ day: ‘2022-05-29’, hour: 12, value: 0 },\n{ day: ‘2022-05-29’, hour: 22, value: 0 },\n{ day: ‘2022-05-29’, hour: 23, value: 0 },\n{ day: ‘2022-05-30’, hour: 9, value: 0 },\n{ day: ‘2022-06-1’, hour: 14, value: 0 },\n{ day: ‘2022-06-2’, hour: 5, value: 8 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 }\n]\n},\nvoteTwentyHour: 50\n},\n{\n_id: “2”,\nvoteTwentyHourCalcul: {\ntwentyHourCalcul: [\n{ day: ‘2022-05-18’, hour: 3, value: 0 },\n{ day: ‘2022-05-19’, hour: 3, value: 0 },\n{ day: ‘2022-05-28’, hour: 10, value: 0 },\n{ day: ‘2022-05-28’, hour: 23, value: 0 },\n{ day: ‘2022-05-29’, hour: 4, value: 0 },\n{ day: ‘2022-05-29’, hour: 12, value: 0 },\n{ day: ‘2022-05-29’, hour: 22, value: 0 },\n{ day: ‘2022-05-29’, hour: 23, value: 0 },\n{ day: ‘2022-05-30’, hour: 9, value: 0 },\n{ day: ‘2022-06-1’, hour: 14, value: 0 },\n{ day: ‘2022-06-2’, hour: 5, value: 8 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 }\n]\n},\nvoteTwentyHour: 50\n},\n{\n_id: “3”,\nvoteTwentyHourCalcul: {\ntwentyHourCalcul: [\n{ day: ‘2022-05-18’, hour: 3, value: 0 },\n{ day: ‘2022-05-19’, hour: 3, value: 0 },\n{ day: ‘2022-05-28’, hour: 10, value: 0 },\n{ day: ‘2022-05-28’, hour: 23, value: 0 },\n{ day: ‘2022-05-29’, hour: 4, value: 0 },\n{ day: ‘2022-05-29’, hour: 12, value: 0 },\n{ day: ‘2022-05-29’, hour: 22, value: 0 },\n{ day: ‘2022-05-29’, hour: 23, value: 0 },\n{ day: ‘2022-05-30’, hour: 9, value: 0 },\n{ day: ‘2022-06-1’, hour: 14, value: 0 },\n{ day: ‘2022-06-2’, hour: 5, value: 8 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 
0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 },\n{ day: 0, hour: 0, value: 0 }\n]\n},\nvoteTwentyHour: 50\n},\n]);", "username": "Mielpops" }, { "code": "", "text": "hum errori++ and not i=i++", "username": "Mielpops" }, { "code": "", "text": "You mentioned havingseveral collections,but your code access only the collection coll.You have the variable i in your for-loop but you do not use it in your code.I feel like you misunderstand the fundamental difference between the terms collection and documents.", "username": "steevej" } ]
Mongo DB updateMany
2022-06-02T13:56:55.235Z
Mongo DB updateMany
2,236
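A sketch of the per-collection loop suggested in the answers above, in mongosh syntax; the list of collection names is assumed, and each aggregation merges back into the same collection it reads from:

```js
const collectionNames = ["collA", "collB", "collC"]; // hypothetical list

for (const name of collectionNames) {
  db.getCollection(name).aggregate([
    { $unwind: "$voteTwentyHourCalcul.twentyHourCalcul" },
    { $group: {
        _id: "$_id",
        voteTwentyHour: { $sum: "$voteTwentyHourCalcul.twentyHourCalcul.value" }
    } },
    // The $merge target must match the collection being read, hence `name` again.
    { $merge: { into: name, on: "_id", whenMatched: "merge", whenNotMatched: "fail" } }
  ]);
}
```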
https://www.mongodb.com/…_2_1024x408.jpeg
[ "aggregation", "queries" ]
[ { "code": "Sale.aggregate([\n {\n $match: {\n \"createdAt\": {\n $gte: finalUnix\n }\n }\n },\n {\n $project: {\n contract: 1,\n }\n },\n {\n $group: {\n _id: '$contract',\n totalSales: { $sum: 1 }\n }\n },\n {\n $sort: {\n \"totalSales\": -1\n }\n },\n {\n $limit: 10\n }\n ])\n", "text": "Hey guys, I have my collection with the following indexes:\n\nCaptura de Tela 2022-06-05 às 04.23.551920×765 39.4 KB\nThe problem is that no of my queries are using indexes, since it says 0 usage.\nOne example of query is:This query should be using the createdAt index, shouldnt?\nWhat am I doing wrong?", "username": "foco_radiante" }, { "code": "", "text": "This query should be using the createdAt index, shouldnt?It should. Share the explain plan.", "username": "steevej" } ]
Indexes are not being used
2022-06-05T07:25:15.486Z
Indexes are not being used
1,501
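One way to confirm whether the $match stage can use an index is to run the pipeline through explain. A sketch in mongosh, assuming the collection is named sales, an index on createdAt exists, and finalUnix has the same BSON type as the stored createdAt values (string vs. number matters for both matching and index use):

```js
db.sales.createIndex({ createdAt: 1 });

const finalUnix = "1654100284"; // illustrative value

db.sales.explain("executionStats").aggregate([
  { $match: { createdAt: { $gte: finalUnix } } },
  { $group: { _id: "$contract", totalSales: { $sum: 1 } } },
  { $sort: { totalSales: -1 } },
  { $limit: 10 }
]);
// An IXSCAN stage in the winning plan indicates the createdAt index is being used.
```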
null
[ "aggregation", "queries" ]
[ { "code": "Sale.aggregate([\n {\n $match: {\n seller,\n createdAt: {\n $gte: finalUnix\n }\n }\n }\n ])\n", "text": "I have my collection with less than 10.000 documents and Im running the following query:Seller and createdAt are string and they are already indexes of the collection. This simple query is taking more than 10 seconds to be executed. I have less than 10.000 documents and this query returns less than 1.000 documents.Why is it taking so long?Running a query only with $match (matching a string, index too) takes more than 10 seconds in MongoDB Compass too.", "username": "foco_radiante" }, { "code": "explain()", "text": "@foco_radiante, you can tell the details of the indexes you have created on the collection.You can run the explain() method to generate a Query Plan for the aggregation query. The plan will show details about the index(es) used for the query.", "username": "Prasad_Saya" }, { "code": "{\n \"explainVersion\":\"1\",\n \"queryPlanner\":{\n \"namespace\":\"test.testsales\",\n \"indexFilterSet\":false,\n \"parsedQuery\":{\n \"$and\":[\n {\n \"contract\":{\n \"$eq\":\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\"\n }\n },\n {\n \"createdAt\":{\n \"$gte\":\"1654100284\"\n }\n }\n ]\n },\n \"maxIndexedOrSolutionsReached\":false,\n \"maxIndexedAndSolutionsReached\":false,\n \"maxScansToExplodeReached\":false,\n \"winningPlan\":{\n \"stage\":\"FETCH\",\n \"filter\":{\n \"createdAt\":{\n \"$gte\":\"1654100284\"\n }\n },\n \"inputStage\":{\n \"stage\":\"IXSCAN\",\n \"keyPattern\":{\n \"contract\":1\n },\n \"indexName\":\"contract_1\",\n \"isMultiKey\":false,\n \"multiKeyPaths\":{\n \"contract\":[\n \n ]\n },\n \"isUnique\":false,\n \"isSparse\":false,\n \"isPartial\":false,\n \"indexVersion\":2,\n \"direction\":\"forward\",\n \"indexBounds\":{\n \"contract\":[\n \"[\\\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\\\", \\\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\\\"]\"\n ]\n }\n }\n },\n \"rejectedPlans\":[\n {\n \"stage\":\"FETCH\",\n \"filter\":{\n \"contract\":{\n \"$eq\":\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\"\n }\n },\n \"inputStage\":{\n \"stage\":\"IXSCAN\",\n \"keyPattern\":{\n \"createdAt\":1\n },\n \"indexName\":\"createdAt_1\",\n \"isMultiKey\":false,\n \"multiKeyPaths\":{\n \"createdAt\":[\n \n ]\n },\n \"isUnique\":false,\n \"isSparse\":false,\n \"isPartial\":false,\n \"indexVersion\":2,\n \"direction\":\"forward\",\n \"indexBounds\":{\n \"createdAt\":[\n \"[\\\"1654100284\\\", {})\"\n ]\n }\n }\n },\n {\n \"stage\":\"FETCH\",\n \"inputStage\":{\n \"stage\":\"IXSCAN\",\n \"keyPattern\":{\n \"contract\":1,\n \"createdAt\":1\n },\n \"indexName\":\"contract_1_createdAt_1\",\n \"isMultiKey\":false,\n \"multiKeyPaths\":{\n \"contract\":[\n \n ],\n \"createdAt\":[\n \n ]\n },\n \"isUnique\":false,\n \"isSparse\":false,\n \"isPartial\":false,\n \"indexVersion\":2,\n \"direction\":\"forward\",\n \"indexBounds\":{\n \"contract\":[\n \"[\\\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\\\", \\\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\\\"]\"\n ],\n \"createdAt\":[\n \"[\\\"1654100284\\\", {})\"\n ]\n }\n }\n }\n ]\n },\n \"executionStats\":{\n \"executionSuccess\":true,\n \"nReturned\":270,\n \"executionTimeMillis\":1,\n \"totalKeysExamined\":270,\n \"totalDocsExamined\":270,\n \"executionStages\":{\n \"stage\":\"FETCH\",\n \"filter\":{\n \"createdAt\":{\n \"$gte\":\"1654100284\"\n }\n },\n \"nReturned\":270,\n \"executionTimeMillisEstimate\":1,\n \"works\":271,\n \"advanced\":270,\n \"needTime\":0,\n \"needYield\":0,\n \"saveState\":0,\n \"restoreState\":0,\n 
\"isEOF\":1,\n \"docsExamined\":270,\n \"alreadyHasObj\":0,\n \"inputStage\":{\n \"stage\":\"IXSCAN\",\n \"nReturned\":270,\n \"executionTimeMillisEstimate\":0,\n \"works\":271,\n \"advanced\":270,\n \"needTime\":0,\n \"needYield\":0,\n \"saveState\":0,\n \"restoreState\":0,\n \"isEOF\":1,\n \"keyPattern\":{\n \"contract\":1\n },\n \"indexName\":\"contract_1\",\n \"isMultiKey\":false,\n \"multiKeyPaths\":{\n \"contract\":[\n \n ]\n },\n \"isUnique\":false,\n \"isSparse\":false,\n \"isPartial\":false,\n \"indexVersion\":2,\n \"direction\":\"forward\",\n \"indexBounds\":{\n \"contract\":[\n \"[\\\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\\\", \\\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\\\"]\"\n ]\n },\n \"keysExamined\":270,\n \"seeks\":1,\n \"dupsTested\":0,\n \"dupsDropped\":0\n }\n },\n \"allPlansExecution\":[\n {\n \"nReturned\":101,\n \"executionTimeMillisEstimate\":1,\n \"totalKeysExamined\":101,\n \"totalDocsExamined\":101,\n \"executionStages\":{\n \"stage\":\"FETCH\",\n \"filter\":{\n \"createdAt\":{\n \"$gte\":\"1654100284\"\n }\n },\n \"nReturned\":101,\n \"executionTimeMillisEstimate\":1,\n \"works\":101,\n \"advanced\":101,\n \"needTime\":0,\n \"needYield\":0,\n \"saveState\":0,\n \"restoreState\":0,\n \"isEOF\":0,\n \"docsExamined\":101,\n \"alreadyHasObj\":0,\n \"inputStage\":{\n \"stage\":\"IXSCAN\",\n \"nReturned\":101,\n \"executionTimeMillisEstimate\":0,\n \"works\":101,\n \"advanced\":101,\n \"needTime\":0,\n \"needYield\":0,\n \"saveState\":0,\n \"restoreState\":0,\n \"isEOF\":0,\n \"keyPattern\":{\n \"contract\":1\n },\n \"indexName\":\"contract_1\",\n \"isMultiKey\":false,\n \"multiKeyPaths\":{\n \"contract\":[\n \n ]\n },\n \"isUnique\":false,\n \"isSparse\":false,\n \"isPartial\":false,\n \"indexVersion\":2,\n \"direction\":\"forward\",\n \"indexBounds\":{\n \"contract\":[\n \"[\\\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\\\", \\\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\\\"]\"\n ]\n },\n \"keysExamined\":101,\n \"seeks\":1,\n \"dupsTested\":0,\n \"dupsDropped\":0\n }\n }\n },\n {\n \"nReturned\":1,\n \"executionTimeMillisEstimate\":0,\n \"totalKeysExamined\":101,\n \"totalDocsExamined\":101,\n \"executionStages\":{\n \"stage\":\"FETCH\",\n \"filter\":{\n \"contract\":{\n \"$eq\":\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\"\n }\n },\n \"nReturned\":1,\n \"executionTimeMillisEstimate\":0,\n \"works\":101,\n \"advanced\":1,\n \"needTime\":100,\n \"needYield\":0,\n \"saveState\":0,\n \"restoreState\":0,\n \"isEOF\":0,\n \"docsExamined\":101,\n \"alreadyHasObj\":0,\n \"inputStage\":{\n \"stage\":\"IXSCAN\",\n \"nReturned\":101,\n \"executionTimeMillisEstimate\":0,\n \"works\":101,\n \"advanced\":101,\n \"needTime\":0,\n \"needYield\":0,\n \"saveState\":0,\n \"restoreState\":0,\n \"isEOF\":0,\n \"keyPattern\":{\n \"createdAt\":1\n },\n \"indexName\":\"createdAt_1\",\n \"isMultiKey\":false,\n \"multiKeyPaths\":{\n \"createdAt\":[\n \n ]\n },\n \"isUnique\":false,\n \"isSparse\":false,\n \"isPartial\":false,\n \"indexVersion\":2,\n \"direction\":\"forward\",\n \"indexBounds\":{\n \"createdAt\":[\n \"[\\\"1654100284\\\", {})\"\n ]\n },\n \"keysExamined\":101,\n \"seeks\":1,\n \"dupsTested\":0,\n \"dupsDropped\":0\n }\n }\n },\n {\n \"nReturned\":101,\n \"executionTimeMillisEstimate\":0,\n \"totalKeysExamined\":101,\n \"totalDocsExamined\":101,\n \"executionStages\":{\n \"stage\":\"FETCH\",\n \"nReturned\":101,\n \"executionTimeMillisEstimate\":0,\n \"works\":101,\n \"advanced\":101,\n \"needTime\":0,\n \"needYield\":0,\n \"saveState\":0,\n \"restoreState\":0,\n 
\"isEOF\":0,\n \"docsExamined\":101,\n \"alreadyHasObj\":0,\n \"inputStage\":{\n \"stage\":\"IXSCAN\",\n \"nReturned\":101,\n \"executionTimeMillisEstimate\":0,\n \"works\":101,\n \"advanced\":101,\n \"needTime\":0,\n \"needYield\":0,\n \"saveState\":0,\n \"restoreState\":0,\n \"isEOF\":0,\n \"keyPattern\":{\n \"contract\":1,\n \"createdAt\":1\n },\n \"indexName\":\"contract_1_createdAt_1\",\n \"isMultiKey\":false,\n \"multiKeyPaths\":{\n \"contract\":[\n \n ],\n \"createdAt\":[\n \n ]\n },\n \"isUnique\":false,\n \"isSparse\":false,\n \"isPartial\":false,\n \"indexVersion\":2,\n \"direction\":\"forward\",\n \"indexBounds\":{\n \"contract\":[\n \"[\\\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\\\", \\\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\\\"]\"\n ],\n \"createdAt\":[\n \"[\\\"1654100284\\\", {})\"\n ]\n },\n \"keysExamined\":101,\n \"seeks\":1,\n \"dupsTested\":0,\n \"dupsDropped\":0\n }\n }\n }\n ]\n },\n \"command\":{\n \"find\":\"testsales\",\n \"filter\":{\n \"contract\":\"0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7\",\n \"createdAt\":{\n \"$gte\":\"1654100284\"\n }\n },\n \"projection\":{\n \n },\n \"$db\":\"test\"\n },\n \"serverParameters\":{\n \"internalQueryFacetBufferSizeBytes\":104857600,\n \"internalQueryFacetMaxOutputDocSizeBytes\":104857600,\n \"internalLookupStageIntermediateDocumentMaxSizeBytes\":16793600,\n \"internalDocumentSourceGroupMaxMemoryBytes\":104857600,\n \"internalQueryMaxBlockingSortMemoryUsageBytes\":33554432,\n \"internalQueryProhibitBlockingMergeOnMongoS\":0,\n \"internalQueryMaxAddToSetBytes\":104857600,\n \"internalDocumentSourceSetWindowFieldsMaxMemoryBytes\":104857600\n },\n \"ok\":1,\n \"$clusterTime\":{\n \"clusterTime\":{\n \"$timestamp\":\"7105048794433060909\"\n },\n \"signature\":{\n \"hash\":\"JMRPzVCd1dQHuvNBaZ4AI2Tm2Q0=\",\n \"keyId\":{\n \"low\":3,\n \"high\":1645032659,\n \"unsigned\":false\n }\n }\n },\n \"operationTime\":{\n \"$timestamp\":\"7105048794433060909\"\n }\n}\n", "text": "You see it is scanning 270 docs, but its taking like 10 seconds to return me the results.", "username": "foco_radiante" }, { "code": "\"winningPlan\"\"stage\":\"IXSCAN\"\"indexName\":\"contract_1\". \"executionStats\":{\n \"executionSuccess\":true,\n \"nReturned\":270,\n \"executionTimeMillis\":1,\n\"contract_1\"\"indexName\":\"contract_1_createdAt_1\",\n\"indexName\":\"contract_1\",\ncontractcreatedAt", "text": "You see it is scanning 270 docs, but its taking like 10 seconds to return me the results.The \"winningPlan\" has a \"stage\":\"IXSCAN\" and is applying the \"indexName\":\"contract_1\". And the execution time is shown here:It looks like you have two indexes (see below), and you dont need those two. You can delete the \"contract_1\" index and try your query again:Note that for the query to apply the index, you need to use the indexed fields in your query filter - in this case the contract and the createdAt.See the topic on Prefixes to understand why you don’t need to those two indexes:", "username": "Prasad_Saya" }, { "code": "", "text": "What if I have one query using one index and other query using other index?I mean, I have one query using the contract index and other query using the createdAt index.", "username": "foco_radiante" }, { "code": "", "text": "As mentionedSee the topic on Prefixes to understand why you don’t need to those two indexes:You do not havethe createdAt indexYou have a compoud index with createdAt as a secondary key.", "username": "steevej" } ]
Query taking long time to run
2022-06-03T07:07:58.872Z
Query taking long time to run
3,613
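Following the advice above about prefixes, the single-field contract index is redundant once the compound { contract: 1, createdAt: 1 } index exists, because the compound index's prefix already serves equality queries on contract alone. A sketch in mongosh (collection and index names taken from the explain output above):

```js
db.testsales.dropIndex("contract_1");

// The remaining compound index supports the equality + range filter directly:
db.testsales.find({
  contract: "0x248139afb8d3a2e16154fbe4fb528a3a214fd8e7",
  createdAt: { $gte: "1654100284" }
}).explain("executionStats");
// Expect the winning plan to be an IXSCAN on contract_1_createdAt_1 with bounds on both fields.
```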
null
[ "kotlin" ]
[ { "code": "", "text": "I have searched most of the docs but I couldn’t find anything, do you know how I can close the MongoDB connection in KMongo?Thank you:)", "username": "saketh" }, { "code": "", "text": "Here is a previous discussion on closing a MongoDB connection with Kotlin does this help?", "username": "tapiocaPENGUIN" }, { "code": "get(\"/somePath\") {\n val client= KMongo.createClient(\"mongodb+srv://${System.getenv(\"USER_NAME\")}:${System.getenv(\"PASSWORD\")}@cluster0.2wr7r7n.mongodb.net/?retryWrites=true&w=majority\")\n val clientResponse = client.getDatabase(System.getenv(\"DATABASE_NAME\")).getCollection(System.getenv(\"COLLECTION_NAME\"))\n .find().toList().json \n call.respond(clientResponse).also {\n client.close() // closes the connection after responding\n }\n}\n", "text": "so, I got the solution; if anyone is looking for the same then this may help you:-\nby using .also{} method the problem was solved;", "username": "saketh" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How Can I Close MongoDB Connection In KMongo?
2022-06-03T17:39:42.878Z
How Can I Close MongoDB Connection In KMongo?
2,798
null
[ "cxx" ]
[ { "code": "", "text": "I’m getting this error whenever I insert more than 3 million documents in a bulk insert. I tried executing the bulk insert every 1 million, but I’m still getting the error. I’m running Ubuntu 20.04 and mongodb 5.0.8. Does anyone have any idea? It looks like the buffer is pushing the integer limit in C/C++. Is this the harddrive not keeping up with the lazy writes? Do I need to throttle my upserts somehow?mongo-c-driver-1.21.0/src/libmongoc/src/mongoc/mongoc-buffer.c:139 _mongoc_buffer_append(): precondition failed: (buffer->datalen + data_size) < INT_MAX", "username": "Matthew_Richards" }, { "code": "...\nbulk.append(upsert_op);\ncounter++; \nif (counter>1000000) {\n counter = 0; \n auto result = bulk.execute();\n bulk = voxel_collection.create_bulk_write();\n}\n...\n", "text": "I found a workaround. I was doing a bulk execute every so often, but I wasn’t resetting the bulk container object. I didn’t see a bulk.clear() or anything close, so I just reallocated it.", "username": "Matthew_Richards" } ]
Error when bulk upserting a large number of documents (over 3M)
2022-05-18T21:17:46.196Z
Error when bulk upserting a large number of documents (over 3M)
3,136
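The workaround above re-creates the bulk-write object after each execute because it cannot be reused once executed. The same batching idea, sketched in Node.js purely for illustration (the thread itself uses the C++ driver; BATCH_SIZE and docsToUpsert are assumptions):

```js
const BATCH_SIZE = 100000;
let ops = [];

for (const doc of docsToUpsert) { // docsToUpsert is a hypothetical iterable of documents
  ops.push({ replaceOne: { filter: { _id: doc._id }, replacement: doc, upsert: true } });
  if (ops.length >= BATCH_SIZE) {
    await collection.bulkWrite(ops, { ordered: false });
    ops = []; // start a fresh batch, mirroring the re-created bulk object above
  }
}
if (ops.length > 0) {
  await collection.bulkWrite(ops, { ordered: false });
}
```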
null
[ "aggregation", "queries", "node-js", "mongoose-odm" ]
[ { "code": "const routeSchema = new mongoose.Schema(\n {\n Location: {\n from: {\n type: mongoose.Schema.Types.ObjectId,\n ref: \"Location\",\n required: true,\n },\n to: {\n type: mongoose.Schema.Types.ObjectId,\n ref: \"Location\",\n required: true,\n },\n },\n busId: {\n type: mongoose.Schema.Types.ObjectId,\n ref: \"Bus\",\n required: true,\n },\n date: {\n type: String,\n required: true,\n },\n);\nconst bookingSchema = new mongoose.Schema({\n userId: {\n type: mongoose.Schema.Types.ObjectId,\n ref: \"User\",\n required: true,\n },\n routeId: {\n type: mongoose.Schema.Types.ObjectId,\n ref: \"Route\",\n required: true,\n },\n passengers: [\n {\n name: { type: String, required: true, trim: true },\n gender: { type: String, required: true, trim: true },\n age: { type: Number, required: true, trim: true },\n }],\n phone: {\n type: Number,\n required: true,\n },\n email: {\n type: String,\n required: true,\n },\n bookingDate: {\n type: String,\n required: true,\n },\n fare: {\n type: Number,\n required: true,\n },\n seats: {\n type: [Number],\n required: true,\n },\n departureDetails: [\n {\n city: { type: String, required: true, trim: true },\n location: { type: String, required: true, trim: true },\n time: { type: String, required: true, trim: true },\n date: { type: String, required: true, trim: true },\n },\n ],\n arrivalDetails: [\n {\n city: { type: String, required: true, trim: true },\n location: { type: String, required: true, trim: true },\n time: { type: String, required: true, trim: true },\n date: { type: String, required: true, trim: true },\n },\n ],\n},{\n timestamps:true\n});\nrouter.get(\"/trip/single\", async (req, res) => {\n if (!req.query.from || !req.query.to || !req.query.date) {\n return res.send({\n error: \"Please enter the data to get the trip\",\n });\n }\n const { from, to, date } = req.query;\n\n const routes = await Route.find({\n \"Location.from\": from,\n \"Location.to\": to,\n \"date\": date.toString(),\n });\n\n const matchedBus = await routes.filter(() =>{\n return Route.busId === routes._id\n });\n\n const bookings = await Booking.find({\n routeId: { $in: matchedBus.map((matchedBus) => matchedBus._id) },\n });\n const busIdWithSeatsObj = {};\n \n for (let i = 0; i < matchedBus.length; i++) {\n let currentBusSeats = [];\n var busData = matchedBus.map(data => data)\n console.log(busData);\n **// Every thing is working fine till here. 
busData is returning the data as shown below**\n\n const busBookings = bookings.filter((booking) => {\n return (\n **//something is wrong here but I can not figure out what is wrong**\n booking.date === date.toString() &&\n booking.busId === matchedBus[i]._id\n );\n });\n console.log(busBookings);\n **//here busBookings is returning an empty array which is a big problem.**\n busBookings.forEach(() => {\n currentBusSeats = [...currentBusSeats, ...Booking.seats];\n });\n busIdWithSeatsObj[matchedBus[i]._id] = currentBusSeats;\n }\n\n res.status(200).send({ routes, matchedBus, busIdWithSeatsObj });\n});\n[\n {\n Location: {\n from: new ObjectId(\"6295f0986f9e32990d8b3488\"),\n to: new ObjectId(\"6295f0c06f9e32990d8b348b\")\n },\n _id: new ObjectId(\"6295f12c6f9e32990d8b348e\"),\n busId: new ObjectId(\"6295f0836f9e32990d8b3485\"),\n date: '2022-06-02',\n departureTime: 11,\n arrivalTime: 6.3,\n createdAt: 2022-05-31T10:42:52.785Z,\n updatedAt: 2022-05-31T10:42:52.785Z,\n __v: 0\n }\n]\n127.0.0.1:3000/trip/single?from=6295f0986f9e32990d8b3488&to=6295f0c06f9e32990d8b348b&date=2022-06-02", "text": "Can anyone help me out here?I am building a bus ticket booking app in node.js. After running the GET query I am getting only data about Buses not the seats but I want seats data as well.There are many different tables like bus-table, locations-table, route-table, booking-table and many other as well.In this bus table an admin can enter the data about Bus like bus number and total seats. After that in the location and route table only admin can enter the data and below is the route schema:Both from and to are referenced to same location table where two different collections are stored.After that an authorized user can book tickets by selecting the route, bus and seats by providing necessary details. Here is the booking table schema:But now the problem starts here in the GET request where I want to show every user of my app about the buses which will run on that route and seats available and reserved in every single bus running on that route. Booked seats are stored in the Booking table.I am getting the bus data right and correctly but seats data is not showing. It is returning an empty arrayHere is the GET request to get the bus and seats data:The console log of busData is:I am passing the query like this: 127.0.0.1:3000/trip/single?from=6295f0986f9e32990d8b3488&to=6295f0c06f9e32990d8b348b&date=2022-06-02Now I am comparing the date and busId in the for loop which is not working I guess. What should I do to make it work and get the matched data?", "username": "Mitul_Kheni" }, { "code": "\"6295f0986f9e32990d8b3488\" != new ObjectId(\"6295f0986f9e32990d8b3488\") \n", "text": "Hi @Mitul_Kheni,I think it’s because “String that represent an ObjectId” != ObjectId.So it doesn’t match in your find as you are looking for the String and not the actual ObjectId.Also about the data, I would recommend to use an ISODate date object instead of string that represents a date. This will make the query way easier for range queries on dates, etc.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "How to store time in mongoose or mongoDB?", "username": "Mitul_Kheni" }, { "code": "new Date(\"2019-02-15\")\n", "text": "This will be an ISODate in MongoDB.", "username": "MaBeuLux88" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to get only matched data in node.js with mongoDB? I am getting only partial data back
2022-06-03T05:19:04.443Z
How to get only matched data in node.js with mongoDB? I am getting only partial data back
2,152
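A sketch of the string-vs-ObjectId point made above, in Node.js; whether the query filter needs an explicit conversion depends on whether Mongoose casts it for you, but comparisons in plain JavaScript need .equals() rather than === (two distinct ObjectId instances never compare equal with ===):

```js
const { ObjectId } = require("mongodb"); // or mongoose.Types.ObjectId

const { from, to, date } = req.query; // these arrive as plain strings

const routes = await Route.find({
  "Location.from": new ObjectId(from), // explicit conversion for an ObjectId field
  "Location.to": new ObjectId(to),
  date: date,
});

// Compare ObjectIds by value, not by reference:
const sameRoute = booking.routeId.equals(routes[0]._id);
```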
null
[ "aggregation", "queries", "views" ]
[ { "code": "aggregate$merge", "text": "I have a query to our DB that was working until yesterday, using the aggregate endpoint for Data API. https://data.mongodb-api.com/app/data-{app_id}/endpoint/data/beta/action/aggregateIt has a $merge stage in this aggregation pipeline.This query was working just fine in both our prod and staging environments for several weeks until yesterday. It now fails with the following error:“Failed to aggregate documents: FunctionError: aggregation stage \"$merge\" is not supported in a user context. A pipeline with this stage must be run as a System User”We have changed nothing on our end. Could you please advise on how to either make our Data API token run as a system user or how to run an aggregation pipeline now using the Data API?Thank you.", "username": "Keeton_Hodgson" }, { "code": "exports = async function (request, response) {\n \n const mongo = context.services.get(<your cluster name>);\n collection = mongo.db(<db name>).collection(<collection>);\n documents = await collection.aggregate(pipeline).toArray();\n return {documents}\nrequest.header[\"api-key\"] = <your SECRET API KEY>", "text": "Hi Keeton - the preview version of the data API was modified slightly to improve our security by forcing application auth due to its ability to open up data in the cluster, however you can still create an endpoint to run the merge pipeline.You can do the folllowing:Since $merge is only available as a system user, you will have to set the associated endpoint as a ‘System’ endpoint, and then do the authentication either within the function (i.e.request.header[\"api-key\"] = <your SECRET API KEY>)Or you can create another wrapper system function that simply invokes the above function that runs as ‘application auth’ it will respect the need for proper API key authentication, but the invoked function will run as system.", "username": "Sumedha_Mehta1" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Data API now fails with A pipeline with this stage must be run as a System User
2022-06-03T21:04:12.550Z
Data API now fails with A pipeline with this stage must be run as a System User
3,028
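A sketch of calling a custom HTTPS endpoint of the kind described above from Node.js 18+ (the endpoint route, payload, and environment variable are placeholders; the endpoint's function is assumed to run the $merge pipeline as a System user):

```js
const response = await fetch(
  "https://data.mongodb-api.com/app/<app-id>/endpoint/runMergePipeline", // hypothetical route
  {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "api-key": process.env.ATLAS_DATA_API_KEY, // secret API key kept out of source control
    },
    body: JSON.stringify({}), // any parameters the endpoint function expects
  }
);
const result = await response.json();
```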
null
[ "queries", "charts" ]
[ { "code": "ab", "text": "I’m looking to calculate the sum of the a multiplied by sum of the b in the Charts, is that correct?", "username": "Mohammed_Noor" }, { "code": "", "text": "Hi @Mohammed_Noor and welcome in the MongoDB Community !We can’t help you without more information. Please share sample docs and explain exactly the expected output you expect.Thanks,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "\ndb900×742 25.8 KB\n", "username": "Mohammed_Noor" }, { "code": "", "text": "this is a sample dataI need to sum the value of work_hours and multiplied with the sum of the value of required_number and display the value in a number chart", "username": "Mohammed_Noor" }, { "code": "[\n {\n \"$group\":{\n \"_id\":{ },\n \"total_work_hours\":{\n \"$sum\":\"$work_hours\"\n },\n \"total_required_number\":{\n \"$sum\":\"$required_number\"\n }\n }\n },\n {\n \"$set\":{\n \"product\":{\n \"$multiply\":[\n \"$total_work_hours\",\n \"$total_required_number\"\n ]\n }\n }\n }\n]\n", "text": "You can’t do this with calculated fields, but you can do so with an aggregation pipeline in the query bar. Try this:Tom", "username": "tomhollander" }, { "code": "", "text": "that worked like a charm, I can’t be more grateful ", "username": "Mohammed_Noor" } ]
Can some one help me with a Calculated field in mongodb charts data source
2022-06-01T15:59:48.865Z
Can some one help me with a Calculated field in mongodb charts data source
3,743
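The same pipeline can be sanity-checked in mongosh before pasting it into the chart's query bar; a sketch assuming the collection is named shifts (the field names follow the thread):

```js
db.shifts.aggregate([
  { $group: {
      _id: {},
      total_work_hours: { $sum: "$work_hours" },
      total_required_number: { $sum: "$required_number" }
  } },
  { $set: {
      product: { $multiply: ["$total_work_hours", "$total_required_number"] }
  } }
]);
// The `product` field of the single resulting document is the value the number chart should show.
```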
null
[ "aggregation", "java", "atlas-cluster" ]
[ { "code": " Bson lookup = Aggregates.lookup(\"comments\",lookupStage,\"comments\");\n", "text": "Hi,When I am trying to use lookup stage in my aggregation pipeline with Java for mongodb 4.4 (I am using free tier) , I am getting following error :com.mongodb.MongoCommandException: Command failed with error 8000 (AtlasError): ‘pipeline is not allowed in this atlas tier’ on server mflix-shard-00-02.7jsv0.mongodb.net:27017. The full response is { “ok” : 0, “errmsg” : “pipeline is not allowed in this atlas tier”, “code” : 8000, “codeName” : “AtlasError” }I am using the following java code :List<? extends Bson> lookupStage = Arrays.asList(new Document(\n“pipeline”,Arrays.asList(\nnew Document(“$match”,\nnew Document(“$expr”,\nnew Document(“$eq”,Arrays.asList(movieId,“movie_id”)))),\nnew Document(“$sort”,new Document(“date”,-1L)))));", "username": "Prakhar_Khanna" }, { "code": "package com.mongodb.quickstart;\n\nimport com.mongodb.client.MongoClient;\nimport com.mongodb.client.MongoClients;\nimport com.mongodb.client.MongoCollection;\nimport org.bson.Document;\nimport org.bson.conversions.Bson;\nimport org.bson.json.JsonWriterSettings;\n\nimport java.util.ArrayList;\nimport java.util.Collections;\nimport java.util.List;\nimport java.util.function.Consumer;\n\nimport static com.mongodb.client.model.Aggregates.lookup;\nimport static com.mongodb.client.model.Aggregates.match;\nimport static com.mongodb.client.model.Filters.eq;\nimport static java.util.Arrays.asList;\n\npublic class AggregationFrameworkCommunity {\n\n public static void main(String[] args) {\n String connectionString = System.getProperty(\"mongodb.uri\");\n try (MongoClient mongoClient = MongoClients.create(connectionString)) {\n MongoCollection<Document> persons = mongoClient.getDatabase(\"test\").getCollection(\"persons\");\n System.out.println(\"Dropping collection 'test.persons'\");\n persons.drop();\n System.out.println(\"Insert 2 sample docs...\");\n insertSampleDocs(persons);\n aggregationWithLookup(persons);\n }\n }\n\n private static void insertSampleDocs(MongoCollection<Document> persons) {\n List<Integer> maxFriends = Collections.singletonList(2);\n Document maxime = new Document(\"_id\", 1).append(\"name\", \"Maxime\").append(\"friends\", maxFriends);\n Document prakhar = new Document(\"_id\", 2).append(\"name\", \"Prakhar\");\n persons.insertMany(asList(maxime, prakhar));\n }\n\n private static void aggregationWithLookup(MongoCollection<Document> persons) {\n Bson match = match(eq(\"name\", \"Maxime\"));\n Bson lookup = lookup(\"persons\", \"friends\", \"_id\", \"friends\");\n List<Document> results = persons.aggregate(asList(match, lookup)).into(new ArrayList<>());\n System.out.println(\"==> Print result of Lookup\");\n results.forEach(printDocuments());\n }\n\n private static Consumer<Document> printDocuments() {\n return doc -> System.out.println(doc.toJson(JsonWriterSettings.builder().indent(true).build()));\n }\n}\ntest.personsAtlas Free-shard-0 [primary] test> db.persons.find()\n[\n { _id: 1, name: 'Maxime', friends: [ 2 ] },\n { _id: 2, name: 'Prakhar' }\n]\nDropping collection 'test.persons'\nInsert 2 sample docs...\n==> Print result of Lookup\n{\n \"_id\": 1,\n \"name\": \"Maxime\",\n \"friends\": [\n {\n \"_id\": 2,\n \"name\": \"Prakhar\"\n }\n ]\n}\n", "text": "Hi @Prakhar_Khanna and welcome in the MongoDB Community !There is no such limitation on M0 Free Tier in Atlas.Here is a piece of code that proves it:Result in my test.persons collection:Java program output:Everything seem to work as intended for 
me.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "Hi @MaBeuLux88 , thanks for such an explanatory response.Maybe I should have framed my question in a better way. I am trying to use following method of Aggregation class in Java :lookup(String from, List<? extends Bson> pipeline, String as);I am unable to use the above mentioned method, as I am getting following error while using a pipeline:com.mongodb.MongoCommandException: Command failed with error 8000 (AtlasError): ‘pipeline is not allowed in this atlas tier’ on server mflix-shard-00-02.7jsv0.mongodb.net:27017. The full response is { “ok” : 0, “errmsg” : “pipeline is not allowed in this atlas tier”, “code” : 8000, “codeName” : “AtlasError” }I hope now I am able to explain the problem (in a better way ), that I am facing.Regards,\nPrakhar", "username": "Prakhar_Khanna" }, { "code": "", "text": "Just like the field name from and the field name as is implied by the method, I suspect that the field pipeline is also implied and should not be specified.Just pass Arrays.asList( new Document( $match… )) rather than Arrays.asList(new Document(pipeline,…).", "username": "steevej" }, { "code": "", "text": "Hi @steevej ,Thanks for the help! It worked after I removed keyword pipeline.Cheers,\nPrakhar", "username": "Prakhar_Khanna" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Unable to use $lookup stage in aggregation pipeline in MongoDB 4.4
2022-06-03T03:20:30.181Z
Unable to use $lookup stage in aggregation pipeline in MongoDB 4.4
2,739
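The fix described in this thread (drop the extra "pipeline" wrapper and pass the stages directly) corresponds to the raw form of a correlated $lookup. Below is a minimal sketch of that raw form, written with PyMongo rather than the Java driver used above; the connection string, ObjectId, and field names are illustrative assumptions based on the mflix sample data.

```python
from bson import ObjectId
from pymongo import MongoClient

client = MongoClient("mongodb+srv://user:pass@cluster.example.net/")  # placeholder URI
db = client["sample_mflix"]

movie_id = ObjectId("573a1390f29313caabcd42e8")  # illustrative value

pipeline = [
    {"$match": {"_id": movie_id}},
    {
        "$lookup": {
            "from": "comments",
            "let": {"id": "$_id"},
            # The sub-pipeline is a plain array of stages -- there is no extra
            # {"pipeline": [...]} document wrapped around it.
            "pipeline": [
                {"$match": {"$expr": {"$eq": ["$movie_id", "$$id"]}}},
                {"$sort": {"date": -1}},
            ],
            "as": "comments",
        }
    },
]

for doc in db.movies.aggregate(pipeline):
    print(doc["_id"], len(doc.get("comments", [])))
```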
null
[ "queries", "node-js" ]
[ { "code": "uidObjectIditemInfoconst item = {\n _id: ObjectID,\n name: 'Test',\n itemInfo : [\n {\n uid: \"ObjectID\",\n itemData: [{}, {}]\n },\n {\n uid: \"ObjectID\",\n itemData: [{}, {}]\n },\n {\n uid: \"ObjectID\",\n itemData: [{}, {}]\n },\n {\n uid: \"ObjectID\",\n itemData: [{}, {}]\n }\n ]\n}\nrootitemInfo const pageSkin = await db\n .find({ \"itemInfo.uid\": { $gt: new ObjectId(\"UidFromItemInfoArray\") } })\n .limit(pgLimit + 1)\n", "text": "I am trying to paginate a nested document in a mongoDB collection.This is a sample of what my data looks like. I added uid field whose value is a mongo ObjectId with the hope that it’s going to point me to the next object in the itemInfo array.However, Running this query below returns the next root objects in my database and not the next object in the itemInfo array.How can i achieve my desired solution ?", "username": "muhammed_ogunsanya" }, { "code": "", "text": "@Imad_Bouteraa hello mate! Could you help me out on this please?", "username": "muhammed_ogunsanya" }, { "code": "", "text": "The find methods return root documents that satisfy the query. If you only want specific elements of an array you must use $filter inside a projection.", "username": "steevej" } ]
Cursor pagination for nested fields in MongoDB using ObjectId
2022-06-01T15:42:45.184Z
Cursor pagination for nested fields in MongoDB using ObjectId
2,464
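The advice in the last reply (use $filter inside a projection when you only want some elements of an array) can be combined with $slice to page through a nested array. A minimal PyMongo sketch under that reading follows; the collection name, ObjectId values, and page size are illustrative assumptions, not code from the thread.

```python
from bson import ObjectId
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # placeholder URI
coll = client["test"]["items"]

last_seen_uid = ObjectId("62981d9d267619e259b47ca1")  # cursor returned by the previous page
page_size = 2

pipeline = [
    {"$match": {"_id": ObjectId("62981d9d267619e259b47ca2")}},  # the parent document
    {
        "$project": {
            "name": 1,
            # Keep only the array elements "after" the cursor, then take one page of them.
            "itemInfo": {
                "$slice": [
                    {
                        "$filter": {
                            "input": "$itemInfo",
                            "as": "info",
                            "cond": {"$gt": ["$$info.uid", last_seen_uid]},
                        }
                    },
                    page_size,
                ]
            },
        }
    },
]

print(list(coll.aggregate(pipeline)))
```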
null
[ "swift" ]
[ { "code": "struct PersonsTableView: View {\n \n @Environment(\\.realm) var realm\n \n @ObservedResults(Person.self, sortDescriptor: SortDescriptor(keyPath: \\Person.name)) var persons\n \n @State private var selection: Person? = nil\n \n var body: some View {\n \n Table(persons, selection: $selection) {\n ...\n }\n .onAppear(perform: setPersonsQuerySubscription)\n }\n \n private func setPersonsQuerySubscription() {\n let subscriptions = realm.subscriptions\n \n subscriptions.write {\n let query = QuerySubscription(name: \"personsList\") { $0._partitionKey == \"userID\" }\n subscriptions.append(query)\n }\n }\n}\n", "text": "Hi,I am exploring the new flexible sync feature and I have an issue regarding updating data after adding a subscription query.I have this PersonTableViewWhen the view appears, the query subscription is created but persons is empty. I have to navigate away and back to this view for the persons to appear inside the table.I believe .onAppear returns before realm downloads new data but, shouldn’t @ObserevedResults update the table as soon as it has new results?Or it could be a macOS Table related issue?Thank you!", "username": "horatiu_anghel" }, { "code": "", "text": "If anyone is interested in this topic, just want to let you know that Realm’s latest update fixed this.Everything works as intended.", "username": "horatiu_anghel" } ]
Flexible sync not updating @ObservedResults in a macOS app
2022-06-02T07:36:56.739Z
Flexible sync not updating @ObservedResults in a macOS app
1,462
null
[]
[ { "code": "", "text": "Hello,I know MongoDb allows this, but I was still wondering whether this is more of an edge case than an everyday feature:Is it okay to have Documents in a Collection that are structurally not identical to each other?I am currently working on a Products model whereby it is possible that some properties are not available (based on some user settings) and thus some fields would be completely missing whereas in other Products documents, again based on user settings, those fields might exist.Is this a good / acceptable practice or is this a sign of poor data planning? Can this create pitfalls for optimisation/ errors down the road?Thank you!", "username": "RENOVATIO" }, { "code": "", "text": "The answer to all your questions is “Yes” I create my collections with validators. If you can’t create a reasonable validator for your collection, that’s probably a sign you’re in trouble.", "username": "Jack_Woehr" }, { "code": "", "text": "Hello,Thanks for your witty answer! Could you kindly elaborate more on how a validator could save me from trouble in this particular scenario?Thank you!p.s. what would you say scales better - default / empty values or completely missing fields?", "username": "RENOVATIO" }, { "code": "", "text": "Validators of course help by establishing the validity of data entered.\nBut they also make you explain your design to yourself. It’s a sanity check on the developer.In the MongoDB model of databasing (really, document management) I’d go for missing fields rather than emulating a RDBMS by having, effectively, NULLs in fields.", "username": "Jack_Woehr" }, { "code": "", "text": "Thank you very much for your kindness and for taking a moment to share your expertise.Much appreciated", "username": "RENOVATIO" }, { "code": "", "text": "You’re welcome … have fun … and when modelling solutions, there are generally no wrong answers, just better answers.", "username": "Jack_Woehr" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Is it okay to have non-symmetrical documents in a collection?
2022-06-03T13:34:46.679Z
Is it okay to have non-symmetrical documents in a collection?
1,734
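A minimal sketch of the validator idea mentioned in the replies: a $jsonSchema validator that pins down the fields every product must have while leaving user-setting-dependent fields optional, so some documents may legitimately omit them. Database, collection, and field names are illustrative assumptions.

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # placeholder URI
db = client["shop"]

# Required fields are enforced; optional fields (e.g. "discount") may simply
# be absent from documents where the user settings do not produce them.
db.create_collection(
    "products",
    validator={
        "$jsonSchema": {
            "bsonType": "object",
            "required": ["name", "price"],
            "properties": {
                "name": {"bsonType": "string"},
                "price": {"bsonType": ["double", "int", "decimal"]},
                "discount": {"bsonType": "double"},  # optional field
            },
        }
    },
)

db.products.insert_one({"name": "widget", "price": 9.99})                    # ok, no "discount"
db.products.insert_one({"name": "gizmo", "price": 19.99, "discount": 0.15})  # ok, with "discount"
```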
null
[ "queries", "performance" ]
[ { "code": "{\n \"_id\" : ObjectId(\"60619f84dc46fad865680ab8\"),\n \"uuid\" : \"47aad0e5-3e41-4951-8881-3ae70ae85bcd\",\n \"id_tb\" : ObjectId(\"60619f84dc46fad865680225\"),\n \"code\" : \"\",\n \"ser\" : \"\",\n \"mat\" : \"\",\n \"state\" : \"\",\n \"ts_start\" : ISODate(\"2018-01-01T18:46:56.000Z\"),\n \"ts_end\" : ISODate(\"2018-01-01T18:47:20.000Z\"),\n \"ts_restart\" : ISODate(\"2018-01-01T18:46:56.000Z\"),\n \"values\" : [ \n {\n \"dtype\" : \"i\",\n \"v_int\" : NumberLong(20030606),\n \"key\" : \"x\"\n }, \n {\n \"dtype\" : \"i\",\n \"v_int\" : NumberLong(1208551142),\n \"key\" : \"y\"\n },\n ...\n ]\n}\nvaluesdb.runs.find({ \"ts_start\" : { \"$gte\" : ISODate(\"2018-01-01T23:00:00Z\") }, \"ts_end\" : { \"$lte\" : ISODate(\"2018-01-01T23:59:59Z\") } }).limit(80)\nlimit(80)limit(81)_id_sermatts_startts_endcodestatets_startts_end{ts_start:1, ts_end:1}explain(\"executionStats\"){\n \"op\" : \"query\",\n \"ns\" : \"db.runs\",\n \"command\" : {\n \"find\" : \"runs\",\n \"filter\" : {\n \"ts_start\" : {\n \"$gte\" : ISODate(\"2018-01-01T23:00:00.000Z\")\n },\n \"ts_end\" : {\n \"$lte\" : ISODate(\"2018-01-01T23:59:59.000Z\")\n }\n },\n \"limit\" : 81,\n \"comment\" : \"MyQuery\",\n \"$db\" : \"db\",\n \"lsid\" : {\n \"id\" : UUID(\"562115f5-059d-441a-96ad-e9da6702c962\")\n }\n },\n \"keysExamined\" : 248196,\n \"docsExamined\" : 248196,\n \"fromMultiPlanner\" : true,\n \"replanned\" : true,\n \"replanReason\" : \"cached plan was less efficient than expected: expected trial execution to take 81 works but it took at least 810 works\",\n \"cursorExhausted\" : true,\n \"numYield\" : 73223,\n \"nreturned\" : 80,\n \"queryHash\" : \"8927FA09\",\n \"planCacheKey\" : \"EFB63FF1\",\n \"locks\" : {\n \"ParallelBatchWriterMode\" : {\n \"acquireCount\" : {\n \"r\" : NumberLong(1)\n }\n },\n \"ReplicationStateTransition\" : {\n \"acquireCount\" : {\n \"w\" : NumberLong(73225)\n }\n },\n \"Global\" : {\n \"acquireCount\" : {\n \"r\" : NumberLong(73225)\n }\n },\n \"Database\" : {\n \"acquireCount\" : {\n \"r\" : NumberLong(73224)\n }\n },\n \"Collection\" : {\n \"acquireCount\" : {\n \"r\" : NumberLong(73224)\n }\n },\n \"Mutex\" : {\n \"acquireCount\" : {\n \"r\" : NumberLong(1)\n }\n }\n },\n \"flowControl\" : {},\n \"storage\" : {\n \"data\" : {\n \"bytesRead\" : NumberLong(37794055022),\n \"timeReadingMicros\" : NumberLong(1394728348)\n }\n },\n \"responseLength\" : 4283172,\n \"protocol\" : \"op_msg\",\n \"millis\" : 1418283,\n \"planSummary\" : \"IXSCAN { ts_end: 1 }\",\n \"execStats\" : {\n \"stage\" : \"LIMIT\",\n \"nReturned\" : 80,\n \"executionTimeMillisEstimate\" : 500618,\n \"works\" : 248197,\n \"advanced\" : 80,\n \"needTime\" : 248116,\n \"needYield\" : 0,\n \"saveState\" : 73217,\n \"restoreState\" : 73217,\n \"isEOF\" : 1,\n \"limitAmount\" : 81,\n \"inputStage\" : {\n \"stage\" : \"FETCH\",\n \"filter\" : {\n \"ts_start\" : {\n \"$gte\" : ISODate(\"2018-01-01T23:00:00.000Z\")\n }\n },\n \"nReturned\" : 80,\n \"executionTimeMillisEstimate\" : 500574,\n \"works\" : 248197,\n \"advanced\" : 80,\n \"needTime\" : 248116,\n \"needYield\" : 0,\n \"saveState\" : 73217,\n \"restoreState\" : 73217,\n \"isEOF\" : 1,\n \"docsExamined\" : 248196,\n \"alreadyHasObj\" : 0,\n \"inputStage\" : {\n \"stage\" : \"IXSCAN\",\n \"nReturned\" : 248196,\n \"executionTimeMillisEstimate\" : 707,\n \"works\" : 248197,\n \"advanced\" : 248196,\n \"needTime\" : 0,\n \"needYield\" : 0,\n \"saveState\" : 73217,\n \"restoreState\" : 73217,\n \"isEOF\" : 1,\n \"keyPattern\" : {\n \"ts_end\" : 1\n },\n 
\"indexName\" : \"ts_end\",\n \"isMultiKey\" : false,\n \"multiKeyPaths\" : {\n \"ts_end\" : []\n },\n \"isUnique\" : false,\n \"isSparse\" : false,\n \"isPartial\" : false,\n \"indexVersion\" : 2,\n \"direction\" : \"forward\",\n \"indexBounds\" : {\n \"ts_end\" : [ \n \"(true, new Date(1514851199000)]\"\n ]\n },\n \"keysExamined\" : 248196,\n \"seeks\" : 1,\n \"dupsTested\" : 0,\n \"dupsDropped\" : 0\n }\n }\n },\n \"ts\" : ISODate(\"2021-05-04T19:13:02.342Z\"),\n \"client\" : \"127.0.0.1\",\n \"allUsers\" : [],\n \"user\" : \"\"\n}\n{ts_end: 1}FETCHLIMITmongod.exe", "text": "Hi everyone,I have 36.5 million documents in my collection. They look like this:Every document has an array values with 730 subdocuments. (The average size of a run document is 53.2 KB)\nNow I want to query those documents like this:This query finishes in less than a second. But if I change limit(80) to limit(81) or anything greater than 80 it will take really long.\nI already have indices on the fields I will filter on: _id_ , ser , mat , ts_start , ts_end , code, state and a compound index for ts_start and ts_end : {ts_start:1, ts_end:1}.\nThe total size of these indices is 2.5 GB.I am using MongoDB 4.2 on a Windows Server 2012 machine with 32 GB of RAM.Here is the output of explain(\"executionStats\") on a query that takes long:What I can see is that it uses the index {ts_end: 1} and that is quite fast, but both FETCH and LIMIT stages are extremely slow.\nWhile running a query like this I could see in Windows resource monitor that mongod.exe seems to start reading from disk.Does anyone have an idea on how to make this faster? Thank you in advance.Edit: @Stennie_X Thanks for adding tags, I could not find that option when creating this topic.", "username": "cb420" }, { "code": "", "text": "I believe you could benefit from a compound index on both ts_start and ts_end, e.g.{ “ts_start”: 1, “ts_end”: 1}You may want to use -1 instead if you normally order from newest to oldest.", "username": "Roman_Gaufman" } ]
Query is very slow
2021-05-05T08:25:52.089Z
Query is very slow
6,226
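A minimal PyMongo sketch of the compound-index suggestion from the last reply, using the collection and field names from the question. Whether it actually helps depends on how selective the two date ranges are, so treat it as something to verify with explain() rather than a guaranteed fix.

```python
from datetime import datetime
from pymongo import MongoClient, ASCENDING

client = MongoClient("mongodb://localhost:27017")  # placeholder URI
runs = client["db"]["runs"]

# One compound index that can serve both range predicates, instead of the
# separate single-field indexes on ts_start and ts_end.
runs.create_index([("ts_start", ASCENDING), ("ts_end", ASCENDING)])

cursor = (
    runs.find(
        {
            "ts_start": {"$gte": datetime(2018, 1, 1, 23, 0, 0)},
            "ts_end": {"$lte": datetime(2018, 1, 1, 23, 59, 59)},
        },
        {"values": 0},  # optionally project out the large array if it is not needed
    )
    .limit(81)
)

print(cursor.explain()["queryPlanner"]["winningPlan"])
```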
null
[ "security" ]
[ { "code": "users", "text": "Hi there! I am creating a relatively basic web application that contains sensitive user information in a users table along with other information that would be used in various views on the frontend. I came across this article that discusses the security advantages of creating ROV’s for data in the database. In the article, the author specifically refers to a case where a malicious user somehow gains access to the backend server of the application, and could then use custom queries to access data on the DB. What I don’t understand is that if a user has access to the server via SSH, they could just theoretically query the database for any collection (including non-ROV collections) because the IP address is accepted on the MongoDB side.I also looked to see if there was some DB-side functionality where tables could be “write / modify only” but everything I found suggested abstracting those permissions to server-side code, which runs into the same problem as above.If this is the case, I don’t really see the advantage of creating ROV’s as it doesn’t seem to solve the use-case it is designed to solve.Clearly I am missing something here, and would love some clarification from someone more experienced ", "username": "Jake_Myers" }, { "code": "", "text": "Hi @Jake_Myers ,If the hacker only has access to the application server and the application uses a user that only allowed to query limited views they are only able to access those objectsThe full collection is inaccessible for this user , therefore a hacker will somehow needs to get access to an admin user to do so.Ty", "username": "Pavel_Duchovny" }, { "code": "", "text": "Wouldn’t the application have to be able to update the actual documents on say account creation? At some point the sensitive collections need to be updated with information and there needs to be a server that uses a user with those permissions.", "username": "Jake_Myers" } ]
Understanding the reasoning behind ROV (Read-only Views) for collections
2022-06-01T18:22:02.903Z
Understanding the reasoning behind ROV (Read-only Views) for collections
2,678
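A minimal sketch of the setup described in the replies: a read-only view plus a database user whose only privilege is find on that view, so a compromised application credential cannot read the underlying collection. All names and the password here are illustrative assumptions.

```python
from pymongo import MongoClient

# Connect as an administrative user (assumption: this account has userAdmin rights).
client = MongoClient("mongodb://admin:secret@localhost:27017/?authSource=admin")
db = client["app"]

# 1. A read-only view that exposes only non-sensitive fields of "users".
db.create_collection(
    "users_public",
    viewOn="users",
    pipeline=[{"$project": {"name": 1, "createdAt": 1}}],
)

# 2. A role that may only run find() on that view, and a user holding it.
db.command(
    "createRole",
    "viewReader",
    privileges=[
        {"resource": {"db": "app", "collection": "users_public"}, "actions": ["find"]}
    ],
    roles=[],
)
db.command("createUser", "reporting_app", pwd="changeme", roles=["viewReader"])
```

A service that has to create or update the underlying documents would connect as a different user whose role grants writes on the base collection; that separation of credentials is what the final question in the thread is pointing at.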
null
[ "aggregation", "node-js", "views" ]
[ { "code": "", "text": "I’m working on a project and some doubts about the views arose:\n1- If I have a view and it undergoes several searches and updates in the pipeline, would this cause a competition problem?\nif yes, would the best solution be to divide these views by pipeline behaviors?\n2- Another point is how can I update my views efficiently?\nIs Scheduled Triggers the best option for this?", "username": "Lucas_Amorim_Lima" }, { "code": "", "text": "Hi @Lucas_Amorim_Lima ,Does by views you refer to what we call Materialised views which pipeline ends with $merge to form a persistent collection or the regular views which just store a query and rerun it every time the view is queried?MongoDB on-demand materialized viewThanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Hi @Pavel_Duchovny\nI’m talking about regular views", "username": "Lucas_Amorim_Lima" }, { "code": "", "text": "Hi @Lucas_Amorim_Lima ,A. Regular view is just a metadata of a stored query.Updating it is basically just updating the object holding the stages definition. It doesn’t require any compute resources therefore no expected competition…Updating a view is done by the following instructions:Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "i undestood, thanks @Pavel_Duchovny, Do you have any articles to recommend?\nI’m average in mongoDb and I would like to increase my knowledge, any material that you find interesting to know will be very useful.", "username": "Lucas_Amorim_Lima" } ]
View performance and behavior
2022-05-31T12:08:14.217Z
View performance and behavior
3,054
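For reference, updating the stored pipeline of a regular view is a collMod on the view's metadata, as the reply describes. A minimal PyMongo sketch follows; the view name, source collection, and pipeline are illustrative assumptions.

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # placeholder URI
db = client["app"]

# Redefine the stored pipeline of an existing view in place.
db.command(
    "collMod",
    "orders_summary",            # the view to change (illustrative name)
    viewOn="orders",
    pipeline=[
        {"$match": {"status": "complete"}},
        {"$group": {"_id": "$customerId", "total": {"$sum": "$amount"}}},
    ],
)
```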
null
[ "dot-net", "flexible-sync", "unity" ]
[ { "code": "ending session with error: permissions have changed since last use (ProtocolErrorCode=228)\nLogs:\n[\n\"Session was active for: 0s\"\n]\nFunction Call Location:\nIE\nQuery:\n{\n\"ChatMessage\": \"(chatid == \\\"62981d9d267619e259b47ca1\\\" and chattype == 1) OR (chattype == 0)\",\n\"GameState\": \"(userid == \\\"6290f50b76a4386fc870f7f3\\\")\"\n}\nSession Metrics:\n{}\nRemote IP Address:\n(Removed)\nSDK:\nRealm Unity v10.11.1\nPlatform Version:\nUnix 4.9.227.22202282\n", "text": "Hi,since ~8 hours we are getting a lot of sync error messages, although we didn’t change anything since days.\nThe error message that appears is: “ending session with error: permissions have changed since last use (ProtocolErrorCode=228)”.Here’s an excerpt:Since as I stated we didn’t change anything at all, there must be an error on your servers. What could be the reason? Please check it as soon as possible.Thanks in advance!", "username": "MetalMonkey" }, { "code": "", "text": "Hi, this is a known effect of one of the deploys that we just made. We found an issue in the hash function that we use to compare the permissions of a user when they reconnect to sync. In some circumstances, fixing this issue will result in SDKs receiving this error, but the SDK should handle this error appropriately and reconnect to sync immediately. Please let us know if you are seeing any issues associated with this error, or just looking into why this showed up in the first place?Apologies for the change, but this was intentional on our part, should not happen again, and should have resulted in no issues other than a log in the UI and a client-reset on the device.Thanks,\nTyler", "username": "Tyler_Kaye" } ]
Sync error messages: "permissions have changed since last use (ProtocolErrorCode=228)"
2022-06-03T05:55:52.468Z
Sync error messages: "permissions have changed since last use (ProtocolErrorCode=228)"
2,818
null
[]
[ { "code": "", "text": "Hello everyone.\nI meet a problem with my app, if someone could help me.All my partions were syncing correctlly during a period of time. After adding some data in one syncing collection from atlas, that collection stop syncing. Even data that were visible with realm studio, nothing is visible now.When I open my device with android studio, realm studio see the collections but there is no data in that particular collection.Is there a recommendation about adding data in syncing collection from atlas and realm?\nPlease, can someone help me find a solution?\nThank you all.", "username": "Ody" }, { "code": "https://realm.mongodb.com/groups/5fd280184de1015b9826b7b2/apps/62992dc218b07351ca67473d/dashboard", "text": "Hi, can you send the URL of your application, it will look something like this https://realm.mongodb.com/groups/5fd280184de1015b9826b7b2/apps/62992dc218b07351ca67473d/dashboard and then I can see what might be going on. Additionally if you can provide any of the following that would be helpful:Thanks,\nTyler", "username": "Tyler_Kaye" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
My partition stopped syncing after adding data
2022-06-03T06:03:31.179Z
My partition stopped syncing after adding data
1,399
https://www.mongodb.com/…8_2_1024x739.png
[]
[ { "code": "", "text": "I just trying to sync our inventory collection to Realm Sync. The sync writing time is not what we expected. Is it normal? We use M10 and M30 for Atlas cluster but the writing time remain the same.\nimage1293×934 97.7 KB\n", "username": "shcode" }, { "code": "", "text": "after changing the provider from GCP to AWS, the write sync around 60ms.", "username": "shcode" }, { "code": "", "text": "Hi, sync times can vary a little bit depending on the workload. Important things include:Sounds like you were able to get much better performance by moving to AWS which is good to hear. We are planning on expanding Atlas Device Sync to other cloud providers soon, so stay tuned for that!", "username": "Tyler_Kaye" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Realm Sync - Is it normal that Sync -> Write needs 2s to complete?
2022-06-02T07:51:40.527Z
Realm Sync - Is it normal that Sync -> Write needs 2s to complete?
1,505
null
[ "aggregation", "crud" ]
[ { "code": "", "text": "Hello,It seems the doc does not refer to that particular return of the UpdateMany with aggregation pipeline\nhere => https://www.mongodb.com/docs/manual/reference/method/db.collection.updateMany\nand here => https://www.mongodb.com/docs/manual/tutorial/update-documents-with-aggregation-pipelineDo you think it’s possible to say to MongoDB to not return all documents from the aggregation pipeline? I have some stupid amount of documents returned and I’m a bit concerned about the next few months of my application. I’m afraid of blowing up my atlas server.", "username": "huneau_romain" }, { "code": "updateManyacknowledgedtruefalsematchedCountmodifiedCountupsertedId_id", "text": "Hello @huneau_romain, welcome to the MongoDB community forum!As per the documentation, I see that: The updateMany method returns a document that contains:You may want to include some more information about the update method you are trying (for example, the code and also a sample document of the collection , after any redaction), to figure what the issue is.", "username": "Prasad_Saya" }, { "code": "{\n \"command\": {\n \"q\": {\n \"companyId\": \"cjwrneklvn\"\n },\n \"u\": [\n {\n \"$set\": {\n \"daysLate\": {\n \"$cond\": {\n \"if\": {\n \"$ifNull\": [\n \"$closingDate\",\n false\n ]\n },\n \"then\": {\n \"$round\": [\n {\n \"$divide\": [\n {\n \"$subtract\": [\n \"$closingDate\",\n \"$dueDate\"\n ]\n },\n {\n \"$multiply\": [\n 1000,\n 3600,\n 24\n ]\n }\n ]\n }\n ]\n },\n \"else\": {\n \"$round\": [\n {\n \"$divide\": [\n {\n \"$subtract\": [\n {\n \"$date\": \"2022-06-03T02:34:03.306Z\"\n },\n \"$dueDate\"\n ]\n },\n {\n \"$multiply\": [\n 1000,\n 3600,\n 24\n ]\n }\n ]\n }\n ]\n }\n }\n }\n }\n }\n ],\n \"multi\": true,\n \"upsert\": false\n },\n \"planSummary\": [\n {\n \"IXSCAN\": {\n \"companyId\": 1,\n \"id\": 1\n }\n }\n ],\n \"keysExamined\": 104968,\n \"docsExamined\": 104968,\n \"nMatched\": 104968,\n \"nModified\": 15569,\n \"keysInserted\": 0,\n \"keysDeleted\": 0,\n \"numYields\": 827,\n \"queryHash\": \"7AE1622F\",\n \"planCacheKey\": \"FB3DF234\",\n \"locks\": {\n \"ParallelBatchWriterMode\": {\n \"acquireCount\": {\n \"r\": 828\n }\n },\n \"ReplicationStateTransition\": {\n \"acquireCount\": {\n \"w\": 828\n }\n },\n \"Global\": {\n \"acquireCount\": {\n \"w\": 828\n }\n },\n \"Database\": {\n \"acquireCount\": {\n \"w\": 828\n }\n },\n \"Collection\": {\n \"acquireCount\": {\n \"w\": 828\n }\n },\n \"Mutex\": {\n \"acquireCount\": {\n \"r\": 15570\n }\n }\n },\n \"flowControl\": {\n \"acquireCount\": 828,\n \"timeAcquiringMicros\": 850\n },\n \"storage\": {\n \"data\": {\n \"bytesRead\": 33901684,\n \"timeReadingMicros\": 66216\n }\n },\n \"millis\": 3213,\n \"v\": \"4.2.20\"\n}\n", "text": "all requetes from mongodb atlas\nimage1345×1066 134 KB\nquerry", "username": "huneau_romain" }, { "code": "planSummary \"nMatched\": 104968,\n \"nModified\": 15569,\n", "text": "@huneau_romain, the screenshot you are looking at is the Atlas UIs Profiler TAB. And, the update query shows the query filter and the update. 
The planSummary is the query plan details.This detail tell how many documents are matched and are updated/modified.Additional details about the Profiler:", "username": "Prasad_Saya" }, { "code": "1049681049680", "text": "Yes, I agree with you but the profiler told us that 104968 key was examined and 104968 docs were returned.\nI’m concerned about docs returned that have to be 0.", "username": "huneau_romain" }, { "code": "nreturned104968", "text": "I’m just figured it out that nreturned field is not present inside the MongoDB explain thing.This morning I launched the query ant I got the 104968 result. Now I relaunched it and I got nothing", "username": "huneau_romain" }, { "code": "", "text": "@huneau_romain here is the link to Query Plan output examples. You can see various fields in the output and their meaning. Hope it clarifies some things.", "username": "Prasad_Saya" } ]
UpdateMany with aggregation pipeline returns all documents updated
2022-06-03T08:36:12.578Z
UpdateMany with aggregation pipeline returns all documents updated
3,356
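A minimal PyMongo sketch of the point made in the replies: an updateMany (here in its pipeline-update form, simplified from the thread's $cond logic to $ifNull) returns only counters such as matchedCount and modifiedCount, never the documents themselves. The database and collection names are illustrative assumptions.

```python
from datetime import datetime, timezone
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # placeholder URI
coll = client["billing"]["invoices"]

result = coll.update_many(
    {"companyId": "cjwrneklvn"},
    [  # pipeline-style update, as in the thread (simplified)
        {
            "$set": {
                "daysLate": {
                    "$round": [
                        {
                            "$divide": [
                                {
                                    "$subtract": [
                                        {"$ifNull": ["$closingDate", datetime.now(timezone.utc)]},
                                        "$dueDate",
                                    ]
                                },
                                1000 * 3600 * 24,
                            ]
                        }
                    ]
                }
            }
        }
    ],
)

# No documents are returned by an update; only counters come back.
print(result.acknowledged, result.matched_count, result.modified_count)
```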
null
[ "sharding" ]
[ { "code": "", "text": "Hi guys, the question is simple: can I use reference (aka foreign key for SQL-like db) in my database with sharding?I found this article What is MongoDB Sharding and the Best Practices? that says:Since there’s no support in databases to point to entities outside the current database (well, even a different database on the same machine is not supported, so forget about a database on a different machine), the concept of foreign keys goes for a toss as well. Suddenly, the database becomes “dumb,” and data integrity is your problem.But I haven’t found other sources regarding this topic, not even on the official documentation.Thank u ", "username": "Elia_Gaviraghi" }, { "code": "", "text": "Hi @Elia_Gaviraghi and welcome in the MongoDB Community !From the 6.0 document (upcoming version of MongoDB).Starting in MongoDB 5.1, $lookup works across sharded collections.So as long as your related data are in the same database (which makes sense), you can use $lookup to join them together in a query and manipulate the data as you please in your aggregation pipeline.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "Excellent news! I am currently using 5.0.8 community edition, I’ll wait for the official release to upgrade.\nThank u so much Maxime ", "username": "Elia_Gaviraghi" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Sharding and reference
2022-06-02T12:28:27.771Z
Sharding and reference
1,654
null
[ "api" ]
[ { "code": "", "text": "I need to find a way for an external viewer to pull data using a rest API. The deployment is local using Mongo community edition 5.0. I see stuff related to atlas, but no information to pull data this way using the community edition. Any help/guidance would be greatly appreciated!", "username": "T-roy" }, { "code": "", "text": "Hello @T-roy and welcome to the community!!The Data API feature is only available for MongoDB atlas which allows to read and write to the databases using the API.\nFor more: Read and Write with Data API (Preview)\nHowever, please note that this feature is only available for Preview and hence the feature/properties might change/upgrade in the near future.Thanks\nAasawari", "username": "Aasawari" } ]
Need to pull data from community edition using API
2022-05-19T14:42:09.357Z
Need to pull data from community edition using API
2,889
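Since the Data API is Atlas-only, a self-hosted Community deployment needs its own thin HTTP layer if an external viewer must pull data over REST. Below is a minimal sketch of one such layer using Flask and PyMongo; this is one possible approach rather than a MongoDB feature, and the route, database, and collection names are illustrative assumptions.

```python
from bson import json_util
from flask import Flask, Response, request
from pymongo import MongoClient

app = Flask(__name__)
coll = MongoClient("mongodb://localhost:27017")["inventory"]["items"]  # placeholder names

@app.route("/items")
def list_items():
    # Only a whitelisted filter field is honoured, to avoid exposing raw queries.
    query = {}
    if "sku" in request.args:
        query["sku"] = request.args["sku"]
    docs = list(coll.find(query).limit(100))
    return Response(json_util.dumps(docs), mimetype="application/json")

if __name__ == "__main__":
    app.run(port=8000)
```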
null
[ "replication", "migration" ]
[ { "code": "", "text": "Dear Team,\nMy current setup is MongoDB 3.4 version with replica set ( Arbiter, Master and Slave)\nI am planning to upgrade to latest version of 5.0.2.\nI am using SSL certificate on database level.\nPlease someone advise the steps and precautions .\nNote: I am planning for parallel setup not in place upgrade.\nThank you.", "username": "Mahammad_Jilan" }, { "code": "", "text": "Hello,The best practice would be to follow a rolling upgrade schema, here you can see all the steps neccessary for an upgrade: https://www.mongodb.com/docs/manual/release-notes/5.0-upgrade-replica-set/\nIn your case you would need to perform incremental updates instead of jumping 3.4 → 5.0, for example:All replica set members must be running version 4.4. To upgrade a replica set from an 4.2-series and earlier, first upgrade all members of the replica set to the latest 5.0-series release, and then follow the procedure to upgrade from MongoDB 4.4 to 5.0.", "username": "Tin_Cvitkovic" }, { "code": "", "text": ".Thank you for the response.\nI planned to go with parallel setup with backup and restore method.\nMongoDB running on Windows OS.\nThe same steps we have to follow on Windows OS as well.Can we restore 3.4 backup onto 5.0 directly or do I need to use intermediate 4.4 version.", "username": "Mahammad_Jilan" } ]
Migrate MongoDB 3.4 to 5.0 (Parallel Migration plan)
2022-05-31T07:11:34.518Z
Migrate MongoDB 3.4 to 5.0 (Parallel Migration plan)
4,132
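Between each binary hop of the incremental upgrade path (3.4 to 3.6 to 4.0 to 4.2 to 4.4 to 5.0), the featureCompatibilityVersion has to be checked and raised before the next hop. A minimal PyMongo sketch of that step follows; the connection string is an illustrative assumption, and the full rolling-upgrade procedure in the linked release notes still applies.

```python
from pymongo import MongoClient

# Run against the primary after each binary hop.
admin = MongoClient("mongodb://admin:secret@localhost:27017/?authSource=admin").admin

current = admin.command({"getParameter": 1, "featureCompatibilityVersion": 1})
print(current["featureCompatibilityVersion"])

# Only bump FCV once every member of the replica set runs the new binaries
# and the set looks healthy, e.g. after upgrading all members to 4.4:
admin.command({"setFeatureCompatibilityVersion": "4.4"})
```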
null
[ "replication", "transactions" ]
[ { "code": "\"error\": \"NotWritablePrimary: Transaction coordinator is not a primary\",\"error\": \"HostUnreachable: Connection closed by peer\",\"errmsg\": \"operation exceeded time limit\"repository: bitnami/mongodb\ntag: 5.0.7-debian-10-r5\n\nrs0:PRIMARY> db.serverStatus().asserts\n{\n \"regular\" : 0,\n \"warning\" : 0,\n \"msg\" : 0,\n \"user\" : 8628,\n \"tripwire\" : 0,\n \"rollovers\" : 0\n}\n\nrs0:SECONDARY> db.serverStatus().asserts\n{\n \"regular\" : 0,\n \"warning\" : 0,\n \"msg\" : 0,\n \"user\" : 4999,\n \"tripwire\" : 0,\n \"rollovers\" : 0\n}\n\nrs0:SECONDARY> db.serverStatus().asserts\n{\n \"regular\" : 0,\n \"warning\" : 0,\n \"msg\" : 0,\n \"user\" : 4995,\n \"tripwire\" : 0,\n \"rollovers\" : 0\n}\n\ndb.setLogLevel(5)\n\nrs0:PRIMARY> rs.status()\n{\n \"set\" : \"rs0\",\n \"date\" : ISODate(\"2022-05-09T13:25:57.985Z\"),\n \"myState\" : 1,\n \"term\" : NumberLong(2),\n \"syncSourceHost\" : \"\",\n \"syncSourceId\" : -1,\n \"heartbeatIntervalMillis\" : NumberLong(2000),\n \"majorityVoteCount\" : 2,\n \"writeMajorityCount\" : 2,\n \"votingMembersCount\" : 3,\n \"writableVotingMembersCount\" : 3,\n \"optimes\" : {\n \"lastCommittedOpTime\" : {\n \"ts\" : Timestamp(1652102749, 1),\n \"t\" : NumberLong(2)\n },\n \"lastCommittedWallTime\" : ISODate(\"2022-05-09T13:25:49.210Z\"),\n \"readConcernMajorityOpTime\" : {\n \"ts\" : Timestamp(1652102749, 1),\n \"t\" : NumberLong(2)\n },\n \"appliedOpTime\" : {\n \"ts\" : Timestamp(1652102749, 1),\n \"t\" : NumberLong(2)\n },\n \"durableOpTime\" : {\n \"ts\" : Timestamp(1652102749, 1),\n \"t\" : NumberLong(2)\n },\n \"lastAppliedWallTime\" : ISODate(\"2022-05-09T13:25:49.210Z\"),\n \"lastDurableWallTime\" : ISODate(\"2022-05-09T13:25:49.210Z\")\n },\n \"lastStableRecoveryTimestamp\" : Timestamp(1652102708, 4),\n \"electionCandidateMetrics\" : {\n \"lastElectionReason\" : \"electionTimeout\",\n \"lastElectionDate\" : ISODate(\"2022-05-09T09:40:08.666Z\"),\n \"electionTerm\" : NumberLong(2),\n \"lastCommittedOpTimeAtElection\" : {\n \"ts\" : Timestamp(0, 0),\n \"t\" : NumberLong(-1)\n },\n \"lastSeenOpTimeAtElection\" : {\n \"ts\" : Timestamp(1652089206, 1),\n \"t\" : NumberLong(1)\n },\n \"numVotesNeeded\" : 1,\n \"priorityAtElection\" : 5,\n \"electionTimeoutMillis\" : NumberLong(10000),\n \"newTermStartDate\" : ISODate(\"2022-05-09T09:40:08.668Z\"),\n \"wMajorityWriteAvailabilityDate\" : ISODate(\"2022-05-09T09:40:08.670Z\")\n },\n \"members\" : [\n {\n \"_id\" : 0,\n \"name\" : \"mongodb-0.mongodb-headless.mongodb.svc.cluster.local:27017\",\n \"health\" : 1,\n \"state\" : 1,\n \"stateStr\" : \"PRIMARY\",\n \"uptime\" : 13549,\n \"optime\" : {\n \"ts\" : Timestamp(1652102749, 1),\n \"t\" : NumberLong(2)\n },\n \"optimeDate\" : ISODate(\"2022-05-09T13:25:49Z\"),\n \"lastAppliedWallTime\" : ISODate(\"2022-05-09T13:25:49.210Z\"),\n \"lastDurableWallTime\" : ISODate(\"2022-05-09T13:25:49.210Z\"),\n \"syncSourceHost\" : \"\",\n \"syncSourceId\" : -1,\n \"infoMessage\" : \"\",\n \"electionTime\" : Timestamp(1652089208, 1),\n \"electionDate\" : ISODate(\"2022-05-09T09:40:08Z\"),\n \"configVersion\" : 7,\n \"configTerm\" : 2,\n \"self\" : true,\n \"lastHeartbeatMessage\" : \"\"\n },\n {\n \"_id\" : 1,\n \"name\" : \"mongodb-1.mongodb-headless.mongodb.svc.cluster.local:27017\",\n \"health\" : 1,\n \"state\" : 2,\n \"stateStr\" : \"SECONDARY\",\n \"uptime\" : 13501,\n \"optime\" : {\n \"ts\" : Timestamp(1652102749, 1),\n \"t\" : NumberLong(2)\n },\n \"optimeDurable\" : {\n \"ts\" : Timestamp(1652102749, 1),\n \"t\" : NumberLong(2)\n },\n 
\"optimeDate\" : ISODate(\"2022-05-09T13:25:49Z\"),\n \"optimeDurableDate\" : ISODate(\"2022-05-09T13:25:49Z\"),\n \"lastAppliedWallTime\" : ISODate(\"2022-05-09T13:25:49.210Z\"),\n \"lastDurableWallTime\" : ISODate(\"2022-05-09T13:25:49.210Z\"),\n \"lastHeartbeat\" : ISODate(\"2022-05-09T13:25:57.796Z\"),\n \"lastHeartbeatRecv\" : ISODate(\"2022-05-09T13:25:56.955Z\"),\n \"pingMs\" : NumberLong(0),\n \"lastHeartbeatMessage\" : \"\",\n \"syncSourceHost\" : \"mongodb-0.mongodb-headless.mongodb.svc.cluster.local:27017\",\n \"syncSourceId\" : 0,\n \"infoMessage\" : \"\",\n \"configVersion\" : 7,\n \"configTerm\" : 2\n },\n {\n \"_id\" : 2,\n \"name\" : \"mongodb-2.mongodb-headless.mongodb.svc.cluster.local:27017\",\n \"health\" : 1,\n \"state\" : 2,\n \"stateStr\" : \"SECONDARY\",\n \"uptime\" : 13489,\n \"optime\" : {\n \"ts\" : Timestamp(1652102749, 1),\n \"t\" : NumberLong(2)\n },\n \"optimeDurable\" : {\n \"ts\" : Timestamp(1652102749, 1),\n \"t\" : NumberLong(2)\n },\n \"optimeDate\" : ISODate(\"2022-05-09T13:25:49Z\"),\n \"optimeDurableDate\" : ISODate(\"2022-05-09T13:25:49Z\"),\n \"lastAppliedWallTime\" : ISODate(\"2022-05-09T13:25:49.210Z\"),\n \"lastDurableWallTime\" : ISODate(\"2022-05-09T13:25:49.210Z\"),\n \"lastHeartbeat\" : ISODate(\"2022-05-09T13:25:57.347Z\"),\n \"lastHeartbeatRecv\" : ISODate(\"2022-05-09T13:25:56.084Z\"),\n \"pingMs\" : NumberLong(0),\n \"lastHeartbeatMessage\" : \"\",\n \"syncSourceHost\" : \"mongodb-1.mongodb-headless.mongodb.svc.cluster.local:27017\",\n \"syncSourceId\" : 1,\n \"infoMessage\" : \"\",\n \"configVersion\" : 7,\n \"configTerm\" : 2\n }\n ],\n \"ok\" : 1,\n \"$clusterTime\" : {\n \"clusterTime\" : Timestamp(1652102749, 1),\n \"signature\" : {\n \"hash\" : BinData(0,\"RWRwa4dwXKGCqa4L4iyqbRg1dIw=\"),\n \"keyId\" : NumberLong(\"7095669105549639685\")\n }\n },\n \"operationTime\" : Timestamp(1652102749, 1)\n}\n{\n \"t\": {\n \"$date\": \"2022-05-09T19:14:04.032+00:00\"\n },\n \"s\": \"D1\",\n \"c\": \"-\",\n \"id\": 23074,\n \"ctx\": \"conn5219\",\n \"msg\": \"User assertion\",\n \"attr\": {\n \"error\": \"NotWritablePrimary: Transaction coordinator is not a primary\",\n \"file\": \"src/mongo/db/s/transaction_coordinator_service.cpp\",\n \"line\": 286\n }\n}\n\n{\n \"t\": {\n \"$date\": \"2022-05-09T19:14:02.864+00:00\"\n },\n \"s\": \"D3\",\n \"c\": \"-\",\n \"id\": 4892201,\n \"ctx\": \"conn3436\",\n \"msg\": \"Internal assertion\",\n \"attr\": {\n \"error\": {\n \"code\": 262,\n \"codeName\": \"ExceededTimeLimit\",\n \"errmsg\": \"operation exceeded time limit\"\n },\n \"location\": \"{fileName:\\\"src/mongo/util/interruptible.h\\\", line:395, functionName:\\\"operator()\\\"}\"\n }\n}\n{\n \"t\": {\n \"$date\": \"2022-05-09T19:19:16.329+00:00\"\n },\n \"s\": \"D3\",\n \"c\": \"-\",\n \"id\": 4892201,\n \"ctx\": \"conn8\",\n \"msg\": \"Internal assertion\",\n \"attr\": {\n \"error\": {\n \"code\": 262,\n \"codeName\": \"ExceededTimeLimit\",\n \"errmsg\": \"operation exceeded time limit\"\n },\n \"location\": \"{fileName:\\\"src/mongo/util/interruptible.h\\\", line:395, functionName:\\\"operator()\\\"}\"\n }\n}\n\n{\n \"t\": {\n \"$date\": \"2022-05-09T19:19:09.603+00:00\"\n },\n \"s\": \"D1\",\n \"c\": \"-\",\n \"id\": 23074,\n \"ctx\": \"conn7151\",\n \"msg\": \"User assertion\",\n \"attr\": {\n \"error\": \"HostUnreachable: Connection closed by peer\",\n \"file\": \"src/mongo/transport/service_state_machine.cpp\",\n \"line\": 536\n }\n}\n{\n \"t\": {\n \"$date\": \"2022-05-09T19:21:31.468+00:00\"\n },\n \"s\": \"D1\",\n \"c\": \"-\",\n 
\"id\": 23074,\n \"ctx\": \"conn7137\",\n \"msg\": \"User assertion\",\n \"attr\": {\n \"error\": \"HostUnreachable: Connection closed by peer\",\n \"file\": \"src/mongo/transport/service_state_machine.cpp\",\n \"line\": 536\n }\n}\n\n{\n \"t\": {\n \"$date\": \"2022-05-09T19:21:31.468+00:00\"\n },\n \"s\": \"D2\",\n \"c\": \"NETWORK\",\n \"id\": 22986,\n \"ctx\": \"conn7137\",\n \"msg\": \"Session from remote encountered a network error during SourceMessage\",\n \"attr\": {\n \"remote\": \"127.0.0.1:50380\",\n \"error\": {\n \"code\": 6,\n \"codeName\": \"HostUnreachable\",\n \"errmsg\": \"Connection closed by peer\"\n }\n }\n}\n", "text": "I have setup a 3 node Replica Set within Kubernetes. I noticed there was a very high number of User Asserts and I’ve listed them about below from the logs after raising the log level to 5.On the Primary they seem to be : \"error\": \"NotWritablePrimary: Transaction coordinator is not a primary\",On the Secondary’s they seem to be : \"error\": \"HostUnreachable: Connection closed by peer\",and then on all of them Internal assertion : \"errmsg\": \"operation exceeded time limit\"mongodb-0mongodb-1mongodb-2", "username": "Tim_Pynegar" }, { "code": "HostUnreachable: Connection closed by peeroperation exceeded time limit", "text": "Hi @Tim_PynegarI noticed there was a very high number of User Asserts and I’ve listed them about below from the logs after raising the log level to 5.Log level 5 is the maximum verbosity level, so it will include a lot of information that could help with debugging a specific issue. The user assertions you posted doesn’t seem like they’re interfering with normal operations. For example, HostUnreachable: Connection closed by peer would mean that a connection was closed unexpectedly by a client, operation exceeded time limit is something hitting a timeout, etc.I don’t believe that those messages means that anything is wrong with your deployment. However, if you set the log level back to 0 and resume your normal operations, are you seeing any issues?Best regards\nKevin", "username": "kevinadi" } ]
User Asserts on Replica Set running on Kubernetes
2022-05-09T19:32:10.880Z
User Asserts on Replica Set running on Kubernetes
2,709
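A minimal PyMongo sketch of the follow-up suggestion in the last reply: put the log verbosity back to its default and keep an eye on the assert counters from serverStatus. The connection string is an illustrative assumption.

```python
from pymongo import MongoClient

admin = MongoClient("mongodb://localhost:27017").admin  # placeholder URI

# Put global log verbosity back to the default (0); level 5 is extremely chatty.
admin.command({"setParameter": 1, "logComponentVerbosity": {"verbosity": 0}})

# The same counters shown with db.serverStatus().asserts in the thread:
print(admin.command("serverStatus")["asserts"])
```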
null
[ "node-js", "crud", "mongoose-odm", "time-series" ]
[ { "code": "router.post(\"/insert-many\", async (req, res, next) => {\n try {\n const timeSeries = req.body;\n\n BMTempsTimeSeries.insertMany(timeSeries)\n .then((result) => {\n res.status(200).json({ result });\n })\n .catch((error) => {\n res.status(200).json({ error });\n });\n } catch (err) {\n next(err);\n }\n});\nimport mongoose from \"mongoose\";\n\nconst mongoSchema = new mongoose.Schema(\n {\n timestamp: String,\n A1: String,\n A2: String,\n A3: String,\n A4: String,\n A5: String,\n A6: String,\n A7: String,\n A8: String,\n A9: String,\n A10: String,\n A11: String,\n A12: String,\n B1: String,\n B2: String,\n B3: String,\n B4: String,\n B5: String,\n B6: String,\n B7: String,\n B8: String,\n B9: String,\n B10: String,\n B11: String,\n B12: String,\n },\n {\n timeseries: {\n timeField: \"timestamp\",\n granularity: \"seconds\",\n },\n }\n);\n\nexport const BMTempsTimeSeries = mongoose.models.BMTempsTimeSeries \n\n", "text": "Hey everyone.New to MongoDB but have only heard excellent things so I am excited!I’m using Azure Cosmos DB API for MongoDB.My business recently required the need for recording time-series data. Specifically, temperatures (from 24 different points) every 1 to 2 seconds for 8 hours a day. AKA millions of documents. We orginially recorded to CSV format but quickly discovered timeseries and MongoDB were a much better option.As a result of this late realization, I have about 300,000 temperature points already record to a CSV (new points are being stored directly to the database). So I need to insert these 300,000. However, my insertMany functions take an absurd amount of time. I have tried multiple POST requests with smaller chunks, different schemas, a collection for each point, all points under 1 collection etc. Nothing works. Inserting all 300,000 would take hours.I am sure I am doing something fundamentally wrong. I can wait the hours, but seems wrong.My insert many function:My model with each temperature channel:", "username": "Daniel_Smyth" }, { "code": "mongoimport", "text": "Hello @Daniel_Smyth, welcome to the MongoDB community forum!In case you are loading the data from the CSV into the database, I suggest use the mongoimport command line utility. This tool allows loading data efficiently from CSV and JSON files into the database.", "username": "Prasad_Saya" }, { "code": "", "text": "Hey Prasad, thanks for quick response. Processing/sorting is required before storing in database, I had a script written for this already. But guess I can do this in the CSV.I’m more concered with the insertMany processing time. See image, 13 seconds to insert 726 documents. Is this typical?", "username": "Daniel_Smyth" }, { "code": "", "text": "I’m more concered with the insertMany processing time. See image, 13 seconds to insert 726 documents. Is this typical?I don’t know if its normal or not. You are accessing the database over a network via some APIs and some kind of web/app server? These layers can affect the performance.", "username": "Prasad_Saya" }, { "code": "", "text": "Correct, access database on Azure over network via API. Only concerned because I seen others inserting 100,000 documents in the same time. Maybe a local instance of the DB is better", "username": "Daniel_Smyth" } ]
Inserting 15,000 time-series documents takes 15-20 minutes
2022-06-03T02:04:29.332Z
Inserting 15,000 time-series documents takes 15-20 minutes
3,654
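For the one-off CSV backfill discussed above, batching the writes and sending them unordered usually keeps driver-side overhead down; storing timestamps as real dates and readings as numbers also keeps the documents useful for a time-series workload. A minimal PyMongo sketch under those assumptions; file, database, and collection names are illustrative, and Cosmos DB's MongoDB API may throttle differently from a genuine MongoDB deployment.

```python
import csv
from datetime import datetime
from pymongo import MongoClient

coll = MongoClient("mongodb://localhost:27017")["plant"]["bm_temps"]  # placeholder names

BATCH = 5000
batch = []

with open("temperatures.csv", newline="") as f:
    for row in csv.DictReader(f):
        doc = {"timestamp": datetime.fromisoformat(row["timestamp"])}
        # Store readings as numbers rather than strings so they stay compact and sortable.
        doc.update({k: float(v) for k, v in row.items() if k != "timestamp"})
        batch.append(doc)
        if len(batch) == BATCH:
            # ordered=False lets the server keep going past individual failures
            # instead of stopping the whole batch at the first error.
            coll.insert_many(batch, ordered=False)
            batch.clear()

if batch:
    coll.insert_many(batch, ordered=False)
```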
null
[ "atlas-functions" ]
[ { "code": "", "text": "Is there a way to return the document relationship information when using .find method?\nInstead of getting just the ID in the response is there a way to get all of the information in the related document?", "username": "Paula_farrugia" }, { "code": "", "text": "Hi @Paula_farrugia,You are looking for this:Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Can you return all relationship information using .find?
2022-06-01T16:59:39.743Z
Can you return all relationship information using .find?
1,672
null
[ "aggregation", "queries", "java" ]
[ { "code": "", "text": "Hi,We use Java 11+ Spring-boot 2.6.0 and MongoCk.We want to create search index automatically with mongock.It’s possible to create with MongoTemplate (MongoTemplate (Spring Data MongoDB 4.0.1 API)) the search index?Regards, Tobias", "username": "Tobias_Letschka" }, { "code": "", "text": "Hi @Tobias_Letschka and welcome in the MongoDB Community !Atlas Search is a MongoDB Atlas specific feature that’s not available elsewhere because it requires some extra servers & config to host the Lucene indexes + sync processes (transparent for the users).MongoTemplate is specific to MongoDB but not Atlas specific features. So I doubt it’s supported directly in there.Also, it’s also not possible to create an Atlas Search index directly from a MongoDB supported driver (for the same reasons).Currently, if you want to create an Atlas Search index, you have to create it using the:See the doc:Create an Atlas Search index using the Atlas UI, Atlas Search API, Atlas CLI, or Terraform.If Mongock supports API calls, I would probably use Atlas Search API. If it supports system calls, you can use the Atlas CLI.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "Hi @MaBeuLux88 thank you the response. It was really helpful.Regards, Tobias", "username": "Tobias_Letschka" }, { "code": "", "text": "Hi from Mongock team,Just wanted to add that Mongock supports API calls as well as system calls, so you shouldn’t have any problem with this.", "username": "Antonio_Perez_Dieppa" }, { "code": "", "text": "Hi @Antonio_Perez_Dieppa ,do you have any example?", "username": "Tobias_Letschka" }, { "code": "", "text": "Hi @Tobias_Letschka ,what I meant is that Mongock doesn’t stop you from using whatever you want to use(MongoTemplate, MongoDatabase, Custom repositories or any other dependency). If you are using Springboot, you just need to inject it into the Spring context and use it in the Mongock ChangeUnit(legacy ChangeLog) as you would do with the MongoTemplate.", "username": "Antonio_Perez_Dieppa" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Create search index java + Spring-boot + MongoCK
2022-05-30T10:52:17.189Z
Create search index java + Spring-boot + MongoCK
5,016
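If Mongock can make HTTP calls, the Atlas Search index creation mentioned in the replies can be driven through the Atlas Administration API. The sketch below uses Python with digest-authenticated API keys; the endpoint path, request body, and every identifier in it are assumptions to verify against the current Atlas Administration API reference before relying on them.

```python
import requests
from requests.auth import HTTPDigestAuth

# All of these values are placeholders to fill in from your own Atlas project.
PUBLIC_KEY, PRIVATE_KEY = "pub-key", "priv-key"
GROUP_ID, CLUSTER = "5f1a0000000000000000abcd", "Cluster0"

url = (
    f"https://cloud.mongodb.com/api/atlas/v1.0/"
    f"groups/{GROUP_ID}/clusters/{CLUSTER}/fts/indexes"
)
body = {
    "database": "catalog",
    "collectionName": "products",
    "name": "default",
    "mappings": {"dynamic": True},
}

resp = requests.post(url, json=body, auth=HTTPDigestAuth(PUBLIC_KEY, PRIVATE_KEY))
resp.raise_for_status()
print(resp.json())
```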
null
[ "queries" ]
[ { "code": "", "text": "I have updated a couple of documents via a wrong query. My query was very generic and have updated a couple of documents. How do I see the ones I’ve updated via the query I have ran?", "username": "Blaise" }, { "code": "db.people.updateMany(\n { lastName: \"Smith\" }, \n { $set: { country: \"UK\" } } \n)\n{ lastName: \"Smith\" }{ $set: { country: \"UK\" } }db.people.find({ lastName: \"Smith\" })", "text": "How do I see the ones I’ve updated via the query I have ran?A general update statement takes the form, for example, in the following update operation all people with last name “Smith” have their country changed to “UK”:In the above query:The query filter tells which documents are modified and the update tells what fields are updated with what data.So, if you run a query, like the one below, it lists all the documents the previous update has changed:db.people.find({ lastName: \"Smith\" })", "username": "Prasad_Saya" }, { "code": "use local\ndb.oplog.rs.find()\n", "text": "And, if you do not remember the query, there are some things to try. But I am not that knowledgeable this are some ideas…Not valid for standalones as the oplog is not there.A capped collection that stores an ordered history of logical writes to a MongoDB database.The oplog is found in the database local, collection oplog, you would type something like:I believe.You could inspect the log file also.if you run through terminal, just uparrow and see if it is in the history of commands.", "username": "Mah_Neh" }, { "code": "", "text": "Check the log fileAre you sure the update operations are written to the server logs?", "username": "Prasad_Saya" }, { "code": "", "text": "Great question! You are right, they are not.I thought the “write” part would be there but it is not.", "username": "Mah_Neh" }, { "code": "findOne()Atlas local> db.oplog.rs.find()\n[\n {\n op: 'i',\n ns: 'chatApp.messages',\n ui: UUID(\"ec5c9f89-2f0b-4ee5-acfd-ea350fd409e1\"),\n o: { _id: ObjectId(\"6298b40f381f656739e7f74e\"), toRemove: true },\n },\n {\n \n op: 'd',\n o: { _id: ObjectId(\"6298b40f381f656739e7f74e\") },\n prevOpTime: { ts: Timestamp({ t: 0, i: 0 }), t: Long(\"-1\") }\n },\n {\n op: 'u',\n ns: 'chatApp.messages',\n o: { '$v': 2, diff: { u: { tid: 'rem' } } },\n }\n]\n", "text": "Oops, they are indeed logged, I was just using findOne() on the first check.You will see it like this:You can see “i” for insert, “d” for delete and “u” for update and the queries/body carried out For the system records, I am not fully sure, i think this depend on the LogLevel.", "username": "Mah_Neh" }, { "code": "mongoshmongoshmongo", "text": "Another place to look for query details is the mongosh logs. This is useful if you are using the mongosh for running your queries. The details from logs can be viewed using instructions from: Retrieve Shell Logs.In case you are using the older mongo shell, similar logs are available.", "username": "Prasad_Saya" } ]
I made a wrong update query in MongoDB
2022-06-02T08:25:09.279Z
I made a wrong update query in MongoDB
2,421
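On a replica set, the oplog inspection suggested in the replies can be done from any driver. A minimal PyMongo sketch that lists recent update entries for one namespace; the namespace is an illustrative assumption, reading the local database requires appropriate privileges, and this does not work on a standalone because there is no oplog there.

```python
from pymongo import MongoClient, DESCENDING

client = MongoClient("mongodb://localhost:27017")  # must be a replica-set member
oplog = client["local"]["oplog.rs"]

# Most recent update ('u') entries against one namespace (illustrative name).
for entry in (
    oplog.find({"op": "u", "ns": "mydb.people"})
    .sort("$natural", DESCENDING)
    .limit(20)
):
    # "o2" carries the _id of the updated document, "o" the modification applied.
    print(entry["ts"], entry.get("o2") or entry.get("o"))
```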
null
[]
[ { "code": "", "text": "MongoDB shell version v3.6.8connecting to: mongodb://127.0.0.1:270172022-05-27T03:29:16.141-0700 W NETWORK [thread1] Failed to connect to 127.0.0.1:27017, in(checking socket for error after poll), reason: Connection refused2022-05-27T03:29:16.141-0700 E QUERY [thread1] Error: couldn’t connect to server 127.0.0.1:27017, connection attempt failed :connect@src/mongo/shell/mongo.js:257:13@(connect):1:6exception: connect failed", "username": "Stuart_S" }, { "code": "", "text": "I am unable to access mongo shell", "username": "Stuart_S" }, { "code": "", "text": "Your issue is not clear.Your title is Mongo server not starting but you show the output of starting mongosh. These are 2 different things.When you getConnection refusedwhen starting mongosh, it means that mongod is not running at the given address.You have to start mongod first before you try to connect.", "username": "steevej" }, { "code": "", "text": "Sorry newbie error . i tried this but i get .\nsudo service mongod start\nFailed to start mongod.service: Unit mongod.service not found.Actually my main query is - i have to install mongodb server and shell on a machine. but since it has no internet access and the repo contains only a older version. I am transferring .deb packages to the machine and then doing sudo apt-get install the deb packages.\nLike i am not even sure if i am doing the right thing . Please do help me in this regard.", "username": "Stuart_S" }, { "code": "", "text": "service mongod statusUnit mongod.service could not be found.", "username": "Stuart_S" }, { "code": "", "text": "You may always start mongod manually.Otherwise, I would suggest that you install mongod on a machine that has Internet access and then copy over the mongod.service from that machine.My recommendation is to use Atlas.", "username": "steevej" }, { "code": "", "text": "so thats the only option is it ?\nwhat are the steps can i do to manually get all required things like mongo server, shellWhats the advantage of using atlas?", "username": "Stuart_S" }, { "code": "", "text": "What is your os?\nDo you have mongod binary?\nJust issue mongod on command line and see.It should bring up mongod on default port 27017 provided the default dirpath exists\nIf it is up you can connect by issuing mongo\nor you can spinup your mongod from command line\nmongod --port --dbpath --logpath --fork(need to pass values for first 3 parameters)Atlas is a cloud hosted environment where you can create your Sandbox cluster and start using mongodb\nAll you have to do is create your account,create your Sandbox cluster,load sample data.Once you are familiar you can create your own db/collections\nPlease visit mongodb documentation for more details", "username": "Ramachandra_Tummala" }, { "code": "", "text": "let me rephrase. 
i want to setup mongodb server on one machine with no internet access.So i went to this page MongoDB Community Downloads | MongoDB\ndo i download all this files??", "username": "Stuart_S" }, { "code": "", "text": "Command :mongod --port 2707 —dbpath /var/lib/mongodb --logpath /var/log/mongodb/mongod.log --forkOUTPUT\nabout to fork child process, waiting until server is ready for connections.\nforked process: 265155\nERROR: child process failed, exited with 1\nTo see additional information in this output, start without the “–fork” option.mongod --port 2707 —dbpath /var/lib/mongodb --logpath /var/log/mongodb/mongod.log --fork\nabout to fork child process, waiting until server is ready for connections.\nforked process: 265155\nERROR: child process failed, exited with 1\nTo see additional information in this output, start without the “–fork” option.", "username": "Stuart_S" }, { "code": "", "text": "Output of mongod{“t”:{\"$date\":“2022-05-29T22:52:40.682+05:30”},“s”:“I”, “c”:“NETWORK”, “id”:4915701, “ctx”:\"-\",“msg”:“Initialized wire specification”,“attr”:{“spec”:{“incomingExternalClient”:{“minWireVersion”:0,“maxWireVersion”:13},“incomingInternalClient”:{“minWireVersion”:0,“maxWireVersion”:13},“outgoing”:{“minWireVersion”:0,“maxWireVersion”:13},“isInternalClient”:true}}}\n{“t”:{\"$date\":“2022-05-29T22:52:40.682+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:23285, “ctx”:“main”,“msg”:“Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols ‘none’”}\n{“t”:{\"$date\":“2022-05-29T22:52:40.682+05:30”},“s”:“W”, “c”:“ASIO”, “id”:22601, “ctx”:“main”,“msg”:“No TransportLayer configured during NetworkInterface startup”}\n{“t”:{\"$date\":“2022-05-29T22:52:40.682+05:30”},“s”:“I”, “c”:“NETWORK”, “id”:4648601, “ctx”:“main”,“msg”:“Implicit TCP FastOpen unavailable. 
If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize.”}\n{“t”:{\"$date\":“2022-05-29T22:52:40.701+05:30”},“s”:“W”, “c”:“ASIO”, “id”:22601, “ctx”:“main”,“msg”:“No TransportLayer configured during NetworkInterface startup”}\n{“t”:{\"$date\":“2022-05-29T22:52:40.701+05:30”},“s”:“I”, “c”:“REPL”, “id”:5123008, “ctx”:“main”,“msg”:“Successfully registered PrimaryOnlyService”,“attr”:{“service”:“TenantMigrationDonorService”,“ns”:“config.tenantMigrationDonors”}}\n{“t”:{\"$date\":“2022-05-29T22:52:40.701+05:30”},“s”:“I”, “c”:“REPL”, “id”:5123008, “ctx”:“main”,“msg”:“Successfully registered PrimaryOnlyService”,“attr”:{“service”:“TenantMigrationRecipientService”,“ns”:“config.tenantMigrationRecipients”}}\n{“t”:{\"$date\":“2022-05-29T22:52:40.701+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:5945603, “ctx”:“main”,“msg”:“Multi threading initialized”}\n{“t”:{\"$date\":“2022-05-29T22:52:40.701+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:4615611, “ctx”:“initandlisten”,“msg”:“MongoDB starting”,“attr”:{“pid”:248208,“port”:27017,“dbPath”:\"/data/db\",“architecture”:“64-bit”,“host”:“bng-emake-10a”}}\n{“t”:{\"$date\":“2022-05-29T22:52:40.701+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:23403, “ctx”:“initandlisten”,“msg”:“Build Info”,“attr”:{“buildInfo”:{“version”:“5.0.8”,“gitVersion”:“c87e1c23421bf79614baf500fda6622bd90f674e”,“openSSLVersion”:“OpenSSL 1.1.1f 31 Mar 2020”,“modules”:[],“allocator”:“tcmalloc”,“environment”:{“distmod”:“ubuntu2004”,“distarch”:“x86_64”,“target_arch”:“x86_64”}}}}\n{“t”:{\"$date\":“2022-05-29T22:52:40.701+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:51765, “ctx”:“initandlisten”,“msg”:“Operating System”,“attr”:{“os”:{“name”:“Ubuntu”,“version”:“20.04”}}}\n{“t”:{\"$date\":“2022-05-29T22:52:40.701+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:21951, “ctx”:“initandlisten”,“msg”:“Options set by command line”,“attr”:{“options”:{}}}\n{“t”:{\"$date\":“2022-05-29T22:52:40.702+05:30”},“s”:“E”, “c”:“NETWORK”, “id”:23024, “ctx”:“initandlisten”,“msg”:“Failed to unlink socket file”,“attr”:{“path”:\"/tmp/mongodb-27017.sock\",“error”:“Operation not permitted”}}\n{“t”:{\"$date\":“2022-05-29T22:52:40.702+05:30”},“s”:“F”, “c”:\"-\", “id”:23091, “ctx”:“initandlisten”,“msg”:“Fatal assertion”,“attr”:{“msgid”:40486,“file”:“src/mongo/transport/transport_layer_asio.cpp”,“line”:1019}}\n{“t”:{\"$date\":“2022-05-29T22:52:40.702+05:30”},“s”:“F”, “c”:\"-\", “id”:23092, “ctx”:“initandlisten”,“msg”:\"\\n\\n***aborting after fassert() failure\\n\\n\"}", "username": "Stuart_S" }, { "code": "", "text": "Check ls -lrt /tmp/mongodb-27017.sock.It should be owned by mongod\nCould be permissions issue\nmongod is unable to remove the tmp file.May be it was started by a different user before", "username": "Ramachandra_Tummala" }, { "code": "", "text": "ls -lrt /tmp/mongodb-27017.socksrwx------ 1 mongodb mongodb 0 May 29 23:45 /tmp/mongodb-27017.sock", "username": "Stuart_S" }, { "code": "", "text": "Shutdown all mongods if any running\nremove the file and try to start mongod again\nThe port you gave in your command is not correct 2707.It should be 27017\nAlso try to use a totally new dirpath and logpath to avoid clash with previous runs of mongod\nLike your homedir or whatever path you give mongod shoulld be able to write to that directory", "username": "Ramachandra_Tummala" }, { "code": "", "text": "hello, please go to task manager, then service and make sure mongodb is running\n\nCmongoresl1596×833 151 KB\n\nif not, run it ,they try your campus or shell for connection.it worked for me!", "username": "Jjumba_Eric_Benjamin" } ]
Mongo server not starting
2022-05-27T10:30:27.265Z
Mongo server not starting
7,068
null
[ "java", "spring-data-odm", "morphia-odm" ]
[ { "code": "", "text": "I have Spring Boot application. I am using Morphia to connect to Mongodb database. I want to log when a connection is acquired by my Java Thread & when it gets released. Our plan is to log the pattern & use ELK to check if some request is doing long running operation on DB.\nBasically I am looking something similar to this article, but it is for Hibernate with MySQL.", "username": "MdDas123" }, { "code": "100mongosh", "text": "Hello @MdDas123,Welcome to the community!! If I understand the question correctly, you need a way to log connection start & finish from your Java application, as per the example from the blog post you posted. Is this correct?If yes, then if I’m not mistaken the answer would be Java-specific instead of MongoDB-specific, and the solution you’re looking for would not be unique to MongoDB’s Java driver. Having said that, if the ultimate goal is to determining long-running operations (here I assume you mean long-running MongoDB queries initiated by your Java app), then there are two ways MongoDB may be able to help you.Use MongoDB logs - MongoDB maintains a running log of events, including entries such as incoming connections, commands run, and issues encountered. Generally, log messages are useful for diagnosing issues, monitoring your deployment, and tuning performance. You can also customize the profiling level by using db.setProfilingLevel(). You might find the profiling level 1 and the slowms option relevant to your use case.If you are using Atlas then you can use Performance Advisor - The Performance Advisor monitors any operation with a query predicate that MongoDB considers slow and suggests new indexes to improve query performance. For the selected host and time period, the Performance Advisor evaluates up to the 20,000 most recent slow queries found in the logs. It recognizes a query as slow if it takes longer to execute than the value of slowOpThresholdMs. By default, this value is 100 milliseconds. You can change the threshold with either the profile command or the db.setProfilingLevel() mongosh method.Thanks,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to log Mongodb connection acquire & release time in Java
2022-05-17T16:11:27.835Z
How to log Mongodb connection acquire &amp; release time in Java
4,484
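The profiler approach suggested in the thread above can also be scripted outside of Java. Below is a minimal pymongo sketch (the database name `appdb` and the 100 ms threshold are placeholder assumptions) that enables profiling level 1 and then reads the slowest recorded operations back from `system.profile`:

```python
from pymongo import MongoClient, DESCENDING

client = MongoClient("mongodb://localhost:27017")  # assumed local deployment
db = client["appdb"]  # hypothetical database name

# Profile level 1 = record only operations slower than slowms.
db.command({"profile": 1, "slowms": 100})

# The profiler writes into the capped collection <db>.system.profile.
for op in db["system.profile"].find().sort("millis", DESCENDING).limit(5):
    print(op["ts"], op["op"], op.get("ns"), f'{op["millis"]} ms')
```

The same profiler data is what Atlas Performance Advisor and the MongoDB logs draw on, so it works regardless of which driver (Morphia, pymongo, etc.) issued the slow operation.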
https://www.mongodb.com/…8014cbd965a.jpeg
[]
[ { "code": "", "text": "Need to be able to access data remotely. I have set up port forwarding on my router and also followed a tutorial hereThis tutorial walks you through configuring a MongoDB installation to securely allow access from a trusted remote computer. This will involve updating your f…Ufortunately, I am unable to connect and get an error “MongoServerSelectionError: connection timed out”. I have the bindIP set to 0.0.0.0. I have tried everything I can find but I cannot figure this out. I really need help with this. Thanks in advance", "username": "T-roy" }, { "code": "", "text": "Hi @T-roy,Try bindIpAll instead of 0.0.0.0. Both are terrible solutions but at least this will get out of the way.Then it can be anything else. Firewall, antivirus, OS misconfiguration, …Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "Appreciate the reply, @MaBeuLux88 . I tried your idea but still getting the timeout as mentioned above. I know this is a terrible idea, but it’s only temporary until I can find the culprit. Is there any way to set the driver to not timeout when attempting the connection? Or might you have any other possible ideas to this scenario?", "username": "T-roy" }, { "code": "", "text": "You can set the different timeout values in the driver to biggest values. But if you are never able to get a connection through, this isn’t the heart of the problem here.", "username": "MaBeuLux88" }, { "code": "", "text": "The issue ended up being that my home network is behind a CGNAT. Spoke with my ISP and they were able to fix this. Also, I had to switch back to 0.0.0.0 instead of ALL as this was not allowing for connection. Thanks @MaBeuLux88 for your guidance.", "username": "T-roy" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Connecting to MongoDB community edition from external IP
2022-05-27T18:46:35.648Z
Connecting to MongoDB community edition from external IP
2,225
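On the driver side, the timeout the reporter asked about is controlled by options such as `serverSelectionTimeoutMS`. A small pymongo sketch (host, port and credentials are placeholders) that raises the timeout and surfaces the underlying error, which is usually a network-path problem such as a firewall, NAT/CGNAT or a wrong bindIp rather than a driver issue:

```python
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError

# Hypothetical public address of the home server; adjust as needed.
uri = "mongodb://user:pass@my-home-server.example.com:27017/?authSource=admin"

client = MongoClient(
    uri,
    serverSelectionTimeoutMS=60000,  # wait up to 60 s before giving up
    connectTimeoutMS=20000,
)

try:
    client.admin.command("ping")
    print("connected")
except ServerSelectionTimeoutError as exc:
    print("still unreachable:", exc)
```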
null
[ "aggregation", "queries", "python" ]
[ { "code": "g_reportszip_vendorfind()group()$matchcoll = mongo_conn['all_reports']['zip_vendor']\nresult = coll.aggregate([\n {\n '$match': {\n 'g_client_machinename': 'foo_client'\n },\n '$match': {\n \"g_uploaded_at\": {\n '$gte': dparser.parse(date_from, fuzzy=True),\n '$lte': dparser.parse(date_to, fuzzy=True)\n },\n },\n },\n {\n '$project': {\n 'MyDate': {\n '$dateToString' : {\n 'format': '%Y-%m-%d',\n 'date': {\n '$convert': {\n 'input' : f'${txn_date_feature}',\n 'to' : 'date'\n }\n }\n }\n },\n 'g_client_machinename': 1,\n 'g_vendor_machinename': 1,\n }\n },\n { \n '$group': { \n '_id': '$MyDate', \n 'count': {'$sum': 1 },\n 'g_client_machinename': {'$first': '$g_client_machinename'},\n 'g_vendor_machinename': {'$first': '$g_vendor_machinename'},\n } \n },\n ])\nfoo_client$matchfoo_client'g_client_machinename': {'$first': '$g_client_machinename'}> show dbs\nadmin 0.000GB\nconfig 0.000GB\ng_reports 0.070GB\n[...]\n\n> use g_reports\nswitched to db g_reports\n> show collections\n__schema__\nzip_vendor \n[...]\n(g_client_machinename, g_vendor_machinename)> db.zip_vendor.findOne()\n{\n \"_id\" : ObjectId(\"60a40c26184e56200062257f\"),\n \"date\" : ISODate(\"2021-05-10T08:45:00Z\"),\n \"checkout id\" : \"9bf04b85-f7b0-4a62-b347-08944437a549\",\n \"order number\" : \"5\",\n \"your_store_id\": \"this is a foo client alias\",\n \"user id\" : \"177ff067-86ee-41fa-8d36-48e68cad9972\",\n \"this_is_a_goober_field_name_this_vendor_uses\": \"true\",\n[...]\n \"items\" : \"Eggs Benedict, Green & White Scramble, Side Three Pepper Home Fries (2), Buttermilk Pancakes\",\n \"g_client_machinename\" : \"foo_client\",\n \"g_vendor_machinename\" : \"zip_vendor\",\n \"g_uploaded_at\" : ISODate(\"2021-05-18T18:49:10.293Z\"),\n \"g_unique_id\" : \"6bbd0d4b8fc27800e680691990b99f72d964673f40ab9132623c25cfd3f0007a\"\n}\n", "text": "I’m developing a new aggregation pipeline for a series of queries on my Mongo database (g_reports) and one of it’s collections (zip_vendor). I know the data stored in Mongo is logically consistent, because when I query with normal find() operator and use the data for other purposes I don’t see this behavior.However, this aggregation with group() stage returns records that are logically impossible results based on my first $match stage.Here is the aggregation I’m using (syntax is for python + pymongo):The resulting table contains the fields and data I require and appears to increment the counter +1 for each record grouped by date. The logic-bomb is this: I’m selecting the foo_client in the $match stage at the beginning of my aggregation, but the results show a mix of other client names:I expect the aggregation to have already selected the client name foo_client. Does this line ('g_client_machinename': {'$first': '$g_client_machinename'}) in the group stage intentionally ignore this match stage and claw out of other records in the collection these client names (making false labels) or the whole records (contaminating the results)?To give some MongoDB context to my aggregation example, here is the show for the database and collection names:On the application side–\nEach record, in addition to it’s content fields, is stored with 2+ additional fields used by my app when I insert content and when I retrieve it: (g_client_machinename, g_vendor_machinename).Basically each “batch” of records stored is from one client for a particular vendor. I label (and validate) the record when I store it. 
So the theory goes, all I need is this 2-tuple to get it out.", "username": "xtian_simon" }, { "code": "$match'$match': {\n \"g_uploaded_at\": {\n '$gte': dparser.parse(date_from, fuzzy=True),\n '$lte': dparser.parse(date_to, fuzzy=True)\n },\n },\nmatch = {\n '$match': {\n 'g_client_machinename': 'foo_client'\n },\n '$match': {\n \"g_uploaded_at\": {\n '$gte': \"dparser.parse(date_from, fuzzy=True)\",\n '$lte': \"dparser.parse(date_to, fuzzy=True)\"\n },\n },\n }\n{\n '$match': {\n g_uploaded_at: {\n '$gte': 'dparser.parse(date_from, fuzzy=True)',\n '$lte': 'dparser.parse(date_to, fuzzy=True)'\n }\n }\n}\n", "text": "impossible results based on my first $match stageWhat do you consider as being your first $match stage?There is only one match stage and it isA query is a JSON document. In most implementation of JSON, only the last occurrence of a key is kept. In your case the second $match overwrites the first one. If you try in the shell the followingand the print the variable match, you will see it is equal to", "username": "steevej" }, { "code": "", "text": "Rookie mistake! 5,4,3,2,1 character min.", "username": "xtian_simon" }, { "code": "", "text": "Rookie mistakeNot rookie, I still fall in this trap from time to time.", "username": "steevej" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Aggregation pipeline ends in a group returning illogical results
2022-06-02T01:58:58.585Z
Aggregation pipeline ends in a group returning illogical results
1,782
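For reference, the fix that steevej's explanation points to is merging both predicates into a single `$match` stage, since duplicate dictionary keys in Python silently collapse to the last one. A sketch of the corrected first stage, reusing the names from the original snippet (the date range values and the `dparser` alias are assumptions that mirror the thread's code):

```python
import dateutil.parser as dparser  # assumed to match the dparser alias in the thread
from pymongo import MongoClient

coll = MongoClient()["all_reports"]["zip_vendor"]
date_from, date_to = "2021-05-01", "2021-05-31"  # placeholder range

# One $match carrying both predicates; two "$match" keys in the same dict
# would overwrite each other and drop the client filter.
match_stage = {
    "$match": {
        "g_client_machinename": "foo_client",
        "g_uploaded_at": {
            "$gte": dparser.parse(date_from, fuzzy=True),
            "$lte": dparser.parse(date_to, fuzzy=True),
        },
    }
}

result = coll.aggregate([match_stage])  # append the $project/$group stages as before
```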
null
[]
[ { "code": "", "text": "if i sort data using date i need to get current date in first order, but if i update date of previous month i get this data in first position instead of current date", "username": "Muqthar_Ahmed_Quraishi_J" }, { "code": "", "text": "Please read Formatting code and log snippets in posts and post sample documents and sample results for your use-case.Also post what you have tried so far and indicates how it fails to deliver expected results. This will save us time by preventing us to pursue and investigate in a direction you already know does not work.", "username": "steevej" }, { "code": "{\ndate:\"...\",\nlastUpdated: \" \",\n}\n", "text": "Then imho you should have 2 date fields, like this:When the doc is created date and lastUpdated are the same number. If you update, you update the lastUpdated field, and you always sort based on lastUpdated.", "username": "Mah_Neh" } ]
Sorting data based on date in descending order
2022-06-02T11:42:47.304Z
Sorting data based on date in descending order
6,452
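A small pymongo sketch of the two-field approach suggested above (the collection and field values are illustrative): the original date stays untouched, every edit bumps `lastUpdated`, and the listing always sorts on `lastUpdated`:

```python
import datetime
from pymongo import MongoClient, DESCENDING

coll = MongoClient()["test"]["events"]  # hypothetical collection

now = datetime.datetime.now(datetime.timezone.utc)
coll.insert_one({"title": "new item", "date": now, "lastUpdated": now})

# Editing an older document only touches lastUpdated, never date.
coll.update_one(
    {"title": "old item"},
    {"$set": {"status": "edited"}, "$currentDate": {"lastUpdated": True}},
)

# Most recent activity first.
for doc in coll.find().sort("lastUpdated", DESCENDING):
    print(doc.get("title"), doc.get("lastUpdated"))
```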
null
[ "kafka-connector" ]
[ { "code": "", "text": "Hello,I notice whenever I use “ com.mongodb.kafka.connect.sink.cdc.mongodb.ChangeStreamHandler” in a mongoDB sink connector config to move data from a topic to a mongo collection, everything works fine at first, I see the expected documents count in the collection, but after several days the collection ends up empty with 0 document. Can anyone explain why the target collection suddenly becomes empty ? Is it related to the message retention time of the topic?, how can I prevent the collection from becoming empty ?Thank you.", "username": "SVC_RVBRAND" }, { "code": "", "text": "I am not very familiar with the kafka connector.Is it related to the message retention time of the topic?I would be surprised if the message retention time within kafka is directly responsible from emptying the collection in MongoDB.But it might indirectly if you also created a TTL index withing mongo.Please share the indexes you have on the collection.", "username": "steevej" }, { "code": "\"config\": {\n \"connector.class\": \"com.mongodb.kafka.connect.MongoSinkConnector\",\n \"confluent.topic.bootstrap.servers\": \"boot:9092\",\n \"tasks.max\": \"1\",\n \"database\": \"DEV\",\n \"topics.regex\": \"prefix\\\\.DEV.*\",\n \"topic.override.prefix.DEV.meals.collection\": \"meals\",\n \"topic.override.prefix.DEV.recipes.collection\": \"recipes\",\n \"change.data.capture.handler\": \"com.mongodb.kafka.connect.sink.cdc.mongodb.ChangeStreamHandler\"\n},\n", "text": "Hi @steevejThanks a lot for your answer, this is the config I have, I had to redo the entire config.In my\n[SOURCE CLUSTER]\nmongos> db.recipes.countDocuments({})\n10720\nmongos> db.meals.countDocuments({})\n3983and my[DESTINATION CLUSTER]db.recipes.countDocuments({})\n10720db.meals.countDocuments({})\n3983So everything looks pretty good now, documents count match for those collections in both clusters,\nbut if I wait maybe tomorrow or sometimes.in my [DESTINATION CLUSTER], the collection recipes and meals contain 0 documents.This is my sink configuration I haveThose collections are automatically created by the sink connector with no index besides the default id one.\nNot sure, why the collections become empty after a certain time.Thank you.", "username": "SVC_RVBRAND" }, { "code": "", "text": "I have added kafka-connector to the thread tags in hope someone more savvy with kafka will jump in.Because I can only see a limited number of reasons why a collection becomes empty.I could also see a situation where your destination cluster nodes are running on some container without permanent storage and for some reasons they are restarted and empty because the lack of permanent storage.To help find out, you may change the credential of all database user with write access to the 2 collections once you have some data. The culprit will barfs out.If nothing barfs out, it is TTL index. If human, you will be called or emailed. If another process, hopefully logs will gives some clues.", "username": "steevej" } ]
Empty collection issue
2022-05-25T23:47:11.316Z
Empty collection issue
3,038
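One quick check suggested in the thread is whether a TTL index is silently expiring the sink's documents. A pymongo sketch (the connection string, database and collection names are assumptions matching the sink config above) that lists indexes and flags any with `expireAfterSeconds`:

```python
from pymongo import MongoClient

# Assumed destination cluster and sink target collection.
coll = MongoClient("mongodb://dest-cluster.example.net:27017")["DEV"]["recipes"]

for index in coll.list_indexes():
    ttl = index.get("expireAfterSeconds")
    if ttl is not None:
        print(f'TTL index "{index["name"]}" on {dict(index["key"])} expires docs after {ttl} s')
    else:
        print(f'index "{index["name"]}" has no TTL')
```

If no TTL index shows up, the next suspects are non-persistent storage on the destination nodes or another process with write access, as discussed above.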
null
[ "node-js", "production", "performance", "typescript" ]
[ { "code": "proxyHostproxyPortproxyPasswordproxyUsernameMongoClientenableUtf8Validation: false// disable UTF-8 validation globally on the MongoDB client\nconst client = new MongoClient('mongodb://localhost:27017', { enableUtf8Validation: false });\n\n// disable UTF-8 validation for a particular operation\nconst client = new MongoClient('mongodb://localhost:27017');\nconst db = client.db('database name');\nconst collection = db.collection('collection name');\n\nawait collection.find({ name: 'John Doe'}, { enableUtf8Validation: false });\ninterface Human {\n name: string;\n age: number;\n}\n\ninterface Pet {\n name: string\n bestFriend: Human\n}\n\n\nconst pets = client.db().collection<Pet>('pets');\nawait pets.findOne({ 'bestFriend.age': 'young!' }) // typescript error!\nCollection_id_idObjectId_id_id_idpkFactory_idenableUtf8ValidationGridFSBucketWriteStream.prototype.end()this", "text": "The MongoDB Node.js team is pleased to announce version 4.3.0 of the mongodb package!This release includes SOCKS5 support and a couple of other important features and bug fixes that we hope will improve your experience with the node driver.The SOCKS5 options can be configured via the proxyHost, proxyPort, proxyPassword and proxyUsername options in the connection string passed to the MongoClient instance. Big thanks to @addaleax for helping with this feature!The other notable features address performance and TypeScript as detailed below.The original release of the 4.x driver relied on a new version of the BSON library that enables UTF-8 validation by default, resulting in noticeable performance degradation over the 3.x driver when processing over string data. This release introduces an option to opt out of this validation by specifying enableUtf8Validation: false at the client, database, collection, or individual operation level.For example:Thanks to an amazing contribution from @avaly we now have support for key auto-completion and type hinting on nested documents! MongoDB permits using dotted keys to reference nested keys or specific array indexes within your documents as a shorthand for getting at keys beneath the top layer. Typescript’s Template Literal types allow us to take the interface defined on a collection and calculate at compile time the nested keys and indexes available.For example:WARNING: There is a known shortcoming to this feature: recursive types can no longer be used in your schema. For example, an interface that references itself or references another schema that references back to the root schema cannot be used on our Collection generic argument. Unlike at runtime where a “recursive” shaped document has an eventual stopping point we don’t have the tools within the language to declare a base case enumerating nested keys. We hope this does not cause friction when upgrading driver versions: please do not hesitate to reach out with any feedback you have about this feature.We have also enhanced the type inference for the _id type. Now, when performing operations on a collection, the following holds true based on the type of the schema:We invite you to try the mongodb library immediately, and report any issues to the NODE project.", "username": "dariakp" }, { "code": "", "text": "It was extremely convenient to have a model which referenced other models or referenced itself, as these were later decorated by an ORM.Regarding TS, can this workaround not be applied and benefit of best of both worlds? 
Version 3.4-dev breaks recursive types · Issue #30188 · microsoft/TypeScript · GitHub", "username": "Theodor_Diaconu" }, { "code": "", "text": "Hi there, thank you for reaching out with your feedback - we are going to look into this further and see if we can get some sort of a patch out that allows recursive types to still compile. You can follow our progress here: https://jira.mongodb.org/browse/NODE-3852", "username": "dariakp" }, { "code": "", "text": "I think I found a working example where we can specify the depth of this. https://www.examplefiles.net/cs/25063 this explains the concept and will take you to this playground linkIn theory we can have tree-like structures as long as we set a limit. Now… the problem? You guessed it, it generates a shitload of combinations and how do we define this limit?I’ll keep this in background and once I compute a solution will leave comm here.", "username": "Theodor_Diaconu" }, { "code": "", "text": "Hey @Theodor_Diaconu! We’ve been working on a potential fix to our breaking recursive types in 4.3.0. The changes are included in this PR. Feel free to take a look if you’d like! There are two other small TS fixes included in that PR as well but feel free to ignore those changes.Our solution isn’t perfect - it doesn’t provide true type safety on recursive types. Instead, it just allows recursive types to be used with the node driver (with some limitations). Three limitations to our solution are:This seems to be a good balance between allowing some of the behavior we broke in 4.3 back while keeping our types maintainable for the future though.Let us know if you have any thoughts or feedback!", "username": "Bailey_Pearson" }, { "code": "", "text": "I believe you found the sweet spot for this, I’ve looked through the code and indeed it looks like the perfect compromise. Thank you. Looking forward to 4.3.1 release to see if we have any breaking changes in our code-bases.", "username": "Theodor_Diaconu" }, { "code": " await comments.updateOne(\n { _id: c1.insertedId },\n {\n $set: {\n title: \"Lifecycle Updated\",\n },\n }\n );\nType of property 'author' circularly references itself in mapped type '{ [Key in keyof Post]: Post[Key] extends Post ? [Key] : Post extends Post[Key] ? [Key] : Post[Key] extends readonly (infer ArrayType)[] ? Post extends ArrayType ? [...] : ArrayType extends Post ? [...] : [...] : [...]; }'.ts(2615)\nType of property 'comments' circularly references itself in mapped type '{ [Key in keyof Post]: Post[Key] extends Post ? [Key] : Post extends Post[Key] ? [Key] : Post[Key] extends readonly (infer ArrayType)[] ? Post extends ArrayType ? [...] : ArrayType extends Post ? [...] : [...] : [...]; }'.ts(2615)\nType of property 'comments' circularly references itself in mapped type '{ [Key in keyof User]: User[Key] extends User ? [Key] : User extends User[Key] ? [Key] : User[Key] extends readonly (infer ArrayType)[] ? User extends ArrayType ? [...] : ArrayType extends User ? [...] : [...] : [...]; }'.ts(2615)\nType of property 'post' circularly references itself in mapped type '{ [Key in keyof Comment]: Comment[Key] extends Comment ? [Key] : Comment extends Comment[Key] ? [Key] : Comment[Key] extends readonly (infer ArrayType)[] ? Comment extends ArrayType ? [...] : ArrayType extends Comment ? [...] : [...] : [...]; }'.ts(2615)\nType of property 'posts' circularly references itself in mapped type '{ [Key in keyof User]: User[Key] extends User ? [Key] : User extends User[Key] ? [Key] : User[Key] extends readonly (infer ArrayType)[] ? 
User extends ArrayType ? [...] : ArrayType extends User ? [...] : [...] : [...]; }'.ts(2615)\nType of property 'posts' circularly references itself in mapped type '{ _id: [\"_id\"]; title: [\"title\"]; posts: [\"posts\", number, ...any[]]; }'.ts(2615)\nType of property 'tags' circularly references itself in mapped type '{ [Key in keyof Post]: Post[Key] extends Post ? [Key] : Post extends Post[Key] ? [Key] : Post[Key] extends readonly (infer ArrayType)[] ? Post extends ArrayType ? [...] : ArrayType extends Post ? [...] : [...] : [...]; }'.ts(2615)\nType of property 'user' circularly references itself in mapped type '{ [Key in keyof Comment]: Comment[Key] extends Comment ? [Key] : Comment extends Comment[Key] ? [Key] : Comment[Key] extends readonly (infer ArrayType)[] ? Comment extends ArrayType ? [...] : ArrayType extends Comment ? [...] : [...] : [...]; }'.ts(2615)\nexport class Post {\n constructor(data: Partial<Post> = {}) {\n Object.assign(this, data);\n }\n\n _id?: ObjectId;\n title: string;\n\n comments: Comment[] = [];\n\n authorId: ObjectId | any;\n author: User;\n\n number?: string | number;\n tags: Tag[] = [];\n tagsIds: ObjectId[] = [];\n}\nexport class Comment {\n _id?: ObjectId;\n title: string;\n date: Date;\n\n // virtual\n titleAndDate: string;\n\n // virtual\n get titleWithUserId() {\n return this.title + \" \" + this.userId;\n }\n\n userId: ObjectId;\n user: User;\n\n postId: ObjectId;\n post: Post;\n}\n\nexport class User {\n constructor(data: Partial<User> = {}) {\n Object.assign(this, data);\n }\n\n _id?: ObjectId;\n name: string;\n title?: string;\n\n comments: Comment[] = [];\n\n posts: Post[] = [];\n}\n", "text": "@Bailey_Pearson sorry to bother you again with this, but this is the errors I get:", "username": "Theodor_Diaconu" }, { "code": "PostUserUserPost[]FilteranyFilter", "text": "Hey @Theodor_Diaconu! Thanks for reaching out and the thorough example.Unfortunately this is a limitation with the current implementation of recursive types in the driver. We currently only support recursive types where the recursion is direct - meaning that a type has a property of the same type. Your example is an example of mutual recursion, which we don’t support. This occurs because the Post class contains a field of type User but the User class also has a field of type Post[].As always, a workaround is to cast your Filter as any.We’re planning on improving the Filter type more in the future but we want to take the time to get it correct, rather than piece-mealing changes onto it release-by-release so the fix might not be ready in the near future.", "username": "Bailey_Pearson" }, { "code": "", "text": "@Bailey_Pearson not sure if helpful for you but the elegant solution I found was to exclude the keys of Types which contain ‘_id’ from the filters list, solving all my problems easily and still keeping the nice “ORM”-like structure", "username": "Theodor_Diaconu" }, { "code": "", "text": "", "username": "system" } ]
MongoDB Node.js Driver 4.3.0 Released
2022-01-07T00:47:46.381Z
MongoDB Node.js Driver 4.3.0 Released
7,340
null
[ "python", "schema-validation" ]
[ { "code": "", "text": "Hey all,just stumbled onto this article: Improved Error Messages for Schema Validation in MongoDB 5.0.…that basically promises improved error messages when jsonSchema validation fails in Mongo 5.?.This sounds really awesome and we can’t wait for this to be improved Tried MongoDB 5.0.8 locally with the newest pymongo drivers and the errors are still very general.Sadly the linked form to participate in the beta program says:Sorry, this survey is not currently active.Any idea how to start testing this cool stuff?", "username": "Ikar_Pohorsky" }, { "code": "MongoServerError: Document failed validation\nAdditional information: {\n failingDocumentId: ObjectId(\"62970b087ea0ed071328a4d4\"),\n details: {\n operatorName: '$jsonSchema',\n schemaRulesNotSatisfied: [\n {\n operatorName: 'required',\n specifiedAs: { required: [ 'name', 'year', 'major', 'address' ] },\n missingProperties: [ 'address' ]\n }\n ]\n }\n}\n", "text": "Hi @Ikar_Pohorsky and welcome to the community!!Th Schema-Validation feature is now available from MongoDB version 5.0.0. The work was done in SERVER-20547.how to start testing this cool stuff?For example on inserting the document using the below command:db.sample.insertOne( { name: “Mihai”, year: 2019, major: “Computer Science” })Following error is observed in MongoDB version 4.4.0 which appears to be less descriptive.MongoServerError: Document failed validationHowever, on latest MongoDb version 5.0.9, on insertion, the following error is observed:Let us know if you have further questionsThanks\nAasawari", "username": "Aasawari" }, { "code": "5.0.818.04.6 LTS1.4.2MongoServerError: Document failed validation\nAdditional information", "text": "Thanks @Aasawari for the reply!Maybe I’ll have to wait a bit. Currently on mongo 5.0.8 (ubuntu 18.04.6 LTS) using mongosh 1.4.2 and the validation error still says:…no Additional information is showing up.Is there any config option to enable the Additional info?", "username": "Ikar_Pohorsky" }, { "code": "", "text": "Hi @Ikar_PohorskyThe feature is available on all versions post MongoDB version 5.0.0 and hence should be available on 5.0.8 too.\nCould you please confirm if there is a possibility that the MongoDB has been upgraded from the 4.4 series to the 5.0 series?There might be a possibility that the Feature Compatibility Version has not been upgraded during the MongoDB binary upgrade. The following documentation will help you to set the compatibility to version 5.0 using the commanddb.adminCommand( { setFeatureCompatibilityVersion: “5.0” } )I hope the above helps or else please let us know for further issue.Thanks\nAasawari", "username": "Aasawari" }, { "code": "", "text": "That was it! Thank you so much @Aasawari ", "username": "Ikar_Pohorsky" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
What is the status on improved json schema error messages?
2022-05-19T13:58:22.733Z
What is the status on improved json schema error messages?
3,736
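To reproduce the resolution above from Python, one can check the cluster's featureCompatibilityVersion, raise it, and then look at the detailed validation error document. A hedged pymongo sketch (the database, collection and schema are illustrative, not the poster's actual setup):

```python
from pymongo import MongoClient
from pymongo.errors import WriteError

client = MongoClient()
print(client.admin.command({"getParameter": 1, "featureCompatibilityVersion": 1}))

# Only needed after a binary upgrade from the 4.4 series; check the docs first,
# since raising FCV is not trivially reversible.
client.admin.command({"setFeatureCompatibilityVersion": "5.0"})

db = client["school"]
db.create_collection(
    "sample",
    validator={"$jsonSchema": {"required": ["name", "year", "major", "address"]}},
)

try:
    db["sample"].insert_one({"name": "Mihai", "year": 2019, "major": "Computer Science"})
except WriteError as exc:
    # With FCV 5.0+, the error document includes schemaRulesNotSatisfied details.
    print(exc.details)
```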
null
[ "cxx" ]
[ { "code": "", "text": "Just wanted to know if there was some way how I could understand how mongoDB internally handles queries. I’m not talking about the logs of the queries, but actually how the server handles them.\nFor ex. if i fire a “find” query, what actual process is taking place (like what functions from files (the source code C++ files) are being called in what order), something like that.\nI tried running the source code, but couldn’t figure out how to extract this information. Just wanna understand and learn how it actually works.", "username": "Sahil_Chawla1" }, { "code": "src/mongo/db/query/README.mdv5.0v6.0", "text": "Welcome to the MongoDB Community @Sahil_Chawla1 !There are some READMEs in the relevant MongoDB server source code directories that provide more details on the technical implementation and internals. The ultimate reference would be looking at the source code, but for a starting point on queries see src/mongo/db/query/README.md (Query System Internals).There will also be some version-specific differences, so you should make sure you are looking at the branch in GitHub (eg v5.0 or v6.0) corresponding to the version of the MongoDB server you are trying to get more insight into.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Anyway to understand the internal functioning of the MongoDB Server
2022-05-31T14:03:39.558Z
Anyway to understand the internal functioning of the MongoDB Server
2,912
null
[ "python" ]
[ { "code": "", "text": "i create db, its struct dump from another db , i want to verify them , so what I should do ? i write it with pymongo , i finished some part and i coudn’t swith to other program language .", "username": "feng_deng" }, { "code": "mongoexport", "text": "Hi @feng_deng welcome to the community!As I understand it, you dumped a collection from a sharded cluster using Pymongo, and you wanted to see the shard key of that collection?If yes, your Python script would also need to dump the indexes defined on that collection using the index_information() pymongo method as well as the data itself.Notably, mongodump does this (export data + indexes), but not mongoexport, since mongoexport is more concerned about the data themselves and not the metadata (which would include index definitions).If this is not what you’re looking for, could you please elaborate more on what you require? A description of the process and some examples (documents/code) would be most helpful.Best regards\nKevin", "username": "kevinadi" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How can i get the shard key of the collections that created in a shard cluster
2022-05-23T10:26:52.615Z
How can i get the shard key of the collections that created in a shard cluster
1,325
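Complementing the answer above: `index_information()` captures the indexes (including the one backing the shard key), while the authoritative shard key definition lives in the cluster's `config` database, readable when connected through mongos with suitable privileges. A pymongo sketch with placeholder host, database and collection names:

```python
from pymongo import MongoClient

client = MongoClient("mongodb://mongos.example.net:27017")  # assumed mongos address
coll = client["mydb"]["mycoll"]

# Dump the index definitions alongside the data so they can be recreated later.
for name, spec in coll.index_information().items():
    print(name, spec["key"])

# The shard key itself is recorded in config.collections (keyed by "db.coll").
meta = client["config"]["collections"].find_one({"_id": "mydb.mycoll"})
if meta is not None:
    print("shard key:", meta.get("key"), "unique:", meta.get("unique", False))
```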
null
[ "crud", "transactions" ]
[ { "code": "", "text": "Hello,When working with transactions, how do I achieve lock the reads as well?I am working with a method that replaces the document (using UpdateOneAndAsync does not work in this case), so I need to use transactions to achieve atomicity and not allow multiple writes. This works fine, but I notice that the reads are not locked, meaning that I am able to read the document that I have lock-in another transaction, is there a way to lock the read as well?Regards", "username": "Rebeca_M" }, { "code": "", "text": "Hi @Rebeca_M welcome to the community!When working with transactions, how do I achieve lock the reads as well?In short, I don’t think you can. Transactions mainly concerns document writes and not presenting a state of the database that is not consistent, not really about preventing reads. Preventing readers would likely have a negative effect on your database’s parallelism and performance.However, do you mind elaborating more if you have a specific use case in mind that requires you to prevent reads? Some example scenarios would be helpful to illustrate the point.Best regards\nKevin", "username": "kevinadi" } ]
Read locks in transactions
2022-05-23T10:02:19.305Z
Read locks in transactions
1,646
null
[]
[ { "code": " source: mongodb.log\n host: <host>:27017\n start: 2022 Jun 01 09:03:47.876\n end: 2022 Jun 01 13:35:00.151\n", "text": "mloginfo is encountering an error because of “###” in the mongodb log file.\nwhat is the workaround?date format: iso8601-local\ntimezone: UTC +0800\nlength: 39195\nbinary: mongod\nversion: 4.2.18\nstorage: wiredTigerQUERIES\n[= ] 1.5 % Traceback (most recent call last):\nFile “/usr/local/lib/python3.6/site-packages/mtools-1.7.0-py3.6.egg/mtools/util/logevent.py”, line 835, in _extract_counters\n‘’))\nValueError: invalid literal for int() with base 10: ‘’During handling of the above exception, another exception occurred:Traceback (most recent call last):\nFile “/usr/local/bin/mloginfo”, line 11, in \nload_entry_point(‘mtools==1.7.0’, ‘console_scripts’, ‘mloginfo’)()\nFile “/usr/local/lib/python3.6/site-packages/mtools-1.7.0-py3.6.egg/mtools/mloginfo/mloginfo.py”, line 112, in main\ntool.run()\nFile “/usr/local/lib/python3.6/site-packages/mtools-1.7.0-py3.6.egg/mtools/mloginfo/mloginfo.py”, line 107, in run\nsection.run()\nFile “/usr/local/lib/python3.6/site-packages/mtools-1.7.0-py3.6.egg/mtools/mloginfo/sections/query_section.py”, line 92, in run\nallowDiskUse=le.allowDiskUse)\nFile “/usr/local/lib/python3.6/site-packages/mtools-1.7.0-py3.6.egg/mtools/util/logevent.py”, line 678, in allowDiskUse\nself._extract_counters()\nFile “/usr/local/lib/python3.6/site-packages/mtools-1.7.0-py3.6.egg/mtools/util/logevent.py”, line 884, in _extract_counters\nself._txnNumber = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(’,’, ‘’))\nValueError: invalid literal for int() with base 10: ‘\"###\"’", "username": "Sandie_Zarate1" }, { "code": "###mloginfo --versionmloginfo --queries", "text": "Welcome to the MongoDB Community @Sandie_Zarate1 !Can you share some more details to help reproduce the issue:Thanks,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "offending log line:\n2022-06-01T09:04:39.015+0800 I COMMAND [initandlisten] command local.oplog.rs command: getMore { getMore: “###”, collection: “###”, $db: “###” } originatingCommand: { find: “###”, filter: { ts: { $gte: “###”, $lte: “###” } }, oplogReplay: “###”, $db: “###” } planSummary: COLLSCAN cursorid:8082644926738273313 keysExamined:0 docsExamined:22521 numYields:186 nreturned:22520 reslen:16749135 locks:{ ReplicationStateTransition: { acquireCount: { w: 187 } }, Global: { acquireCount: { r: 187 } }, Database: { acquireCount: { r: 187 } }, Mutex: { acquireCount: { r: 1 } }, oplog: { acquireCount: { r: 187 } } } flowControl:{ acquireCount: 7, timeAcquiringMicros: 9 } storage:{ data: { bytesRead: 17354199, timeReadingMicros: 1313057 } } protocol:op_msg 1362ms\n2022-06-01T09:04:40.075+0800 I REPL [repl-writer-worker-1] applied op: CRUD { ts: “###”, t: “###”, h: “###”, v: “###”, op: “###”, ns: “###”, ui: “###”, wall: “###”, lsid: { id: “###”, uid: “###” }, txnNumber: “###”, stmtId: “###”, prevOpTime: { ts: “###”, t: “###” }, o: { _id: “###”, startDate: “###”, url: “###”, headers: { common: { Accept: “###” }, delete: {}, get: {}, head: {}, post: { Content-Type: “###” }, put: { Content-Type: “###” }, patch: { Content-Type: “###” } }, method: “###”, typeRequest: “###”, requestBody: { domain: “###”, username: “###”, password: “###”, uncFilePath: “###” }, createdOn: “###” } }, took 972msCommand I run:mtools version 1.7.0 || Python 3.6.8 (default, Aug 13 2020, 07:46:32) [GCC", "username": "Sandie_Zarate1" }, { "code": "", "text": "Hi @Sandie_Zarate1,Thank you for including the requested details. 
Unfortunately mtools 1.7.0 does not have support for Enterprise log redaction – many of the expected values have been replaced and regex extraction is failing ungracefully similar to your example.There is an open issue in the mtools backlog to address this: https://github.com/rueckstiess/mtools/issues/854Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Mloginfo encountered an error
2022-06-02T03:48:10.639Z
Mloginfo encountered an error
3,087
null
[ "python", "time-series" ]
[ { "code": "{\n\"time\" : \"14.04.2022 21:50:59\",\n\"balance\" : 10500.50\n}\nchange_streamspymongo.change_streams()", "text": "Hi,\nI am trying to build a customized dashboard using python plotly-dash which needs to get updated whenever a new record is inserted into the MongoDB Atlas collection on the cloud. I was looking for examples online but could not find any. For example, lets assume the data that gets inserted into mongoDB (asynchronously at random time intervals) is a time series data like:I understand I have to use change_streams to watch for new insertions into the collection, but can anyone show a simple example of showing a simple plotly -DASH line chart & may be a plotly dash datatable that gets updated using plotly callbacks and pymongo.change_streams() whenever a new record is inserted into a MongoDB collection?Best Regards,\nDilip", "username": "Dilip_Rajkumar" }, { "code": "app.pyfrom queue import Queue\nfrom threading import Thread\n\nimport dash\nimport plotly.graph_objects as go\nimport pymongo\nfrom dash import dcc, html\nfrom dash.dependencies import Input, Output\nfrom pymongo.change_stream import ChangeStream\nfrom queue import Empty\n\nclient = pymongo.MongoClient(\"YourAtlasDB\")\nstream = client.graph_stream.scatter.watch()\n\n\ndef read_stream(stream: ChangeStream, q: Queue):\n for change in stream:\n x, y = change[\"fullDocument\"][\"x\"], change[\"fullDocument\"][\"y\"]\n q.put((x, y))\n\n\ndef update_figure(figure, q):\n try:\n x, y = q.get_nowait()\n figure[\"data\"][0].x += tuple([x])\n figure[\"data\"][0].y += tuple([y])\n except Empty:\n pass\n\n\nx = [0]\ny = [0]\ngraph = go.Scatter(x=x, y=y)\nfig = go.Figure(graph)\nfig.layout.autosize = True\n\nq = Queue()\n\nx = Thread(target=read_stream, args=(stream, q))\nx.start()\n\napp = dash.Dash()\napp.layout = html.Div(\n [\n dcc.Graph(figure=fig, id=\"live-update-graph\"),\n dcc.Interval(\n id=\"interval-component\", interval=1 * 1000, n_intervals=0 # in milliseconds\n ),\n ]\n)\n\n\[email protected](\n Output(\"live-update-graph\", \"figure\"), Input(\"interval-component\", \"n_intervals\")\n)\ndef update_graph(n):\n update_figure(fig, q)\n return fig\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True, use_reloader=False)\nupdate_db.pyimport pymongo\nimport time\nclient = pymongo.MongoClient(\"YourAtlasDB\")\nfor x,y in zip(range(50), range(50)):\n db.scatter.insert_one({'x':x, 'y':y*y})\n time.sleep(1)\n", "text": "HeyI just tried creating something similar today, and this is the solution I have come up with so far, using a Thread and a Queue to handle the dynamically updated data at intervals.in app.py file:And then an update_db.py file:", "username": "Tobias_Gardhus" }, { "code": "client = pymongo.MongoClient(\"YourAtlasDB\")\nstream = client.graph_stream.scatter.watch()\n\n\ndef read_stream(stream: ChangeStream, q: Queue):\n for change in stream:\n x, y = change[\"fullDocument\"][\"x\"], change[\"fullDocument\"][\"y\"]\n q.put((x, y))\ngraph_stream()change_streamgraph_streamscattergraph_streamdef read_stream()", "text": "Hi Tobias,\nThank you for your reply. I have some queries regarding your code:I can’t find any documentation for this graph_stream() pymongo function, all i could find was documentation for change_stream. So,\nCan you please clarify what is your database name, collection name and field names in this script?1.) Is graph_stream the name of your database in your MongoDB cluster?\n2.) Is scatter the name of your collection in the graph_stream database?\n3.) 
What is “fullDocument” and [“x”]? in the def read_stream() function?A screenshot your mongoDB → Browse collections page with this collection selected, would also be helpful?Lastly, in my case the x-axis is a time series and the y-axis a floating number, so any guidance on appropriate alterations of your code for that will be very helpfulThanks and Best Regards,\nDilip", "username": "Dilip_Rajkumar" } ]
MongoDB Atlas and Live Chart Update on plotly-dash
2022-04-18T06:21:38.827Z
MongoDB Atlas and Live Chart Update on plotly-dash
4,789
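For the follow-up questions in that thread: in the shared snippet, `graph_stream` is simply a database name and `scatter` a collection name (pymongo attribute access maps directly to them), and `fullDocument` is the newly inserted document carried inside each change event. A hedged sketch adapted to the time/balance document shape from the opening post (the connection string and the db/collection names are placeholders):

```python
from pymongo import MongoClient

client = MongoClient("YourAtlasDB")  # placeholder, same as in the thread
coll = client["dashboards"]["balances"]  # hypothetical db/collection names

# Only react to inserts; change["fullDocument"] is the document that was inserted.
pipeline = [{"$match": {"operationType": "insert"}}]

with coll.watch(pipeline) as stream:
    for change in stream:
        doc = change["fullDocument"]
        x = doc["time"]      # e.g. "14.04.2022 21:50:59" -> x-axis value
        y = doc["balance"]   # e.g. 10500.50 -> y-axis value
        print(x, y)          # push (x, y) into the Dash queue instead of printing
```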
null
[ "aggregation", "dot-net" ]
[ { "code": "AgendaAgendaItem\"Agenda\":[\n {\n \"_id\":\"df12a76a-1e9f-4247-b48d-bbc66d885a1b\",\n \"Name\":\"startup\",\n \"Duration\":10\n },\n {\n \"_id\":\"83c8ee53-ad31-4de3-bed3-292515683fa7\",\n \"Name\":\"warming\",\n \"Duration\":20\n },\n {\n \"_id\":\"afa2b867-7a91-4bda-b6de-d304263ff910\",\n \"Name\":\"running\",\n \"Duration\":30\n },\n {\n \"_id\":\"8b171ccd-cdb4-4123-bf10-825effd56418\",\n \"Name\":\"celebrating\",\n \"Duration\":5\n }\n ]\n[ startup, warming, running, celebrating ][ startup, celebrating, warming, running ]$setindex", "text": "Hello guys!I am stuck here trying to find the best approach to reorder an array of objects.\nimagine having an array Agenda that holds multiple AgendaItem objects and the user is free to reorder this array by dragging and dropping any agendaItem to any position.Here is a sample of what is this array looks like and what I want to do with it.so simply if array is like : [ startup, warming, running, celebrating ]\nand user moved celebrating to the second position\nit will be like: [ startup, celebrating, warming, running ] - (not just swapping)what I thought of is deleting the second object and inserting it back at its target position using $position\nand by that all other elements would be shifted to their new positions.\nbut I think this approach is not completely safe as if the app crashes between these 2 updates I will lose this item.another approach is to $set the entire agenda with every reorder-request but this too doesn’t seem to be right to have a large object sent multiple times.\nI also don’t have the sense yet to judge if this object is actually large or with normal size so I discarded this approach.\n(I also cannot have the array cached and send it every few seconds as the business needs it to have it, more like, real-time across multiple clients)I also thought of adding a new attribute as index to every object and send an array with their new indices after reording and have mongodb handle this sorting by their new indices but I cannot find a method that does so in mongodb.so what would you usually do in this scenario?\nThank you! ", "username": "Hamza_Muhammad" }, { "code": "", "text": "Hello @Hamza_MuhammadAdd one more field position and update whenever the position changes and sort it by the position value.Just check if that works.Thanks,\nSudhesh", "username": "Sudhesh_Gnanasekaran" }, { "code": "", "text": "I would then have to update the entire list’s position values first then sort it by the position values.", "username": "Hamza_Muhammad" } ]
Reorder an array of objects
2022-05-28T07:15:25.307Z
Reorder an array of objects
2,034
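One way to avoid the two-step delete/insert risk raised in that question is a single update that uses an aggregation pipeline (MongoDB 4.2+) to rebuild the array server-side. The sketch below is pymongo rather than the .NET driver, the indexes and `_id` are placeholders, and it assumes non-boundary positions (moving to or from the first or last slot needs a small special case, since `$slice` does not accept a zero count):

```python
from pymongo import MongoClient

meetings = MongoClient()["app"]["meetings"]  # hypothetical collection
meeting_id = "some-meeting-id"               # placeholder _id
frm, to = 3, 1  # move the item currently at index 3 up to index 1

pipeline = [
    {"$set": {"_moved": {"$arrayElemAt": ["$Agenda", frm]}}},
    {"$set": {"_rest": {"$concatArrays": [
        {"$slice": ["$Agenda", frm]},                            # items before frm
        {"$slice": ["$Agenda", frm + 1, {"$size": "$Agenda"}]},  # items after frm
    ]}}},
    {"$set": {"Agenda": {"$concatArrays": [
        {"$slice": ["$_rest", to]},                              # items before the target slot
        ["$_moved"],                                             # the moved item
        {"$slice": ["$_rest", to, {"$size": "$_rest"}]},         # the remainder
    ]}}},
    {"$unset": ["_moved", "_rest"]},
]

meetings.update_one({"_id": meeting_id}, pipeline)
```

Because the whole reorder happens in one update, a crash between "remove" and "re-insert" can no longer drop an agenda item, and only the indexes travel over the wire instead of the full array.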
null
[ "golang", "beta" ]
[ { "code": "", "text": "The MongoDB Go Driver Team is pleased to release version 1.10.0-beta1 of the MongoDB Go Driver.This release supports several new features introduced in MongoDB server version 6.0 (Release Candidate). For more information please see the 1.10.0-beta1 release notes.You can obtain the driver source from GitHub under the v1.10.0-beta1 tag.Documentation for the Go driver can be found on pkg.go.dev and the MongoDB documentation site. BSON library documentation is also available on pkg.go.dev. Questions and inquiries can be asked on the MongoDB Developer Community. Bugs can be reported in the Go Driver project in the MongoDB JIRA where a list of current issues can be found. Your feedback on the Go driver is greatly appreciated!Thank you,\nThe Go Driver Team", "username": "Qingyang_Hu1" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB Go Driver 1.10.0-beta1 Released
2022-06-01T22:19:56.587Z
MongoDB Go Driver 1.10.0-beta1 Released
2,467
null
[ "node-js", "atlas-data-lake" ]
[ { "code": "", "text": "I had a bit of a broad question… I’m currently working on a project where I store user sign in / subscription info through atlas DB. However, I want to also let users upload and view their OWN user assets (not anyone else’s)… for which I was considering data lake-- though I’m not too sure how to integrate this in terms of connecting each data that is uploaded onto the data lake with each user in the database.For I.e. I was considering azure storage blobs-- the way each user asset is in their own containers though is managed through RBAC/ or Azure active directory… Since I already did all the authn/z through mongo I didn’t want to go and implement it through azure active directory. I didn’t think it was for my usecase. Though it works for azure storage blobs for managing user storage…How would I go about managing atlas data lake in this sense? I’m not a back end dev though I’ve been wanting to integrate this.Thanks for the input.", "username": "tem_N_A" }, { "code": "", "text": "Hey Tem,Unfortunately Atlas Data Lake does not yet support Azure, so you could not use Atlas Data Lake with Azure Blob Storage.What you’re describing sounds like an interesting use case. My initial thought is that you’re probably going to want to handle the uploading of assets in the application tier and then you can choose to push them into object storage and then handle the storing of file links in the database. If you were to take an approach like this you can keep your authn/z model in the DB and just have the object storage be an internal implementation detail, and you don’t really need “Atlas Data Lake” in this context.Best,", "username": "Benjamin_Flast" } ]
Datalake usecase
2022-05-31T18:15:42.927Z
Datalake usecase
2,537
null
[]
[ { "code": "{\n \"name\": \"John Doe\",\n \"address\": \"some street,24\",\n \"groupName\": \"group-a\"\n\n}\n{\n \"name\": \"Group A\",\n \"nameKey\": \"group-a\",\n \"settings\": []\n}\n", "text": "Hello,Could you kindly advice what would be the best way performance-wise to retroactively update documents in one collection if a change is applied to a related field in a different collection?An example:If we were to create a new product:Now let’s suppose mr. John Doe was assigned to group-a because of his address. group-a refers to a document in a separate collection:The problem I am experiencing is what to do should the “name” be changed for our Group A, meaning that the field “nameKey” would also be regenerated. For example a name is changed to Group B and so the nameKey is regenerated to group-b, which means that what is saved on John Doe’s document is outdated and no longer valid.Could you kindly tell me what approach you would take, please?Thank you very much", "username": "RENOVATIO" }, { "code": "", "text": "You simply should not refer to documents from other collections with dynamic data. The _id field is unique, it is indexed and cannot be modified.You may use transactions to update both collections when changing a referee to update its referrers.Change stream can also be used to do a delayed update of the referrers.I think groupName in first collection will be better named groupNameKey.", "username": "steevej" }, { "code": "", "text": "Thanks for your reply and for a detailed answer, however I have some comments regarding your first 2 sentences:You simply should not refer to documents from other collections with dynamic data.Could you tell me what should be done instead? If 2 documents have their reason to exist separately (e.g. they standalone serve other components) and yet they both belong on the correctness of each other, what can be done?The _id field is unique, it is indexed and cannot be modified.I guess yours is a very good insight here. I could obviate this need by simply using the _id key, however the more legible group-a is very alluring.Also, finally. You mentioned transactions, whatabout using updateMany instead? Is it a poor choice?EDIT: In general, I have seen many APIs providing unique names generated from entered names to access resources, as they are easier to remember than database _ids. Based on your extensive experience, how would you say these systems solve the problem of having a name change?Thank you!", "username": "RENOVATIO" }, { "code": "", "text": "I did not write that you should not have 2 documents. I wrote that one document should not refer to another document using a field that is mutable, dynamic, modifiable. Since the field groupName can be changed is not a good choice to maintain a relation from collection 1 to collection 2. Since _id cannot be changed, is unique and is indexed. As you see making the relation field modifiable it causes update difficulties.updateMany works within a single collection. What you want is to change groupName in 1 collection to be reflected into another collection.how would you say these systems solve the problem of having a name change?The problem of having a name change is not related to the way you get the name. It is related to the fact that your use-case allow to change. 
There is no issue in having groupName:group-a from one collection refer to another document in another collection using nameKey:group-a; the issue is: what to do should the “name” be changed for our Group A. The problem is there because you want to change it and use it as a key. Yes, showing group-a is more user-friendly than an _id, but users are not supposed to see your internal structure. Hopefully, when you display a group you show its name:“Group A” rather than its nameKey:“group-a”.", "username": "steevej" }, { "code": "", "text": "Thank you for your detailed and quality reply!", "username": "RENOVATIO" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Retroactively updating related documents when another collection is modified
2022-05-30T12:29:46.010Z
Retroactively updating related documents when another collection is modified
2,720
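A sketch of the transactional option discussed above, using pymongo (the collection and field names follow the example documents in the thread, and a replica set is required for transactions): the group rename and the update of every referring document either both happen or neither does.

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017/?replicaSet=rs0")  # transactions need a replica set
db = client["shop"]  # hypothetical database name
groups, people = db["groups"], db["people"]

def rename_group(old_key: str, new_name: str, new_key: str) -> None:
    """Rename a group and keep every referring document in sync, atomically."""
    with client.start_session() as session:
        with session.start_transaction():
            groups.update_one(
                {"nameKey": old_key},
                {"$set": {"name": new_name, "nameKey": new_key}},
                session=session,
            )
            people.update_many(
                {"groupName": old_key},
                {"$set": {"groupName": new_key}},
                session=session,
            )

rename_group("group-a", "Group B", "group-b")
```

Referencing the immutable _id instead of the mutable nameKey, as suggested above, removes the need for the second update entirely.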
null
[ "aggregation", "queries", "node-js", "data-modeling", "compass" ]
[ { "code": "[ { subscribers: [...] }, { subscribers: [...] }, ... ]\nreturn this.subscriptions\n .aggregate([\n {\n '$match': {\n 'entity': params.entity, \n 'status': 'active'\n }\n }, \n {\n '$lookup': {\n 'from': 'appsubscriptionssubscribers', \n 'localField': '_id', \n 'foreignField': 'subscription', \n 'as': 'subscribers'\n }\n }, \n {\n '$match': {\n 'subscribers.0': {\n '$exists': true\n }\n }\n },\n { $skip : batch.skip },\n { $limit : batch.size }, \n {\n '$project': {\n '_id': 0, \n 'subscribers': 1\n }\n }\n ])\n .toArray();\n }\n", "text": "Hello, I have a query that return me something like that(I am also using Compass to see the results) :but what I want is to join al that “sub-arrays” called “subscribers” in one only array in an aggregation but I don’nt know how to do that. This is my code:What can I do to achieve that? Thank you in advance.", "username": "Ariel_P" }, { "code": "subscriptionsappsubscriptionssubscribersdb.coll.insertMany(<copy/paste>)", "text": "Hi @Ariel_P and welcome in the MongoDB Community !I think I see what you want to do and I think I can do it.Can you please provide a few sample documents (just the relevant fields (especially the IDs)) from subscriptions and appsubscriptionssubscribers so I can try.If you could also provide the expected output based on these few sample docs, it would be perfect.Bonus points if I just have to db.coll.insertMany(<copy/paste>) to insert them in my cluster.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": " \"_id\": {\n \"$oid\": \"5eec3928d5fb583beb63ecee\"\n },\n \"uid\": \"SsBO0x4yl\",\n \"type\": \"dynamic\",\n \"interval\": \"1m\",\n \"limit\": 0,\n \"trial\": 0,\n \"initialDay\": -1,\n \"shorten_url\": \"http://mb\",\n \"return_url\": \"https://speryans.com\",\n \"webhook\": \"https://sperya\",\n \"currency\": \"ARS\",\n \"status\": \"active\",\n \"customerForm\": \"default\",\n \"features\": [],\n \"entity\": {\n \"$oid\": \"577660654629781000df9552\"\n },\n \"name\": \"Nombre Suscripción\",\n \"description\": \"Mi suscripción 1\",\n \"reference\": \"SsBO0x4yl\",\n \"total\": 1,\n \"options\": {\n \"button\": false,\n \"embed\": false,\n \"domain\": \"midominio.com\",\n \"theme\": {\n \"type\": \"light\",\n \"background\": \"\",\n \"showHeader\": true,\n \"header\": false,\n \"colors\": {\n \"primary\": \"#6f00ff\"\n }\n }\n },\n \"created\": {\n \"$date\": \"2020-06-19T04:03:52.558Z\"\n },\n \"updated\": {\n \"$date\": \"2020-06-19T04:03:52.568Z\"\n },\n \"agenda\": [],\n \"__v\": 0,\n \"intent\": \"payment.v2\"\n}\n\n{\n \"_id\": {\n \"$oid\": \"5bf36db84bcbac7920daa39f\"\n },\n \"uid\": \"oLb8ThhVt\",\n \"type\": \"dynamic\",\n \"interval\": \"1m\",\n \"limit\": 5,\n \"trial\": 1,\n \"initialDay\": -1,\n \"shorten_url\": \"http://m\",\n \"currency\": \"ARS\",\n \"status\": \"active\",\n \"entity\": {\n \"$oid\": \"577660654629781000df9552\"\n },\n \"description\": \"Esto es otra cosa\",\n \"total\": 10,\n \"agenda\": [],\n \"created\": {\n \"$date\": \"2018-11-20T02:13:12.588Z\"\n },\n \"updated\": {\n \"$date\": \"2018-11-20T02:13:12.588Z\"\n },\n \"__v\": 0,\n \"name\": \"Esto es otra cosa\",\n \"return_url\": \"https://speryans.com/thanks\",\n \"customerForm\": \"custom:80rV25rvb\",\n \"intent\": \"payment.v2\"\n}\n\n{\n \"_id\": {\n \"$oid\": \"5ea6067f104d8ceff26da0df\"\n },\n \"uid\": \"P2434fDxJ\",\n \"type\": \"dynamic\",\n \"interval\": \"1m\",\n \"limit\": 12,\n \"trial\": 0,\n \"initialDay\": -1,\n \"shorten_url\": \"http:\",\n \"return_url\": \"\",\n \"webhook\": 
\"https://webhook.site/c1820828-6cea-4113-8480-c4df83bbc394\",\n \"currency\": \"ARS\",\n \"status\": \"active\",\n \"customerForm\": \"default\",\n \"entity\": {\n \"$oid\": \"5761a3dd8aad6c120042b287\"\n },\n \"name\": \"Nombre Suscripción\",\n \"description\": \"Mi suscripción 1\",\n \"reference\": \"P2434fDxJ\",\n \"total\": 100,\n \"created\": {\n \"$date\": \"2020-04-26T22:09:03.788Z\"\n },\n \"updated\": {\n \"$date\": \"2020-04-26T22:09:03.801Z\"\n },\n \"agenda\": [],\n \"__v\": 0,\n \"intent\": \"payment.v2\",\n \"notes\": \"\"\n} \n\n\n...and this is \"appsubscriptionssubscribers\": \n\n\n\n\n{\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab03661ff\"\n },\n \"uid\": \"FX8BBAH3VBH8T8051D\",\n \"reference\": \"FX8BBAH3VBH8T8051D\",\n \"total\": 0,\n \"setupFee\": 0,\n \"test\": false,\n \"customerData\": {\n \"_id\": \"5b8dd7bf26d681000f90bc20\",\n \"name\": \"Mijo\",\n \"identification\": \"33996953\",\n \"email\": \"[email protected]\",\n \"phone\": \"12123123\",\n \"uid\": \"33996953\",\n \"tired_lobot\": \"logical_ayla_secura\",\n \"purple_biggs_darklighter\": \"1988-10-13\"\n },\n \"sources\": [],\n \"subscription\": {\n \"$oid\": \"5bf36db84bcbac7920daa39f\"\n },\n \"status\": \"active\",\n \"customer\": {\n \"$oid\": \"5b8dd7bf26d681000f90bc20\"\n },\n \"startDate\": {\n \"$date\": \"2020-11-30T16:44:21.867Z\"\n },\n \"agenda\": [{\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366200\"\n },\n \"day\": 30,\n \"month\": 10,\n \"period\": 1\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366201\"\n },\n \"day\": 30,\n \"month\": 11,\n \"period\": 2\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366202\"\n },\n \"day\": 30,\n \"month\": 0,\n \"period\": 3\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366203\"\n },\n \"day\": 28,\n \"month\": 1,\n \"period\": 4\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366204\"\n },\n \"day\": 28,\n \"month\": 2,\n \"period\": 5\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366205\"\n },\n \"day\": 28,\n \"month\": 3,\n \"period\": 6\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366206\"\n },\n \"day\": 28,\n \"month\": 4,\n \"period\": 7\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366207\"\n },\n \"day\": 28,\n \"month\": 5,\n \"period\": 8\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366208\"\n },\n \"day\": 28,\n \"month\": 6,\n \"period\": 9\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366209\"\n },\n \"day\": 28,\n \"month\": 7,\n \"period\": 10\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab036620a\"\n },\n \"day\": 28,\n \"month\": 8,\n \"period\": 11\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab036620b\"\n },\n \"day\": 28,\n \"month\": 9,\n \"period\": 12\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab036620c\"\n },\n \"day\": 28,\n \"month\": 10,\n \"period\": 13\n }],\n \"customerToken\": [{\n \"active\": true,\n \"_id\": {\n \"$oid\": \"5fc521669d588f6ab036620f\"\n },\n \"token\": {\n \"$oid\": \"5fc521249d588f6ab03661e4\"\n }\n }],\n \"executions\": [{\n \"created\": 
{\n \"$date\": \"2020-11-30T16:44:21.867Z\"\n },\n \"period\": 0,\n \"status\": \"trial\",\n \"type\": \"registration\",\n \"uid\": \"2YGXXOENTH0X6EETU1\",\n \"reference\": \"\",\n \"currency\": \"ARS\",\n \"total\": 10,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab036620d\"\n },\n \"day\": 30,\n \"month\": 10,\n \"year\": 2020,\n \"subscription\": {\n \"$oid\": \"5bf36db84bcbac7920daa39f\"\n }\n }],\n \"created\": {\n \"$date\": \"2020-11-30T16:44:21.870Z\"\n },\n \"updated\": {\n \"$date\": \"2020-11-30T16:44:21.875Z\"\n },\n \"__v\": 0\n},\n\n{\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab03661ff\"\n },\n \"uid\": \"FX8BBAH3VBH8T8051D\",\n \"reference\": \"FX8BBAH3VBH8T8051D\",\n \"total\": 0,\n \"setupFee\": 0,\n \"test\": false,\n \"customerData\": {\n \"_id\": \"5b8dd7bf26d681000f90bc20\",\n \"name\": \"Mijo\",\n \"identification\": \"33996953\",\n \"email\": \"[email protected]\",\n \"phone\": \"12123123\",\n \"uid\": \"33996953\",\n \"tired_lobot\": \"logical_ayla_secura\",\n \"purple_biggs_darklighter\": \"1988-10-13\"\n },\n \"sources\": [],\n \"subscription\": {\n \"$oid\": \"5bf36db84bcbac7920daa39f\"\n },\n \"status\": \"active\",\n \"customer\": {\n \"$oid\": \"5b8dd7bf26d681000f90bc20\"\n },\n \"startDate\": {\n \"$date\": \"2020-11-30T16:44:21.867Z\"\n },\n \"agenda\": [{\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366200\"\n },\n \"day\": 30,\n \"month\": 10,\n \"period\": 1\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366201\"\n },\n \"day\": 30,\n \"month\": 11,\n \"period\": 2\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366202\"\n },\n \"day\": 30,\n \"month\": 0,\n \"period\": 3\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366203\"\n },\n \"day\": 28,\n \"month\": 1,\n \"period\": 4\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366204\"\n },\n \"day\": 28,\n \"month\": 2,\n \"period\": 5\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366205\"\n },\n \"day\": 28,\n \"month\": 3,\n \"period\": 6\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366206\"\n },\n \"day\": 28,\n \"month\": 4,\n \"period\": 7\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366207\"\n },\n \"day\": 28,\n \"month\": 5,\n \"period\": 8\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366208\"\n },\n \"day\": 28,\n \"month\": 6,\n \"period\": 9\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab0366209\"\n },\n \"day\": 28,\n \"month\": 7,\n \"period\": 10\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab036620a\"\n },\n \"day\": 28,\n \"month\": 8,\n \"period\": 11\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab036620b\"\n },\n \"day\": 28,\n \"month\": 9,\n \"period\": 12\n }, {\n \"type\": \"period\",\n \"year\": 0,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab036620c\"\n },\n \"day\": 28,\n \"month\": 10,\n \"period\": 13\n }],\n \"customerToken\": [{\n \"active\": true,\n \"_id\": {\n \"$oid\": \"5fc521669d588f6ab036620f\"\n },\n \"token\": {\n \"$oid\": \"5fc521249d588f6ab03661e4\"\n }\n }],\n \"executions\": [{\n \"created\": {\n \"$date\": \"2020-11-30T16:44:21.867Z\"\n },\n 
\"period\": 0,\n \"status\": \"trial\",\n \"type\": \"registration\",\n \"uid\": \"2YGXXOENTH0X6EETU1\",\n \"reference\": \"\",\n \"currency\": \"ARS\",\n \"total\": 10,\n \"_id\": {\n \"$oid\": \"5fc521659d588f6ab036620d\"\n },\n \"day\": 30,\n \"month\": 10,\n \"year\": 2020,\n \"subscription\": {\n \"$oid\": \"5bf36db84bcbac7920daa39f\"\n }\n }],\n \"created\": {\n \"$date\": \"2020-11-30T16:44:21.870Z\"\n },\n \"updated\": {\n \"$date\": \"2020-11-30T16:44:21.875Z\"\n },\n \"__v\": 0\n}", "text": "I would try that, it is my first time so, here we go, this is “subscriptions” and “appsubscriptionssubscribers”:", "username": "Ariel_P" }, { "code": "appsubscriptionssubscribers_id5bf36db84bcbac7920daa39fskiplimit[\n {\n '$match': {\n 'entity': params.entity, \n 'status': 'active'\n }\n }, {\n '$lookup': {\n 'from': 'appsubscriptionssubscribers', \n 'localField': '_id', \n 'foreignField': 'subscription', \n 'as': 'subscribers'\n }\n }, {\n '$match': {\n 'subscribers.0': {\n '$exists': true\n }\n }\n }, {\n '$skip': batch.skip\n }, {\n '$limit': batch.size\n }, {\n '$unwind': {\n 'path': '$subscribers'\n }\n }, {\n '$group': {\n '_id': null, \n 'subs': {\n '$push': '$$ROOT'\n }\n }\n }, {\n '$project': {\n '_id': 0, \n 'subs': 1\n }\n }\n]\n", "text": "I think that’s what you want but your sample docs didn’t help a lot because the 2nd appsubscriptionssubscribers share the same _id with the 1st one.You didn’t provide the expected output for this data sample so I’m not sure what I’m aiming for.Also I think there is a problem of duplicated items like in this example (in the end it’s the same 5bf36db84bcbac7920daa39f sub twice.Also skip and limit don’t make sense without a $sort ahead of them as your docs are in random orders before then.I hope it’s what you want.FYI: As you provided extended JSON format, I had to use mongoimport to insert them in MDB.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "I don´t know how to put value on ‘path’ in the Compass because I am making the aggregation in the subscribers collection which the actual name is “apaasubscriptionspplans” . When you said…FYI: As you provided extended JSON format, I had to use mongoimport to insert them in MDB.in which format should I show you my data?", "username": "Ariel_P" }, { "code": "{\n _id: ObjectId(\"5bf36db84bcbac7920daa39f\"),\n uid: 'oLb8ThhVt',\n type: 'dynamic',\n interval: '1m',\n limit: 5,\n trial: 1,\n initialDay: -1,\n shorten_url: 'http://m',\n currency: 'ARS',\n status: 'active',\n entity: ObjectId(\"577660654629781000df9552\"),\n description: 'Esto es otra cosa',\n total: 10,\n agenda: [],\n created: ISODate(\"2018-11-20T02:13:12.588Z\"),\n updated: ISODate(\"2018-11-20T02:13:12.588Z\"),\n __v: 0,\n name: 'Esto es otra cosa',\n return_url: 'https://speryans.com/thanks',\n customerForm: 'custom:80rV25rvb',\n intent: 'payment.v2'\n}\n", "text": "The output from Mongosh is easier because I can just copy/paste back into Mongosh for insertion.No big deal though, but you didn’t get the bonus points !", "username": "MaBeuLux88" }, { "code": "", "text": "Oh, I didn’t know that, I will do that in mongosh right now. 
Thanks I need this kind of information and solution.", "username": "Ariel_P" }, { "code": "subscriptionsappsubscriptionssubscribersappsubscriptionssubscriberssubscriptions", "text": "I don´t know how to put value on ‘path’ in the Compass because I am making the aggregation in the subscribers collection which the actual name is “apaasubscriptionspplans” .I don’t understand.At the moment you are doing a $lookup from the subscriptions collection to the appsubscriptionssubscribers collection. Which results in an array of appsubscriptionssubscribers docs into the subscriptions collection.You can do the opposite if you like. Or you can also “lookup” (=join) on the same collection if this is what you are after. Totally works.", "username": "MaBeuLux88" }, { "code": "{ subscribers: \n [ { _id: ObjectId(\"5fc521659d588f6ab03661ff\"),\n uid: 'FX8BBAH3VBH8T8051D',\n reference: 'FX8BBAH3VBH8T8051D',\n total: 0,\n setupFee: 0,\n test: false,\n customerData: \n { _id: '5b8dd7bf26d681000f90bc20',\n name: 'Mijo',\n identification: '33996953',\n email: '[email protected]',\n phone: '12123123',\n uid: '33996953',\n tired_lobot: 'logical_ayla_secura',\n purple_biggs_darklighter: '1988-10-13' },\n sources: [],\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\"),\n status: 'active',\n customer: ObjectId(\"5b8dd7bf26d681000f90bc20\"),\n startDate: 2020-11-30T16:44:21.867Z,\n agenda: \n [ { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab0366200\"),\n day: 30,\n month: 10,\n period: 1 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab0366201\"),\n day: 30,\n month: 11,\n period: 2 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab0366202\"),\n day: 30,\n month: 0,\n period: 3 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab0366203\"),\n day: 28,\n month: 1,\n period: 4 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab0366204\"),\n day: 28,\n month: 2,\n period: 5 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab0366205\"),\n day: 28,\n month: 3,\n period: 6 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab0366206\"),\n day: 28,\n month: 4,\n period: 7 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab0366207\"),\n day: 28,\n month: 5,\n period: 8 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab0366208\"),\n day: 28,\n month: 6,\n period: 9 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab0366209\"),\n day: 28,\n month: 7,\n period: 10 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab036620a\"),\n day: 28,\n month: 8,\n period: 11 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab036620b\"),\n day: 28,\n month: 9,\n period: 12 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc521659d588f6ab036620c\"),\n day: 28,\n month: 10,\n period: 13 } ],\n customerToken: \n [ { active: true,\n _id: ObjectId(\"5fc521669d588f6ab036620f\"),\n token: ObjectId(\"5fc521249d588f6ab03661e4\") } ],\n executions: \n [ { created: 2020-11-30T16:44:21.867Z,\n period: 0,\n status: 'trial',\n type: 'registration',\n uid: '2YGXXOENTH0X6EETU1',\n reference: '',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5fc521659d588f6ab036620d\"),\n day: 30,\n month: 10,\n year: 2020,\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\") } ],\n created: 2020-11-30T16:44:21.870Z,\n updated: 2020-11-30T16:44:21.875Z,\n __v: 0 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eed8\"),\n uid: 'SgkjbBCVI',\n reference: 
'SgkjbBCVI',\n status: 'active',\n customerData: \n { name: 'Admin Mobbex',\n identification: '21123123',\n email: '[email protected]',\n phone: '15123123' },\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\"),\n customer: ObjectId(\"5c22f96a28f81718732c0336\"),\n agenda: \n [ { _id: ObjectId(\"5c23027e7a62ce19e989eee4\"), day: 26, month: 10 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eee3\"), day: 26, month: 11 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eee2\"), day: 26, month: 1 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eee1\"), day: 26, month: 2 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eee0\"), day: 26, month: 3 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eedf\"), day: 26, month: 4 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eede\"), day: 26, month: 5 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eedd\"), day: 26, month: 6 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eedc\"), day: 26, month: 7 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eedb\"), day: 26, month: 8 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eeda\"), day: 26, month: 9 },\n { _id: ObjectId(\"5c23027e7a62ce19e989eed9\"), day: 26, month: 10 } ],\n customerToken: \n [ { active: true,\n _id: ObjectId(\"5c23027e7a62ce19e989eee5\"),\n customer: ObjectId(\"5c22f96a28f81718732c0336\"),\n token: ObjectId(\"5c23026b7a62ce19e989eecd\"),\n entitySource: ObjectId(\"5bb0efa2565353000efe2a13\"),\n entityInstallment: ObjectId(\"5baff820565353000efe2814\") } ],\n executions: \n [ { created: 2018-12-25T04:24:30.179Z,\n period: 0,\n status: 'paid',\n uid: 'hj_XcYau7w',\n _id: ObjectId(\"5c23027e7a62ce19e989eee6\"),\n day: 26,\n month: 11 },\n { created: 2018-12-26T04:25:11.875Z,\n period: 1,\n status: 'paid',\n uid: '_nt7Oh1kU',\n _id: ObjectId(\"5c2302ab7a62ce19e989eef0\"),\n day: 26,\n month: 11 },\n { created: 2018-12-27T04:25:11.875Z,\n period: 2,\n status: 'retried',\n uid: '_nt7Oh1kJ',\n day: 27,\n month: 11,\n _id: ObjectId(\"5c476224cf33037c3f421d08\") },\n { created: 2019-01-22T19:26:27.290Z,\n period: 2,\n status: 'retried',\n type: 'manual',\n uid: 'JmUcvDEVR',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5c476e63ba71917da2c6b231\"),\n day: 22,\n month: 0 },\n { created: 2019-01-22T19:30:32.584Z,\n period: 2,\n status: 'retried',\n type: 'manual',\n uid: 'ZNolJrhVP',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5c476f586ded2a7dec73f4a3\"),\n day: 22,\n month: 0 },\n { created: 2019-01-22T19:34:32.064Z,\n period: 2,\n status: 'retried',\n type: 'manual',\n uid: 'JoVPYElXI',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5c4770485578b17e33ab717b\"),\n day: 22,\n month: 0 },\n { created: 2019-01-22T19:36:36.457Z,\n period: 2,\n status: 'paid',\n type: 'manual',\n uid: 'YJbQt5tN9',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5c4770c4e9e4f27e4b516daa\"),\n day: 22,\n month: 0 } ],\n created: 2018-12-26T04:24:30.186Z,\n updated: 2018-12-26T04:24:30.186Z,\n __v: 0,\n total: 10,\n startDate: 2018-11-26T03:00:00.000Z },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2c9a\"),\n uid: 'NThbeRUcC',\n reference: 'NThbeRUcC',\n status: 'active',\n customerData: \n { name: 'Hola Prueba',\n identification: '12123123',\n email: '[email protected]',\n phone: '15123123' },\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\"),\n customer: ObjectId(\"5bf4d1a1d9641a151c0cc65b\"),\n agenda: \n [ { _id: ObjectId(\"5c22e6d6fb82c614eaec2ca6\"), day: 25, month: 11 },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2ca5\"), day: 25, month: 0 },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2ca4\"), day: 25, month: 1 },\n { _id: 
ObjectId(\"5c22e6d6fb82c614eaec2ca3\"), day: 25, month: 2 },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2ca2\"), day: 25, month: 3 },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2ca1\"), day: 25, month: 4 },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2ca0\"), day: 25, month: 5 },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2c9f\"), day: 25, month: 6 },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2c9e\"), day: 25, month: 7 },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2c9d\"), day: 25, month: 8 },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2c9c\"), day: 25, month: 9 },\n { _id: ObjectId(\"5c22e6d6fb82c614eaec2c9b\"), day: 25, month: 10 } ],\n customerToken: \n [ { active: true,\n _id: ObjectId(\"5c22e6d6fb82c614eaec2ca7\"),\n customer: ObjectId(\"5bf4d1a1d9641a151c0cc65b\"),\n token: ObjectId(\"5c22e61dfb82c614eaec2c91\"),\n entitySource: ObjectId(\"5bb0efa2565353000efe2a19\"),\n entityInstallment: ObjectId(\"5bb0ef65565353000efe2a0c\") } ],\n executions: \n [ { created: 2018-12-26T02:26:30.867Z,\n period: 0,\n status: 'paid',\n uid: '5XQTqRDrm',\n _id: ObjectId(\"5c22e6d6fb82c614eaec2ca8\"),\n day: 25,\n month: 11 } ],\n created: 2018-12-26T02:26:30.881Z,\n updated: 2018-12-26T02:26:30.881Z,\n __v: 0,\n startDate: 2018-12-25T03:00:00.000Z },\n { _id: ObjectId(\"5ed4b77e73b75e74a675e0c7\"),\n uid: 'T0~Q0nMyz',\n reference: 'T0~Q0nMyz',\n total: 0,\n test: false,\n customerData: \n { name: 'Roman A Sarria',\n email: '[email protected]',\n identification: '33996953',\n phone: '12123123' },\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\"),\n status: 'active',\n customer: ObjectId(\"5b9ab217f25fd4000ec200e6\"),\n agenda: \n [ { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0c8\"),\n day: 1,\n month: 5,\n period: 1 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0c9\"),\n day: 1,\n month: 6,\n period: 2 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0ca\"),\n day: 1,\n month: 7,\n period: 3 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0cb\"),\n day: 1,\n month: 8,\n period: 4 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0cc\"),\n day: 1,\n month: 9,\n period: 5 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0cd\"),\n day: 1,\n month: 10,\n period: 6 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0ce\"),\n day: 1,\n month: 11,\n period: 7 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0cf\"),\n day: 1,\n month: 0,\n period: 8 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0d0\"),\n day: 1,\n month: 1,\n period: 9 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0d1\"),\n day: 1,\n month: 2,\n period: 10 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0d2\"),\n day: 1,\n month: 3,\n period: 11 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0d3\"),\n day: 1,\n month: 4,\n period: 12 } ],\n customerToken: \n [ { active: true,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0d5\"),\n token: ObjectId(\"5ed4b77073b75e74a675e0b6\") } ],\n executions: \n [ { created: 2020-06-01T08:08:30.153Z,\n period: 0,\n status: 'trial',\n type: 'registration',\n uid: 'K1q4EEydQ5',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5ed4b77e73b75e74a675e0d4\"),\n day: 1,\n month: 5,\n year: 2020,\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\") } ],\n created: 2020-06-01T08:08:30.155Z,\n updated: 2020-06-01T08:08:30.176Z,\n __v: 0,\n 
startDate: 2020-06-01T03:00:00.000Z },\n { _id: ObjectId(\"5f01f8597e3620344a6f7431\"),\n uid: 'l2IuPVqVa',\n reference: 'l2IuPVqVa',\n total: 0,\n test: false,\n customerData: \n { name: 'Sarria Roman Agustin',\n email: '[email protected]',\n identification: '33996953',\n phone: '12123123' },\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\"),\n status: 'active',\n customer: ObjectId(\"5b9ab217f25fd4000ec200e6\"),\n agenda: \n [ { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf141\"),\n day: 7,\n month: 6,\n period: 1 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf142\"),\n day: 6,\n month: 7,\n period: 2 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf143\"),\n day: 5,\n month: 8,\n period: 3 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf144\"),\n day: 5,\n month: 9,\n period: 4 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf145\"),\n day: 5,\n month: 10,\n period: 5 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf146\"),\n day: 5,\n month: 11,\n period: 6 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf147\"),\n day: 5,\n month: 0,\n period: 7 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf148\"),\n day: 5,\n month: 1,\n period: 8 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf149\"),\n day: 5,\n month: 2,\n period: 9 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf14a\"),\n day: 5,\n month: 3,\n period: 10 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf14b\"),\n day: 5,\n month: 4,\n period: 11 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5f571bec8a2785b2f8bdf14c\"),\n day: 5,\n month: 5,\n period: 12 } ],\n customerToken: \n [ { active: false,\n _id: ObjectId(\"5f01f8597e3620344a6f743f\"),\n token: ObjectId(\"5f01f8407e3620344a6f7420\") },\n { active: true,\n _id: ObjectId(\"5f4700352210897614a48c2e\"),\n token: ObjectId(\"5f4700322210897614a48c1c\") } ],\n executions: \n [ { created: 2020-07-05T15:57:13.302Z,\n period: 0,\n status: 'retried',\n type: 'registration',\n uid: 'JkWZGLR5OU',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5f01f8597e3620344a6f743e\"),\n day: 5,\n month: 6,\n year: 2020,\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\") },\n { created: 2020-09-08T05:43:55.304Z,\n period: -1,\n status: 'paid',\n type: 'manual',\n uid: 'rAfgg1fB88',\n reference: '',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5f571a1b8a2785b2f8bdf10d\"),\n day: 8,\n month: 8,\n year: 2020,\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\") },\n { created: 2020-11-18T04:53:00.356Z,\n period: 0,\n status: 'paid',\n type: 'cron',\n uid: 'B768YEIYR45MZEQ6R8',\n reference: '',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5fb4a8acabe27427387cd935\"),\n day: 18,\n month: 10,\n year: 2020,\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\") } ],\n created: 2020-07-05T15:57:13.307Z,\n updated: 2020-07-05T15:57:13.313Z,\n __v: 0,\n startDate: 2020-07-07T03:00:00.000Z },\n { _id: ObjectId(\"5efb6a84853a0348789c0865\"),\n uid: '4ucsbBkp~',\n reference: '4ucsbBkp~',\n total: 0,\n test: false,\n customerData: \n { name: 'Sarria Roman Agustin',\n email: '[email protected]',\n identification: '33996953',\n phone: '12123123' },\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\"),\n status: 'active',\n customer: ObjectId(\"5b9ab217f25fd4000ec200e6\"),\n agenda: \n [ { type: 'period',\n year: 0,\n _id: 
ObjectId(\"5efb6a84853a0348789c0866\"),\n day: 30,\n month: 5,\n period: 1 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c0867\"),\n day: 30,\n month: 6,\n period: 2 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c0868\"),\n day: 30,\n month: 7,\n period: 3 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c0869\"),\n day: 30,\n month: 8,\n period: 4 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c086a\"),\n day: 30,\n month: 9,\n period: 5 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c086b\"),\n day: 30,\n month: 10,\n period: 6 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c086c\"),\n day: 30,\n month: 11,\n period: 7 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c086d\"),\n day: 30,\n month: 0,\n period: 8 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c086e\"),\n day: 28,\n month: 1,\n period: 9 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c086f\"),\n day: 28,\n month: 2,\n period: 10 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c0870\"),\n day: 28,\n month: 3,\n period: 11 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c0871\"),\n day: 28,\n month: 4,\n period: 12 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5efb6a84853a0348789c0872\"),\n day: 28,\n month: 5,\n period: 13 } ],\n customerToken: \n [ { active: true,\n _id: ObjectId(\"5efb6a85853a0348789c0874\"),\n token: ObjectId(\"5efb6a76853a0348789c0854\") } ],\n executions: \n [ { created: 2020-06-30T16:38:28.904Z,\n period: 0,\n status: 'deleted',\n type: 'registration',\n uid: 'pcsTgx~zuH',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5efb6a84853a0348789c0873\"),\n day: 30,\n month: 5,\n year: 2020,\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\") } ],\n created: 2020-06-30T16:38:28.905Z,\n updated: 2020-06-30T16:38:28.911Z,\n __v: 0,\n startDate: 2020-06-30T03:00:00.000Z },\n { _id: ObjectId(\"5fc522d79d588f6ab036622d\"),\n uid: '13YJHY1YWCNI6WAOQF',\n reference: '13YJHY1YWCNI6WAOQF',\n total: 0,\n setupFee: 0,\n test: false,\n customerData: \n { name: 'Mijo 3',\n identification: '33996953',\n email: '[email protected]',\n tired_lobot: 'logical_ayla_secura',\n purple_biggs_darklighter: '2020-12-03T09:54:33.787Z' },\n sources: [],\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\"),\n status: 'active',\n customer: ObjectId(\"5b8dd7bf26d681000f90bc20\"),\n startDate: 2020-11-30T16:50:31.185Z,\n agenda: \n [ { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab036622e\"),\n day: 30,\n month: 10,\n period: 1 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab036622f\"),\n day: 30,\n month: 11,\n period: 2 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab0366230\"),\n day: 30,\n month: 0,\n period: 3 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab0366231\"),\n day: 28,\n month: 1,\n period: 4 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab0366232\"),\n day: 28,\n month: 2,\n period: 5 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab0366233\"),\n day: 28,\n month: 3,\n period: 6 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab0366234\"),\n day: 28,\n month: 4,\n period: 7 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab0366235\"),\n day: 28,\n month: 5,\n period: 8 },\n { type: 'period',\n year: 
0,\n _id: ObjectId(\"5fc522d79d588f6ab0366236\"),\n day: 28,\n month: 6,\n period: 9 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab0366237\"),\n day: 28,\n month: 7,\n period: 10 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab0366238\"),\n day: 28,\n month: 8,\n period: 11 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab0366239\"),\n day: 28,\n month: 9,\n period: 12 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc522d79d588f6ab036623a\"),\n day: 28,\n month: 10,\n period: 13 } ],\n customerToken: \n [ { active: false,\n _id: ObjectId(\"5fc522d79d588f6ab036623c\"),\n token: ObjectId(\"5fc522c89d588f6ab0366214\") },\n { active: true,\n _id: ObjectId(\"5fdc19f0c53befa954817b0a\"),\n token: ObjectId(\"5fdc19ebc53befa954817af1\") } ],\n executions: \n [ { created: 2020-11-30T16:50:31.185Z,\n period: 0,\n status: 'trial',\n type: 'registration',\n uid: '7O8G5PACN4LRB6CVRC',\n reference: '',\n currency: 'ARS',\n total: 10,\n _id: ObjectId(\"5fc522d79d588f6ab036623b\"),\n day: 30,\n month: 10,\n year: 2020,\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\") } ],\n created: 2020-11-30T16:50:31.187Z,\n updated: 2020-11-30T16:50:31.192Z,\n __v: 0 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cb7\"),\n uid: 'xKv3mPTa-',\n reference: 'xKv3mPTa-',\n status: 'active',\n customerData: \n { name: 'Hola Prueba',\n identification: '12123123',\n email: '[email protected]',\n phone: '15123123' },\n subscription: ObjectId(\"5bf36db84bcbac7920daa39f\"),\n customer: ObjectId(\"5bf4d1a1d9641a151c0cc65b\"),\n agenda: \n [ { _id: ObjectId(\"5c22e6fdfb82c614eaec2cc3\"), day: 26, month: 11 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cc2\"), day: 25, month: 0 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cc1\"), day: 25, month: 1 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cc0\"), day: 25, month: 2 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cbf\"), day: 25, month: 3 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cbe\"), day: 25, month: 4 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cbd\"), day: 25, month: 5 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cbc\"), day: 25, month: 6 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cbb\"), day: 25, month: 7 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cba\"), day: 25, month: 8 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cb9\"), day: 25, month: 9 },\n { _id: ObjectId(\"5c22e6fdfb82c614eaec2cb8\"), day: 25, month: 10 } ],\n customerToken: \n [ { active: true,\n _id: ObjectId(\"5c22e6fdfb82c614eaec2cc4\"),\n customer: ObjectId(\"5bf4d1a1d9641a151c0cc65b\"),\n token: ObjectId(\"5c22e6edfb82c614eaec2cad\"),\n entitySource: ObjectId(\"5bb0efa2565353000efe2a19\"),\n entityInstallment: ObjectId(\"5bb0ef65565353000efe2a0c\") } ],\n executions: \n [ { created: 2018-12-26T02:27:09.826Z,\n period: 0,\n status: 'paid',\n uid: 'jfr_dAwH0',\n _id: ObjectId(\"5c22e6fdfb82c614eaec2cc5\"),\n day: 25,\n month: 11 } ],\n created: 2018-12-26T02:27:09.830Z,\n updated: 2018-12-26T02:27:09.830Z,\n __v: 0,\n total: 9,\n startDate: 2018-12-26T03:00:00.000Z } ] }\n{ subscribers: \n [ { _id: ObjectId(\"5fc33ac804ffee77dc74a262\"),\n uid: '2KDYL9EPI2ZU6SKYZ3',\n reference: 'demo_user_321',\n total: 0,\n setupFee: 0,\n test: false,\n customerData: \n { identification: '32321321',\n email: '[email protected]',\n name: 'Demo User' },\n sources: [],\n subscription: ObjectId(\"5fc33ab504ffee77dc74a260\"),\n status: 'active',\n customer: ObjectId(\"5e9e93ee696b688246ba7a1c\"),\n customerToken: [],\n executions: [],\n agenda: \n [ \n { type: 
'period',\n year: 0,\n _id: ObjectId(\"5fc33ac804ffee77dc74a26e\"),\n day: 15,\n month: 11,\n period: 12 } ],\n startDate: 2021-01-15T03:00:00.000Z,\n created: 2020-11-29T06:08:08.588Z,\n updated: 2020-11-29T06:08:08.593Z,\n __v: 0 },\n { _id: ObjectId(\"5fc33ba4fc854b4170755364\"),\n uid: 'V4JB942GQQ9S7AN5CR',\n reference: 'demo_user_321',\n total: 0,\n setupFee: 0,\n test: false,\n customerData: \n { identification: '32321321',\n email: '[email protected]',\n name: 'Demo User' },\n sources: [],\n subscription: ObjectId(\"5fc33ab504ffee77dc74a260\"),\n status: 'active',\n customer: ObjectId(\"5e9e93ee696b688246ba7a1c\"),\n customerToken: \n [ { active: false,\n _id: ObjectId(\"5fc33f202d6b4e6638ad5c89\"),\n token: ObjectId(\"5fc33f1c2d6b4e6638ad5c70\") },\n { active: false,\n _id: ObjectId(\"5fc33fc62d6b4e6638ad5caa\"),\n token: ObjectId(\"5fc33fc32d6b4e6638ad5c91\") },\n { active: false,\n _id: ObjectId(\"5fc3400e2d6b4e6638ad5cc9\"),\n token: ObjectId(\"5fc3400b2d6b4e6638ad5cb0\") },\n { active: false,\n _id: ObjectId(\"5fc3411a2d6b4e6638ad5cee\"),\n token: ObjectId(\"5fc341172d6b4e6638ad5cd5\") },\n { active: true,\n _id: ObjectId(\"5fc34276d260ebee34c358df\"),\n token: ObjectId(\"5fc34273d260ebee34c358c6\") } ],\n executions: [],\n agenda: \n [ { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b4170755365\"),\n day: 15,\n month: 0,\n period: 1 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b4170755366\"),\n day: 15,\n month: 1,\n period: 2 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b4170755367\"),\n day: 15,\n month: 2,\n period: 3 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b4170755368\"),\n day: 15,\n month: 3,\n period: 4 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b4170755369\"),\n day: 15,\n month: 4,\n period: 5 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b417075536a\"),\n day: 15,\n month: 5,\n period: 6 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b417075536b\"),\n day: 15,\n month: 6,\n period: 7 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b417075536c\"),\n day: 15,\n month: 7,\n period: 8 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b417075536d\"),\n day: 15,\n month: 8,\n period: 9 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b417075536e\"),\n day: 15,\n month: 9,\n period: 10 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b417075536f\"),\n day: 15,\n month: 10,\n period: 11 },\n { type: 'period',\n year: 0,\n _id: ObjectId(\"5fc33ba4fc854b4170755370\"),\n day: 15,\n month: 11,\n period: 12 } ],\n startDate: 2021-01-15T03:00:00.000Z,\n created: 2020-11-29T06:11:48.966Z,\n updated: 2020-11-29T06:11:48.973Z,\n __v: 0 } ] }\n", "text": "This is the result in mongosh of my aggregation:", "username": "Ariel_P" }, { "code": "```db.appsubscriptionsplans.aggregate([{\n '$match': {\n 'entity': ObjectId('577660654629781000df9552'), \n 'status': 'active'\n }\n }, \n {\n '$lookup': {\n 'from': 'appsubscriptionssubscribers', \n 'localField': '_id', \n 'foreignField': 'subscription', \n 'as': 'subscribers'\n }\n }, \n {\n '$match': {\n 'subscribers.0': {\n '$exists': true\n }\n }\n },\n //{ $skip : batch.skip },\n { $limit : 2 }, \n {\n '$project': {\n '_id': 0, \n 'subscribers': 1\n }\n }]);`", "text": "My aggregation in mogosh was:", "username": "Ariel_P" }, { "code": " {\n '$unwind': {\n 'path': '$subscribers'\n }\n }, {\n '$group': {\n '_id': null, \n 'subs': {\n '$push': 
'$ROOT'\n }\n }\n }, {\n '$project': {\n '_id': 0, \n 'subs': 1\n }\n }\n", "text": "Try my version with this at the end.", "username": "MaBeuLux88" }, { "code": "{ subs: [] }", "text": "This is the return in mogosh: { subs: [] }", "username": "Ariel_P" }, { "code": "", "text": "Not possible because you had something a minute ago !\nYou did something wrong.", "username": "MaBeuLux88" }, { "code": "", "text": "Use the “import pipeline from text” option in Compass to import the entire pipeline I shared earlier.", "username": "MaBeuLux88" }, { "code": "", "text": "Also I suggest you have a look to this free training that will be useful for sure !Discover our MongoDB Database Management courses and begin improving your CV with MongoDB certificates. Start training with MongoDB University for free today.", "username": "MaBeuLux88" }, { "code": "", "text": "yes, in Compass already at stage $group “subs” is an empty array", "username": "Ariel_P" }, { "code": "", "text": "Fix the earlier stage then. You can also disable some temporarily.", "username": "MaBeuLux88" }, { "code": "", "text": "The earlier stage is the $unwind you made…", "username": "Ariel_P" }, { "code": "$unwindsubscribers", "text": "$unwind breaks arrays into multiple docs. If you have nothing after that stage, it means the array subscribers is empty in all the docs you have at this stage.\nSo something’s wrong above that.", "username": "MaBeuLux88" }, { "code": "", "text": "\nimage1624×1003 129 KB\n", "username": "MaBeuLux88" } ]
I want to join/group different collections into one
2022-05-30T15:23:17.202Z
I want to join/group different collections into one
4,700
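For reference, a cleaned-up sketch of the pipeline this thread converges on, runnable in mongosh or importable into Compass. Collection and field names follow the thread; the ObjectId value and the skip/limit numbers are placeholders, and the $sort is added per the advice above so batching has a stable order:

```js
db.appsubscriptionsplans.aggregate([
  { $match: { entity: ObjectId("577660654629781000df9552"), status: "active" } },
  { $lookup: {
      from: "appsubscriptionssubscribers",  // join subscribers onto each plan
      localField: "_id",
      foreignField: "subscription",
      as: "subscribers"
  } },
  { $match: { "subscribers.0": { $exists: true } } }, // keep only plans with at least one subscriber
  { $sort: { _id: 1 } },   // stable order before skip/limit
  { $skip: 0 },            // placeholder batch offset
  { $limit: 2 },           // placeholder batch size
  { $unwind: "$subscribers" },
  { $group: { _id: null, subs: { $push: "$$ROOT" } } }, // note the double $$ on $$ROOT
  { $project: { _id: 0, subs: 1 } }
])
```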
null
[ "kotlin" ]
[ { "code": "", "text": "Following this tutorial doesn’t work. https://www.mongodb.com/docs/realm/sdk/kotlin/install/kotlin-multiplatform/#std-label-kotlin-install-kotlin-multiplatformIt tells me Caused by: org.gradle.api.internal.artifacts.ivyservice.DefaultLenientConfiguration$ArtifactResolveException: Could not resolve all files for configuration ‘:androidApp:debugRuntimeClasspath’.", "username": "Solomon_Ponzio" }, { "code": "", "text": "The basic realm database works, but sync doesn’t.", "username": "Solomon_Ponzio" }, { "code": "", "text": "I’ve figured it out: the default android studio project has iosSimulatorArm64() but realm sync doesn’t support that.", "username": "Solomon_Ponzio" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Gradle sync fails with Kotlin Multiplatform Mobile and Realm Sync
2022-06-01T17:24:39.640Z
Gradle sync fails with Kotlin Multiplatform Mobile and Realm Sync
3,401
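A minimal sketch of the shared-module Gradle configuration implied by that fix, i.e. dropping the simulator-ARM target that Realm Sync did not ship a binary for at the time. The plugin and library versions are assumptions; check the current Realm Kotlin releases:

```kotlin
// shared/build.gradle.kts — illustrative only; versions are assumptions
plugins {
    kotlin("multiplatform")
    id("io.realm.kotlin") version "1.0.2" // hypothetical version
}

kotlin {
    android()
    iosX64()
    iosArm64()
    // iosSimulatorArm64()  <-- removed: not supported by Realm Sync at the time

    sourceSets {
        val commonMain by getting {
            dependencies {
                implementation("io.realm.kotlin:library-sync:1.0.2") // hypothetical version
            }
        }
    }
}
```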
https://www.mongodb.com/…6f83e9e98b6d.png
[ "app-services-cli" ]
[ { "code": "", "text": "I was following along with the Set up the Task Tracker Tutorial Backend, but I’ve encountered the following error when I reached step E.Please advise!", "username": "Primadonna_Queen" }, { "code": "", "text": "Hmm, I was just able to do this successfully. Based on that error message, I’m wondering if your data source details don’t match what the backend is looking for. When you followed Step B to set up your Atlas cluster, did you use any different settings or choose a different cluster name? Alternately, did your Atlas cluster finish provisioning before you pushed the app configuration? It can take 5-10 minutes to spin up a new cluster, and this error message seems plausible if the cluster hasn’t finished provisioning yet.", "username": "Dachary_Carey" } ]
Error When Creating A New Task Tracker Backend Realm App
2022-06-01T15:15:15.511Z
Error When Creating A New Task Tracker Backend Realm App
2,432
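If the mismatch is in the linked data source, the file to check in the exported app configuration is typically data_sources/mongodb-atlas/config.json, whose shape looks roughly like the sketch below. The cluster name must match the Atlas cluster created in Step B; the values shown are placeholders. Once corrected (and once the cluster has finished provisioning), push the configuration again with `realm-cli push`.

```json
{
  "name": "mongodb-atlas",
  "type": "mongodb-atlas",
  "config": {
    "clusterName": "Cluster0",
    "readPreference": "primary",
    "wireProtocolEnabled": false
  }
}
```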
null
[ "node-js" ]
[ { "code": "exports = async function() {\nconst Sentry = require('@sentry/node');\nconst SentryTracing = require(\"@sentry/tracing\")\n console.log(Sentry.init)\n Sentry.init(); // or using Sentry.init({ dsn: \"something\" }) does not make any difference.\n}\n> error: \n{\"message\":\"Value is not an object: undefined\",\"name\":\"TypeError\"}\n", "text": "Hello,I am using Mongodb triggers for running some periodic jobs. In order to collect the errors I want to send them to Sentry by using its SDK on Nodejs:And I get am getting this error with no stack trace:Any help or clue is appreciated.", "username": "Poorshad_Shaddel" }, { "code": "", "text": "Hi @Poorshad_Shaddel and welcome in the MongoDB Community !I think you are looking for this doc.I never touched that part so I can’t really help much but there is some stuff to test I guess.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "Thanks @MaBeuLux88 !I will give it a try.", "username": "Poorshad_Shaddel" } ]
How to use Sentry in a MongoDB scheduled trigger
2022-06-01T11:21:56.342Z
How to use Sentry in a MongoDB scheduled trigger
2,071
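Until the SDK issue is sorted out, one interim pattern is to keep the trigger body wrapped so failures are at least captured and can be forwarded by hand. This is only a sketch — the job logic and any error-tracker endpoint are placeholders:

```js
exports = async function () {
  try {
    // ... the periodic job ...
  } catch (err) {
    console.error("scheduled job failed:", err.message);
    // forward err.message to your error tracker here,
    // e.g. with an HTTP call from the function
    throw err; // rethrow so the trigger run is still marked as failed
  }
};
```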
https://www.mongodb.com/…0_2_1024x519.png
[]
[ { "code": "", "text": "HiI develop code using VS Code and linked to GitHub which is synchronised with my Realm account. I have on occasion noticed errors during the deployment process (in the Deployment section of the Realm UI), however this has been getting more frequent. Over the past 2 days I have had 4 failures in 10 attempted deployments. The error messages vary, with error invalidating cloudfront being one of them. Attempting to redeploy sometimes works and sometimes doesn’t. Below is a screenshot showing some of the errors. I’m pretty sure it is not an issue with the code as sometimes I just add a space to the code and resync to get it to deploy again.Is this something I can fix with settings from the UI or code or is it a bug? Thanks\nimage1527×775 140 KB\n", "username": "ConstantSphere" }, { "code": "", "text": "Hi Simon,Does that only happen when making changes to hosting files or with other types of changes as well?Regards", "username": "Mansoor_Omar" }, { "code": "", "text": "Hi @Mansoor_Omar thanks for taking a look at this. I can’t say with 100% certainty but spot checking a few deployments all of the of the ones that failed first time included hosting changes, whereas many of the ones that succeeded first time didn’t, so inclusion of hosting files might be a contributing factor.I know from other work that cloudfront can be very slow and timeout sometimes and we’ve needed to put retry strategies in.", "username": "ConstantSphere" }, { "code": "", "text": "if it’s useful here’s the exact error message from a couple more from 24-May.Failed: error invalidating cloudfront cache: ServiceUnavailable: CloudFront encountered an internal error. Please try again. status code: 503, request id: aa2e6a1d-7941-4d6d-93c6-0c13f4e71ecfFailed: error invalidating cloudfront cache: ServiceUnavailable: CloudFront encountered an internal error. Please try again. status code: 503, request id: 36d5feb0-92e4-49db-919a-96feb9859687", "username": "ConstantSphere" } ]
GitHub releases frequently failing
2022-05-24T13:22:37.465Z
GitHub releases frequently failing
2,023
null
[ "aggregation", "queries" ]
[ { "code": " _id : <string>\n metadata : {\n event_type : <string> - one of published, deleted, updated. Vast majority (95%+) is published\n ... other fields ...\n }\n series_id : {\n id : <string>\n type : <string>\n }\n ... other fields\n", "text": "Hi, we had simple Match…Group aggregation pipeline which has strange performance characteristics and thought it may be a bug/poor implementation that I wanted to flag.Basically we have a collection of 300k documents containing (not only) the following fields:We wanted to run a query to return all unique series_id.id, excluding event_type = deleted. We have indexes on both series_id.id and event_typeA simple Group(series_id.id) is fast (<1s)\nThe obvious Match : ne(deleted) → Group (series_id.id) is very slow (12s)\nRunning Match: or([“published”,“updated”]) → Group is also very slowHowever running Match : “published” → Group (series_id.id) is fast, as is running this with “updated”.I can’t see an obvious reason why adding the OR in the match on an indexed field should be over 12x slower than running the two queries independently (and merging the results on the client). Is this expected behaviour?", "username": "Matt_Allwood" }, { "code": "$ne$ne$ne", "text": "Please publish sample documents, the exact pipeline and all the indexes (with getIndexes()).If you look at the $ne documentation in the last paragraph you will read:The inequality operator $ne is not very selective since it often matches a large portion of the index. As a result, in many cases, a $ne query with an index may perform no better than a $ne query that must scan all documents in a collection. See also Query Selectivity.A partial index with your $ne:deleted might help.", "username": "steevej" }, { "code": "db.getCollection('petchem_price_assessments_2').aggregate([{ \"$match\" : { \"metadata.event_type\" : { \"$in\" : [\"published\", \"updated\"] } } }, { \"$group\" : { \"_id\" : \"$series_id.id\" } }]).toArray()\n[\n {\n \"v\" : 2,\n \"key\" : {\n \"_id\" : 1\n },\n \"name\" : \"_id_\",\n \"ns\" : \"pricing-local.petchem_price_assessments_2\"\n },\n {\n \"v\" : 2,\n \"key\" : {\n \"metadata.event_type\" : 1\n },\n \"name\" : \"metadata.event_type_1\",\n \"ns\" : \"pricing-local.petchem_price_assessments_2\"\n },\n {\n \"v\" : 2,\n \"key\" : {\n \"series_id.id\" : 1,\n \"created_for\" : 1\n },\n \"name\" : \"series_id.id_1_created_for_1\",\n \"ns\" : \"pricing-local.petchem_price_assessments_2\"\n }\n]\n/* 48 */\n{\n \"_id\" : \"energy-darwin_10-pricehistory-20200807000000\",\n \"metadata\" : {\n \"event_type\" : \"published\",\n \"partition_key\" : \"energy-darwin_10-pricehistory-20200807000000\",\n \"correlation_id\" : \"energy-darwin_10-pricehistory-20200807000000\",\n \"id\" : \"energy-darwin_10-pricehistory-20200807000000\",\n \"source\" : \"platform_cms\",\n \"type\" : \"icis.petchem.prices.price_assessments\",\n \"event_time\" : \"1634589908000\",\n \"released_on\" : \"1596801900000\"\n },\n \"base_type\" : \"series-item\",\n \"released_on\" : NumberLong(1596801900000),\n \"created_on\" : NumberLong(1596798329000),\n \"descriptor_id\" : \"price-range\",\n \"domain_id\" : \"energy-darwin\",\n \"series_id\" : {\n \"id\" : \"energy-darwin_10\",\n \"type\" : \"icis.petchem.series.price_assessments_specifications\"\n },\n \"created_for\" : NumberLong(1596758400000),\n \"contract_period\" : [ \n {\n \"lang\" : \"en\",\n \"value\" : \"\"\n }, \n {\n \"lang\" : \"zh\",\n \"value\" : \"\"\n }\n ],\n \"period_start_date\" : NumberLong(0),\n \"period_end_date\" : NumberLong(0),\n 
\"factory_price_range\" : [],\n \"price_range\" : [ \n {\n \"assessment_low\" : 2880.0,\n \"assessment_mid\" : 2905.0,\n \"assessment_high\" : 2930.0,\n \"assessment_low_delta\" : 50.0,\n \"assessment_high_delta\" : 50.0,\n \"market_time\" : {\n \"id\" : \"\",\n \"type\" : \"icis.petchem.reference_data.market_time\"\n },\n \"iosco_data_used\" : \"\",\n \"osp\" : null,\n \"delta_type\" : {\n \"id\" : \"delta-type_regular\",\n \"type\" : \"icis.petchem.reference_data.delta_type\"\n },\n \"versus_dated\" : null\n }\n ],\n \"single_price\" : []\n}\n\n/* 49 */\n{\n \"_id\" : \"energy-darwin_10-pricehistory-20200810000000\",\n \"metadata\" : {\n \"event_type\" : \"published\",\n \"partition_key\" : \"energy-darwin_10-pricehistory-20200810000000\",\n \"correlation_id\" : \"energy-darwin_10-pricehistory-20200810000000\",\n \"id\" : \"energy-darwin_10-pricehistory-20200810000000\",\n \"source\" : \"platform_cms\",\n \"type\" : \"icis.petchem.prices.price_assessments\",\n \"event_time\" : \"1634589984000\",\n \"released_on\" : \"1597061100000\"\n },\n \"base_type\" : \"series-item\",\n \"released_on\" : NumberLong(1597061100000),\n \"created_on\" : NumberLong(1597056012000),\n \"descriptor_id\" : \"price-range\",\n \"domain_id\" : \"energy-darwin\",\n \"series_id\" : {\n \"id\" : \"energy-darwin_10\",\n \"type\" : \"icis.petchem.series.price_assessments_specifications\"\n },\n \"created_for\" : NumberLong(1597017600000),\n \"contract_period\" : [ \n {\n \"lang\" : \"en\",\n \"value\" : \"\"\n }, \n {\n \"lang\" : \"zh\",\n \"value\" : \"\"\n }\n ],\n \"period_start_date\" : NumberLong(0),\n \"period_end_date\" : NumberLong(0),\n \"factory_price_range\" : [],\n \"price_range\" : [ \n {\n \"assessment_low\" : 2980.0,\n \"assessment_mid\" : 3005.0,\n \"assessment_high\" : 3030.0,\n \"assessment_low_delta\" : 100.0,\n \"assessment_high_delta\" : 100.0,\n \"market_time\" : {\n \"id\" : \"\",\n \"type\" : \"icis.petchem.reference_data.market_time\"\n },\n \"iosco_data_used\" : \"\",\n \"osp\" : null,\n \"delta_type\" : {\n \"id\" : \"delta-type_regular\",\n \"type\" : \"icis.petchem.reference_data.delta_type\"\n },\n \"versus_dated\" : null\n }\n ],\n \"single_price\" : []\n}\n\n/* 50 */\n{\n \"_id\" : \"energy-darwin_10-pricehistory-20200811000000\",\n \"metadata\" : {\n \"event_type\" : \"published\",\n \"partition_key\" : \"energy-darwin_10-pricehistory-20200811000000\",\n \"correlation_id\" : \"energy-darwin_10-pricehistory-20200811000000\",\n \"id\" : \"energy-darwin_10-pricehistory-20200811000000\",\n \"source\" : \"platform_cms\",\n \"type\" : \"icis.petchem.prices.price_assessments\",\n \"event_time\" : \"1634590028000\",\n \"released_on\" : \"1597147500000\"\n },\n \"base_type\" : \"series-item\",\n \"released_on\" : NumberLong(1597147500000),\n \"created_on\" : NumberLong(1597143737000),\n \"descriptor_id\" : \"price-range\",\n \"domain_id\" : \"energy-darwin\",\n \"series_id\" : {\n \"id\" : \"energy-darwin_10\",\n \"type\" : \"icis.petchem.series.price_assessments_specifications\"\n },\n \"created_for\" : NumberLong(1597104000000),\n \"contract_period\" : [ \n {\n \"lang\" : \"en\",\n \"value\" : \"\"\n }, \n {\n \"lang\" : \"zh\",\n \"value\" : \"\"\n }\n ],\n \"period_start_date\" : NumberLong(0),\n \"period_end_date\" : NumberLong(0),\n \"factory_price_range\" : [],\n \"price_range\" : [ \n {\n \"assessment_low\" : 2980.0,\n \"assessment_mid\" : 3005.0,\n \"assessment_high\" : 3030.0,\n \"assessment_low_delta\" : 0.0,\n \"assessment_high_delta\" : 0.0,\n \"market_time\" : {\n \"id\" : 
\"\",\n \"type\" : \"icis.petchem.reference_data.market_time\"\n },\n \"iosco_data_used\" : \"\",\n \"osp\" : null,\n \"delta_type\" : {\n \"id\" : \"delta-type_regular\",\n \"type\" : \"icis.petchem.reference_data.delta_type\"\n },\n \"versus_dated\" : null\n }\n ],\n \"single_price\" : []\n}\n", "text": "Can’t save as a file as I’m too new, but embedded example of 3 docs are below.\nI found that bit of the doc pages from a StackOverflow search, and I’m OK with that explaining the $ne result, but it doesn’t really explain why performing an $or query should be so slow.The $or query:The indices (I also tried adding a compond index on event_type_1_series_id.id_1 (and the other way around) with no impact)Example data:", "username": "Matt_Allwood" }, { "code": "", "text": "Please share the explain plans for …event_type:published, …event_type:updated and the one for the $in variation.With compound index event_type_1_series_id.id_1, a $project of series_id.id might help.What I suspect is that the working set does not fit in RAM so a lot more of disk I/O occurs with $in.", "username": "steevej" }, { "code": " \"stages\" : [ \n {\n \"$cursor\" : {\n \"query\" : {\n \"metadata.event_type\" : {\n \"$in\" : [ \n \"published\", \n \"updated\"\n ]\n }\n },\n \"fields\" : {\n \"series_id.id\" : 1,\n \"_id\" : 0\n },\n \"queryPlanner\" : {\n \"plannerVersion\" : 1,\n \"namespace\" : \"pricing-systest.petchem_price_assessments_2\",\n \"indexFilterSet\" : false,\n \"parsedQuery\" : {\n \"metadata.event_type\" : {\n \"$in\" : [ \n \"published\", \n \"updated\"\n ]\n }\n },\n \"queryHash\" : \"D2963EBC\",\n \"planCacheKey\" : \"AB3E9A31\",\n \"winningPlan\" : {\n \"stage\" : \"PROJECTION_DEFAULT\",\n \"transformBy\" : {\n \"series_id.id\" : 1,\n \"_id\" : 0\n },\n \"inputStage\" : {\n \"stage\" : \"IXSCAN\",\n \"keyPattern\" : {\n \"metadata.event_type\" : 1,\n \"series_id.id\" : 1,\n \"_id\" : 1\n },\n \"indexName\" : \"test1\",\n \"isMultiKey\" : false,\n \"multiKeyPaths\" : {\n \"metadata.event_type\" : [],\n \"series_id.id\" : [],\n \"_id\" : []\n },\n \"isUnique\" : false,\n \"isSparse\" : false,\n \"isPartial\" : false,\n \"indexVersion\" : 2,\n \"direction\" : \"forward\",\n \"indexBounds\" : {\n \"metadata.event_type\" : [ \n \"[\\\"published\\\", \\\"published\\\"]\", \n \"[\\\"updated\\\", \\\"updated\\\"]\"\n ],\n \"series_id.id\" : [ \n \"[MinKey, MaxKey]\"\n ],\n \"_id\" : [ \n \"[MinKey, MaxKey]\"\n ]\n }\n }\n },\n \"rejectedPlans\" : []\n }\n }\n }, \n {\n \"$group\" : {\n \"_id\" : \"$series_id.id\"\n }\n }\n ],\n", "text": "Sure:\nThe distribution of event_types will be vast majority (95%+) “published” - if there were memory issues I’d expect match event_type:“published” to also be slow, but it’s a fast-running query", "username": "Matt_Allwood" }, { "code": "\"stage\" : \"PROJECTION_DEFAULT\",\n \"transformBy\" : {\n \"series_id.id\" : 1,\n \"_id\" : 0\n }\n", "text": "Index wise I see nothing except that having _id is not useful._id not useful because it is projected out anyway as seen inThe distribution of event_types will be vast majority (95%+) “published” - if there were memory issues I’d expect match event_type:“published” to also be slowNot necessarily, they could all fit but the little extra from the other event_type might cause a cascade of disk I/O.Please share collections stats using https://www.mongodb.com/docs/manual/reference/method/db.collection.stats/ and the characteristics of your installation, RAM, disk. Is client running on same machine as the server? 
Any other database or collection of any significance on the same machine?", "username": "steevej" } ]
Performance drop when adding an OR in a Match step
2022-05-31T10:13:56.009Z
Performance drop when adding an OR in a Match step
1,650
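To gather the numbers requested above, something like the following in mongosh should suffice; the collection name is taken from the thread, and `executionStats` will show whether the $in plan is paying for extra index entries or document fetches:

```js
// collection statistics (size, storageSize, totalIndexSize, ...)
db.petchem_price_assessments_2.stats()

// timing and plan details for the slow variant
db.petchem_price_assessments_2.explain("executionStats").aggregate([
  { $match: { "metadata.event_type": { $in: ["published", "updated"] } } },
  { $group: { _id: "$series_id.id" } }
])
```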
null
[ "swift", "graphql", "serverless", "android", "next-js" ]
[ { "code": "", "text": "Hi all,I am looking for guidance how to approach creating a web app working with MongoDB Atlas/Realm, coming from the native Realm SDK for iOS.Current project state is a fully implemented iOS app using the Realm Swift SDK with a MongoDB Atlas database and a Realm App to synchronize the local database with the sync schema definition and custom partitioning strategy (using a function). For authentication I am currently using the built-in Realm Email/User-Password service, as-well as “Sign in with Apple” and “Login with Google”. All of it is configured in MongoDB Atlas/Realm App.Now we want to expand the iOS and Android app with a Web App offering the same functionality. Basically we are just editing the data in the database and visualizing on the different platforms. I decided to go with Realm Sync, because the native apps are offline-first, therefore I was looking for a smart solution to migrate the data into the cloud as soon as the user decides to do so.Now for web this is a different topic, as the storage solutions of moderns browsers are not that sophisticated, but offline-first is also not required.\nSo I already looked into the Web SDK guides/tutorials in the documentation (https://www.mongodb.com/docs/realm/web/) on how to get started, but I still have a few open questions, I hope you can answer for me.Q1. Should I continue to use the built-in authentication of Realm for the web app, or should I go with a different auth provider, e.g. Auth0? I am worrying the verification of the authentication with Realm Sync is not available in my own backend (I couldn’t find a public key to verify the JWT Token signature created by the Realm App). If I have to change to the Custom JWT strategy, I should probably do that now, before creating a larger userbase, but I am wondering if User/Password in Realm is not available in a custom backend, or if I just can’t find how to set it up.Q2. Offline-first synchronization like on iOS/Android is not really a thing in the web. Do I understand correctly, for web I do not interact with any kind of local database, but instead only work with the GraphQL endpoint + Realm functions?Q3. We are using Next.js as the web app framework. It also includes the option to built the API backend used in the web-app. Is there any preference in using the Realm GraphQL endpoint vs. building the Next.js backend by myself to connect to the MongoDB database and create my own API endpoint? Of course it is more work, but it would also give me more freedom with the queries and self-hosted/container-based pricing seems cheaper than serverless-hosted by MongoDB.I found an example in the Next.JS GitHub project, but it only uses the Realm GraphQL endpoint, rather than a custom endpoint.canary/examples/with-realm-webThe React Framework. Contribute to vercel/next.js development by creating an account on GitHub.Thanks in advance for your help!", "username": "Philip_Niedertscheid" }, { "code": "web-mqlweb-graphql", "text": "Hello @Philip_Niedertscheid ,I don’t have any specific docs on how to do a cross-platform web/mobile app in Realm, but you can take a look at the Realm Template Apps to see how I might approach it - you can pick either web-mql or web-graphql depending on which API you want.Q1: I don’t understand your Question 1 entirely - you can definitely use the same authentication provider in your web and mobile apps (e.g. email/password). 
However, if you need to integrate auth with another system outside of Realm then you may want to consider using something like Auth0 instead for both web/mobile.Q2: You are generally correct regarding Realm Database & Offline Storage being unavailable for web. you suggested GraphQL+Functions but could also use MQL+Functions as both APIs are more or less equivalent in terms of what you can do. It’s a matter of preference, though I’d personally suggest MQL if you want real-time because we don’t have GraphQL subscriptions yet.Q3: One of the main value premises of Realm is that you don’t have to do all the extra work of setting up a backend. So yes it might be a little more flexible at the end of the day but it probably won’t be cheaper for you to implement your own versions of Realm’s rules, auth, validation, etc. However, if you prefer Next.js functions to Realm’s serverless functions, you could always use the Realm SDK in your Next.js handlers to route database requests through our rules/auth/etcThanks,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "Hi Tarun,thanks for the reply and the link to the templates.\nI will check it out and maybe it already helps me enough.Regarding your feedback:Q1: AFAIK authenticating with Realm your backend creates a JWT token for the internal Realm username/password system and signs it using a private key. This token is then used for authenticated requests. Your e.g. GraphQL backend will receive the request with said JWT token and needs to verify its signature. To verify a JWT token, the backend which processes the request needs access to the public key. You have access to public key, but if I want to use my own backend, I also need access to the public key so I can also verify that the user is in fact authenticated. This is exactly what Auth0 is offering as a service, but to do the same with Realm username/password I would only need your public key (which is not confidental at all).Q2: I understand, thanks for highlighting the differecnesQ3: Extending my further explanation from Q1, I am looking for a way to verify the authentication token of a request with the users in the Realm Users database.", "username": "Philip_Niedertscheid" } ]
Guide for expanding from Realm Sync to Web
2022-05-02T12:11:54.975Z
Guide for expanding from Realm Sync to Web
3,153
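A rough sketch of the Q3 suggestion — calling Atlas through the Realm Web SDK from a Next.js API route so the existing Realm users and rules still apply. The app id, database and collection names are placeholders:

```js
// pages/api/tasks.js — illustrative only
import * as Realm from "realm-web";

const app = new Realm.App({ id: "tasktracker-abcde" }); // placeholder app id

export default async function handler(req, res) {
  const credentials = Realm.Credentials.emailPassword(req.body.email, req.body.password);
  const user = await app.logIn(credentials);

  const tasks = await user
    .mongoClient("mongodb-atlas") // the linked data source name
    .db("tracker")                // placeholder database
    .collection("tasks")          // placeholder collection
    .find({ owner: user.id });

  res.status(200).json(tasks);
}
```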
null
[ "realm-studio" ]
[ { "code": "", "text": "I’m getting an error attempting to import data with a CSV via Realm Studio.My class has 5 property’s, _id (objectId), value (float), createdAt(date), timestamp(date) and unit(string).\nMy _id is an objectId, set as the Primary Key.CSV:\n_id,value,createdAt,timestamp,unit\n62950a7fb786e7670db2b967,100,2022-05-25T04:25:13.489Z,2022-05-25T04:25:13.489Z,test\nError:\nFailed to import data. Parsing error at line 0, expected type “objectId” but got “62950a7fb786e7670db2b967” for column “_id”. Error details: Error: Importing data of type “objectId” is not supported.No problem, so I try importing without including that property, thinking Realm Studio would generate and I get:\nCSV:\nvalue,createdAt,timestamp,unit\n100,2022-05-25T04:25:13.489Z,2022-05-25T04:25:13.489Z,test\nError:\nFailed to import data Missing value for property ‘Glucose._id’.So I figured I’d leave it null:\n_id,value,createdAt,timestamp,unit\n,100,2022-05-25T04:25:13.489Z,2022-05-25T04:25:13.489Z,test\nError:\nSame as first one.Is there a way to get Realm Studio to generate my objectId for me in the CSV? I haven’t been able to find anything in help to work around this.", "username": "Casey_S" }, { "code": "", "text": "This is a very good question but at this time I don’t believe there’s a direct answer as RealmStudio cannot import ObjectId fields (this has been an ongoing issue IMO).ButI think it can be done with", "username": "Jay" }, { "code": "", "text": "Thanks for the response Jay, I don’t have the MongoDB Database Tools installed, so I guess I’ll give that a go. QA is trying to find a way to share datasets between team members easily so not all options are at all disposal, the easy Realm Studio option of Import Data from > CSV was a very appealing option. I’ll see if I can get mongoimport working.", "username": "Casey_S" } ]
How do I import data into a class with an ObjectId via CSV import within Realm Studio?
2022-05-30T18:47:07.284Z
How do I import data into a class with an ObjectId via CSV import within Realm Studio?
3,134
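A minimal sketch of the mongoimport route mentioned above: leave the _id column out of the CSV entirely and the server generates an ObjectId on insert. The connection string, database and collection names are placeholders. Note that without type hints the date columns arrive as strings; mongoimport's --columnsHaveTypes option can coerce them if needed.

```sh
# glucose.csv
# value,createdAt,timestamp,unit
# 100,2022-05-25T04:25:13.489Z,2022-05-25T04:25:13.489Z,test

mongoimport --uri "mongodb+srv://user:pass@cluster.example.mongodb.net/mydb" \
  --collection Glucose --type csv --headerline --file glucose.csv
```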
null
[ "golang", "database-tools" ]
[ { "code": "server returned error on SASL authentication step: BSON field 'saslContinue.mechanism' is an unknown field.\nimport (\n\t\"gopkg.in/mgo.v2\"\n)\n\nfunc ConnectDb() (mongoSession *mgo.Session) {\n\tmongoDBDialInfo := &mgo.DialInfo{\n\t\tAddrs: []string{ThemeDatabaseIpPort},\n\t\tUsername: ThemeDbUsername,\n\t\tPassword: ThemeDbPassword,\n\t\tDatabase: \"admin\",\n\t\tTimeout: 60 * time.Second,\n\t\tPoolLimit: 4096,\n\t}\n\tmongoSession, err := mgo.DialWithInfo(mongoDBDialInfo)\n\tif err != nil {\n\t\tfmt.Printf(\"CreateSessionForThemeDB: %s\\n\", err)\n\t\tdefer mongoSession.Close()\n\t\treturn mongoSession\n\t}\n\tmongoSession.SetMode(mgo.Monotonic, true)\n\treturn mongoSession\n}\n", "text": "I am trying to connect to remote mongodb server from another server.remote mongodb version: 5.0.8\nremote mongodb tools version: 100.5.1when I am trying to connect to the db using old mongodb driver then it throws following error:connection function:", "username": "Aman_Kaur" }, { "code": "", "text": "It could be driver version compatibility issues\nYou may have to upgrade your driver", "username": "Ramachandra_Tummala" }, { "code": "", "text": "@Aman_Kaur the “gopkg.in/mgo.v2” package only officially supports up to MongoDB v3.6. To use MongoDB v5.0.8, you need to use the “go.mongodb.org/mongo-driver” package, which is the officially supported driver for Go.Check out the Go driver compatibility matrix here to see what Go driver versions support what MongoDB server versions.", "username": "Matt_Dale" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Error connecting to db: server returned error on SASL authentication step: BSON field 'saslContinue.mechanism' is an unknown field
2022-05-13T05:42:04.468Z
Error connecting to db: server returned error on SASL authentication step: BSON field ‘saslContinue.mechanism’ is an unknown field
6,043
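A minimal sketch of the same connection helper rewritten against the official driver (go.mongodb.org/mongo-driver); the variable names mirror the mgo snippet above and error handling is left to the caller:

```go
import (
	"context"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func ConnectDb() (*mongo.Client, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	opts := options.Client().
		ApplyURI("mongodb://" + ThemeDatabaseIpPort).
		SetAuth(options.Credential{
			Username:   ThemeDbUsername,
			Password:   ThemeDbPassword,
			AuthSource: "admin", // same auth database as before
		}).
		SetMaxPoolSize(4096)

	// Connect establishes the pool; Ping verifies the server is reachable.
	client, err := mongo.Connect(ctx, opts)
	if err != nil {
		return nil, err
	}
	return client, client.Ping(ctx, nil)
}
```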
https://www.mongodb.com/…4_2_1023x453.png
[ "aggregation", "queries", "node-js", "mongoose-odm" ]
[ { "code": "$project{\n $project: {\n _id: 1,\n content: {\n $function: {\n body: function (str) {\n if (logic) {\n return someValue\n } else {\n return null\n }\n },\n args: [\"$content\"],\n lang: \"js\"\n }\n },\n }\n}\nMongoError: Invalid $project :: caused by :: The body function must be specified.serializeFunctionsMongoError: SyntaxError: unterminated parenthetical", "text": "I have a $project within my query that looks like the following:This isn’t the exact content, since I had to change variable names and logic for privacy, but you get the idea.Initially I was getting the error MongoError: Invalid $project :: caused by :: The body function must be specified.Upon researching this issue, I located (relevant help case ticket) and this was their response:\nScreen Shot 2022-05-18 at 9.32.21 AM1648×730 73.3 KBCurrently I’m running on Mongoose 5.13.14 and my db is on 4.4.14 so I would think that this issue would’ve already been handled by now.So, just in case I was being an idiot I was looking for documentation on this option serializeFunctions and could not find anything anywhere, not on the mongo docs nor just by simply googling the option. Can anyone link me to the documentation?Or better yet, help me solve this error!I tried setting the body function to a string like in his first option, as well as this user’s solution on stackoverflow, but that simply results in the error MongoError: SyntaxError: unterminated parenthetical so I’m not sure what to do.Any help would be greatly appreciated - thanks.", "username": "Charlie_Buyas" }, { "code": "const Test = mongoose.model('Test', {x: Number})\n\nasync function exec() {\n const doc = new Test({x: 1})\n await doc.save()\n\n const res = await Test.aggregate([\n {$project: {\n _id: 1,\n result: {\n $function: {\n body: function(x) {\n if (x === 1) {\n return 'Found'\n } else {\n return null\n }\n },\n args: ['$x'],\n lang: 'js'\n }\n }\n }}\n ])\n console.log(res)\n\n await mongoose.connection.close()\n}\n[ { _id: 6296b9567edf52f41fde6180, result: 'Found' } ]\n", "text": "Hi @Charlie_BuyasI tried this short code for testing purposes by taking inspiration from the example you give, using Mongoose 5.13.14 and MongoDB 5.0.8:The output it gives:So it seems to behave as expected.Could you provide a simple minimal example that is causing the issue you’re seeing?Best regards\nKevin", "username": "kevinadi" } ]
Cannot find any documentation on serializeFunctions, and getting an error about "body function must be specified."
2022-05-18T16:40:40.361Z
Cannot find any documentation on serializeFunctions, and getting an error about “body function must be specified.”
2,527
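One workaround worth noting for the $function error discussed above: the stage also accepts the body as a string, which sidesteps client-side function serialization altogether. The sketch below is mongosh only and uses made-up collection and field names, so treat it as an illustration rather than a drop-in fix.

```js
// mongosh sketch: $function with the body supplied as a string.
// "games" and the "x" field are placeholder names.
db.games.aggregate([
  {
    $project: {
      _id: 1,
      result: {
        $function: {
          body: "function (x) { if (x === 1) { return 'Found'; } return null; }",
          args: ["$x"],
          lang: "js"
        }
      }
    }
  }
])
```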
null
[ "java", "change-streams", "scala" ]
[ { "code": "import org.mongodb.scala.{Document, Observer}\nimport org.mongodb.scala.model.changestream.ChangeStreamDocument\n\nobject ObserveTest extends Basic with App {\n\n val separator = \"\\n\\n\"\n\n def subscribe(): Unit = {\n test_collection.watch().subscribe(new Observer[ChangeStreamDocument[Document]] {\n override def onNext(result: ChangeStreamDocument[Document]): Unit = {\n println(s\"$separator onNext: $result$separator\")\n throw new RuntimeException(\"error\")\n }\n\n override def onError(e: Throwable): Unit = {\n println(s\"$separator onError: $e$separator \")\n subscribe()\n }\n\n override def onComplete(): Unit = println(s\"$separator onComplete $separator \")\n })\n }\n\n subscribe()\n\n}\n\n", "text": "Hi,\nI noticed different behavior of the driver in Change Stream in exceptions handling when I upgraded from mongo-scala-driver 2.9.0 to 4.6.0.\nI assume that in the code that is handling events of changes in MongoDB (onNext method of the Observer) the exception can be thrown. In that case in onError method I was subscribing to events again. It was working in old version of the driver. Simply when onNext thrown exception, then onError method was called: mongo-java-driver/AbstractSubscription.java at r3.12.1 · mongodb/mongo-java-driver · GitHub\nAfter upgrade to 4.6.0 I noticed that project reactor is used and different code is handling exceptions: reactor-core/FluxCreate.java at main · reactor/reactor-core · GitHub\nIn the end onError method is not invoked.This is my simple test for that.With\n“org.mongodb.scala” %% “mongo-scala-driver” % “2.9.0”\nit works perfeclty.\nBut with\n“org.mongodb.scala” %% “mongo-scala-driver” % “4.6.0”\nwhen exception is thrown after first event, the subscriber is closed, I don’t receive more events.Is there any way to handle exception that could potentially been thrown from the code run in onNext method?", "username": "Michal" }, { "code": "ObserverObserverorg.reactivestreams.SubscriberonSubscribeonNextonErroronCompletenulljava.lang.NullPointerExceptionSubscriberSubscriptionSubscriptionSubscriberObserverSubscriber.onErrorSubscriberSubscriberonErrorSubscriberObserverObserver", "text": "Hi @Michal,The Observer in your code does not return normally, it throws an exception. Driver’s Observer is an org.reactivestreams.Subscriber, for which the rule #13 in the reactive streams specification statesCalling onSubscribe , onNext , onError or onComplete MUST return normally except when any provided parameter is null in which case it MUST throw a java.lang.NullPointerException to the caller, for all other situations the only legal way for a Subscriber to signal failure is by cancelling its Subscription . In the case that this rule is violated, any associated Subscription to the Subscriber MUST be considered as cancelled, and the caller MUST raise this error condition in a fashion that is adequate for the runtime environment.The last part of the rule is further clarified in the spec«Raise this error condition in a fashion that is adequate for the runtime environment» could mean logging the error—or otherwise make someone or something aware of the situation—as the error cannot be signalled to the faulty Subscriber.The Observer in your code violates this rule. Even if version 2.9 behaved differently, it is a requirement now. 
Subscriber.onError exists as the “only legal way to signal failure to a Subscriber” (a quote from the spec), i.e., if a Subscriber throws, there is no need to notify it via onError that an exception was thrown, because it was the Subscriber who threw it.Your Observer may cancel the subscription, and then you may create a new one, if that is what you want, but the Observer methods must return normally.", "username": "Valentin_Kovalenko" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Handling exceptions in Change Stream
2022-05-31T16:03:17.753Z
Handling exceptions in Change Stream
3,620
null
[ "dot-net", "crud" ]
[ { "code": "using System.Text.RegularExpressions;\nusing MongoDB.Driver;\n\nvar test = new Test();\ntest.Run();\n\npublic class Test\n{\n private readonly string dbName = \"test\";\n private readonly IMongoCollection<Container> collection;\n\n public Test()\n {\n var connectionString = \"mongodb://localhost:27017\";\n var client = new MongoClient(connectionString);\n\n client.ListDatabaseNames().ToList()\n .Where(db => Regex.IsMatch(db, $\"^{dbName}.*\")).ToList().ForEach(db => client.DropDatabase(db));\n\n collection = client.GetDatabase(dbName).GetCollection<Container>($\"{nameof(Container)}Collection\");\n }\n\n public void Run()\n {\n var id = Guid.NewGuid();\n var container = new Container(id, new List<Parent>\n {\n new Parent(new List<Child>\n {\n new Child(\"child 1.1\"),\n new Child(\"child 1.2\")\n }, \"parent 1\"),\n\n new Parent(new List<Child>\n {\n new Child(\"child 2.1\"),\n new Child(\"child 2.2\")\n }, \"parent 2\")\n\n }, \"containerLevel\");\n\n collection.InsertOne(container);\n\n // modify child 2.2 ChildProperty\n var filter = Builders<Container>.Filter.Eq(c => c.Id, id)\n & Builders<Container>.Filter.ElemMatch(e => e.Parents, p => p.ParentProperty == \"parent 2\")\n & Builders<Container>.Filter.ElemMatch(e => e.Parents[-1].Children, p => p.ChildProperty == \"child 2.2\");\n\n var update = Builders<Container>.Update.Set(c => c.Parents[-1].Children[-1].ChildProperty, \"child 2.2 - updated\");\n\n collection.UpdateOne(filter, update); // sounds good, doesn't work\n // 'A write operation resulted in an error.\n // WriteError: { Category : \"Uncategorized\", Code : 2, Message : \"Too many positional (i.e. '$') elements found in path 'Parents.$.Children.$.ChildProperty'\" }.'\n\n }\n\n public record Container(Guid Id, IList<Parent> Parents, string ContainerProperty);\n public record Parent(IList<Child> Children, string ParentProperty);\n public record Child(string ChildProperty);\n}\n\n", "text": "Hi,\nI’m trying to update a property of a nested array. I’m able to update property that is nested only one level deep, but if it is a level deeper it gives me the “too many positional elements” error.\nIs this a valid scheme or should I completely avoid to use an array that contains items that are again arrays?\nPlease let me know how to update the Container.Parents.Children.ChildProperty in my sample code:\n(change the “child 2.2” to “child 2.2 - updated”).\nIf possible, I would prefer typed C# code .", "username": "Ladislav_Chvila" }, { "code": "test> db.coll.updateMany({}, {$set: { \"Parents.$.Children.$.ChildProperty\": 42 }})\nMongoServerError: Too many positional (i.e. '$') elements found in path 'Parents.$.Children.$.ChildProperty'\n", "text": "Hi, @Ladislav_Chvila,Reviewing your C# code and the error, you are attempting to use nested positional operators in your update statement. This is not permitted by the server and the error that you received back is a server error, not a .NET/C# driver error. Trying a similar operation (with an empty filter for ease of typing) in the shell, we can see the server return the same error:I would strongly recommend that you to reconsider your schema for this reason among others.Sincerely,\nJames", "username": "James_Kovacs" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
C# nested element update
2022-05-31T20:13:32.082Z
C# nested element update
3,950
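For the nested-array update above, the server-side alternative to stacking positional $ operators is the filtered positional operator with arrayFilters. The sketch below is mongosh and assumes a containerId variable holding the document's _id; the .NET driver exposes the equivalent option through UpdateOptions.ArrayFilters.

```js
// mongosh sketch: update one element of a doubly nested array with
// arrayFilters instead of nested '$' positional operators.
// containerId is assumed to hold the Container document's _id.
db.ContainerCollection.updateOne(
  { _id: containerId },
  { $set: { "Parents.$[p].Children.$[c].ChildProperty": "child 2.2 - updated" } },
  {
    arrayFilters: [
      { "p.ParentProperty": "parent 2" },
      { "c.ChildProperty": "child 2.2" }
    ]
  }
)
```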
null
[ "production", "server" ]
[ { "code": "", "text": "MongoDB 5.0.9 is out and is ready for production deployment. This release contains only fixes since 5.0.8, and is a recommended upgrade for all 5.0 users.\nFixed in this release:", "username": "Jon_Streets" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB 5.0.9 is released
2022-05-31T18:53:36.110Z
MongoDB 5.0.9 is released
3,252
null
[]
[ { "code": "", "text": "I have a lot of schema but i want to take a record of each of changes in the document in one separate schema that keep record of each and every data…i don’t know how to do it", "username": "Kashif_Iqbal" }, { "code": "", "text": "Hi @Kashif_Iqbal and welcome in the MongoDB Community !I don’t understand anything and it’s probably the same for everybody in here that want to help. It probably makes sense with all the context that you have but it’s very hard for us.Please provide a few sample documents and the expected output. Eventually more context that would help understand what you are trying to do. This forum supports Markdown so you can share code blocks.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "baseUrl:{\n type: String,\n\n},\nip:{\n type: String,\n},\nchangeData:[{\n type: String,\n}],\noriginalData:[{\n type: String,\n}],\nchangedBy:{\n type: String,\n ref: 'users'\n},\ncreatedAt:{\n type: Date,\n default: Date.now,\n},\nupdatedAt:{\n type: Date,\n default: Date.now,\n}\n", "text": "const auditSchema = new Schema({})\nsir suppose we have this audit document now i want to store each and every history of document and i don’t want to do this in my primary document so how it will done.\nfor example i have user and user updated some thing then after that i will save previous data and current data.\n{\nemailid: [email protected],\nname: kashf,\nphoneNo.: 8864005925,\n},\nnow after the update it will looks like\n{\noriginalData: kashf\nchangeData: kashif\n}", "username": "Kashif_Iqbal" }, { "code": "", "text": "MongoDB Change Streams produce exactly the diff doc you are after. Then it’s up to you where you want to store it. Kafka could be a solution or another MongoDB collection.You could use a Trigger in MongoDB Realm (which is using Change Streams in the background) and in the Realm Function, you could reinsert these change events in a new MongoDB collection. Only takes a few lines of code. Less than 5 I think.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "thank u so much sir u saved my life and job", "username": "Kashif_Iqbal" } ]
About stuck in program
2022-05-30T08:03:12.261Z
About stuck in program
1,884
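A rough sketch of the change-stream approach suggested above, written against the plain Node.js driver rather than a Realm Trigger. Database and collection names are placeholders, and error handling plus resume-token persistence are left out for brevity.

```js
// Node.js sketch: copy every change event on "users" into an
// "audit_log" collection. Names are placeholders.
const { MongoClient } = require("mongodb");

async function runAuditTrail(uri) {
  const client = new MongoClient(uri);
  await client.connect();

  const db = client.db("app");
  const audit = db.collection("audit_log");

  // fullDocument: "updateLookup" also attaches the post-update document;
  // change.updateDescription carries the changed fields for updates.
  const changeStream = db.collection("users").watch([], { fullDocument: "updateLookup" });

  for await (const change of changeStream) {
    await audit.insertOne({ ...change, recordedAt: new Date() });
  }
}
```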
null
[ "node-js" ]
[ { "code": "{strict:true}{strict:true}", "text": "The latest changes in node-mongodb-native include deprecation of the {strict:true} option of the Db.collection method. I’ve failed to find any guidance for how to write code that is assured to only work with existing collections without using {strict:true}. Can you point me at something that shows the proper way to do this in the future?", "username": "John_Bito" }, { "code": "strict: truefalse", "text": "Hello @John_Bito, welcome to the MongoDB Community forum!The latest MongoDB NodeJS Driver v3.6 does have the Db.collection method working fine with both the strict: true and false (the default) options. It is not deprecated.Please tell what is the version of NodeJS, MongoDB server and the MongoDB NodeJS Driver you are working with? You can also include the code you had tried along with the actual error / warning message you are seeing.", "username": "Prasad_Saya" }, { "code": "", "text": "I guess the deprecation may not be released, as it was merged about 4 weeks ago, after 3.6.9 was finished. The Jira ticket doesn’t provide any rationale for this change, though it’s linked to the removal of the strict option from Db.createCollection saying the strict option has no effect on that method’s behavior.The strict option does change the behavior of Db.collection. Perhaps the rationale is that it appears to provide some advantage over calling Db.listCollections separately when there’s not an actual advantage.", "username": "John_Bito" }, { "code": "", "text": "So now, if you try to use {strict: true} on createCollection() you will get an error:\nMongoServerError: BSON field ‘create.strict’ is an unknown field.From this ticket: https://jira.mongodb.org/browse/NODE-2752, looks like it is expected now.Thus, could you guys please update documentation, that “strict” option is not allowed anymore:\nhttp://mongodb.github.io/node-mongodb-native/3.1/api/Db.html#createCollectionThanks", "username": "Nikita_Tertytskyi" }, { "code": "", "text": "We could really use a solution to this. The strict option was a very easy way to ensure that we were working on an existing collection. They provided a deprecation message but did not state an alternative or solution. If possible, could anyone please give a solution on how to ensure the collection exists using db.collection? Thank you.", "username": "Ryan_Scully" } ]
Db.collection no longer supports strict mode, so how can I assure that my code isn't making a new collection?
2021-06-23T22:24:44.098Z
Db.collection no longer supports strict mode, so how can I assure that my code isn’t making a new collection?
5,217
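Since { strict: true } is going away, one way to keep the same guarantee is to check listCollections explicitly before taking a handle. A minimal Node.js sketch, assuming an already connected Db instance:

```js
// Node.js sketch: fail fast when the collection does not already exist,
// without the deprecated { strict: true } option.
async function getExistingCollection(db, name) {
  const found = await db
    .listCollections({ name }, { nameOnly: true })
    .toArray();

  if (found.length === 0) {
    throw new Error(`Collection "${name}" does not exist`);
  }

  // db.collection() does not create anything by itself; a collection is
  // only created on the first write, so this handle is safe for reads.
  return db.collection(name);
}
```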
null
[ "aggregation", "dot-net", "atlas-search" ]
[ { "code": "", "text": "Hi,I have a use case for performing bulk searches in short bursts of around 1000-5000 individual search terms.I’d like to avoid 5000 separate network calls to perform each search, are there any aggregation stages that can perform the searches in bulk?", "username": "Anthony_Halliday" }, { "code": "", "text": "Hi @Anthony_Halliday – what expectations do you have for the search results? Would each one be unique to each query?", "username": "Elle_Shwer" }, { "code": "", "text": "Hi @Elle_Shwer, yes exactly. I’d like to effectively run n,000 distinct searches in a batch.", "username": "Anthony_Halliday" }, { "code": "", "text": "And why do you want to avoid the network call? Just trying to understand, as I’m not sure we have a clean way to do this today.", "username": "Elle_Shwer" }, { "code": "", "text": "Yes, I wasn’t sure it would be possible.For short bursts of related queries, my initial instinct was to batch the searches into a single network operation. But perhaps that’s not the standard approach with MongoDB.", "username": "Anthony_Halliday" } ]
Performing bulk search reads
2022-05-27T12:45:37.950Z
Performing bulk search reads
1,769
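No batch API comes up in the thread above, so the sketch below is only one possible pattern: issue the individual $search aggregations concurrently over a single client, so the cost is bounded by the connection pool rather than by strictly serial round trips. The "default" index name and the searched field are assumptions.

```js
// Node.js sketch: run many independent $search queries concurrently.
// The "default" index and the "name" path are placeholder assumptions.
async function bulkSearch(collection, terms, concurrency = 50) {
  const results = [];
  for (let i = 0; i < terms.length; i += concurrency) {
    const batch = terms.slice(i, i + concurrency);
    const batchResults = await Promise.all(
      batch.map((term) =>
        collection
          .aggregate([
            { $search: { index: "default", text: { query: term, path: "name" } } },
            { $limit: 5 }
          ])
          .toArray()
      )
    );
    results.push(...batchResults);
  }
  return results;
}
```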
null
[]
[ { "code": "", "text": "Hey,\nI have sent the mail regarding for availing the voucher or promo code for the Developer Certification 2 days back. Please look into it.", "username": "Devata_Harshith" }, { "code": "", "text": "Hello Devata,Welcome to the MongoDB for Academia community! Thanks for reaching out.Our US-office was closed on Monday 5/30 and we are experiencing a high volume of inquiries. Please expect a response within the next 48 hours. Thanks for your patience.Sarah", "username": "Sarah" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
About MongoDB Student Pack Voucher for Developer Certification
2022-05-31T09:04:01.363Z
About MongoDB Student Pack Voucher for Developer Certification
3,121
null
[ "queries" ]
[ { "code": "const game = await Game.findOne({name}, {\"lastRefreshed\": 0, \"createdAt\": 0, \"tokens\": { $slice: 25 }}).lean().exec();\n", "text": "I have my collection where every document has an array of item. But some documents has more than 10k items in their array.What Im trying to do is to select the document and limit the number of displayed items from that array in 50 for example (to speed up the query, because it takes so long to load all the items).I dont know if thats possible…\nI was trying:The tokens array has more than 10k items, and the code above does not work.", "username": "foco_radiante" }, { "code": "$slice\"tokens\": { $slice: 25 }", "text": "Hello @foco_radiante,The syntax to restrict the number of elements returned from an array field using $slice in the projection (\"tokens\": { $slice: 25 }) is correct - and I believe it works. I cannot tell further if there is an issue from the available details in your question.See the usage from the manual:", "username": "Prasad_Saya" }, { "code": "", "text": "The syntax findOne(…).lean().exec(), as I do not recognize it, makes me think that you might be using an obstruction abstraction layer like mongoose.If it is the case it might be a good idea to add mongoose in the tag list so that people only tracking mongoose related topics see your post.the code above does not workHow does it behave exactly? Errors or wrong results?I am not sure you can have projection with exclusion like lastRefreshed:0 and inclusion like the tokens specification. As mentioned by @Prasad_Saya, $slice should work. I would try just using the $slice part. In principal, all other fields will be excluded.", "username": "steevej" } ]
Limit the items returned from array
2022-05-31T06:58:41.896Z
Limit the items returned from array
5,845
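To rule Mongoose out as the culprit in the thread above, the same $slice projection can be tried directly in mongosh; note that $slice only trims what is returned over the wire, while the full document is still read on the server. Collection and field names are assumptions that mirror the thread.

```js
// mongosh sketch: same projection as the Mongoose query, run directly
// against the server. "games" is an assumed collection name.
db.games.findOne(
  { name: "some game" },
  { lastRefreshed: 0, createdAt: 0, tokens: { $slice: 50 } }
)
```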