Columns: image_url (string, 113-131 chars), tags (sequence), discussion (list), title (string, 8-254 chars), created_at (string, 24 chars), fancy_title (string, 8-396 chars), views (int64, 73-422k)
null
[ "node-js", "replication", "mongoose-odm", "containers" ]
[ { "code": "MONGODB_URI=\"mongodb://ruser:rpassword@localhost:27017/database?authSource=admin&directConnection=true\"\n\n//Docker-compose\nmongodb:\nimage: 'bitnami/mongodb:latest'\nenvironment:\n - MONGODB_ADVERTISED_HOSTNAME=127.0.0.1\n - MONGODB_REPLICA_SET_MODE=primary\n - MONGODB_ROOT_USER=ruser\n - MONGODB_ROOT_PASSWORD=rpassword\n - MONGODB_DATABASE=database\n - MONGODB_REPLICA_SET_KEY=replicasetkey123\nports:\n - '27017:27017'\nvolumes:\n - 'mongo-db:/bitnami/mongodb'\n\n\n// Connection code\nexport default async (): Promise<void> => {\n LoggerInstance.info(\"Connecting to database at %s\", config.databaseUrl);\n await mongoose.connect(config.databaseUrl!);\n \n};\n info: Connecting to database at mongodb://ruser:rpassword@localhost:27017/database?authSource=adm\nrectConnection=true\n /monorepo/node_modules/mongoose/node_modules/mongodb/lib/cmap/con\non.js:207\n callback(new error_1.MongoServerError(document));\n ^\n\n MongoServerError: Authentication failed.\n at Connection.onMessage (/monorepo/node_modules/mongoose/node\nles/mongodb/lib/cmap/connection.js:207:30)\n at MessageStream.<anonymous> (/monorepo/node_modules/mongoose\n_modules/mongodb/lib/cmap/connection.js:60:60)\n at MessageStream.emit (node:events:390:28)\n at MessageStream.emit (node:domain:475:12)\n at processIncomingData (/monorepo/node_modules/mongoose/node_\nes/mongodb/lib/cmap/message_stream.js:132:20)\n at MessageStream._write (/monorepo/node_modules/mongoose/node\nles/mongodb/lib/cmap/message_stream.js:33:9)\n at writeOrBuffer (node:internal/streams/writable:389:12)\n at _write (node:internal/streams/writable:330:10)\n at MessageStream.Writable.write (node:internal/streams/writable:334:10)\n at Socket.ondata (node:internal/streams/readable:754:22) {\n ok: 0,\n code: 18,\n codeName: 'AuthenticationFailed',\n '$clusterTime': {\n clusterTime: Timestamp { low: 1, high: 1676708807, unsigned: true },\n signature: {\n hash: Binary {\n sub_type: 0,\n buffer: Buffer(20) [Uint8Array] [\n 51, 129, 179, 197, 133, 216,\n 110, 109, 120, 20, 24, 215,\n 211, 82, 72, 5, 192, 89,\n 198, 39\n ],\n position: 20\n },\n keyId: Long { low: 7, high: 1676706790, unsigned: false }\n }\n },\n operationTime: Timestamp { low: 1, high: 1676708807, unsigned: true },\n [Symbol(errorLabels)]: Set(1) { 'HandshakeError' }\n }\n [nodemon] app crashed - waiting for file changes before starting...\ndatabaseadmin", "text": "Hello everyone, I need to run replica set locally because I am using Prisma.So my setup was as followingBut when I try to run my app, I am getting this errorThe server connecting to this db is not inside docker-compose,My suspicion is that the root user is not present in the database DB but rather admin DB, but not sure how to connect to one and use the other", "username": "Nikola_Milovic" }, { "code": "databaseadminauthSource=admin", "text": "My suspicion is that the root user is not present in the database DB but rather admin DB, but not sure how to connect to one and use the otherAdd the authSource option to your connection string. authSource=admin", "username": "chris" }, { "code": "mongodb://ruser:rpassword@localhost:27017/database?authSource=admin&directConnection=true\n", "text": "That is already present in the connection URL @chris", "username": "Nikola_Milovic" }, { "code": "database", "text": "Possibly the opposite then, set it to database I’m not familiar with the bitnami image and how they work.", "username": "chris" } ]
Authentication Failed for mongodb cluster in docker-compose
2023-02-18T08:44:20.428Z
Authentication Failed for mongodb cluster in docker-compose
2,828
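
Whether authSource=admin is right depends on where the user was actually created. A minimal mongosh sketch for checking that, assuming the bitnami container from the compose file is reachable on localhost:27017 (credentials are the thread's placeholders):

```javascript
// Connect with: mongosh "mongodb://localhost:27017" and check both databases.
// With the bitnami image's MONGODB_ROOT_USER, the root user normally lives
// in "admin"; if auth succeeds here, authSource=admin is correct and the
// failure lies elsewhere (e.g. the replica set key or advertised hostname).
const admin = db.getSiblingDB("admin");
admin.auth("ruser", "rpassword");          // expect { ok: 1 }
admin.getUsers();                          // the root user should be listed here
db.getSiblingDB("database").getUsers();    // usually empty unless explicitly created
```
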
https://www.mongodb.com/…6_2_1024x213.png
[ "replication", "server" ]
[ { "code": "", "text": "I have deployed mongodb replicaset using helm charts on RKE2 cluster (RHEL8 VMs) using “managed-nfs-storage”. The first mongo replica came up and the PRIMARY is also set properly but the second replica is crashing with the below fsync error.`{“t”:{“$date”:“2023-02-17T18:35:18.407+00:00”},“s”:“E”, “c”:“STORAGE”, “id”:20557, “ctx”:“initandlisten”,“msg”:“DBException in initAndListen, terminating”,“attr”:{“error”:“FileStreamFailed: Unable to write process id 1\\n to file (fsync failed): /bitnami/mongodb/data/db/mongod.lock Input/output error”}}Mongo pods under the database namespace:\n\nimage1892×394 91.5 KB\nPlease find below some of the findings around this issue,I need some inputs in debugging the root cause for crashing of the mongo secondary replica. Any suggestions would be appreciated", "username": "bhavaniprasad_reddy" }, { "code": "", "text": "You’ve identified the issue, you need to work out why fsync is failing to the storage.Try another node or new storage.", "username": "chris" } ]
FileStreamFailed: Unable to write process id 1 to file (fsync failed): /bitnami/mongodb/data/db/mongod.lock Input/output error
2023-02-17T19:02:54.823Z
FileStreamFailed: Unable to write process id 1 to file (fsync failed): /bitnami/mongodb/data/db/mongod.lock Input/output error
904
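
The error implicates the storage layer rather than mongod itself. A small Node probe, sketched under the assumption that it runs inside the failing pod with the same NFS-backed volume mounted, can confirm whether fsync fails independently of MongoDB:

```javascript
// Write and fsync a scratch file on the same mount as the data directory
// (path taken from the error message). If the NFS layer is at fault,
// fsyncSync throws the same Input/output error seen in the mongod log.
const fs = require("fs");

const probePath = "/bitnami/mongodb/data/db/fsync_probe.tmp";
const fd = fs.openSync(probePath, "w");
fs.writeSync(fd, "probe");
fs.fsyncSync(fd); // throws EIO when the storage cannot honor fsync
fs.closeSync(fd);
fs.unlinkSync(probePath);
console.log("fsync succeeded on", probePath);
```
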
null
[ "node-js", "mongoose-odm", "compass" ]
[ { "code": "const mongoose = require('mongoose')\nmongoose.connect('mongodb://localhost/my_database', {useNewUrlParser: true})\nconst serverSelectionError = new ServerSelectionError();\n ^\n\nMongooseServerSelectionError: connect ECONNREFUSED ::1:27017\n at Connection.openUri (C:\\Users\\riema\\OneDrive\\Desktop\\Learning\\nodejs-express-mongo-blog\\node_modules\\mongoose\\lib\\connection.js:825:32) \n at C:\\Users\\riema\\OneDrive\\Desktop\\Learning\\nodejs-express-mongo-blog\\node_modules\\mongoose\\lib\\index.js:411:10\n at C:\\Users\\riema\\OneDrive\\Desktop\\Learning\\nodejs-express-mongo-blog\\node_modules\\mongoose\\lib\\helpers\\promiseOrCallback.js:41:5\n at new Promise (<anonymous>)\n at promiseOrCallback (C:\\Users\\riema\\OneDrive\\Desktop\\Learning\\nodejs-express-mongo-blog\\node_modules\\mongoose\\lib\\helpers\\promiseOrCallback.js:40:10)\n at Mongoose._promiseOrCallback (C:\\Users\\riema\\OneDrive\\Desktop\\Learning\\nodejs-express-mongo-blog\\node_modules\\mongoose\\lib\\index.js:1285:10)\n at Mongoose.connect (C:\\Users\\riema\\OneDrive\\Desktop\\Learning\\nodejs-express-mongo-blog\\node_modules\\mongoose\\lib\\index.js:410:20) \n at Object.<anonymous> (C:\\Users\\riema\\OneDrive\\Desktop\\Learning\\nodejs-express-mongo-blog\\index.js:43:10)\n at Module._compile (node:internal/modules/cjs/loader:1120:14)\n at Module._extensions..js (node:internal/modules/cjs/loader:1174:10) {\n reason: TopologyDescription {\n type: 'Unknown',\n servers: Map(1) {\n 'localhost:27017' => ServerDescription {\n address: 'localhost:27017',\n type: 'Unknown',\n hosts: [],\n passives: [],\n arbiters: [],\n tags: {},\n minWireVersion: 0,\n maxWireVersion: 0,\n roundTripTime: -1,\n lastUpdateTime: 158728533,\n lastWriteDate: 0,\n error: MongoNetworkError: connect ECONNREFUSED ::1:27017\n at connectionFailureError (C:\\Users\\riema\\OneDrive\\Desktop\\Learning\\nodejs-express-mongo-blog\\node_modules\\mongodb\\lib\\cmap\\connect.js:387:20)\n at Socket.<anonymous> (C:\\Users\\riema\\OneDrive\\Desktop\\Learning\\nodejs-express-mongo-blog\\node_modules\\mongodb\\lib\\cmap\\connect.js:310:22)\n at Object.onceWrapper (node:events:628:26)\n at Socket.emit (node:events:513:28)\n at emitErrorNT (node:internal/streams/destroy:151:8)\n at emitErrorCloseNT (node:internal/streams/destroy:116:3)\n at process.processTicksAndRejections (node:internal/process/task_queues:82:21) {\n cause: Error: connect ECONNREFUSED ::1:27017\n at TCPConnectWrap.afterConnect [as oncomplete] (node:net:1247:16) {\n errno: -4078,\n code: 'ECONNREFUSED',\n syscall: 'connect',\n address: '::1',\n port: 27017\n },\n [Symbol(errorLabels)]: Set(1) { 'ResetPool' }\n },\n topologyVersion: null,\n setName: null,\n }\n },\n stale: false,\n compatible: true,\n heartbeatFrequencyMS: 10000,\n localThresholdMS: 15,\n setName: null,\n maxElectionId: null,\n maxSetVersion: null,\n commonWireVersion: 0,\n logicalSessionTimeoutMinutes: null\n },\n code: undefined\n}\n", "text": "Hi,I am new to mongodb and have just started using it. I am going through a nodejs book and trying to connect to mongodb from my node js application. Both the application server and Db server are running on my windows local machine.I am using the below code to connect to mongodb:Also, the mongo server appears to be running as I see a MongoDB Database Server process in my task manager. I started this using mongod --config mongod.cfg. When I ran this command, there was nothing printed on console. Also, I am able to connect to DB using Compass.Below is the error stacktrace. 
Any pointers on how to debug this issue:", "username": "Data_Beings" }, { "code": "", "text": "If your mongod is running as service there is no need to start mongod again from cmd line\nCan you connect to your mongod from shell?\nTry with 127.0.0.1 instead localhost\nMay be the issue due to Ipv6 address\nSearch our forum threads for similiar issue", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Thanks, using 127.0.0.1 from nodesjs fixed it. But connection string with localhost worked fine when using Compass. Just wanted to understand what could be happening here?", "username": "Data_Beings" } ]
Connection Issues from nodejs
2023-02-17T17:17:03.232Z
Connection Issues from nodejs
1,015
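
The usual explanation is that newer Node versions resolve localhost to the IPv6 address ::1 first, while mongod by default listens only on IPv4 127.0.0.1; Compass apparently copes with both families. A minimal sketch of the two client-side fixes (the family option is an assumption that mongoose forwards it to the driver's socket layer):

```javascript
const mongoose = require("mongoose");

// Fix 1: bypass name resolution with the IPv4 loopback address.
mongoose.connect("mongodb://127.0.0.1:27017/my_database");

// Fix 2 (alternative): keep "localhost" but force IPv4 resolution.
// mongoose.connect("mongodb://localhost:27017/my_database", { family: 4 });
```
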
https://www.mongodb.com/…b685d6d905ef.png
[ "connecting", "server" ]
[ { "code": "[root@instance-20230214-2159 opc]# mongo --port 27017 --authenticationDatabase \"admin\" -u \"<redacted>\" -p\nMongoDB shell version v4.4.18\nEnter password:\nconnecting to: mongodb://127.0.0.1:27017/?authSource=admin&compressors=disabled&gssapiServiceName=mongodb\n", "text": "I have been trying to host a mongo db server on a oracle virtual machine (using oracle linux 8). I have been using the mongodb extention for vscode to test the connection, but I keep getting this error:\n“Unable to load connection: MongoServerSelectionError: connect ETIMEDOUT :27017”\nHere are my connection settings:\n\nimage580×619 14.8 KB\n\nI also remembered to port forward the port 27017.\nThe service is enabled. I can connect to it via the command line when I have connected to the vm via ssh:Im not sure what Im doing wrong. Does anyone have any idea?\nThanks in advance!", "username": "ian_finity" }, { "code": "127.0.0.1bindIp bindIpAllgrep bindIp /etc/mongod.conf", "text": "It could be that the server is only bound to 127.0.0.1 and not listening on a lan IP.Check your configuration file for bindIp or bindIpAll :grep bindIp /etc/mongod.confIf you need to connect to the database from other hosts then this will need to be updated, the default is to only bind localhost. If you only need vscode to access it remotely consider using the “SSH Tunnel” options on the connection form.Also be aware of how to secure your installation once you allow access from other source:Before binding to a non-localhost (e.g. publicly accessible) IP address, ensure you have secured your cluster from unauthorized access. For a complete list of security recommendations, see Security Checklist. At minimum, consider enabling authentication and hardening network infrastructure.", "username": "chris" } ]
Connect ETIMEDOUT when attempting to connect to database hosted on oracle vm
2023-02-17T01:38:25.916Z
Connect ETIMEDOUT when attempting to connect to database hosted on oracle vm
1,460
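
Besides grepping the config file, the effective bind address can be confirmed from the SSH session that already connects; a mongosh sketch (assumes a role allowed to run getCmdLineOpts):

```javascript
// Shows the options mongod actually started with. A bindIp of 127.0.0.1
// means remote clients such as the VS Code extension will time out, even
// though local shell connections keep working.
db.adminCommand({ getCmdLineOpts: 1 }).parsed.net
```
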
null
[ "flutter" ]
[ { "code": "flutter pub run realm generateimport 'package:realm/realm.dart';\n\npart 'car.g.dart';\n\n@RealmModel()\nclass _Car {\n late String make;\n\n late String model;\n\n int? kilometers = 500;\n}\n", "text": "Hi, I’m trying to generate a new RealmModel.I put in a simple model and when I run ‘flutter pub run realm generate’ I get the following error:[SEVERE] Unhandled build failure!\nInvalid argument(s): name shouldn’t end with a ‘.’\n[SEVERE] Failed after 13.6s\npub finished with exit code 1Ran on: M1 Macbook | Flutter 3.7.3 | Dart 2.19.2 | realm ^1.0.1\nWhat I’ve done:Repro StepsCode Snippet\n/lib/models/car.dart", "username": "Alvin_Chan" }, { "code": "flutter pub run realm generate --cleanflutter pub deps", "text": "Hi,\nwe could not reproduce the problem. For example adding this model in this sample we are able to generate the Realm models correctly realm-dart-samples/provider_shopper at main · realm/realm-dart-samples · GitHubAre you adding this model to a a brand new flutter project or an existing one? In such case it might help if you do run flutter pub run realm generate --clean command first.\nIf this is a brand new project then could you run flutter pub deps and send the output here so we can see what dependencies this project has?cheers", "username": "Lyubomir_Blagoev" }, { "code": "", "text": "Hi,Thanks for the quick reply. I started eliminating directories and individual files from the project until I found the offending file. It turns out one of the files had two periods in the file name (‘permission_handler…dart’). I never noticed this since the Flutter project still compiles. After taking out one period the RealmModel generated successfully.I suggest that the Realm generator should account for this.Regards,Alvin", "username": "Alvin_Chan" }, { "code": "", "text": "Cristiano Ronaldo`s inspirational story", "username": "Zaid_Khan" } ]
Can't generate RealmModel
2023-02-17T01:48:33.889Z
Can&rsquo;t generate RealmModel
1,451
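
A quick Node probe, a hypothetical helper rather than part of the Realm tooling, can locate filenames like the offending one before re-running the generator:

```javascript
// List files under lib/ whose names contain consecutive dots, the pattern
// that tripped the generator here. Requires Node 18.17+ for the recursive
// readdir option.
const { readdirSync } = require("fs");

const offenders = readdirSync("lib", { recursive: true })
  .filter((name) => String(name).includes(".."));
console.log(offenders.length ? offenders : "no double-dot filenames found");
```
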
null
[ "crud", "atlas-cluster", "golang" ]
[ { "code": "d,_ := mongo.Connect(context.TODO(), options.Client().ApplyURI(\"mongodb+srv://user:[email protected]/?retryWrites=true&w=majority\"))\n\nd.Database(\"database\").Collection(\"collection\").UpdateOne(context.TODO(), bson.M{\"user\":r.FromValue(\"username\")},bson.D{{\"$set\",bson.D{{\"pass\",r.FromValue(\"password \")},{\"field_not_repeated\",??????}}}})\n", "text": "Hi,\nI am trying to create a field that is not repeated in any other document, such as _id, but I searched a lot and did not find anything, can you help me?", "username": "mmahdi" }, { "code": "_id", "text": "Hi @mmahdi. Welcome to the community.You need a unique index on that field, just like _id, which is automatically assigned a unique index during collection creation.", "username": "Mahi_Satyanarayana" }, { "code": "Reason: [08:52:07.642] Error running insert command for 'braq.users' on process 'atlas-bqu6iy-shard-00-01.um0c2p7.mongodb.net:27017' : [08:52:07.642] Error executing WithClientFor() for cp=atlas-bqu6iy-shard-00-01.um0c2p7.mongodb.net:27017 (local=true) connectMode=SingleConnect : [08:52:07.642] Error running command for runCommandWithTimeout(dbName=braq, cmd=[{insert users} {documents [[{_id ObjectID(\"63f0919e87c53bfc5d5e8293\")} {o p}]]} {writeConcern map[w:1]}]) : result=\"\" identityUsed=mms-automation@admin[[MONGODB-CR/SCRAM-SHA-1]][24] : write exception: write errors: [E11000 duplicate key error collection: braq.users index: otpemurl_1 dup key: { otpurl: null }]\ncrypto/randcrypto/randReason: [08:57:12.827] Error running insert command for 'braq.users' on process 'atlas-bqu6iy-shard-00-01.um0c2p7.mongodb.net:27017' : [08:57:12.827] Error executing WithClientFor() for cp=atlas-bqu6iy-shard-00-01.um0c2p7.mongodb.net:27017 (local=true) connectMode=SingleConnect : [08:57:12.827] Error running command for runCommandWithTimeout(dbName=braq, cmd=[{insert users} {documents [[{_id ObjectID(\"63f092e087c53bfc5d5e8294\")} {otpemurl 1}]]} {writeConcern map[w:1]}]) : result=\"\" identityUsed=mms-automation@admin[[MONGODB-CR/SCRAM-SHA-1]][24] : write exception: write errors: [E11000 duplicate key error collection: braq.users index: otpemurl_1 dup key: { otpemurl: \"1\" }]\n", "text": "Thanks it works, but there are some problems:\n1- When I try to create a document without adding that field, an error appears:I want the unique field mode to be optional.\n2- I am using crypto/rand a random string, if crypto/rand generates a similar string it will show an error:How can I solve these problems?", "username": "mmahdi" } ]
Create a unique field in mongodb
2023-02-18T06:25:56.941Z
Create a unique field in mongodb
1,115
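
Both follow-up problems have standard answers: a partial unique index makes the constraint optional, and a retry on duplicate-key errors covers rare random-string collisions. A mongosh sketch (the Go driver accepts the same keys/options document; verify the existing index name with db.users.getIndexes() first):

```javascript
// Replace the plain unique index with a partial one: uniqueness is only
// enforced on documents where "otpurl" exists, so inserts that omit the
// field no longer collide on null.
db.users.dropIndex("otpurl_1"); // use the actual name from getIndexes()
db.users.createIndex(
  { otpurl: 1 },
  { unique: true, partialFilterExpression: { otpurl: { $exists: true } } }
);
// Collisions from the random generator still surface as E11000; catch that
// error in the application and retry the insert with a newly generated string.
```
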
null
[ "atlas-device-sync", "react-native" ]
[ { "code": "", "text": "Hi, I perfectly followed the instructions that the documentation reported, but every time I start my app it gives me the Missing Realm Constructor error and I don’t understand how to fix it, can you help me thanks. I use all the latest versions of React Native, Realm and Expo", "username": "Samuele_Cervietti" }, { "code": ".sopod installRealm", "text": "@Samuele_Cervietti This typically happens when the Realm .so is not linked into the compiled React Native project. Verify that pod install has indeed included Realm and do a rebuild of the project.Also I see you are using Expo. If you are attempting to use the web browser, then this error will also come up, as we are only compatible with Android and iOS.Can you provide a link to the documentation you are following? I can verify if something is missing.", "username": "Andrew_Meyer" }, { "code": "", "text": "@Andrew_Meyer sorry but at the moment i am not home for work, as soon as i come back i will let you know, just one thing but i need to link my react native project with realm? It seemed to me that after version 0.65 it was no longer necessary", "username": "Samuele_Cervietti" }, { "code": "", "text": "You won’t have to manually link anything. Did you follow our Expo documentation?This is based off of a template we created to help get started using Realm in React Native. Might be a good starting point to figuring out what is happening in your project.", "username": "Andrew_Meyer" }, { "code": "", "text": "I had already created the project for some time, I just wanted to integrate Realm, can you advise me to create a new project and reinstall everything?", "username": "Samuele_Cervietti" }, { "code": "", "text": "It should work without creating a new project. But if you are using expo, you will need to use the expo-dev-client. This allows you to use 3rd party libraries in an Expo project. Here’s the getting started documentation. Once you have that setup, you should be able to use Realm.", "username": "Andrew_Meyer" }, { "code": "", "text": "Hi, I had already tried to use Expo-dev-client but without result, because when I launch the app through “expo start --dev-client” it doesn’t load the app when I try to open it with the expo app on play store", "username": "Samuele_Cervietti" }, { "code": "", "text": "Expo-dev-client is not compatible with Expo Go. You will have to build a dev client and install it on your device. Have you tried use the a simulator or emulator?", "username": "Andrew_Meyer" }, { "code": "", "text": "I haven’t tried any emulator yet, now I try, in case how do I create a development client? Excuse the many questions but it is the first time that such a thing has happened to me", "username": "Samuele_Cervietti" }, { "code": "", "text": "@Andrew_Meyer i tried with the emulator but it keeps giving me the same error ie that the realm constructor is missing. I followed the whole procedure but it didn’t lead to anything, what can I do?", "username": "Samuele_Cervietti" }, { "code": "", "text": "It’s hard to say without seeing the exact instructions you are using to install and run your project. I recommend trying out our expo-template and comparing the project contents to your project once you get that up and running. Maybe you will be able to find the issue through comparing a working realm project to yours.", "username": "Andrew_Meyer" }, { "code": "", "text": "Okay, I saw that there is also the javascript version, is it okay if I try this? 
realm-js/templates/expo-template-js at master · realm/realm-js · GitHub", "username": "Samuele_Cervietti" }, { "code": "", "text": "I tried to install the Expo templates with realm, but both the typescript and JavaScript templates are not downloaded, when I run the script the download starts and stops almost immediately, creating just the folder, you know how I can", "username": "Samuele_Cervietti" }, { "code": "", "text": "Did you follow the instructions in the README?", "username": "Andrew_Meyer" }, { "code": "", "text": "yes i read the documentation on README but it keeps giving me that problem", "username": "Samuele_Cervietti" }, { "code": "", "text": "What exact command do you type to produce this error?", "username": "Andrew_Meyer" }, { "code": "", "text": "I tried to install both the typescript and JavaScript template, the commands I used, are these:Typescript:\nnpx create-react-native-app MyAwesomeRealmApp -t with-realmJavaScript:\nexpo init MyAwesomeRealmApp --template @realm/expo-template-js", "username": "Samuele_Cervietti" }, { "code": "", "text": "And this fails to download the template? Do other templates (non-realm) work?", "username": "Andrew_Meyer" }, { "code": "", "text": "Executing the above commands does not download the realm model and gives me the error already mentioned, the other models work, also because I only use the default Expo templates (blank). And then I install the packages I need, I had done this for realm too before I knew it was giving an error", "username": "Samuele_Cervietti" }, { "code": "expo-cliexpo", "text": "@Samuele_Cervietti I just published new templates for expo. However I have found a bug on the expo-cli. I’m going to reach out to expo and get that fixed. I’ll post here when it’s working again.", "username": "Andrew_Meyer" } ]
Missing Realm Constructor error
2022-05-29T18:27:01.854Z
Missing Realm Constructor error
11,197
null
[]
[ { "code": "", "text": "I have scenario where one of secondary is unable to sync from primary\nnow suppose we have increment on below document\n{_id:1 salary:500 }the secondary still have {_id:1 salary:50 }on which below inc applied\ndb.col.update({_id:1},{$inc:{salary:5}})Query:\nWill secondry wait for previous update or it will update to 55?as in oplog.rs I am seeing below datao: {\n‘$v’: 2,\ndiff: { u: { salary: { ‘$inc’: 5 } } }\n},", "username": "VIKASH_RANJAN1" }, { "code": "", "text": "Not a mongo employee. Following is my own understanding.Generally data replication is implemented as “total order broadcast”, so the events will be applied on secondary servers one by one and in exact the same order as sent. there shouldn’t be any “gap”. (otherwise the data is inconsistent ).", "username": "Kobe_W" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Stale data in secondary node $inc operation
2023-02-17T15:17:27.474Z
Stale data in secondary node $inc operation
369
null
[]
[ { "code": "mongod.service - MongoDB Database Server\n Loaded: loaded (/lib/systemd/system/mongod.service; disabled; vendor preset: enabled)\n Active: failed (Result: exit-code) since Sun 2022-03-27 15:18:13 CEST; 19s ago\n Docs: https://docs.mongodb.org/manual\n Process: 54971 ExecStart=/usr/bin/mongod --config /etc/mongod.conf (code=exited, status=14)\n Main PID: 54971 (code=exited, status=14)\n", "text": "Hello,I installed mongodb according to https://www.mongodb.com/docs/manual/tutorial/install-mongodb-on-ubuntu/ on ubuntu 20.04. I adhered the guide and did not change any configuration files or anything else. Now, when I run “sudo systemctl start mongod” the terminal just executes the command but no output is produced. Running “sudo systemctl status mongod” gives:Can somebody tell me what I have done wrong? Many thanks!", "username": "peter_parker" }, { "code": "cd /var/log", "text": "@peter_parker We may not be able to tell you what’s wrong but the system logs should be able to tell you!cd /var/log and check the syslog and any mongo* logs.", "username": "Jack_Woehr" }, { "code": "", "text": "Thanks for your answer, it turns out that I had to change the write permission to the /var/lib/mongodb folder", "username": "peter_parker" }, { "code": "", "text": "Excellent, have fun with MongoDB1", "username": "Jack_Woehr" }, { "code": "", "text": "┌──(root💀kali)-[/home/stark]\n└─# systemctl start mongod.service 3 ⨯┌──(root💀kali)-[/home/stark]\n└─# systemctl status mongod.service\n× mongod.service - MongoDB Database Server\nLoaded: loaded (/lib/systemd/system/mongod.service; enabled; vendor preset: disabled)\nActive: failed (Result: signal) since Mon 2022-05-02 15:57:15 IST; 1s ago\nDocs: https://docs.mongodb.org/manual\nProcess: 2351 ExecStart=/usr/bin/mongod --config /etc/mongod.conf (code=killed, signal=ILL)\nMain PID: 2351 (code=killed, signal=ILL)\nCPU: 17msMay 02 15:57:15 kali systemd[1]: Started MongoDB Database Server.\nMay 02 15:57:15 kali systemd[1]: mongod.service: Main process exited, code=killed, status=4/ILL\nMay 02 15:57:15 kali systemd[1]: mongod.service: Failed with result ‘signal’.", "username": "stark_jatt" }, { "code": "", "text": "please help me this error show my laptop when i put these command\ni used hp8440p laptopi5 i have linux os debian\nand i have win10 in my laptop here is work mongo but\nnot working in my linux os thro me this error please resolve this", "username": "stark_jatt" }, { "code": "", "text": "It terminated with SIGILL it means your CPU is INcompatible with the version of mongod you are trying to use.Search the forum with signal=ILL for potential solution.Thanks @Ramachandra_Tummala for the correction.", "username": "steevej" }, { "code": "", "text": "steevej i think you meant incompatible", "username": "Ramachandra_Tummala" }, { "code": "", "text": "I don’t understand what want to say\nPlease answer me in docs type", "username": "stark_jatt" }, { "code": "", "text": "The CPU in your laptop is probably an i5-520M.\nThat CPU does not support the AVX instruction.\nTherefore, it will not run the latest versions of MongoDB.", "username": "Jack_Woehr" }, { "code": "", "text": "New here so be kind pls.Getting this error when trying to install Mongo 5 on Ubuntu release 20.04 LTS 64.\nmongod.service - MongoDB Database Server\nLoaded: loaded (/lib/systemd/system/mongod.service; enabled; vendor preset: enabled)\nActive: failed (Result: core-dump) since Fri 2023-02-17 22:23:04 EST; 31s ago\nDocs: https://docs.mongodb.org/manual\nProcess: 7221 
ExecStart=/usr/bin/mongod --config /etc/mongod.conf (code=dumped, signal=ILL)\nMain PID: 7221 (code=dumped, signal=ILL)Tried to uninstall and reinstall.Output from log:\nkernel: [ 3782.753092] traps: mongod[7221] trap invalid opcode ip:555b528a19da sp:7ffca788f9b0 error:0 in mongod[555b4e821000+51e5000]\nFeb 17 22:23:04 MobileMoe13 systemd[1]: mongod.service: Main process exited, code=dumped, status=4/ILL\nFeb 17 22:23:04 MobileMoe13 systemd[1]: mongod.service: Failed with result ‘core-dump’.\nFeb 17 22:25:57 MobileMoe13 PackageKit: daemon quitThanks in advanced.", "username": "Jim_Corrigan" }, { "code": "", "text": "ILL means illegal instruction\nI think your cpu architecture not supporting the mongodb you are installing\nCheck this link", "username": "Ramachandra_Tummala" } ]
Sudo systemctl start mongod not working
2022-03-27T16:47:19.338Z
Sudo systemctl start mongod not working
13,776
null
[ "server" ]
[ { "code": "[email protected]", "text": "Hi, played around with this for a few hours but I couldn’t fiture it out. I used the line below to start mongodb but I get an error.brew services start [email protected]: Calling plist_options is deprecated! Use service.require_root instead.\nPlease report this issue to the mongodb/brew tap (not Homebrew/brew or Homebrew/homebrew-core), or even better, submit a PR to fix it:\n/home/linuxbrew/.linuxbrew/Homebrew/Library/Taps/mongodb/homebrew-brew/Formula/[email protected]:50Error: Formula [email protected] has not implemented #plist, #service or installed a locatable service file", "username": "Dennis_Hurber" }, { "code": "", "text": "There is an open issue\nhttps://jira.mongodb.org/browse/SERVER-74134", "username": "Ramachandra_Tummala" } ]
Having trouble starting Mongodb
2023-02-17T16:55:25.122Z
Having trouble starting Mongodb
1,525
null
[ "react-native", "flutter" ]
[ { "code": "", "text": "Hey all, me again. I’ve been asking a lot of questions about MongoDB—specifically Realm with React—in the community forums lately. When I first started working with Realm, I tried to learn Flutter using the Realm Flutter SDK. When I was first getting started, it was a release candidate. A few days ago, it went GA!I put a pause on my Flutter project, but learning Realm has been an overall positive experience, and it’s been really helpful for bringing my React Native product to market. The team has also been helpful in the community forums as I’ve been bashing my head against the wall figuring out a few problems. (Thanks @Kyle_Rollins)So no questions or complaining in this post, just a congrats to the team—and I look forward to using the Realm Flutter SDK for my next project. If this violates some community guidelines, oops—sorry.", "username": "Alexander_Ye" }, { "code": "", "text": "Nice! I’m glad you’ve had a positive experience with Realm. It’s very cool tech. I wish I had had more time to help you out along the way, but lurking and helping in the forums is a bit of a luxury for me: my job responsibilities keep me pretty busy!Please continue to post when you get blocked. It’s a good signal for the docs team on where the docs could use some help and it can help other folks who are learning Realm.Good luck with the Flutter SDK!", "username": "Kyle_Rollins" } ]
[Realm Flutter] Congrats MongoDB Team on Release
2023-02-17T20:46:08.978Z
[Realm Flutter] Congrats MongoDB Team on Release
978
null
[]
[ { "code": "\nThread 1 received signal SIGSEGV, Segmentation fault.\nAddress not mapped to object.\nabsl::lts_20210324::container_internal::raw_hash_set<absl::lts_20210324::container_internal::NodeHashMapPolicy<mongo::ResourceId, mongo::PartitionedLockHead*>, absl::lts_20210324::hash_internal::Hash<mongo::ResourceId>, std::__1::equal_to<mongo::ResourceId>, std::__1::allocator<std::__1::pair<mongo::ResourceId const, mongo::PartitionedLockHead*> > >::find<mongo::ResourceId> (this=0x47dd6c90, key=..., hash=<optimized out>) at src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set.h:1372\n1372 src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set.h: No such file or directory.\n(gdb) bt\n#0 absl::lts_20210324::container_internal::raw_hash_set<absl::lts_20210324::container_internal::NodeHashMapPolicy<mongo::ResourceId, mongo::PartitionedLockHead*>, absl::lts_20210324::hash_internal::Hash<mongo::ResourceId>, std::__1::equal_to<mongo::ResourceId>, std::__1::allocator<std::__1::pair<mongo::ResourceId const, mongo::PartitionedLockHead*> > >::find<mongo::ResourceId> (\n this=0x47dd6c90, key=..., hash=<optimized out>)\n at src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set.h:1372\n#1 absl::lts_20210324::container_internal::raw_hash_set<absl::lts_20210324::container_internal::NodeHashMapPolicy<mongo::ResourceId, mongo::PartitionedLockHead*>, absl::lts_20210324::hash_internal::Hash<mongo::ResourceId>, std::__1::equal_to<mongo::ResourceId>, std::__1::allocator<std::__1::pair<mongo::ResourceId const, mongo::PartitionedLockHead*> > >::find<mongo::ResourceId> (\n this=0x47dd6c90, key=...)\n at src/third_party/abseil-cpp-master/abseil-cpp/absl/container/internal/raw_hash_set.h:1386\n#2 mongo::LockHead::migratePartitionedLockHeads (\n this=this@entry=0x4865b300)\n at src/mongo/db/concurrency/lock_manager.cpp:390\n#3 0x0000000004699794 in mongo::LockManager::lock (\n this=0x48722c60, resId=..., request=0x481126f0, \n mode=<optimized out>)\n at src/mongo/db/concurrency/lock_manager.cpp:527\n#4 0x00000000046a0140 in mongo::LockerImpl::_lockBegin (\n this=0x48016d00, opCtx=0x4864cc00, resId=..., \n mode=1219626888)\n at src/mongo/db/concurrency/lock_state.cpp:910\n#5 0x00000000046a22cc in mongo::LockerImpl::lock (\n--Type <RET> for more, q to quit, c to continue without paging-- this=0x48016d00, opCtx=0x4864cc00, resId=..., \n mode=mongo::MODE_X, deadline=...)\n at src/mongo/db/concurrency/lock_state.cpp:546\n#6 0x00000000046978a8 in mongo::Lock::DBLock::DBLock (\n this=0xffffffffe860, opCtx=0x4864cc00, db=..., \n mode=<optimized out>, deadline=..., \n skipGlobalAndRSTLLocks=false)\n at src/mongo/db/concurrency/d_concurrency.cpp:226\n#7 0x0000000003fe8964 in mongo::AutoGetDb::AutoGetDb (\n this=0xffffffffe848, opCtx=0x48b20788, dbName=..., \n mode=mongo::MODE_X, deadline=..., secondaryDbNames=...)\n at src/mongo/db/catalog_raii.cpp:171\n#8 0x0000000002c0d204 in mongo::(anonymous namespace)::logStartup (opCtx=0x4864cc00) at src/mongo/db/mongod_main.cpp:277\n#9 mongo::(anonymous namespace)::_initAndListen (\n serviceContext=<optimized out>, listenPort=<optimized out>)\n at src/mongo/db/mongod_main.cpp:677\n#10 0x0000000002c0b3fc in mongo::(anonymous namespace)::initAndListen (service=0x0, listenPort=<optimized out>)\n at src/mongo/db/mongod_main.cpp:850\n#11 0x0000000002c06270 in mongo::mongod_main (argc=3, \n argv=<optimized out>) at src/mongo/db/mongod_main.cpp:1548\n#12 0x0000000002c05bc4 in main (argc=0, argv=0x48b20788)\n at 
src/mongo/db/mongod.cpp:47\n(gdb)\n", "text": "Hi, I’m maintaining some MongoDB ports for FreeBSD.\nMongoDB 6.0 compiles fine, but after startup it crashes on a segmentation fault.\nThe information is collected in https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=267668I’ll quote the backtrace of 6.0.2 below. On 6.0.3 I get the same error.\nI build mongodb statically (without system libs) also, but same issue.Any help would be appreciated. Can I provide more information? Or help reproducing.", "username": "R_K" }, { "code": "", "text": "Hi Ronald, I’m using your mongodb60 port (6.0.2). Most likely you’re aware of this already, but just in case:I was getting the segmentation faults on startup on a fresh FreeBSD 13.1 machine (VMware ESXi VM) with 1GB of RAM. After upgrading the VM to 2GB the mongod daemon starts without issues.", "username": "Javier_Lavandeira" }, { "code": "", "text": "Hi, thanks for your remark. How is it going so far? My experience is that the first init of the DB fails. Sometimes a restart succeeds but mostly it does not. So to me it looks a bit like a race condition. What is your experience? Did it stay stable?", "username": "R_K" }, { "code": "", "text": "This issue is solved in https://jira.mongodb.org/browse/SERVER-71608 and the fix is applied to the current port in FreeBSD.", "username": "R_K" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
SIGSEGV in PartitionedLockHead on MongoDB 6.0 FreeBSD
2022-11-21T20:19:49.621Z
SIGSEGV in PartitionedLockHead on MongoDB 6.0 FreeBSD
2,119
null
[ "replication", "sharding" ]
[ { "code": "shard1shard2shard3rs1rs2rs3activityitemsactivity.itemsshard{1,2,3}appusersapp.usersshard1apps.usersactivity.itemsapp.usersshard1activity.itemsshard1shard4", "text": "I have a sharded collection with 3 shards (shard1, shard2, shard3). Each shard is a 3-node replicaset (rs1, rs2, rs3).I have a db called activity that has a large sharded collection called items. ie( activity.items). The data in this collection is split across shard{1,2,3}.I have another db called app and collection called users (ie, app.users). This is not a sharded collection. It is housed on shard1.I want to separate the replicasets that the apps.users and activity.items reside on. Ideally, app.users remains on shard1 and I can move activity.items from shard1 to a newly created shard4.Is this possible? Any high level guidance on which commands to be looking at example docs would be greatly appreciated.", "username": "AmitG" }, { "code": "", "text": "I’m open to other solutions as well. For example, maybe there is a way to tag the shards or replicasets in such a way that there is no affinity for data to be migrated there?", "username": "AmitG" }, { "code": "activity.items", "text": "You define a zone which shard 2/3/4 belongs to, and then set up zone key range for activity.items.Then the data on none-zone shards (e.g. shard1) will be gradually moved to other shards by balancer.We are now using this zone support to isolate our services traffic.", "username": "Kobe_W" }, { "code": "", "text": "Thank you! And do you just define minkey/maxkey as the range for the zone?", "username": "AmitG" }, { "code": "", "text": "Yes, whole collection data should be in that zone, so full key range.", "username": "Kobe_W" }, { "code": "", "text": "Thank you, do you have an example of setting a full key range? Ideally for a compound shard key.", "username": "AmitG" } ]
How to configure which shards a sharded collection's data can reside?
2023-02-16T20:55:27.023Z
How to configure which shards a sharded collection’s data can reside?
693
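
A mongosh sketch of the full-range zone assignment asked about at the end; the compound shard key fields here are placeholders for the collection's real key:

```javascript
// Put shards 2-4 in a zone and cover the entire compound key space with
// MinKey/MaxKey. shard1 gets no zone, so the balancer gradually drains
// activity.items chunks off it while the unsharded app.users collection
// stays on its primary shard (shard1).
sh.addShardToZone("shard2", "activityZone");
sh.addShardToZone("shard3", "activityZone");
sh.addShardToZone("shard4", "activityZone");
sh.updateZoneKeyRange(
  "activity.items",
  { keyA: MinKey, keyB: MinKey }, // placeholders for the real shard key fields
  { keyA: MaxKey, keyB: MaxKey },
  "activityZone"
);
```
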
null
[ "change-streams", "spring-data-odm" ]
[ { "code": "public Flux<Example> watch() {\n final ChangeStreamOptions changeStreamOptions = ChangeStreamOptions.builder().returnFullDocumentOnUpdate().build();\n return reactiveMongoTemplate.changeStream(\"collection\", changeStreamOptions, Example.class)\n .filter(e -> e.getOperationType() != null)\n .mapNotNull(ChangeStreamEvent::getBody);\n}\n", "text": "How to stop a mongodb changestream temporarily and resume it again using ReactiveMongoTemplate ?I’m trying to create a rest endpoint that should be able to stop the changestream for sometime while we do some database maintenance and then invoke the endpoint again to resume the stream from where it left off using resumeAt(timestamp)", "username": "Darshan_Bangre" }, { "code": "Disposable subscription = service.watch()\n .subscribe(exampleService::doSomething)\n\n// cancel the subscription \nsubscription.dispose();\n", "text": "Change stream can be unsubscribed/stopped by disposing the subscription", "username": "Darshan_Bangre" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Unwatch/re-watch change stream without restarting the application
2023-02-15T21:43:01.161Z
Unwatch/re-watch change stream without restarting the application
1,088
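
A Node-driver analogue of the accepted approach (a sketch; the reactive Spring API exposes the same resume-token idea via the resumeAt option mentioned in the question), showing a stop for maintenance and a resume from where the stream left off:

```javascript
// The token of the last processed event lets the reopened stream continue
// without losing changes, as long as the oplog still covers that point.
let resumeToken;
let stream;

function startWatch(db) {
  stream = db.collection("collection").watch([], {
    fullDocument: "updateLookup",
    ...(resumeToken && { resumeAfter: resumeToken }),
  });
  stream.on("change", (event) => {
    resumeToken = event._id; // a change event's _id is its resume token
    handleEvent(event);      // hypothetical downstream handler
  });
}

async function stopWatch() {
  await stream.close(); // pause during maintenance; resumeToken is kept
}
```
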
null
[ "aggregation", "atlas-functions" ]
[ { "code": "_id: ObjectId\nlocations: Array\n location: Object (embedded)\n type: \"Point\"\n coordinates: Array\n [ longitude, latitude ]\n dates: Array\n start_date: Date\n note: String\n$geoNear aggregate()$geoNearstart_datestart_datequery$geoNear$match$geoNearquery$expr$gte$cmpcontext.functions.execute(\"location_date\", -80.130, 26.393, 10000, new Date(\"2023-02-01\"), new Date(\"2023-02-28\"))exports = async function(longitude, latitude, searchRadius, startDate) {\n var serviceName = \"mongodb-atlas\";\n // Update these to reflect your db/collection\n var dbName = \"<my_database>\"; // example, actual database name differs\n var collName = \"<my_collection>\"; // example, actual collection name differs\n\n // Get a collection from the context\n var collection = context.services.get(serviceName).db(dbName).collection(collName);\n\n // used to process results from the query\n var findResult;\n \n // set a date to the beginning of the year from the date sent in\n var yearStartDate = new Date(startDate)\n yearStartDate.setMonth(0);\n yearStartDate.setDate(1); \n yearStartDate.setHours(0);\n yearStartDate.setMinutes(0);\n yearStartDate.setSeconds(0);\n \n // \n // starting query here\n try { \n findResult = await collection.aggregate([\n { // begin pipeline for geoNear, always comes first for any aggregate pipeline\n \"$geoNear\": {\n \"near\": {\n \"type\": \"Point\", \n \"coordinates\": [ longitude, latitude ]\n }, // end near\n \"includeLocs\": \"locations.location\",\n \"distanceField\": \"locations.distance\",\n \"maxDistance\": searchRadius,\n \"spherical\": true\n } // end $geoNear\n }, // end 1st pipeline\n { // THIS DOESN'T SEEM EVALUATE THE DATE COMPARISONS \n \"$match\": {\n \"$expr\": {\n \"$gte\": [ \"locations.stella_event_dates.end_date\", startDate ]\n }\n }\n }, // end 2nd pipeline\n { // define fields we're excluding from being in the findResult\n \"$project\": {\n \"event_name\": 1,\n \"locations\": 1,\n } // end $project\n }, // end 2nd pipeline\n { // for now, helping with the testing, by confirming date passed as arg is valid\n \"$addFields\": {\n \"startDate\": startDate.toDateString()\n }\n } // end 3rd pipeline\n ]\n ); // end .toArray().then()\n } catch(err) {\n console.log(\"Error occurred while executing find:\", err.message);\n return { error: err.message };\n }\n // To call other named functions:\n // var result = context.functions.execute(\"function_name\", arg1, arg2);\n return { result: findResult };\n};\n", "text": "I have collection, as described below…in my collection:I have successfully gotten the $geoNear part of the aggregate() working well now, but am confused about how to do date comparisons to further filter down the results.My function arguments are longitude, latitude, max distance, and a date to be used. What I am wanting to do in the aggregate is simple,$geoNear, which is working fine for meI don’t seem to be able to do date comparisons, no matter where or what methods I’ve used. This includes:using the query field in the $geoNear aggregate stage (always the first in the pipeline)using $match as the next stage in the pipeline (after the $geoNear without a query field setI’ve tried using $expr with $gte as this is recommended in what I’ve read. 
I’ve also tried $cmp with no success.Here is the function and what I’m calling the function with, using the Atlas App Services Function Editor and Console:From Atlas App Services Function Editor Console, I am testing with:context.functions.execute(\"location_date\", -80.130, 26.393, 10000, new Date(\"2023-02-01\"), new Date(\"2023-02-28\"))What is the best way to compare dates in this function, (what am I doing wrong?)Thanks", "username": "Josh_Whitehouse" }, { "code": "", "text": "Managed to get this working using the $geoNear “query” field. Completely flummoxed as to why it wasn’t working for me the first try, likely a typo on my part, this is now a closed issue.", "username": "Josh_Whitehouse" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Date comparison in an Atlas Function using collection.aggregate() and $geoNear
2023-02-14T14:53:19.932Z
Date comparison in an Atlas Function using collection.aggregate() and $geoNear
1,366
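
Since the resolution is terse, a sketch of the working shape: the date condition moves into $geoNear's query field, which takes a plain find-style filter, so no $expr is needed (field path follows the schema at the top of the thread; adjust to the real layout):

```javascript
// First pipeline stage only; $geoNear must still come first.
{
  $geoNear: {
    near: { type: "Point", coordinates: [longitude, latitude] },
    distanceField: "locations.distance",
    includeLocs: "locations.location",
    maxDistance: searchRadius,
    spherical: true,
    // Standard query operators work on Date values here:
    query: { "locations.dates.start_date": { $gte: startDate } }
  }
}
```
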
null
[ "connecting", "atlas-cluster" ]
[ { "code": "Error: querySrv ETIMEOUT _mongodb._tcp.test.y0gmmey.mongodb.net\n at QueryReqWrap.onresolve [as oncomplete] (node:internal/dns/promises:251:17) {\n errno: undefined,\n code: 'ETIMEOUT',\n syscall: 'querySrv',\n hostname: '_mongodb._tcp.test.y0gmmey.mongodb.net'\n}\n", "text": "Sometimes it works just fine & most of the time it’s unable to connect for this timeout error.\nI am using WSL2 Ubuntu 20 with Windows 11 (latest).My IPadress active in the network access (MongoCloud)", "username": "IAmTahazzot" }, { "code": "", "text": "To solve this DNS error you might want to try to use faster DNS servers such Google’s 8.8.8.8 and 8.8.4.4", "username": "steevej" }, { "code": "MongoServerSelectionError: connection <monitor> to 52.74.65.240:27017 closed\n at Timeout._onTimeout (/home/tahazzot/code_playground/express/node_modules/mongodb/lib/sdam/topology.js:284:38)\n at listOnTimeout (node:internal/timers:569:17)\n at process.processTimers (node:internal/timers:512:7) {\n reason: TopologyDescription {\n type: 'ReplicaSetNoPrimary',\n servers: Map(3) {\n 'ac-m7ha51d-shard-00-00.y0gmmey.mongodb.net:27017' => [ServerDescription],\n 'ac-m7ha51d-shard-00-01.y0gmmey.mongodb.net:27017' => [ServerDescription],\n 'ac-m7ha51d-shard-00-02.y0gmmey.mongodb.net:27017' => [ServerDescription]\n },\n stale: false,\n compatible: true,\n heartbeatFrequencyMS: 10000,\n localThresholdMS: 15,\n setName: 'atlas-110lxu-shard-0',\n maxElectionId: null,\n maxSetVersion: null,\n commonWireVersion: 0,\n logicalSessionTimeoutMinutes: null\n },\n code: undefined,\n [Symbol(errorLabels)]: Set(0) {}\n}```", "text": " [Edited] (Avoid this reply, nothing important here)This error raise because of I didn’t property setup my IP address at Mongodb Cloud Network access.New error after setting DNS to 8.8.8.8 & …", "username": "IAmTahazzot" }, { "code": "", "text": "If you are using mongoose try upgrading or downgrading the version you are using.Otherwise try upgrading the mongodb driver.Go to your Atlas cluster and make sure you have a primary. It is very unlikely that you do not have one so the error ReplicaSetNoPrimary is most likely another error that is wrongly misinterpreted. A different driver version might give a better error message or even not err at all.", "username": "steevej" }, { "code": "", "text": " [SOLVED]Shift to Google DNS 8.8.8.8 and 8.8.4.4 Works.(Off topic but causing a new issue, just to remind.)\nThanks @steevej", "username": "IAmTahazzot" } ]
Why MongoDB connection gives ETIMEOUT error with ExpressJS?
2023-02-17T13:25:08.461Z
Why MongoDB connection gives ETIMEOUT error with ExpressJS?
1,310
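
When the WSL2 resolver cannot be changed, a non-SRV seed-list string sidesteps the querySrv lookup entirely. A sketch assembled from the host and replica set names visible in the error output (credentials elided):

```javascript
// mongodb:// (not mongodb+srv://) lists the members directly, so only
// ordinary A-record lookups are needed -- no SRV or TXT records.
const { MongoClient } = require("mongodb");

const uri =
  "mongodb://<user>:<password>@" +
  "ac-m7ha51d-shard-00-00.y0gmmey.mongodb.net:27017," +
  "ac-m7ha51d-shard-00-01.y0gmmey.mongodb.net:27017," +
  "ac-m7ha51d-shard-00-02.y0gmmey.mongodb.net:27017" +
  "/test?tls=true&replicaSet=atlas-110lxu-shard-0&authSource=admin";

await new MongoClient(uri).connect();
```
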
null
[ "aggregation", "time-series" ]
[ { "code": "// Get the latest event before a given date\ndb.devicemetrics.aggregate([\n {\n $match: {\n 'device.someMetadata': '70b28808-da2b-4623-ad83-6cba3b20b774',\n time: {\n $lt: ISODate('2023-01-18T07:00:00.000Z'),\n },\n someValue: { $ne: null },\n },\n },\n { $sort: { time: -1 } },\n {\n $group: {\n _id: '$device._id',\n lastEvent: { $first: '$$ROOT' },\n },\n },\n {\n $replaceRoot: { newRoot: '$lastEvent' },\n }\n]);\n\n", "text": "Hello. I have timeseries data as events coming in at random times. They are not ongoing metrics, but rather events. “This device went online.” “This device went offline.”I need to report on the number of actual transitions within a time range. Because there are occasionally same-state events, for example two “went online” events in a row, I need to “seed” the data with the state previous to the time range. If I have one event in my time range, I need to compare it to the previous in order to determine if something actually changed.I already have aggregation stages that remove same-state events.Is there a way to add “the latest, previous event” to the data in the pipeline without writing two queries? A $facet stage totally ruins performance.For “previous”, I’m currently trying something like this in a separate query, but it’s very slow on the millions of records:", "username": "Heath_Volmer" }, { "code": "", "text": "You could try a $lookup after the $group to locate the previous event. The $lookup will be using a $limit 1.It is technically a separate query but it is done within the same database access and using the same index.", "username": "steevej" }, { "code": "", "text": "Thanks! That is actually what I wound up doing yesterday! It is very slow on a large set of events.What I’m going to do is modify how the incoming events are stored, looking up previous when a new event comes in and ignore repeated non-changing states. Then I no longer need to get the previous event to determine when something actually changed at the start of my range and things are substantially faster.", "username": "Heath_Volmer" } ]
Aggregation to get events in timespan, plus the previous event
2023-02-16T16:31:39.318Z
Aggregation to get events in timespan, plus the previous event
855
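
For reference, a sketch of the suggested $lookup using the names from the question; the $limit after $sort keeps it to a single fetched document per group (assumes a supporting index such as { "device._id": 1, time: -1 }, which the thread does not state):

```javascript
// Appended after the $group stage of the main range query: fetch at most
// one event per device, the latest one strictly before the range start.
{
  $lookup: {
    from: "devicemetrics",
    let: { deviceId: "$_id" },
    pipeline: [
      { $match: { $expr: { $and: [
        { $eq: ["$device._id", "$$deviceId"] },
        { $lt: ["$time", ISODate("2023-01-18T07:00:00.000Z")] }
      ] } } },
      { $sort: { time: -1 } },
      { $limit: 1 }
    ],
    as: "previousEvent"
  }
}
```
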
null
[ "compass", "flutter", "flexible-sync" ]
[ { "code": "__realm_sync_62e01a823bc9c11cd8ae1edf", "text": "Hello there,I got a problem with my realm flexible sync.\nIt’s a bit weird, but I will try to describe my problem.So I use the Flutter realm sdk (version 1.0.0), and in my different devices everything seems to work correctly. Read/write is ok, everything is synced between the different devices so no problem here.But when I connect to atlas, I see no updates inside my online database.\nReally weird since the sync across devices work well.\nWhen using Compass, I saw another database, called __realm_sync_62e01a823bc9c11cd8ae1edf.\nThis one isn’t visible on atlas.\nInside I saw all the updates, so I said to myself, everything work well, the data is not lost so it’s ok !And then I tried Atlas Function, tried to connect to my database, and got no data since it’s not synced to the correct database.Do you have any idea about this problem ?Thanks a lot.", "username": "Geoffrey_SEBASTIANELLI" }, { "code": "", "text": "Hi @Geoffrey_SEBASTIANELLI, thanks for posting!For some background, Device Sync uses a component known as the “translator” for propagating changes from your devices back to Atlas and vice versa. If the translator encounters an issue, devices will still be able to sync with each other, but the data will not be persisted back to Atlas which is the behavior you’re currently experiencing.Taking a look at the logs for your app, it appears that the translator is running into an error related to history trimming which is preventing it from propagating changes. As far as I can tell, it’s been in this position for at least the past two weeks.Do you know when this behavior started? Or do you recall a period of time when you paused sync on this app? That might help narrow down how your app got into this state.In the meantime, if this is a development/test app the easiest way to recover from this state is to terminate + re-enable sync. Unfortunately, given that the translator has not been persisting changes during this time, terminating sync will cause all of the device data to be lost so we should figure out a way to backup the device data before doing so if it is sensitive.", "username": "Kiro_Morkos" }, { "code": "", "text": "Hi @Kiro_Morkos, thanks for you help !I got this problem for, I think, the past 2/3 month. But I discovered the problem for the Atlas function today.\nMy app isn’t in production so it’s not a problem to loose some test data.In the meantime, if this is a development/test app the easiest way to recover from this state is to terminate + re-enable sync.But I already did that during the past 2 month when I did some change to my schema.\nInside the ‘translator’, I got a collection named ‘unsynced_documents’. And this collection is currently empty, so imo I didn’t have any problem on my data/schema side. But I may be wrong ^^\nShould I do it again ?And I didn’t mention it, because I don’t know if it related, but I got this error in the logs for some times.\nBut I don’t have any error in the phone’s app. I don’t know if it related (if it’s not ignore this, I will create another ticket for it)\n\nimage1808×789 16.7 KB\n", "username": "Geoffrey_SEBASTIANELLI" }, { "code": "", "text": "Ok after a new terminate + re-enable sync, and some data deleted, everything seems ok.So the problem was surely on my data side. 
But imo, to not have any notification or log that tell you that you have a problem on your schema or data is a problem for me.\nDo you have any solution to monitor this kind of problem ?Thanks a lot for your help !", "username": "Geoffrey_SEBASTIANELLI" }, { "code": "", "text": "Glad to hear it’s working for you now!I got this error in the logs for some times. But I don’t have any error in the phone’s app.That error is transient, it usually just means that the device has disconnected from sync.So the problem was surely on my data sideThe issue actually was unrelated to your data, or your schema. Some operations require you to terminate sync, such as performing a breaking change which it sounds like you’ve needed to do recently. A side effect of terminating sync is that it wipes all of the metadata associated with the sync app, so in this case it was able to “reset” the translator to get it out of the bad state. We still need to investigate on our side how your translator got into that state, and we have planned some work to prevent similar issues in the future.In the meantime, one thing you can do to prevent the likelihood of this specific issue re-occurring is to increase your “Client max offline time”.", "username": "Kiro_Morkos" }, { "code": "", "text": "Once again, thanks a lot for your help !The issue actually was unrelated to your data, or your schema.Ok, I just re read all your message and the doc and I think I understand better now.\nBut still, why an error occurring on trimming old data from a device will block all device to sync ? Or I may have misunderstood again ^^We still need to investigate on our side how your translator got into that state, and we have planned some work to prevent similar issues in the future.That’s good !", "username": "Geoffrey_SEBASTIANELLI" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Realm does not sync to the correct online database
2023-02-11T17:31:41.614Z
Realm does not sync to the correct online database
1,423
null
[ "replication", "mongodb-shell" ]
[ { "code": "", "text": "We trying to initialize replica set based on snapshot file, all members were on same same datafiles level.I did make sure oplog on three nodes is same level, and did ran:\nrs.initiate(\n{\n_id: “rs.ucld”,\nversion: 1,\nmembers: [\n{ _id: 0, host : “1a01.internal.us:27017” },\n{ _id: 1, host : “1b01.internal.us:27017” },\n{ _id: 2, host : “1c01.internal.us:27017” }\n]\n}\n)Question: Data files were same, oplog record count was identical across three nodes, but when we initialize why a primary is dropping all data on other 2 nodes and trying to sync, we can’t do that because our datafiles size is 600GB, and it keep failing in middle even though we allowed. We trying to prevent a new “Initial Sync” by using snapshot files.Received the logs:\n2023-02-07T21:41:22.128Z I NETWORK [conn18] received client metadata from 172.16.20.29:59406 conn18: { driver: { name: “NetworkInterfaceTL”, version: “4.0.28” }, os: { type: “Linux”, name: “Ubuntu”, architecture: “x86_64”, version: “18.04” } }\n2023-02-07T21:41:22.131Z I REPL [replexec-1] Member 1c01.internal.us:27017 is now in state STARTUP2\n2023-02-07T21:41:22.132Z I ACCESS [conn18] Successfully authenticated as principal __system on local from client 172.16.20.29:59406\n2023-02-07T21:41:22.132Z I REPL [replication-0] Starting initial sync (attempt 1 of 10)\n2023-02-07T21:41:22.133Z I STORAGE [replication-0] Finishing collection drop for local.temp_oplog_buffer (5d742075-16fd-4f64-97f2-0f3e04d51200).\n2023-02-07T21:41:22.136Z I STORAGE [replication-0] createCollection: local.temp_oplog_buffer with generated UUID: 254e5557-4968-41b7-90ed-b27c139d3a90\n2023-02-07T21:41:23.143Z I REPL [replication-1] sync source candidate: 1c01.internal.us:27017\n2023-02-07T21:41:26.423Z I REPL [replication-1] Initial syncer oplog truncation finished in: 3280ms\n2023-02-07T21:41:26.423Z I STORAGE [replication-1] dropAllDatabasesExceptLocal 6", "username": "pruthvi_reddy" }, { "code": "mongodmongod", "text": "Hello @pruthvi_reddy ,I noticed in the logs you shared that you are using MongoDB v4.0. It is out of support since April 2022. The oldest supported series is v4.2, so I would suggest you to upgrade from v4.0 to v4.2. Please upgrade the cluster as a whole, not just the affected nodes, it is also recommended to take backups before executing the procedure.As per the Documentation - Restore a Replica Set from MongoDB BackupsYou cannot restore a single data set to three new mongod instances and then create a replica set. If you copy the data set to each mongod instance and then create the replica set, MongoDB will force the secondaries to perform an initial sync.The procedures in the documentation describe the correct and efficient ways to deploy a restored replica set.\nI would recommend you to go through the documentation and follow the advised steps.Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
replSet initial sync drop all databases
2023-02-07T22:17:12.746Z
replSet initial sync drop all databases
981
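
For completeness, a hedged mongosh sketch of the documented flow that avoids the forced initial sync: initiate on a single restored node first, then seed the others from that node's post-initiate data files before adding them (host names from the question):

```javascript
// Step 1: start mongod on 1a01 alone with the restored files, then:
rs.initiate({
  _id: "rs.ucld",
  members: [{ _id: 0, host: "1a01.internal.us:27017" }]
});

// Step 2: copying the original snapshot to 1b01/1c01 before rs.initiate
// does not work; those nodes lack the new replica set identity, so they
// fall back to a full initial sync. Instead, snapshot 1a01 again (it now
// carries the replica set's local db), restore that to the other hosts,
// start them, and only then:
rs.add("1b01.internal.us:27017");
rs.add("1c01.internal.us:27017");
```
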
null
[ "sharding" ]
[ { "code": "mongosmongosnumOpenConnsmongos{\"t\":{\"$date\":\"2023-02-09T11:14:55.664-05:00\"},\"s\":\"I\", \"c\":\"CONNPOOL\", \"id\":22567, \"ctx\":\"TaskExecutorPool-0\",\"msg\":\"Ending idle connection because the pool meets constraints\",\"attr\":{\"hostAndPort\":\"d-db1:27017\",\"numOpenConns\":8}}\n{\"t\":{\"$date\":\"2023-02-09T11:15:00.082-05:00\"},\"s\":\"I\", \"c\":\"CONNPOOL\", \"id\":22567, \"ctx\":\"TaskExecutorPool-0\",\"msg\":\"Ending idle connection because the pool meets constraints\",\"attr\":{\"hostAndPort\":\"h-db1:27017\",\"numOpenConns\":8}}\n{\"t\":{\"$date\":\"2023-02-09T11:15:00.086-05:00\"},\"s\":\"I\", \"c\":\"CONNPOOL\", \"id\":22567, \"ctx\":\"TaskExecutorPool-0\",\"msg\":\"Ending idle connection because the pool meets constraints\",\"attr\":{\"hostAndPort\":\"h-db1:27017\",\"numOpenConns\":7}}\n{\"t\":{\"$date\":\"2023-02-09T11:15:06.443-05:00\"},\"s\":\"I\", \"c\":\"CONNPOOL\", \"id\":22567, \"ctx\":\"TaskExecutorPool-0\",\"msg\":\"Ending idle connection because the pool meets constraints\",\"attr\":{\"hostAndPort\":\"d-db1:27017\",\"numOpenConns\":7}}\n{\"t\":{\"$date\":\"2023-02-09T11:15:06.463-05:00\"},\"s\":\"I\", \"c\":\"CONNPOOL\", \"id\":22567, \"ctx\":\"TaskExecutorPool-0\",\"msg\":\"Ending idle connection because the pool meets constraints\",\"attr\":{\"hostAndPort\":\"d-db1:27017\",\"numOpenConns\":6}}\n{\"t\":{\"$date\":\"2023-02-09T11:15:07.664-05:00\"},\"s\":\"I\", \"c\":\"CONNPOOL\", \"id\":22567, \"ctx\":\"TaskExecutorPool-0\",\"msg\":\"Ending idle connection because the pool meets constraints\",\"attr\":{\"hostAndPort\":\"d-db1:27017\",\"numOpenConns\":5}}\n", "text": "We have a sharded cluster with 3 shards, and about 15 app servers that run mongos.When I look at the mongos logs, I see a couple messages every second about connections being opened and then closed. The numOpenConns looks like it goes up to about 16… then it seems like the connections start to get ended. Is there any advantage of increasing the minimum pool size to something like 15 so the connections aren’t constantly created and ended? How would I do this in the mongos client?", "username": "AmitG" }, { "code": "--maxConns--listenBacklog-maxConns", "text": "Also, is there an easy way to determine the current values of --maxConns and --listenBacklog?I do not see what the default value for -maxConns is in the documentation: https://www.mongodb.com/docs/manual/reference/program/mongos/", "username": "AmitG" }, { "code": "Ending idle connection because the pool meets constraints--maxConns--listenBacklogmaxConnsdb.adminCommand(\n {\n getCmdLineOpts: 1\n }\n)\nSOMAXCONN", "text": "Hi @AmitG and welcome back to the MongoDbB community forum!!Ending idle connection because the pool meets constraintsAccording to the MongoDB specifications, an idle connection is the one which are present for longer time than the maxIdleTimeMS. 
Further as the specifications state, the connection being idle make them eligible for closing the connection, so I believe this is part of the connection pool maintenance.\nPlease refer to the documentation on Connection pooling and the setting to be followed for connection pooling for the application for more detailsAlso, is there an easy way to determine the current values of --maxConns and --listenBacklog?For maxConns, since this is a server parameter, you can find the value set for the specific deployment using the below command:while the default value for listenBacklog is SOMAXCONN .\nThe server ticket https://jira.mongodb.org/browse/SERVER-2554 mentions that the value is made configurable since version 3.4.6.Let us know if you have any more questions.Best Regards\nAasawari", "username": "Aasawari" } ]
Mongos connection pool understanding
2023-02-09T16:39:48.582Z
Mongos connection pool understanding
1,643
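As a concrete follow-up to the pool question above: on the mongos side, the floor for these shard connection pools is a server parameter. A mongosh sketch (the value 15 is just the figure discussed in the thread; runtime availability depends on your version):

```js
// Raise the minimum size of each TaskExecutorPool on the mongos so idle
// connections to the shards are not repeatedly created and ended.
db.adminCommand({ setParameter: 1, ShardingTaskExecutorPoolMinSize: 15 });

// Read the current value back to confirm.
db.adminCommand({ getParameter: 1, ShardingTaskExecutorPoolMinSize: 1 });
```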
null
[ "node-js", "data-modeling", "mongoose-odm" ]
[ { "code": "const mongoose = require(\"mongoose\");\nconst passport = require(\"passport\");\nconst passportLocalMongoose = require(\"passport-local-mongoose\");\nconst Schema = mongoose.Schema;\nconst { TaskSchema } = require(\"./task\");\n\nconst Session = new Schema({\n refreshToken: { type: String, default: \"\" }\n});\n\nconst UserSchema = new Schema({\n firstName: { type: String, default: \"\" },\n lastName: { type: String, default: \"\" },\n email: { type: String, default: \"\" },\n authStrategy: { type: String, default: \"local\" },\n refreshToken: { type: [Session] }\n},\n{\n discriminatorKey: \"role\"\n});\n\n// remove refresh token from the response\nUserSchema.set(\"toJSON\", {\n transform: (doc, ret, options) => {\n delete ret.refreshToken;\n return ret;\n }\n});\n\nUserSchema.plugin(passportLocalMongoose);\nconst User = mongoose.model(\"User\", UserSchema);\n\nconst ChildUser = User.discriminator(\"child\", {\n tasks: { type: [TaskSchema] }\n});\nconst ParentUser = User.discriminator(\"parent\", {\n children: { type: [ChildUser.schema] }\n});\n\nChildUser.schema.plugin(passportLocalMongoose);\nParentUser.schema.plugin(passportLocalMongoose);\n\nmodule.exports = { User, ParentUser, ChildUser };\n.plugin(passportLocalMongoose)ParentUserChildUserUserregister", "text": "Hi, everyone.I have this file:I want to know if I can make it so that the effects of the call .plugin(passportLocalMongoose) would also be applied on ParentUser and ChildUser. When I mouse-over on either of them on VS Code right now, it says it’s a regular Model rather than a “Passport Local” Model. User is a “Passport Local” Model, so I can call passport-auth functions like register on it, but that’s not the case for the two “discriminated” ones. I want to be able to call those functions on all three.So is that not possible?", "username": "Osman_Zakir" }, { "code": "UserregisterChildUser.schema.plugin(passportLocalMongoose);\nParentUser.schema.plugin(passportLocalMongoose);\n", "text": "Hi @Osman_Zakir,Welcome to the MongoDB Community forums User is a “Passport Local” Model, so I can call passport-auth functions like register on it, but that’s not the case for the two “discriminated” ones.When creating new models from an existing one using the discriminator method, it’s necessary to apply the plugin to the new models as you have already done and it would presumably work.When I mouse-over over either of them on VS Code right now, it says it’s a regular Model rather than a “Passport Local” ModelI would suggest you call the function and see if you encounter any error messages.However, for more information on plugins, please refer to the Mongoose documentationBest,\nKushagra", "username": "Kushagra_Kesav" } ]
Making Plugin Effects Extend to Model Discriminations?
2023-02-07T18:52:12.621Z
Making Plugin Effects Extend to Model Discriminations?
856
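One arrangement worth trying for the discriminator question above, sketched only and not verified against the poster's exact package versions: apply the plugin to an explicit child Schema before calling .discriminator(), so the resulting model is created from a schema that already carries the plugin's statics:

```js
const mongoose = require("mongoose");
const passportLocalMongoose = require("passport-local-mongoose");
const { Schema } = mongoose;

const UserSchema = new Schema(
  { firstName: String, lastName: String, email: String },
  { discriminatorKey: "role" }
);
UserSchema.plugin(passportLocalMongoose);
const User = mongoose.model("User", UserSchema);

// Build the child schema explicitly and apply the plugin *before*
// creating the discriminator model. Note: if the plugin adds paths that
// already exist on the base schema, this may need adjustment.
const ChildSchema = new Schema({ tasks: [{ title: String, status: String }] });
ChildSchema.plugin(passportLocalMongoose);
const ChildUser = User.discriminator("child", ChildSchema);

module.exports = { User, ChildUser };
```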
null
[ "connecting", "mongodb-shell", "atlas-cluster" ]
[ { "code": "", "text": "I am learning MongoDB. I successfully installed mongo shell. It was working without any issues.\nToday, I installed database tools and upgraded some of them.\nAfter that, when trying to connect to Atlas via mongosh to practice some commands via the terminal, I continue getting this error.MongoServerSelectionError: Hostname/IP does not match certificate’s altnames: Host: ac-g7js2yq-shard-00-01.4sdhy13.mongodb.net. is not in the cert’s altnames: DNS:*.mongodb.net, DNS:mongodb.netI’ve done a lot of research, but I cannot find a solution.\nThanks!", "username": "Paulina_Segovia" }, { "code": "", "text": "Can you ping the host?\nWhat type of connect string are you using? SRV or a different one\nCan you connect using a different network?", "username": "Ramachandra_Tummala" }, { "code": "openssl s_client -connect ac-g7js2yq-shard-00-01.4sdhy13.mongodb.net:27017\nCONNECTED(00000005)\ndepth=2 C = US, O = DigiCert Inc, OU = www.digicert.com, CN = DigiCert Global Root CA\nverify return:1\ndepth=1 C = US, O = DigiCert Inc, CN = DigiCert TLS RSA SHA256 2020 CA1\nverify return:1\ndepth=0 C = US, ST = New York, L = New York, O = \"MongoDB, Inc.\", CN = *.mongodb.net\nverify return:1\n---\nCertificate chain\n 0 s:/C=US/ST=New York/L=New York/O=MongoDB, Inc./CN=*.mongodb.net\n i:/C=US/O=DigiCert Inc/CN=DigiCert TLS RSA SHA256 2020 CA1\n 1 s:/C=US/O=DigiCert Inc/CN=DigiCert TLS RSA SHA256 2020 CA1\n i:/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Global Root CA\n---\nServer certificate\n-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQCFLX3LMX6chcbkMeWb0xIjANBgkqhkiG9w0BAQsFADBP\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMSkwJwYDVQQDEyBE\naWdpQ2VydCBUTFMgUlNBIFNIQTI1NiAyMDIwIENBMTAeFw0yMjA2MTMwMDAwMDBa\nFw0yMzA2MTMyMzU5NTlaMGMxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhOZXcgWW9y\nazERMA8GA1UEBxMITmV3IFlvcmsxFjAUBgNVBAoTDU1vbmdvREIsIEluYy4xFjAU\nBgNVBAMMDSoubW9uZ29kYi5uZXQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDSd/KqVkULvyuNW8nAo/5Vy5BbNZv6P5oQC+tDGBj4gwa5ed9mOA79EC/R\n1/bc7n927sNa3s8P8Pg64oZzgL87IQnMcY7uD1j2I5rdHkBaU1ahrPAi7JtQzzev\nKj2v6WLhy/QsyRbALMTKqjvKogAnNqxAqMFTeX7YSi9XkZfWmTW34gP48/uxb3dn\nVeoDgbkMy3SyjpzZA7IA7HKRR/wx5lt70wshKi1egeAJIq0t6Jq3FJ9XNoG6h4OP\nr6iK/WcfcXdhrVlldplEgH39YwSKaV9ZQZOOJAmuRfpEaxOhRcN/6iLsE5EOkqnb\nu9w7EKfUG/XPYjG7J8A2Rf6wjz7DAgMBAAGjggN8MIIDeDAfBgNVHSMEGDAWgBS3\na6LqqKqEjHnqtNoPmLLFlXa59DAdBgNVHQ4EFgQUSe+vfcpbLT26AyISTvvNzpSv\nf2AwJQYDVR0RBB4wHIINKi5tb25nb2RiLm5ldIILbW9uZ29kYi5uZXQwDgYDVR0P\nAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjCBjwYDVR0f\nBIGHMIGEMECgPqA8hjpodHRwOi8vY3JsMy5kaWdpY2VydC5jb20vRGlnaUNlcnRU\nTFNSU0FTSEEyNTYyMDIwQ0ExLTQuY3JsMECgPqA8hjpodHRwOi8vY3JsNC5kaWdp\nY2VydC5jb20vRGlnaUNlcnRUTFNSU0FTSEEyNTYyMDIwQ0ExLTQuY3JsMD4GA1Ud\nIAQ3MDUwMwYGZ4EMAQICMCkwJwYIKwYBBQUHAgEWG2h0dHA6Ly93d3cuZGlnaWNl\ncnQuY29tL0NQUzB/BggrBgEFBQcBAQRzMHEwJAYIKwYBBQUHMAGGGGh0dHA6Ly9v\nY3NwLmRpZ2ljZXJ0LmNvbTBJBggrBgEFBQcwAoY9aHR0cDovL2NhY2VydHMuZGln\naWNlcnQuY29tL0RpZ2lDZXJ0VExTUlNBU0hBMjU2MjAyMENBMS0xLmNydDAJBgNV\nHRMEAjAAMIIBgAYKKwYBBAHWeQIEAgSCAXAEggFsAWoAdwDoPtDaPvUGNTLnVyi8\niWvJA9PL0RFr7Otp4Xd9bQa9bgAAAYFdZgNUAAAEAwBIMEYCIQDiiyQbHf1gftrL\nD7rx50O5G4CdPu73id+8UERqUseWwgIhAIaykgz2T+ryCc/Zur3pQZtc/SMi3csk\nHGasr7qz9mZuAHYANc8ZG7+xbFe/D61MbULLu7YnICZR6j/hKu+oA8M71kwAAAGB\nXWYDigAABAMARzBFAiBRSYirK2Zx960Y6/C+nnS9zTFQgw39y4kbiJTxZTtjGwIh\nANA0TpR1ZeobPj7uoaV6KDos4AjnErHsLAUEHryNvXbBAHcAtz77JN+cTbp18jnF\nulj0bF38Qs96nzXEnh0JgSXttJkAAAGBXWYDigAABAMASDBGAiEApjEpHe0f0qHa\nFsMRVrycXDuO0GsVdi/I1o35E/Z4qSACIQCI0of/vbhcD8+Ap5H/AVmgKm/z8Wyy\nIHWOUCtlCHo3LTANBgkq
hkiG9w0BAQsFAAOCAQEAJnMCEMdGXTh6SX7gh+Ikn04I\nMhw0TTFFBK7b3NmsFHZ0PYmseXtWGHN70bdWXDftGqJMuomBz5OIDVp8XTUHWU4b\nOxr3LdLwnN3/80mrRsdOvRTjFG7uyndIBGel6W7h/T8FWdZ65CDrP6NvABArAamW\n7aE5peAygQwDgsHIhPDjQJWrh7sT7VuIeC5kk/iWobOLzWoCs/S3yKJWT8QP2mkV\nM96lOjAO1hPebE/toaS3oTvSs6acCmF+8VtxcwnaoZtkIcExLgS1bfAkVzkZhwam\nnU2IEe+Y1ylALlNvmj64rm9RvO6Vo7wrORyRko+mih/VGO7gtVgf3R2zc9EoWQ==\n-----END CERTIFICATE-----\nsubject=/C=US/ST=New York/L=New York/O=MongoDB, Inc./CN=*.mongodb.net\nissuer=/C=US/O=DigiCert Inc/CN=DigiCert TLS RSA SHA256 2020 CA1\n---\nNo client certificate CA names sent\nServer Temp Key: ECDH, X25519, 253 bits\n---\nSSL handshake has read 3575 bytes and written 301 bytes\n---\nNew, TLSv1/SSLv3, Cipher is ECDHE-RSA-AES128-GCM-SHA256\nServer public key is 2048 bit\nSecure Renegotiation IS supported\nCompression: NONE\nExpansion: NONE\nNo ALPN negotiated\nSSL-Session:\n Protocol : TLSv1.2\n Cipher : ECDHE-RSA-AES128-GCM-SHA256\n Session-ID: 2A911177314B845822BB8C721659ECF6C44DDDDB8040D4852FB9A10F749EC7B7\n Session-ID-ctx: \n Master-Key: 7C1C10D9AF144863FA999ED31215B906D60D5BE432D1B45A74C490525BFE0C01AC714D6407716C5457866FC107EEC966\n TLS session ticket:\n 0000 - bf 0f b1 9b a1 b5 73 62-70 58 6b 23 14 0d 8c 87 ......sbpXk#....\n 0010 - 9a 05 2f b3 e9 9d 99 b3-2c 8d 77 5b 24 8b 6e 7f ../.....,.w[$.n.\n 0020 - f2 83 e1 a8 5a 48 88 2d-5b d0 a7 99 23 55 dc 18 ....ZH.-[...#U..\n 0030 - 5b 88 a3 4c 0e 92 e2 96-c6 62 f9 a6 4a d2 a7 ca [..L.....b..J...\n 0040 - 02 fd dc 35 13 8c 4c d8-15 f8 65 1f 52 22 1d 7d ...5..L...e.R\".}\n 0050 - 12 88 77 00 df 82 22 bc-08 ad 63 f5 47 54 cb 11 ..w...\"...c.GT..\n 0060 - 65 cd 23 e2 94 a4 96 23-40 97 54 e0 34 69 04 08 e.#....#@.T.4i..\n 0070 - 76 8d 2d 88 cf fe d3 0e-e9 ed f1 7b f1 50 52 d5 v.-........{.PR.\n 0080 - 28 (\n\n Start Time: 1676439593\n Timeout : 7200 (sec)\n Verify return code: 0 (ok)\n---\n", "text": "FYI, I can connect to the hostname with openssl:", "username": "Kobe_W" }, { "code": "", "text": "The common name is actually valid, so I guess it's more of an issue with how you connect to it.", "username": "Kobe_W" }, { "code": "", "text": "I'm encountering the exact same problem, were you able to figure it out?", "username": "Matthew_Azada" }, { "code": "", "text": "Thank you for the comments!\nJust going to the basics to try to figure it out.\nI have mongosh --version 1.7.1 installed.Then, when I type only mongosh I get this error.\nMongoNetworkError: connect ECONNREFUSED 127.0.0.1:27017\nYesterday, I was trying to connect via Atlas to the mongo shell using the connection string\nand I get this error\nMongoServerSelectionError: Hostname/IP does not match certificate's altnames: Host: ac-g7js2yq-shard-00-02.4sdhy13.mongodb.net. is not in the cert's altnames: DNS:*.mongodb.net, DNS:mongodb.net\nI have read that I could solve this problem using mongod, but honestly, I don't know yet how that works or what command I should use in the terminal. Would the mongod help? How do I use it?\nThanks!", "username": "Paulina_Segovia" }, { "code": "", "text": "Not yet!\nI read thisI haven't tried it yet, I'll do it when I have some hours to dedicate to this, but it might be helpful to your issue.\nPlease, let me know if it works ", "username": "Paulina_Segovia" }, { "code": "", "text": "you can very well remove it\nWOW WOW WOW.\nYou may remove it IF AND ONLY IF there is NO mongod currently running and listening on port 27017.", "username": "steevej" }, { "code": "", "text": "Yes, thanks, I read your reply in the other thread warning about it.\nI haven't tried this solution yet.", "username": "Paulina_Segovia" }, { "code": "brew uninstall mongodb-community\nbrew uninstall mongosh\nwget https://raw.githubusercontent.com/Homebrew/homebrew-core/4519776bc4563548dcd8c8639ac7e073b107c381/Formula/mongosh.rb\nbrew install ./mongosh.rb\n", "text": "Hey @Paulina_Segovia I just figured out my issue and I hope it helps you with yours.\nIf you downgrade your version of mongosh to 1.6.2, you will be able to connect. For some reason the latest version of mongosh gives the error. After downgrading, I was able to connect no problem.", "username": "Matthew_Azada" }, { "code": "", "text": "Hi Matthew.\nI was just getting ready to spend hours on the computer trying to fix my issue, and I read your message.\nThank you…It worked!\nSee you around in this learning process!", "username": "Paulina_Segovia" }, { "code": "", "text": "Great to hear! Good luck as well!", "username": "Matthew_Azada" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MongoServerSelectionError: Hostname/IP does not match certificate's altnames
2023-02-14T23:34:13.021Z
MongoServerSelectionError: Hostname/IP does not match certificate&rsquo;s altnames
5,288
null
[ "sharding" ]
[ { "code": "", "text": "Hi,Due to a planned migration of date from an old system, our data is going to grow.\nCurrently we are not using shards, and all our data is using one server (actually 3 replicas, but no sharding).\nI was wondering if there is a rule of thumb on the size limit of data on a single shard/server.\nIn other words - what is the max size of my data that requires using more than one server and using shards?Thanks,\nTamar", "username": "Tamar_Nirenberg" }, { "code": "", "text": "Hi @Tamar_Nirenberg,\nI remember up to 2 TB you can mantain a repl set, but from the production notes, is a bad way to mantain a replica set only in one node!!!\nin attachment, the link with the details from the documentation:\nhttps://www.mongodb.com/docs/manual/core/replica-set-architecture-geographically-distributed/#:~:text=While%20replica%20sets,centers%20is%20unavailable.If I retrive the post with this information, i Will post It!Regards", "username": "Fabio_Ramohitaj" }, { "code": "", "text": "Byan old systemandis using one server (actually 3 replicasDo I understand correctly that you are running 3 instances of the replica set on the same machine? Or do you have 3 machines.Except for experimentation, there is not point in running 3 instances on the same hardware. This is a single point of failure. If you can live with a single point of failure you are better off with 1 mongod instance.", "username": "steevej" }, { "code": "", "text": "Sorry, I guess I did not make my self clear.\nI have 1 Primary, and two standby’s, each running on a different server.They all have, of course, the same amount of data.My question is how large can the data be before I need to consider sharding the data.Thanks,\nTamar", "username": "Tamar_Nirenberg" }, { "code": "", "text": "Sharding is powerful but also challenging to maintain. , generally you can always try vertical scaling first until you get to the ceiling before you start horizontal scaling (i believe this is also recommended by many books/sites). This is more a thing to ask your team than to mongo team.", "username": "Kobe_W" } ]
Decide when to use sharding
2023-01-12T10:27:55.233Z
Decide when to use sharding
930
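To put numbers behind the "vertical scaling first" advice in this thread, a small mongosh sketch for checking how big the data actually is before deciding (the database name is a placeholder):

```js
// dataSize is the uncompressed size; storageSize is the size on disk.
const stats = db.getSiblingDB("mydb").stats();
printjson({
  dataSize: stats.dataSize,
  storageSize: stats.storageSize,
  indexSize: stats.indexSize,
});
```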
null
[ "crud", "golang" ]
[ { "code": "_, err = s.Collection.UpdateOne(ctx, bson.M{\n\t\"month\": primitive.NewDateTimeFromTime(GetFirstOFMonth()),\n\t\"projectId\": projId,\n}, bson.D{\n\t{Key: \"$inc\", Value: bson.D{{Key: \"count\", Value: 1}}},\n}, options.Update().SetUpsert(true))\nif err != nil {\n\tlog.Println(err)\n}\n", "text": "Hi,I’m just getting started with golang and MongoDB and have run into a bit of a issue with upserting using $inc to increment a value in the document. The code I’m using is…So if there is a document for this month I want it to increment the count field and if no document exists for the month it will create a new document. The insert part works fine but the update doesn’t increment the count field. If I change the value of the count increment is seemt to work on the first run but not subsequent ones. Anyone know what I’m doing wrong?", "username": "James_Cooke" }, { "code": "projectId\t// Upsert document with $inc\n\tprojId, _ := primitive.ObjectIDFromHex(\"63eefe67ef2f5959294bc653\")\n\t_, err = collection.UpdateOne(ctx, bson.M{\n\t\t\"month\": primitive.NewDateTimeFromTime(GetFirstOFMonth()),\n\t\t\"projectId\": projId,\n\t}, bson.D{\n\t\t{Key: \"$inc\", Value: bson.D{{Key: \"count\", Value: 1}}},\n\t}, options.Update().SetUpsert(true))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n➜ go run update.go\n2023/02/17 09:48:05 map[_id:ObjectID(\"63eefe6728f1bf90d6ad7b16\") count:2 month:1675189800000 projectId:ObjectID(\"63eefe67ef2f5959294bc653\")]\n➜ go run update.go\n2023/02/17 09:48:09 map[_id:ObjectID(\"63eefe6728f1bf90d6ad7b16\") count:3 month:1675189800000 projectId:ObjectID(\"63eefe67ef2f5959294bc653\")]\n", "text": "Hi @James_Cooke,Welcome to the MongoDB community forums I suspect your update operation is not finding any documents to update on subsequent runs, which is why the count field is not being incremented. This could be because the query filter you’re using is not matching the documents you expect.I tried the same code mentioned above by passing the value of projectId and it worked fine. Sharing the code for reference:The code returned the following output, and it’s working as expected:If this is not the result you’re seeing, could you please provide some example documents and more details on the output you’re seeing? It will be helpful to narrow down the specific issues.Let me know if you have any further questions.Best,\nKushagra", "username": "Kushagra_Kesav" } ]
Golang upsert with $inc issue
2023-02-12T16:34:48.380Z
Golang upsert with $inc issue
1,018
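One way to test the "filter not matching" suspicion from the answer above, sketched with the Node.js driver for consistency with the other examples in this document (field names are taken from the thread):

```js
// If matchedCount is 0 on every run after the first, the filter values
// (e.g. the computed first-of-month date) differ between runs, so each
// run upserts a brand-new document instead of incrementing the old one.
async function upsertMonthlyCount(collection, projectId, firstOfMonth) {
  const res = await collection.updateOne(
    { month: firstOfMonth, projectId },
    { $inc: { count: 1 } },
    { upsert: true }
  );
  console.log(res.matchedCount, res.modifiedCount, res.upsertedId);
}
```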
null
[ "data-modeling", "replication", "sharding" ]
[ { "code": "", "text": "Hi guysWhen I execute “show dbs” in mongoshell into a sharded cluster, the size of the database is the real data size (sum of each primary nodes) of the database or is it the size in all shard nodes?Thanks", "username": "Oscar_Cervantes" }, { "code": "show dbslistDatabasesshow dbslistDatabases[direct: mongos] test> db.adminCommand({listDatabases:1, filter:{name:/test/}})\n{\n databases: [\n {\n name: 'test',\n sizeOnDisk: 69632,\n empty: false,\n shards: { shard01: 57344, shard02: 12288 }\n }\n ],\n totalSize: 69632,\n totalSizeMb: 0,\n ok: 1,\n operationTime: Timestamp({ t: 1676495653, i: 1 }),\n '$clusterTime': {\n clusterTime: Timestamp({ t: 1676495653, i: 1 }),\n signature: {\n hash: Binary(Buffer.from(\"0000000000000000000000000000000000000000\", \"hex\"), 0),\n keyId: Long(\"0\")\n }\n }\n}\n", "text": "Hi @Oscar_Cervantes,The show dbs shell command corresponds to the listDatabases admin command, which shows the size on disk for each database. The size for sharded collections is the sum of one member for each shard, and does not include redundant storage usage across all members of the shard replica set.If you want to see more details than show dbs, try running the listDatabases command:Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Command "show dbs" from mongoshell in a sharded cluster
2023-02-15T20:25:15.057Z
Command &ldquo;show dbs&rdquo; from mongoshell in a sharded cluster
1,014
null
[ "aggregation", "transactions", "php" ]
[ { "code": "", "text": "php version:7.4\nlaravel veresion:7.0\nmongodb version:4.2.1\nQPS:100~200\nError:\nException: MongoDB\\Driver\\Exception\\BulkWriteException(code:225): Cannot start transaction 40 on session 86f7dbe4-3142-40c3-9618-411755a9cb67 - abWnSN9iDr/ue5yq5STQ36kflt2oNBjuZhxOcaTuAVg= because a newer transaction 48 has already started. at /data/www/questions/vendor/mongodb/mongodb/src/Operation/InsertOne.php:134\nException Trace:#0 /data/www/questions/vendor/mongodb/mongodb/src/Operation/InsertOne.php(134): MongoDB\\Driver\\Server->executeBulkWrite()\n#1 /data/www/questions/vendor/mongodb/mongodb/src/Collection.php(931): MongoDB\\Operation\\InsertOne->execute()\n#2 [internal function]: MongoDB\\Collection->insertOne()\n#3 /data/www/questions/vendor/jenssegers/mongodb/src/Jenssegers/Mongodb/Collection.php(42): call_user_func_array()\n#4 /data/www/questions/vendor/jenssegers/mongodb/src/Jenssegers/Mongodb/Query/Builder.php(580): Jenssegers\\Mongodb\\Collection->__call()\n#5 /data/www/questions/vendor/laravel/framework/src/Illuminate/Database/Eloquent/Builder.php(1422): Jenssegers\\Mongodb\\Query\\Builder->insertGetId()\n#6 /data/www/questions/vendor/jenssegers/mongodb/src/Jenssegers/Mongodb/Eloquent/Builder.php(78): Illuminate\\Database\\Eloquent\\Builder->__call()\n#7 /data/www/questions/vendor/laravel/framework/src/Illuminate/Database/Eloquent/Model.php(902): Jenssegers\\Mongodb\\Eloquent\\Builder->insertGetId()\n#8 /data/www/questions/vendor/laravel/framework/src/Illuminate/Database/Eloquent/Model.php(867): Illuminate\\Database\\Eloquent\\Model->insertAndSetId()\n#9 /data/www/questions/vendor/laravel/framework/src/Illuminate/Database/Eloquent/Model.php(730): Illuminate\\Database\\Eloquent\\Model->performInsert()\n#10 /data/www/questions/vendor/laravel/framework/src/Illuminate/Database/Eloquent/Builder.php(776): Illuminate\\Database\\Eloquent\\Model->save()\n#11 /data/www/questions/vendor/laravel/framework/src/Illuminate/Support/helpers.php(433): Illuminate\\Database\\Eloquent\\Builder->Illuminate\\Database\\Eloquent{closure}()\n#12 /data/www/questions/vendor/laravel/framework/src/Illuminate/Database/Eloquent/Builder.php(777): tap()\n#13 /data/www/questions/app/Http/Controllers/Api/Question/V153/QuestionsController.php(757): Illuminate\\Database\\Eloquent\\Builder->create()", "username": "yong_huang" }, { "code": "jenssegers/laravel-mongodbmongodb/mongodbcomposer show mongodb/mongodbext-mongodbphp --ri mongodbjenssegers/laravel-mongodbcomposer show laravel/mongodbretryWritestruefalse", "text": "Hi,from the trace I can only see that you’re creating a new object through the query builder, but nothing else that would indicate that transactions are in use. Since you are using Laravel 7 I assume you’re also using an older version of the jenssegers/laravel-mongodb package, which doesn’t explicitly support transactions. When did you first observe the error? Did it start appearing as concurrency increased (i.e. QPS started getting higher), was it after a version update of packages, or has it always been there?To better narrow down the issue, please provide the following information:Depending on driver version, the retryWrites connection option may help in finding out the cause. This defaults to true on newer driver versions, so you could test explicitly setting it to false in your connection string to see if the error persists. Note that this is not a solution to fix the problem, but rather a way for us to find out what’s going on. 
The feature retries certain write operations once in case of specific errors to reduce the amount of write errors you receive. More information on this feature can be found in the documentation.Thanks,\nAndreas", "username": "Andreas_Braun" }, { "code": "", "text": "name : mongodb/mongodb\ndescrip. : MongoDB driver library\nkeywords : database, driver, mongodb, persistence\nversions : * 1.8.0mongodb\nMongoDB support => enabled\nMongoDB extension version => 1.9.0\nMongoDB extension stability => stable\nlibbson bundled version => 1.17.2\nlibmongoc bundled version => 1.17.2Not used composer show laravel/mongodb\nname : jenssegers/mongodb\ndescrip. : A MongoDB based Eloquent model and Query builder for Laravel (Moloquent)\nkeywords : database, eloquent, laravel, model, moloquent, mongo, mongodb\nversions : * v3.7.2Stand-alone 3-nodeI have no way to test the recurrence of online problems. Pressure test 200 did not recur.Thanks,\nYong", "username": "yong_huang" }, { "code": "", "text": "I have been troubled for a long time, the project test, the transaction is not effective, I don’t understand why the session is generated?Thanks,\nYong", "username": "yong_huang" } ]
Concurrency caused session problems, and no solution was found
2023-02-11T09:03:28.832Z
Concurrency caused session problems, and no solution was found
1,615
null
[ "aggregation", "queries" ]
[ { "code": "", "text": "Hi Team, I am using V 4.4.,\nI had collection, which contains the fields of temperature values, createdOn and state Id, I have to get the records where the tmp values are stable for more than 1 hours, Is there a way to get the result. Any help in this matter. For example if tmp value 12.0 degress is registered at 2022-12-14T00:00:03.086Z and the same tmp value is still stable upto at 2022-12-14T01:00:03.086Z those records have to get . Hope get my need, Kindly do needfull in this matter. Following is the attachment for your reference.home_out.csv (70.9 KB)", "username": "MERUGUPALA_RAMES" }, { "code": "", "text": "Please provide sample documents in JSON so that we can cut-n-paste directly in our system.", "username": "steevej" }, { "code": "", "text": "Hi Steevej,\nPlease find the attached json file as per the requirement.\nhome_out.json (318.9 KB)", "username": "MERUGUPALA_RAMES" }, { "code": "collection_name = \"sensors\" \n\nstable_period = { \"amount\" : 1 , \"unit\" : \"hour\" }\n_unwind_tmp = { \"$unwind\" : \"$tmp\" }\n/* note that I use the variable stable_period to set arguments\n to the $dateAdd so the field names in stable_period must match\n the one used by $dateAdd\n*/\n_set_end = { \"$set\" : {\n\t\"_end\" : { \"$dateAdd\" : {\n\t\t\"startDate\" : \"$createdOn\" ,\n\t\t...stable_period\n\t} }\n} }\n_lookup = { \"$lookup\" : {\n\t\"from\" : collection_name ,\n\t\"as\" : \"_period\" ,\n\t\"let\" : {\n\t\t\"start\" : \"$createdOn\" ,\n\t\t\"end\" : \"$_end\" ,\n\t\t\"tmp\" : \"$tmp\"\n\t} ,\n\t\"pipeline\" : [ \n\t\t/* the $set here is optional, but I like to keep temporary values for debugging \n\t\t it was added because the $match after did not work the first time\n\t\t so I needed to see if I was testing with the appropriate values\n */\n\t\t{ \"$set\" : {\n\t\t\t\"_debug_start\" : \"$$start\" ,\n\t\t\t\"_debug_end\" : \"$$end\" ,\n\t\t\t\"_debug_tmp\" : \"$$tmp\"\n\t\t} } ,\n\t\t{ \"$match\" : { \"$expr\" : { \"$and\" : [\n\t\t\t{ \"$gt\" : [ \"$createdOn\" , \"$$start\" ] } ,\n\t\t\t{ \"$lte\" : [ \"$createdOn\" , \"$$end\" ] }\n\t\t] } } } ,\n\t\t_unwind_tmp ,\n\t\t{ \"$facet\" : {\n\t\t\t\"_same_tmp\" : [ { \"$match\" : {\n\t\t\t\t\"$expr\" : { \"$eq\" : [ \"$tmp\" , \"$$tmp\" ] }\n\t\t\t} } ] ,\n\t\t\t\"_diff_tmp\" : [\n\t\t\t\t{ \"$match\" : {\n\t\t\t\t\t\"$expr\" : { \"$ne\" : [ \"$tmp\" ,\"$$tmp\" ] }\n\t\t\t\t} } ,\n\t\t\t\t{ \"$limit\" : 1 }\n\t\t\t]\n\t\t} }\n\t]\n} }\n_unwind_period = { \"$unwind\" : \"$_period\" }\n_match_stable = { \"$match\" : {\n \"_period._diff_tmp.0\" : { \"$exists\" : false }\n} }\n[ _unwind_tmp , _set_end , _lookup , _unwind_period , _match_stable ]\n{ _id: ObjectId(\"639ff4b553ba7b7edfc8f922\"),\n stateId: 795,\n tmp: 24.93,\n createdOn: 2022-12-14T00:40:42.069Z,\n _end: 2022-12-14T01:40:42.069Z,\n _period: { _same_tmp: [], _diff_tmp: [] } }\n", "text": "This is my third attempt at this. I cancelled the previous replies I was going to post because I was finding fundamental flaws that would make each of my previous solutions to fail. Now I got it. It is rough on the edge and would need clean-up. But keeping some of the fields until clean-up helps debugging and understanding.The approach is that for each reading, I $lookup for all reading that falls within the following 1 hour. 
Using $facet, I split the readings withing the same hour into same tmp and different tmp.Start with some variable to make the code cleaner:First stage is $unwind as tmp is an array.Then, we set a temporary field in the document that is the end of the stable period.The meat of the whole thing is a $lookup into itself to find other readings that fall withing the stable period (1 hour is this case). Basically we collect all reading withing the stable period and use $facet to split the reading into the same tmp and different tmp.Then we $unwind _period because we know there is only 1 document with the 2 facets and it is easier to work with.The final $match for stable periods.Running the pipelineprovided the following only 1-hour stable periodThanks for the challenge.", "username": "steevej" }, { "code": "$dateAdd", "text": "$dateAddHi Steevej,\nThanks for your effort that you had kept on this. I can understand the effors to bring the results. I has been working randomly on this and\nI was test in my environment and get to know the code is working in 5.0 version where in my environment i amd using 4.4, is there a way to get this code in 4.4. It will be more helpfull.Thanks&Regards,\nRamesh.", "username": "MERUGUPALA_RAMES" }, { "code": "", "text": "According to Quick Start: BSON Data Types - Date | MongoDB, the precision is milliseconds. This means that a $dateAdd of 1 hour, is equivalent to an $add of 1 hour * 60 minutes * 60 seconds * 1000 miliseconds.", "username": "steevej" } ]
Get the tmp values which are stable for one hours
2022-12-16T18:14:05.641Z
Get the tmp values which are stable for one hours
2,063
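To make steevej's closing point concrete, here is a sketch of the $set stage rewritten for v4.4, relying on the documented behavior that $add of a date and a number of milliseconds yields a shifted date:

```js
// v4.4-compatible replacement for the $dateAdd-based _set_end stage.
const ONE_HOUR_MS = 60 * 60 * 1000; // 3600000 milliseconds
const _set_end = {
  $set: { _end: { $add: ["$createdOn", ONE_HOUR_MS] } },
};
```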
null
[ "swift" ]
[ { "code": "class Dog {\n var name: String\n var age: Int\n var toys: List<Toy>\n}\n\nclass Toy {\n var brand: String\n var name: String\n}\nlet keyPath: PartialKeyPath<Dog> = [\n \\Dog.toys.brand\n]\n", "text": "If I’d like to observe Realm changes by observing Dogs Realm Results, filtering by using Keypaths I am not allowed to do:(I’d like to get observers when any toy’s brand name for a dog changes)\nRealm documentation seems wrong, because ‘toys’ which is a List does not have a ‘brand’.Code, content, tutorials, programs and community to enable developers of all skill levels on the MongoDB Data Platform. Join or follow us here to learn more!", "username": "Patryk_Sredzinski" }, { "code": "resultslet results = realm.objects(Company.self)\nlet notificationToken = results.observe(keyPaths: [\\Company.orders.status]) { changes in\n\t// update UI\n}\n[\\Company.orders.status][\"orders.status\"]List", "text": "What specifically do you feel is not possible?Objects in List can certainly be observed for changes - the documentation you linked is a tad out of date but it’s also covered in the SDK DocsThat being said, in the link you provided to that Realm article, I don’t believe this code is valid as I think the partial key path only works on single objects, not on a collection, results in this case.which means that [\\Company.orders.status] should be [\"orders.status\"] if orders is a List PropertyIt’s possible I have the syntax wrong as well so probably need another set of eyes on this.", "username": "Jay" }, { "code": "", "text": "What I mean is that we are not allowed to use Swift’s PartialKeyPath to create an observer if we want to observe specific property on a child.If we use String then it works, but I find PartialKeyPaths more developer friendlyIn RealmSwift code, in observe method documentation it claims that it is possible to do what I proposed /.Dog.toys.brandBut Swifts compiler would not allow it. I am thinking of there is an another solution to handle this", "username": "Patryk_Sredzinski1" }, { "code": "let notificationToken = someDog.observe(keyPaths: [\\Dog.name]) { changes in\n\t// update UI\n}\nList", "text": "Understood.create an observer if we want to observe specific property on a child.As mentioned, you can observe a property on a child of a single object.but using PartialKeyPaths on a List appears to be broken? Not working as intended? Error in documentation?We’ll have to see if a Realmer puts their eyes on this thread - if not, a bug report may be in order.", "username": "Jay" } ]
Swift PartialKeyPath for observers not supported
2023-02-16T14:02:16.222Z
Swift PartialKeyPath for observers not supported
832
null
[ "aggregation" ]
[ { "code": "[\n {\n \"message_id\": 3,\n \"message_status\": 2,\n \"created_at\": \"2023-02-06T22:15:08.696+00:00\"\n },\n {\n \"message_id\": 3,\n \"message_status\": 1,\n \"created_at\": \"2023-02-05T22:28:08.696+00:00\"\n },\n {\n \"message_id\": 2,\n \"message_status\": 1,\n \"created_at\": \"2023-02-06T22:15:08.696+00:00\"\n },\n {\n \"message_id\": 1,\n \"message_status\": 2,\n \"created_at\": \"2023-02-06T22:15:08.696+00:00\"\n },\n {\n \"message_id\": 1,\n \"message_status\": 1,\n \"created_at\": \"2023-02-05T22:28:08.696+00:00\"\n }\n]\n[\n {\n \"message_status\": 1 \n \"count\" : 1 # message_id: 2 has lastest status 1\n },\n {\n\n \"message_status\": 2\n \"count\": 2 # message_id: 3 and 1 have lastest status 2\n }\n]\n{ $group: { _id: \"$message_id\", latest_status: {$last: \"$status_id\"}}}\n", "text": "Hi everyone!I have a collection of message logs like (I’ve excluded _id from example, but it presents):For this collection I wanted to get message count for each latest message_status (only with latest created_at)\nF.e.:I tried to use $group by message_id to get latest status, how can I sum these statuses for all collection?Thanks!", "username": "Nick_Sam" }, { "code": "sort = { \"$sort\" : { \"created_at\" : -1 } }\ngroup = { \"$group\" : {\n \"_id\" : \"$message_id\" ,\n \"status\" : { \"$first\" : \"$message_status\" }\n} }\ncount = { \"$group\" : {\n \"_id\" : \"$status\" ,\n \"count\" : { \"$sum\" : 1 }\n} }\ncosmetic = { \"$project\" : {\n \"message_status\" : \"$_id\" ,\n \"count\" : 1 ,\n \"_id\" : 0\n} }\npipeline = [ sort , group , count , cosmetic ]\n", "text": "You were going in the right direction with your $group.For $last and $first to have a meaning you however need to $sort first , otherwise the order is not defined.\nNote that $sort descending and use $first rather than ascending and $last. My gut feeling is that it is potentially more efficient, but I really do not know.Then the $group by message id:Then we $count the statuses withPersonally, I would stop here because I have the requested information. But to get the format you wish the following cosmetic $project can be used.The pipeline would then be:", "username": "steevej" }, { "code": "", "text": "Thank you very much!", "username": "Nick_Sam" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Get count of latest record statuses
2023-02-11T11:34:41.696Z
Get count of latest record statuses
515
null
[ "aggregation", "queries" ]
[ { "code": "[\n {\n \"teamName\": \"teamA\",\n \"members\": [\n {\n \"memberId\": \"A11\",\n \"activationDateTime\": \"2023-01-05 08:00:00.000Z\",\n \"deactivationDateTime\": \"2023-01-06 08:00:00.000Z\"\n },\n {\n \"memberId\": \"A12\",\n \"activationDateTime\": \"2023-01-05 08:00:00.000Z\",\n \"deactivationDateTime\": \"2023-01-06 08:00:00.000Z\"\n },\n \n ]\n },\n {\n \"teamName\": \"teamB\",\n \"members\": [\n {\n \"memberId\": \"A11\",\n \"activationDateTime\": \"2023-01-05 08:00:00.000Z\",\n \n },\n {\n \"memberId\": \"B11\",\n \"activationDateTime\": \"2023-01-05 08:00:00.000Z\",\n \"deactivationDateTime\": \"2023-01-06 08:00:00.000Z\"\n },\n {\n \"memberId\": \"B12\",\n \"activationDateTime\": \"2023-01-10 08:00:00.000Z\",\n \n },\n \n ]\n },\n {\n \"teamName\": \"teamC\",\n \"members\": [\n {\n \"memberId\": \"C11\",\n \"activationDateTime\": \"2023-01-05 08:00:00.000Z\",\n \n },\n {\n \"memberId\": \"B11\",\n \"activationDateTime\": \"2023-01-04 08:00:00.000Z\",\n \n },\n \n ]\n },\n \n]\n", "text": "I am having a teams collection which stores members details.\nteamA is having member A11 which is de-active, that means this member A11 can be part of another team but the activationDateTime should be after the deactivationDateTime from teamA value. Team A - member A11 is having deactivation date time- “deactivationDateTime”: “2023-01-06 08:00:00.000Z”\nTeam B is having member A11 but this member is having “activationDateTime”: “2023-01-05 08:00:00.000Z”,\nwhich is overlapping with the same member in another team. same date overlap happens with members B11 in teamC.\nwe need to find out such members whose dates are overlapping (dates should be exclusive).", "username": "Harpal_Singh" }, { "code": "", "text": "@Satyam @Aasawari @Tarun_Gaur @Stennie_X @Ramachandra_Tummala @Jay @Elle_Shwer @Sourabh_Bagrecha @Marcus @Sheila_Doyle\nCan u please provide input on this?", "username": "Harpal_Singh" }, { "code": "", "text": "@RajeshSNair @Arkadiusz_Borucki @michael_hoeller @Harshit @DerrickChua @TimSantos @GeniusLearner @Kushagra_Kesav @NeNaD @shrey_batra\nPlease , can I get solution for this.", "username": "Harpal_Singh" }, { "code": "", "text": "I am not sure the question is clear.Realm stores the data you put in it; if the wrong dates are being stored then that’s a coding (coder?) issue, not a Realm issue. On the other hand, if you want to perform a query, then you need to attempt that and include the code your attempted in your question; this is not a code-writing service as there are too many variables.Most importantly, the question needs to be clear - are you asking how to query for all objects that have the same activationDate and/or deactivationDate?Please clarify what’s being asked and we’ll take a look.", "username": "Jay" } ]
Find overlapping activation Date and deactivation Date(dates should be exclusive)
2023-02-13T06:08:19.870Z
Find overlapping activation Date and deactivation Date(dates should be exclusive)
606
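Since this thread ends without a worked query, here is one possible sketch, assuming MongoDB 5.0+ for $setWindowFields and that the date strings compare correctly in their fixed-width format; it flags a membership that starts before the same member's previous membership ended:

```js
const pipeline = [
  { $unwind: "$members" },
  // For each member, look up the previous membership (by activation time).
  { $setWindowFields: {
      partitionBy: "$members.memberId",
      sortBy: { "members.activationDateTime": 1 },
      output: {
        prevDeactivation: {
          $shift: { output: "$members.deactivationDateTime", by: -1, default: null },
        },
      },
  } },
  // Overlap: the new membership starts before the previous one ended.
  // Note: open-ended memberships (no deactivationDateTime) need extra handling.
  { $match: { $expr: { $and: [
      { $ne: ["$prevDeactivation", null] },
      { $lt: ["$members.activationDateTime", "$prevDeactivation"] },
  ] } } },
];
// db.teams.aggregate(pipeline)
```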
null
[ "node-js", "replication", "mongodb-shell" ]
[ { "code": "MongoServerSelectionError: Server selection timed out after 30000 ms\n at Timeout._onTimeout (node_modules/mongodb/lib/sdam/topology.js:291:38)\n at listOnTimeout (node:internal/timers:559:17)\n at processTimers (node:internal/timers:502:7) {\n reason: TopologyDescription {\n type: 'ReplicaSetNoPrimary',\n servers: Map(1) {\n 'my-db.mongo.cosmos.azure.com:10255' => ServerDescription {\n address: my-db.mongo.cosmos.azure.com:10255',\n type: 'Unknown',\n hosts: [],\n passives: [],\n arbiters: [],\n tags: {},\n minWireVersion: 0,\n maxWireVersion: 0,\n roundTripTime: -1,\n lastUpdateTime: 7381815,\n lastWriteDate: 0,\n error: null,\n topologyVersion: null,\n setName: null,\n setVersion: null,\n electionId: null,\n logicalSessionTimeoutMinutes: null,\n primary: null,\n me: null,\n '$clusterTime': null\n }\n },\nServer selection timed out after 30000 msmongsh <connectionstring>mongodb://my-db-:[email protected]:10255/?ssl=true&replicaSet=globaldb&retrywrites=false&maxIdleTimeMS=120000&appName=@my-db@", "text": "Hi AllI have a mongodb database wrapped inside Azure CosmosDB, I am using a node.js v16.17.1 application using the backend as azure CosmosDB. This application is deployed as a ContainerApp in Azure, there it is working fine while connecting to cosmos-db.When I connect through localhost:5173:\nIt was working fine, but suddenly i am getting the below error from a particular day.Even though the error show Server selection timed out after 30000 ms it does not take that much time too.There are no permissions or network issue as I have allowed public network access to the db instance and it was always working.even from mongsh <connectionstring> i am getting the same error.Connection : is through my home wifi network which uses wpa2 personal. Before sometime back i was using wpa security. I am using a mac . My organization disallowed wpa security so i switched to wpa2 and ever since i notice this error when connecting from my localhost with my wifi networkConnectionString:\nmongodb://my-db-:[email protected]:10255/?ssl=true&replicaSet=globaldb&retrywrites=false&maxIdleTimeMS=120000&appName=@my-db@Mobile hotspot:\nAble to connect with my mobile-hotspot in the same machine to the same remote db without any errors.Tried this will different versions of node.js and mongodb libraries including latest but the same error .Can any one help here? Thanks", "username": "Bharath" }, { "code": "", "text": "Hi @Bharath,Welcome to the MongoDB Community forums Mobile hotspot:\nAble to connect with my mobile-hotspot in the same machine to the same remote db without any errors.As I can see it might be due to your home wifi network. Please switch to some other network and try again. Also, it’s important to note that CosmosDB is not a MongoDB product so we cannot comment on what it can or cannot do, or its compatibility with a genuine MongoDB product.I would suggest visiting the CosmosDB forums or reaching out to their support team if the problem persists.Best,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "", "text": "As a complement to the previous answer by Kushagra, I suggest taking a look at this website - https://www.iscosmosdbreallymongodb.com/ - for further information.", "username": "Mahi_Satyanarayana" } ]
Getting MongoServerSelectionError: Server selection timed out after 30000ms from localhost:5173
2023-02-07T14:32:49.696Z
Getting MongoServerSelectionError: Server selection timed out after 30000ms from localhost:5173
1,415
null
[ "python", "atlas-cluster", "transactions", "spark-connector" ]
[ { "code": "readPreferecemongodb+srv://<user>:<password>@<cluster>-pri.wrmoz.mongodb.net/<database>.<collection>?tls=true&readPreference=nearest&readPreferenceTags=provider:AZURE,region:US_EAST,nodeType:READ_ONLY&readConcernLevel=local\nDataFrameshow()readConcernv10.0v3.0", "text": "We are trying to query a Mongo database hosted in Atlas from an Azure Databricks cluster.The Atlas database is hosted in a M10 clusyer with three primary nodes in AWS (the ones used by the transactional application) with an additional read-only node in Azure (the one we are trying to connect).\nWe already set up a peering connection between our vnet and the Atlas one, plus whitelisting the appropriate IP range. We confirmed that we can pin the read-only node using its private DNS from one of the Databricks worker nodes; we even confirmed that we can telnet the 27017 port. Even more, using pymongo from one of the workers we are able to connect to the database and query the collections.However, when we try to connect from Databricks we get some timeout errors which appear to be related to the mongo-spark-connector not honoring the readPreferece configuration.\nThis is the uri we are trying to use (omitting sensitive details)Yet, when trying to load the data as a DataFrame and perform a simple show() we get a connection time out error.\nThe stack trace of the exception shows that the driver was able to ping the desired node (while being unable to reach the AWS ones, as expected). But, neglects to connect to it since it doesn’t match the expected readConcernprimaryWe also tried to specify each of the parameters as individual options, using the global cluster config, or in code config, we also tried using both the v10.0 and the v3.0 versions of the connector.\nNevertheless no matter what we tried we always got the same error.Is this expected behavior? If so, can it be changed? Otherwise, does this count as a proper bug report?\nAdditionally, is there any workaround?", "username": "Luis_Miguel_Mejia_Suarez" }, { "code": "", "text": "Hi Luis, I’m not sure if this is helpful in your use case but if you would be interested in replicating your MongoDB data to Databricks by reading the changestreams log that’s something that you could do with Streamkap. It doesn’t solve your immediate problem but is an alternative way to get data to Databricks and has the advantage of not putting querying load on your MongoDB instance.", "username": "Paul_Dudley" } ]
Spark connector not respecting readPreference configuration
2022-09-30T23:38:30.765Z
Spark connector not respecting readPreference configuration
2,298
null
[ "node-js", "atlas-functions", "serverless", "change-streams", "atlas-triggers" ]
[ { "code": "", "text": "I’m looking to capture live changes from my MongoDB Atlas cluster and send them to BigQuery. Much in line with this article which uses Node.js to listen for change stream events and then writes them to Pub/Sub. It recommends using a change stream event listener for the purpose.The part which the article doesn’t cover is where to run your code. Can this be a serverless function or does it always need to be “on” (i.e virtual machine)?The other slightly confusing area is Atlas Database Triggers. These seem to allow for similar functionality in that they automatically listen for change events in the database and subsequently can be set to run a function (I think only on Atlas/Realm). Am I right in thinking this is an alternative method of listening to the change stream where I wouldn’t need to actually run the watcher to listen for events as that part has already been handled. I just need to run the desired action (post a message to Pub/Sub in my case).In summary, please can someone confirmThanks.", "username": "Ian" }, { "code": "collection.watch()", "text": "Hi Ian!1. If the watcher code can run as a serverless function (on Realm or elsewhere)\nIn general, serverless functions are short lived, meaning that they run until one of these things happen:On the other hand, if we subscribe to a MongoDB event stream, for example with collection.watch(), we’ll need to keep the function running, so we can receive any new events (changes in the collection). So, using a serverless function for that scenario wouldn’t work.You’ll need an instance that’s running continuously such as EC2 or Google App Engine.2. If Atlas Database Triggers are an alternative to running your own watcher code which achieve exactly the same thing. Are there any downsides?I’d say Atlas Triggers are a great alternative to what’s described in the blog post. If you go with them, you’d need to define an Atlas Function (a JavaScript function executed on Atlas) that publishes messages to Cloud Pub/Sub. I played around with this scenario and I can confirm it’s possible. There are a couple of caveats though:Atlas Functions don’t support the Node.js library for Cloud Pub/Sub @google-cloud/pubsub. However, you can use the Pub/Sub REST API instead — Cloud Pub/Sub API  |  Cloud Pub/Sub Documentation  |  Google Cloud.Authenticating to Google Cloud through Atlas Functions can be a bit tricky. I recommend using the google-auth-library and JWT authentication.3. [Bonus] Alternative integration between Atlas and BigQueryAnother way to set up a stream between Atlas and BigQuery, is by using Dataflow — https://cloud.google.com/blog/products/data-analytics/mongodb-atlas-and-bigquery-dataflow-templates.", "username": "Stanimira_Vlaeva" }, { "code": "", "text": "Hi @Stanimira_Vlaeva. Thanks for taking the time to such a helpful answer.What I expected. Ideally I’m looking to stay serverless on this one.That sounds like a semi-viable option. Why don’t Atlas Functions support the Node.js library for Pub/Sub? Surely it would be quite necessary in this type of situation? Thanks also for the tip about authenticating.Using Dataflow is a good option but don’t you still have this same dilemma where you first need to listen and write messages to Pub/Sub to be consumed by Dataflow? That loops back to needing one of the first two approaches. In my particular case I don’t really need any transformations on the data, just having the changes sync’d up to BigQuery. 
That’s why I was looking at using the BigQuery streaming ingest from Pub/Sub as per the blog post, or via Atlas Triggers.Thanks again.", "username": "Ian" }, { "code": "", "text": "Hi Ian, if you’re still stuck on this you could try out Streamkap. We have a number of companies using our service to stream data from MongoDB to BigQuery.", "username": "Paul_Dudley" } ]
Where does my change stream watcher need to run?
2022-12-13T12:46:59.984Z
Where does my change stream watcher need to run?
2,068
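For completeness, a minimal Node.js watcher of the kind the blog post and this discussion describe; it must run on an always-on host, since the loop never returns, and the Pub/Sub publish is left as a stub because the exact integration varies:

```js
const { MongoClient } = require("mongodb");

async function publishToPubSub(message) {
  // Stub: replace with the Pub/Sub client library or its REST API.
  console.log("would publish:", message);
}

async function main() {
  const client = await MongoClient.connect(process.env.MONGODB_URI);
  const collection = client.db("mydb").collection("mycoll"); // placeholder names

  // The change stream keeps this process alive indefinitely, which is why
  // a short-lived serverless invocation cannot host it.
  for await (const change of collection.watch([], { fullDocument: "updateLookup" })) {
    await publishToPubSub(JSON.stringify(change));
  }
}

main().catch(console.error);
```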
null
[]
[ { "code": "", "text": "While creating mongodb atlas trigger to copy new mongodb data in s3 i face the following Error.(InternalError) an error occurred when communicating with AWS S3, correlationID = 171b627ff3bf958f2718893a", "username": "Vishnu_kiran_M" }, { "code": "", "text": "I’m having the same problem.\nDid you resolve the problem?", "username": "wonho" }, { "code": "", "text": "not yet…will post solution once i find it…and iam hoping that u will post the solution once u find it", "username": "Vishnu_kiran_M" }, { "code": "", "text": "So how do you send the data to s3 now?", "username": "wonho" }, { "code": "", "text": "Hi, Streamkap recently released support for sending data from Mongodb to S3 via CDC from changestreams.", "username": "Paul_Dudley" } ]
Automate Continuous Data Copying from MongoDB to S3
2022-10-06T05:42:18.978Z
Automate Continuous Data Copying from MongoDB to S3
1,842
https://www.mongodb.com/…a8e2d782add.jpeg
[ "node-js", "crud" ]
[ { "code": "", "text": "\nmongodb array of data584×505 55.2 KB\n\nI need to update object 1 status: “pending” to status: “completed”. How to do this…?", "username": "jOSIEY_Vengappallil" }, { "code": "Hi JOSIEY_Vengappallil,\n\nI am new to this community. I am not sure how to update the object in array through index.\n\nPlease check the below approach, it might help.\n\ndb.getCollection(collectionName).update({\"username\" : \"JAMES BOND\", \"taskList.title\" :\"meeting\"},\n{$set : {\"taskList.$.status\" : \"completed\"}})\n\nThis will update only matched document in an array.\n\nThanks,\nNaveen\n", "text": "", "username": "Naveen_Kohinoor" } ]
How to update array of object data using object index number in node js?
2023-02-16T07:39:00.814Z
How to update array of object data using object index number in node js?
647
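To answer the question as literally asked, updating by the array index rather than by a matched field, dot notation with the index works; a mongosh sketch reusing the field names from the reply (the collection name is a placeholder):

```js
// "taskList.1.status" addresses the element at index 1 (the second task).
db.getCollection("users").updateOne(
  { username: "JAMES BOND" },
  { $set: { "taskList.1.status": "completed" } }
);
```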
https://www.mongodb.com/…0fc27250389e.png
[ "database-tools", "backup", "ops-manager" ]
[ { "code": "Take snapshots every 6 hoursReference Time of Day", "text": "HelloI want to take a snapshot when I need but via Ops Manager UI, I can’t do so. I am updating snapshot schedule and configuring Take snapshots every 6 hours, which is the minimum I can.For instance, 00.00 UTC, Ops Manager takes a snapshot, and then right away, I’m configuring Reference Time of Day to 30 mins later, which 00.30. On Ops Manager UI, it shows tomorrow not today, which kind of make sense as the info part says the snapshot won’t be taken until that time. I went around on Backup Admin Dashboard to trigger backup job but no luck. In this case, how can I take any snapshots before doing any critical maintenance? I wAny help on this will be really appreciated, thanks!", "username": "Ercin_Demir" }, { "code": "", "text": "Hi @Ercin_Demir ,You are correct that in Ops Manager, you can not force an on demand snapshot. Changing the reference time will also move the next snapshot to the following day as you mentioned.What we recommend is that you utilize the Point-In-Time restore capability in case you have any issues during your critical maintenance and restore to a point in time before the maintenance started. Please see the documentation here.Thanks,\nEvin", "username": "Evin_Roesle" } ]
Can't take snapshots on Ops Manager
2023-02-15T00:14:52.218Z
Can&rsquo;t take snapshots on Ops Manager
1,126
null
[ "aggregation" ]
[ { "code": "let agg = [{\n \"$match\": {\n \"$or\": [\n {\n \"$regexMatch\": {\n \"input\": \"$field1\",\n \"regex\": new RegExp(search),\n \"options\": \"i\"\n }\n },\n {\n \"$regexMatch\": {\n \"input\": \"$field2\",\n \"regex\": new RegExp(search),\n \"options\": \"i\"\n }\n },\n ... more fields ...\n ]\n }\n}]\n", "text": "I am trying to run an aggregation, which at the end should enable a search over all fields.My idea was to use { “$match”: { “$or”: [ { \"$regexMatch: {}} ] } } and just add all field names with the same search string in my Javascript application. But I get an error “MongoServerError: unknown top level operator: $regexMatch”, not sure how to change the aggregation.", "username": "blue_puma" }, { "code": "let agg = [{\n \"$match\": {\n \"$expr\" : {\"$or\": [\n {\n \"$regexMatch\": {\n \"input\": \"$field1\",\n \"regex\": new RegExp(search),\n \"options\": \"i\"\n }\n },\n {\n \"$regexMatch\": {\n \"input\": \"$field2\",\n \"regex\": new RegExp(search),\n \"options\": \"i\"\n }\n }\n ] }\n }\n}]\n", "text": "Try by enclosing the whole $or within $expr.Example:", "username": "steevej" }, { "code": "", "text": "Awesome, thank you, that solved it!", "username": "blue_puma" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Run aggregation with $match and $or and $regexMatch from JavaScript?
2023-02-15T15:43:11.406Z
Run aggregation with $match and $or and $regexMatch from JavaScript?
780
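Building on the accepted fix, the "add all field names from my JavaScript application" part can be done by mapping over a field list; a sketch with placeholder names:

```js
const fields = ["field1", "field2", "field3"]; // placeholder field names
const search = "foo";                          // placeholder search string

const match = {
  $match: {
    $expr: {
      $or: fields.map((f) => ({
        $regexMatch: { input: `$${f}`, regex: search, options: "i" },
      })),
    },
  },
};
// collection.aggregate([match, /* ...remaining stages... */])
```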
null
[]
[ { "code": "", "text": "Hi,\nI can’t access to the lab it is showing error, i completed all the required pre requisites,\nThis lab assumes you have already created a free tier cluster. If you have not done this, please complete the “Creating and Deploying an Atlas Cluster” lab in lesson 2 of Unit 1 “Getting Started with MongoDB Atlas” before continuing here.", "username": "Vamsi_Krishna6" }, { "code": "", "text": "I don’t know the sequence of steps as per the latest course but have you created your own cluster and loaded sample data and created a dbuser to connect to your cluster?", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Though i did all the steps properly i am not able to launch my lab", "username": "Vamsi_Krishna6" }, { "code": "", "text": "Hi @Vamsi_Krishna6,Welcome to the MongoDB Community forums In order for us to better assist you with the issue you are experiencing while accessing the labs, could you kindly provide us with a screenshot of the error or problem you are encountering? This will help us to have a more clear understanding of the issue and to provide you with an effective solution.Thanks,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "", "text": "This topic was automatically closed 60 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Can't able to aceess lab?
2023-02-09T07:15:03.868Z
Can&rsquo;t able to aceess lab?
958
null
[ "queries", "serverless", "next-js" ]
[ { "code": "import { MongoClient } from 'mongodb'\n\nconst uri = process.env.NEXT_PUBLIC_MONGODB_URI;\n\nconst options = {\n useUnifiedTopology: true,\n useNewUrlParser: true,\n}\n\nlet client\nlet clientPromise\n\nif (!process.env.NEXT_PUBLIC_MONGODB_URI) {\n throw new Error('Please add your Mongo URI to .env.local')\n}\n\nif (process.env.NEXT_PUBLIC_NODE_ENV === 'development') {\n // In development mode, use a global variable so that the value\n // is preserved across module reloads caused by HMR (Hot Module Replacement).\n if (!global._mongoClientPromise) {\n client = new MongoClient(uri, options)\n global._mongoClientPromise = client.connect()\n }\n clientPromise = global._mongoClientPromise\n} else {\n // In production mode, it's best to not use a global variable.\n client = new MongoClient(uri, options)\n clientPromise = client.connect()\n}\n\n// Export a module-scoped MongoClient promise. By doing this in a\n// separate module, the client can be shared across functions.\nexport default clientPromise;\nexport async function getServerSideProps(context) {\n const session = await getServerSession(context.req, context.res, authOptions);\n\n if (!session) {\n return {\n redirect: {\n destination: '/login',\n permanent: false\n }\n }\n }\n\n const client = await clientPromise;\n const db = client.db(\"mongodb-db\");\n\n const testCollection = db.collection(\"jobs\");\n const testData = await jobsCollection.find({ userId: '100' }).toArray();\n\n const jobs = JSON.parse(JSON.stringify(testData));\n\n // Sort the jobs based on timestamp\n\n // Run a loop over all the sorted jobs i.e extract specific key/value pairs and return the data\n\n return {\n props: propsData\n }\n}\n", "text": "I am using mongodb with nextjs application. I am fetching data from mongodb inside getServerSideProps but I have observed it takes 4-5 secs to fetch data and display it on frontend. Any suggestions how can I improve it?\ncode:\nmongodb.js:dashboard.js:In my case I think that the slowness issue is related to mongodb connection or cold start of vercel serverless functions. Because in monogdb atlas it is showing me query command takes 300-500ms so I don’t think that fetching 500-1k array of objects from mongodb is an issue here. If it is related to connections how can I improve performance? Do I need to close the cursor after fetching data from mongodb? Something like this: proper use of mongo db in Next.js · vercel/next.js · Discussion #12229 · GitHubAny suggestions? Thanks", "username": "Aditya_Todkar1" }, { "code": "getServerSideProps()getServerSideProps()getServerSideProps", "text": "Hi @Aditya_Todkar1,Welcome to the MongoDB Community forums If it is related to connections how can I improve performance?I suspect it’s not related to connection, instead, the getServerSideProps() method forces a Next.js page to load with server-side rendering. What this means is that every time this page is loaded, the getServerSideProps() method runs on the backend, gets data, and sends it into the React component via props.To make it efficient you can wrap your function inside getServerSideProps . I found the following article - How to speed up your getServerSideProps that may be able to help you.Note that this is an article written by the Next.js community so the content may or may not be helpful to your specific case.Also, refer to this article to read more about different ways to fetch data to your Next.js application from MongoDB.I hope it helps!Best,\nKushagra", "username": "Kushagra_Kesav" } ]
Performance improvement for Nextjs + Mongodb app
2023-02-11T10:05:07.547Z
Performance improvement for Nextjs + Mongodb app
1,734
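A note on the thread above: with the client promise cached, the remaining latency usually comes from shipping 500-1k full documents per request. Below is a minimal sketch of a narrower getServerSideProps query; the field names (title, timestamp), the limit, and the index are illustrative assumptions, not taken from the thread:

```js
import clientPromise from '../lib/mongodb'; // hypothetical path to the cached-client module above

export async function getServerSideProps() {
  const client = await clientPromise; // reuses the cached connection; no per-request connect
  const jobs = client.db('mongodb-db').collection('jobs');

  // Assumed index, created once (e.g. in Atlas), not per request:
  // db.jobs.createIndex({ userId: 1, timestamp: -1 })
  const docs = await jobs
    .find({ userId: '100' }, { projection: { title: 1, timestamp: 1 } }) // ship only needed fields
    .sort({ timestamp: -1 }) // let the index provide the order
    .limit(100) // cap the payload instead of returning 1k documents
    .toArray();

  return { props: { jobs: JSON.parse(JSON.stringify(docs)) } };
}
```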
null
[ "aggregation", "queries", "node-js", "crud" ]
[ { "code": "\"mongoose\": \"^5.8.7\"const myData = await this.findOneAndUpdate({\n myId,\n color,\n }, {\n $setOnInsert: {\n myId,\n color,\n date: now.format('YYYY-MM-DD'),\n counter: myObj.counter - amount,\n },\n {\n $inc: { counter: -amount }\n },\n}, {\n new: true,\n upsert: true,\n}).exec();\ncounter$setOnInsert$inccounterconst myData = await this.findOneAndUpdate({\n myId,\n color,\n }, [{\n $set: {\n myId,\n color,\n date: now.format('YYYY-MM-DD'),\n counter: {\n $cond: {\n if: { $eq: ['$$ROOT', null] },\n then: myObj.counter - amount,\n else: { $subtract: ['$$ROOT.counter', amount] },\n }\n }\n }\n }], {\n new: true,\n upsert: true,\n });\n", "text": "I’m working with MongoDB 3.4.3 Community, but we will update to V4.4 soon.\nalso using \"mongoose\": \"^5.8.7\"\nI’m Trying to UPDATE if record exists or CREATE if it doesn’t.It is working fine with MongoDB v3.4 but does’t work with newer versions like 4.4 or 5.0.\nusing counter on both $setOnInsert and $inc causes a conflict, because both tries to set counter when at same time.Then I tried to change the query to this:Wich doesn’t work with v3.4. Not sure why, I believe I can only use this query on version 4.2 and above.Is there a solution where could work both on Version 3.4 and 4.4?", "username": "Backenderson" }, { "code": "", "text": "Hi @Backenderson and welcome to the MongoDB community forum!!To understand the requirement in a more better way, could you help me with a sample document and the desired output for the same which would help me to replicate in my local environment and help you with the solution.If you’re considering upgrading, I would recommend updating to the latest version of as it will offer far more features and flexibility for you to work more efficiently.Please refer to the documentation on How to upgrade to latest version of MongoDB documentation for smooth upgrade.Let us know if you have any more concerns.Best Regards\nAasawari", "username": "Aasawari" } ]
The dollar ($) prefixed field '$set' in '0.$set' is not valid for storage
2023-02-09T15:12:55.358Z
The dollar ($) prefixed field &lsquo;$set&rsquo; in &lsquo;0.$set&rsquo; is not valid for storage
1,358
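For readers stuck on the same conflict: one portable pattern that behaves identically on 3.4 and 4.4 is to split the upsert into a plain $inc plus an explicit insert, using a duplicate-key error to detect a racing insert. This is a sketch, not the thread's accepted answer, and it assumes a unique index on { myId: 1, color: 1 }:

```js
// Sketch: portable "decrement or create" without $setOnInsert and $inc touching the same field.
// Assumes a unique index on { myId: 1, color: 1 } so concurrent inserts raise error 11000.
async function decrementOrCreate(coll, { myId, color, date, initialCounter, amount }) {
  // Fast path: the document already exists, so decrement atomically.
  const res = await coll.updateOne({ myId, color }, { $inc: { counter: -amount } });
  if (res.matchedCount > 0) return;

  try {
    // Slow path: seed the document with the computed initial counter.
    await coll.insertOne({ myId, color, date, counter: initialCounter - amount });
  } catch (err) {
    if (err.code !== 11000) throw err; // 11000 = duplicate key: another writer inserted first
    await coll.updateOne({ myId, color }, { $inc: { counter: -amount } }); // retry the decrement
  }
}
```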
null
[]
[ { "code": "wget http://downloads.mongodb.org/src/mongodb-src-r5.0.13-rc0.tar.gzpython3 buildscripts/scons.py -j 3 DESTDIR=/var/opt/mongo install-mongod inlined from 'mongo::mozjs::JSStringWrapper::JSStringWrapper(int32_t)' at src/mongo/scripting/mozjs/jsstringwrapper.cpp:48:12:\n/usr/include/aarch64-linux-gnu/bits/string_fortified.h:106:34: error: 'char* __builtin_strncpy(char*, const char*, long unsigned int)' specified bound 64 equals destination size [-Werror=stringop-truncation]\n 106 | return __builtin___strncpy_chk (__dest, __src, __len, __bos (__dest));\n | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ncc1plus: all warnings being treated as errors\nscons: *** [build/59f4f0dd/mongo/scripting/mozjs/jsstringwrapper.o] Error 1\nscons: building terminated because of errors.\nbuild/59f4f0dd/mongo/scripting/mozjs/jsstringwrapper.o failed: Error 1\n", "text": "Hi.\nTrying to build mongodb from source.\n1)wget http://downloads.mongodb.org/src/mongodb-src-r5.0.13-rc0.tar.gz\n2) Installed all packages as described in mongo/building.md at r5.0.13-rc0 · mongodb/mongo · GitHub\n3) Changed const in src/mongo/bson/util/builder.h to BSONObjMaxUserSize = 64 * 1024 * 1024\n4) Build with command python3 buildscripts/scons.py -j 3 DESTDIR=/var/opt/mongo install-mongodGetting error:Check another Version “r5.1.0-rc0” and it build successfully. But I need version 5.0. for local environment.My system:Linux version 5.15.0-1028-aws (buildd@bos02-arm64-060) (gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0, GNU ld (GNU Binutils for Ubuntu) 2.34) #32~20.04.1-Ubuntu SMP Mon Jan 9 18:02:12 UTC 2023Could you help me? How can i build mongod?", "username": "Vladymyr_Vladymyr" }, { "code": "python3 buildscripts/scons.py -j 3 DESTDIR=/var/opt/mongo install-mongod --disable-warnings-as-errors", "text": "FIxed error with adding \" --disable-warnings-as-errors\" to build command:\npython3 buildscripts/scons.py -j 3 DESTDIR=/var/opt/mongo install-mongod --disable-warnings-as-errors", "username": "Vladymyr_Vladymyr" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Build Mongo 5.0.13 from source error
2023-02-15T14:43:02.146Z
Build Mongo 5.0.13 from source error
1,183
null
[ "node-js", "replication", "compass", "connecting", "mongodb-shell" ]
[ { "code": "MongoServerSelectionError: Hostname/IP does not match certificate's altnames: Host: ac-kiiemsq-shard-00-00.cbhjwlv.mongodb.net. is not in the cert's altnames: DNS:*.mongodb.net, DNS:mongodb.net\n{\"t\":{\"$date\":\"2023-02-15T10:21:27.465Z\"},\"s\":\"I\",\"c\":\"MONGOSH\",\"id\":1000000000,\"ctx\":\"log\",\"msg\":\"Starting log\",\"attr\":{\"execPath\":\"/opt/homebrew/Cellar/node/19.6.0/bin/node\",\"envInfo\":{\"EDITOR\":null,\"NODE_OPTIONS\":null,\"TERM\":\"xterm-256color\"},\"version\":\"1.7.1\",\"distributionKind\":\"packaged\",\"buildArch\":\"x64\",\"buildPlatform\":\"linux\",\"buildTarget\":\"unknown\",\"buildTime\":\"2023-02-13T19:19:00.135Z\",\"gitVersion\":\"1bf0d7775443ac1849cc2597465d54eca867acea\",\"nodeVersion\":\"v19.6.0\",\"opensslVersion\":\"1.1.1s\",\"sharedOpenssl\":true}}\n{\"t\":{\"$date\":\"2023-02-15T10:21:27.469Z\"},\"s\":\"I\",\"c\":\"MONGOSH\",\"id\":1000000005,\"ctx\":\"config\",\"msg\":\"User updated\"}\n{\"t\":{\"$date\":\"2023-02-15T10:21:27.470Z\"},\"s\":\"I\",\"c\":\"MONGOSH\",\"id\":1000000048,\"ctx\":\"config\",\"msg\":\"Loading global configuration file\",\"attr\":{\"filename\":\"/etc/mongosh.conf\",\"found\":false}}\n{\"t\":{\"$date\":\"2023-02-15T10:21:27.475Z\"},\"s\":\"E\",\"c\":\"DEVTOOLS-CONNECT\",\"id\":1000000041,\"ctx\":\"mongosh-deps\",\"msg\":\"Missing optional dependency\",\"attr\":{\"name\":\"mongodb-client-encryption\",\"error\":\"Cannot find module 'mongodb-client-encryption'\\nRequire stack:\\n- /opt/homebrew/Cellar/mongosh/1.7.1/libexec/lib/node_modules/@mongosh/cli-repl/node_modules/@mongodb-js/devtools-connect/lib/connect.js\\n- /opt/homebrew/Cellar/mongosh/1.7.1/libexec/lib/node_modules/@mongosh/cli-repl/node_modules/@mongodb-js/devtools-connect/lib/index.js\\n- /opt/homebrew/Cellar/mongosh/1.7.1/libexec/lib/node_modules/@mongosh/cli-repl/node_modules/@mongosh/service-provider-server/lib/cli-service-provider.js\\n- /opt/homebrew/Cellar/mongosh/1.7.1/libexec/lib/node_modules/@mongosh/cli-repl/node_modules/@mongosh/service-provider-server/lib/index.js\\n- /opt/homebrew/Cellar/mongosh/1.7.1/libexec/lib/node_modules/@mongosh/cli-repl/lib/cli-repl.js\\n- /opt/homebrew/Cellar/mongosh/1.7.1/libexec/lib/node_modules/@mongosh/cli-repl/lib/index.js\\n- /opt/homebrew/Cellar/mongosh/1.7.1/libexec/lib/node_modules/@mongosh/cli-repl/lib/run.js\\n- /opt/homebrew/Cellar/mongosh/1.7.1/libexec/lib/node_modules/@mongosh/cli-repl/bin/mongosh.js\"}}\n{\"t\":{\"$date\":\"2023-02-15T10:21:27.517Z\"},\"s\":\"I\",\"c\":\"DEVTOOLS-CONNECT\",\"id\":1000000039,\"ctx\":\"mongosh-connect\",\"msg\":\"Resolving SRV record succeeded\",\"attr\":{\"from\":\"mongodb+srv://<credentials>@cluster0.cbhjwlv.mongodb.net/myFirstDatabase?appName=mongosh+1.7.1\",\"to\":\"mongodb://<credentials>@ac-kiiemsq-shard-00-00.cbhjwlv.mongodb.net,ac-kiiemsq-shard-00-01.cbhjwlv.mongodb.net,ac-kiiemsq-shard-00-02.cbhjwlv.mongodb.net/myFirstDatabase?appName=mongosh+1.7.1&authSource=admin&replicaSet=atlas-142s5d-shard-0&tls=true\",\"resolutionDetails\":[{\"query\":\"SRV\",\"hostname\":\"_mongodb._tcp.cluster0.cbhjwlv.mongodb.net\",\"error\":null,\"wasNativelyLookedUp\":true},{\"query\":\"TXT\",\"hostname\":\"cluster0.cbhjwlv.mongodb.net\",\"error\":null,\"wasNativelyLookedUp\":true}]}}\n{\"t\":{\"$date\":\"2023-02-15T10:21:27.530Z\"},\"s\":\"I\",\"c\":\"DEVTOOLS-CONNECT\",\"id\":1000000042,\"ctx\":\"mongosh-connect\",\"msg\":\"Initiating connection 
attempt\",\"attr\":{\"uri\":\"mongodb://<credentials>@ac-kiiemsq-shard-00-00.cbhjwlv.mongodb.net,ac-kiiemsq-shard-00-01.cbhjwlv.mongodb.net,ac-kiiemsq-shard-00-02.cbhjwlv.mongodb.net/myFirstDatabase?appName=mongosh+1.7.1&authSource=admin&replicaSet=atlas-142s5d-shard-0&tls=true\",\"driver\":{\"name\":\"nodejs|mongosh\",\"version\":\"4.14.0\"},\"devtoolsConnectVersion\":\"1.4.4\",\"host\":\"ac-kiiemsq-shard-00-00.cbhjwlv.mongodb.net:27017,ac-kiiemsq-shard-00-01.cbhjwlv.mongodb.net:27017,ac-kiiemsq-shard-00-02.cbhjwlv.mongodb.net:27017\"}}\n{\"t\":{\"$date\":\"2023-02-15T10:21:27.867Z\"},\"s\":\"W\",\"c\":\"DEVTOOLS-CONNECT\",\"id\":1000000034,\"ctx\":\"mongosh-connect\",\"msg\":\"Server heartbeat failure\",\"attr\":{\"connectionId\":\"ac-kiiemsq-shard-00-02.cbhjwlv.mongodb.net:27017\",\"failure\":\"\",\"isFailFast\":false,\"isKnownServer\":true}}\n{\"t\":{\"$date\":\"2023-02-15T10:21:28.053Z\"},\"s\":\"W\",\"c\":\"DEVTOOLS-CONNECT\",\"id\":1000000034,\"ctx\":\"mongosh-connect\",\"msg\":\"Server heartbeat failure\",\"attr\":{\"connectionId\":\"ac-kiiemsq-shard-00-01.cbhjwlv.mongodb.net:27017\",\"failure\":\"Hostname/IP does not match certificate's altnames: Host: ac-kiiemsq-shard-00-01.cbhjwlv.mongodb.net. is not in the cert's altnames: DNS:*.mongodb.net, DNS:mongodb.net\",\"isFailFast\":false,\"isKnownServer\":true}}\n{\"t\":{\"$date\":\"2023-02-15T10:21:28.200Z\"},\"s\":\"W\",\"c\":\"DEVTOOLS-CONNECT\",\"id\":1000000034,\"ctx\":\"mongosh-connect\",\"msg\":\"Server heartbeat failure\",\"attr\":{\"connectionId\":\"ac-kiiemsq-shard-00-00.cbhjwlv.mongodb.net:27017\",\"failure\":\"Hostname/IP does not match certificate's altnames: Host: ac-kiiemsq-shard-00-00.cbhjwlv.mongodb.net. is not in the cert's altnames: DNS:*.mongodb.net, DNS:mongodb.net\",\"isFailFast\":false,\"isKnownServer\":true}}\n", "text": "I’m learning MongoDB and set up an Atlas account. I’m trying to connect to my database using mongosh, allowed all IPs, set up my user and password credentials but I’m getting an error:Upon checking my logs, this is what shows:I’ve tried connecting through Mongodb Compass and the connection is successful.", "username": "Matthew_Azada" }, { "code": "brew uninstall mongodb-community\nbrew uninstall mongosh\nwget https://raw.githubusercontent.com/Homebrew/homebrew-core/4519776bc4563548dcd8c8639ac7e073b107c381/Formula/mongosh.rb\nbrew install ./mongosh.rb\n", "text": "For those who have the same issue, I was able to resolve it by doing this:If you downgrade your version of mongosh to 1.6.2, you will be able to connect. For some reason the latest version of mongosh gives the error. After downgrading, I was able to connect no problem.", "username": "Matthew_Azada" }, { "code": "", "text": "Your solution worked for me too!\nthanks ", "username": "Paulina_Segovia" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Mongosh connection error: MongoServerSelectionError: Hostname/IP does not match certificate's altnames
2023-02-15T10:29:29.071Z
Mongosh connection error: MongoServerSelectionError: Hostname/IP does not match certificate&rsquo;s altnames
1,452
null
[]
[ { "code": "", "text": "MongoDB documentation for upgrade and down grade of mongoDB version from 4.4.6 to 4.4.18", "username": "nanda1212" }, { "code": "", "text": "", "username": "Kobe_W" } ]
MongoDB documentation for upgrade and down grade of mongoDB version from 4.4.6 to 4.4.18
2023-02-16T05:12:43.765Z
MongoDB documentation for upgrade and down grade of mongoDB version from 4.4.6 to 4.4.18
430
null
[ "node-js", "mongodb-shell", "server", "installation" ]
[ { "code": "", "text": "I have been trying to set up MongoDB on my Mac using homebrew. I used MongoDB’s Documentation to do that, everything is installed correctly on my mac. But when I start the MongoDB services and try to run MongoDB manually as a background process I come across this error.mongod --config /opt/homebrew/etc/mongod.conf --fork ─╯\nabout to fork child process, waiting until server is ready for connections.\nforked process: 6558\nERROR: child process failed, exited with 48And I cannot even access Mongo Shell , this is what I getmongosh ─╯\n[1] 7036 killed mongosh", "username": "Shahima_khan" }, { "code": "", "text": "Hi @Shahima_khan ,\nCan you display your configuration file please?\nI think it Is set internally the fork option so you don’ t need to explicate the --fork option from the Shell.Regards", "username": "Fabio_Ramohitaj" }, { "code": "", "text": "Hey @Fabio_Ramohitaj\nI would like to thank u for your attention to my problem.\nHere is my configuration file\n\nScreenshot 2023-02-11 at 5.11.42 PM1382×732 68.1 KB\n", "username": "Shahima_khan" }, { "code": "", "text": "Hi @Shahima_khan ,\nCan you see if the port 27017 isn’t busy from another process?\nFor example in Linux you can see that with the netstat command.Regards", "username": "Fabio_Ramohitaj" }, { "code": "", "text": "@Fabio_Ramohitaj the port is not busy. I checked there was no port 27017", "username": "Shahima_khan" }, { "code": "", "text": "@Shahima_khan , can you try to change the port in the configuration file of mongo to 27018 and the restart the instance?", "username": "Fabio_Ramohitaj" }, { "code": " brew services list ─╯\nName Status User File\nemacs none\nmongodb-community started shahima ~/Library/LaunchAgents/homebrew.mxcl.mongodb-community.plist\nunbound none\nmongod --version ─╯\ndb version v6.0.4\nBuild Info: {\n \"version\": \"6.0.4\",\n \"gitVersion\": \"44ff59461c1353638a71e710f385a566bcd2f547\",\n \"modules\": [],\n \"allocator\": \"system\",\n \"environment\": {\n \"distarch\": \"aarch64\",\n \"target_arch\": \"aarch64\"\n }\n}\n", "text": "Let me try to explain everything from the start.\nI downloaded MongoDB using Homebrew by following this official post ( https://www.mongodb.com/docs/manual/tutorial/install-mongodb-on-os-x/)\nI used the following commands:-(P.S. everything was up to date before starting this process)brew tap mongodb/brewbrew install [email protected] services start [email protected] I checked if the services are running using this commandbrew services listI get the following output:-So according to the output, MongoDB is runningwhen I try to get the version of MongoDB after closing the current terminal window using the commandmongod --versionI get the outputeverything seems gr8 so far …\nbut now as I try to get into Mongo Shell using the commandmongoshmongothis is the output I get\nScreenshot 2023-02-11 at 6.51.28 PM1140×496 99.5 KB\nI have a good enough Mac Configuration to run this process but I don’t know what is causing the problem.", "username": "Shahima_khan" }, { "code": "", "text": "@Fabio_Ramohitaj umm… what port? 
There is no field named as “port” in the configuration file.", "username": "Shahima_khan" }, { "code": "", "text": "You need to add the parameter port in the configuration file:“net:\nbindIp: 127.0.0.1\nport: 27017”\nhttps://www.mongodb.com/docs/manual/reference/configuration-options/#:~:text=net%3A,port%3A%2027017", "username": "Fabio_Ramohitaj" }, { "code": "", "text": "@Shahima_khan Yes, but change the number of port to 27018 as i mentioned before and restart the process", "username": "Fabio_Ramohitaj" }, { "code": "mongodmongod ─╯\n{\"t\":{\"$date\":\"2023-02-11T20:07:22.645+05:30\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23285, \"ctx\":\"thread2\",\"msg\":\"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'\"}\n{\"t\":{\"$date\":\"2023-02-11T20:07:22.646+05:30\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4915701, \"ctx\":\"thread2\",\"msg\":\"Initialized wire specification\",\"attr\":{\"spec\":{\"incomingExternalClient\":{\"minWireVersion\":0,\"maxWireVersion\":17},\"incomingInternalClient\":{\"minWireVersion\":0,\"maxWireVersion\":17},\"outgoing\":{\"minWireVersion\":6,\"maxWireVersion\":17},\"isInternalClient\":true}}}\n{\"t\":{\"$date\":\"2023-02-11T20:07:22.646+05:30\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4648602, \"ctx\":\"thread2\",\"msg\":\"Implicit TCP FastOpen in use.\"}\nmongod --port 27018\n", "text": "@Fabio_Ramohitaj\n\nScreenshot 2023-02-11 at 7.59.12 PM1452×694 121 KB\nI did as u asked but I got a bunch of logs in my terminal.\nI changed the port to 27018 in the editor and then after saving the file I restarted the mongodb server using the commandbrew services restart mongodb-community(I hope this is what u mean by restarting the process. Pls correct me if I’m wrong)Then when I used mongod . I got a lot of logsThere was a lot more of this.And the same thing happed when I specifically tried to connect to the 27018 port using the command", "username": "Shahima_khan" }, { "code": "", "text": "@Shahima_khanbrew services restart mongodb-communityYes. Can you attach the status of the service here?", "username": "Fabio_Ramohitaj" }, { "code": "brew services restart mongodb-community ─╯\nStopping `mongodb-community`... 
(might take a while)\n==> Successfully stopped `mongodb-community` (label: homebrew.mxcl.mongodb-commu\n==> Successfully started `mongodb-community` (label: homebrew.mxcl.mongodb-commu\n\n╭─    ~ ───────────────────────────────────────────── ✔   ─╮\n╰─ brew services list ─╯\nName Status User File\nemacs none\nmongodb-community started shahima ~/Library/LaunchAgents/homebrew.mxcl.mongodb-community.plist\nunbound none\n\n╭─    ~ ───────────────────────────────────────────────────────────────────── ✔   ─╮\n╰─\n", "text": "@Fabio_Ramohitaj sure here it is :-", "username": "Shahima_khan" }, { "code": "", "text": "Hi @Shahima_khan,\nAre you able to connect to the instance with mongo --port 27018 or mongosh --port 27018?\nCan you execute a command like show dbs?Regards", "username": "Fabio_Ramohitaj" }, { "code": "╭─    ~ ──────────────────────────────────────── KILL ✘  01:50:14 AM  ─╮\n╰─ mongo --port 27018 ─╯\nzsh: command not found: mongo\n\n╭─    ~ ───────────────────────────────────────── 127 ✘   ─╮\n╰─ mongosh --port 27018 ─╯\n[1] 9058 killed mongosh --port 27018\n\n╭─    ~ ──────────────────────────────────────── KILL ✘   ─╮\n╰─\n", "text": "mongosh --port 27018@Fabio_Ramohitaj Hey !!No, I'm unable to connect to the instance using mongo --port 27018 or mongosh --port 27018here is the result:-Regards!", "username": "Shahima_khan" }, { "code": "", "text": "Hello @Shahima_khan,I just had the same issue and the problem for me was that I did not stop the services before running the ‘mongod --config /opt/homebrew/etc/mongod.conf --fork’ command. From reading the thread I believe your issue is the same.Try running ‘brew services stop mongodb-community@{version}’Then run 'mongod --config /opt/homebrew/etc/mongod.conf --fork’.Hope this helps!", "username": "Tuhrel_Johnson" }, { "code": "", "text": "Except for testing you do not want to start mongod manually withmongod --config /opt/homebrew/etc/mongod.conf --fork’You should use brew to stop and start since it is the safest way to get a clean shutdown.", "username": "steevej" }, { "code": "", "text": "@Tuhrel_Johnson thank you!Mine was solved after reinstalling MongoDB completely and installing Mongosh separately.", "username": "Shahima_khan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
About to fork child process, waiting until server is ready for connections. forked process: 6558 ERROR: child process failed, exited with 48
2023-02-11T07:27:05.241Z
About to fork child process, waiting until server is ready for connections. forked process: 6558 ERROR: child process failed, exited with 48
7,836
null
[ "replication" ]
[ { "code": "", "text": "Hello all,\nI have configured 3-node replica set cluster for a pre-prod environment. Replica set is configured to receive connection from all ips. But when I try to connect to the replica set using IP address without adding nodes to the host file, I am unable to connect. But after adding ips to the host file the connection can be established. What is the reason for this?\nThanks", "username": "Ravindra_Pandey" }, { "code": "", "text": "Hello @Ravindra_Pandey ,To get better understanding of your use-case, could you please share below mentioned details?Replica set is configured to receive connection from all ipsHow did you configure the replica set?How are you trying to connect to the host? are you using specific url? can you share the connection url? (redact the sensitive information like username and password before sharing)Why do you want to use IPs instead of setting up hostnames?I am unable to connect.How can you confirm that you are not connecting to the Database? Are you facing any error? If yes, please share the error message.Please share output of rs.conf() and rs.status().I would recommend you go through below thread as a similar has been discussed in that post.Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "You will find some useful information in the following post.", "username": "steevej" }, { "code": "getaddrinfo ENOTFOUND SERVER2mongodb://username:[email protected]:27017,192.168.56.101:27017,192.168.56.102:27017/TestDB?authSource=admin", "text": "Hi @Tarun_Gaur\nThank you for your response!\nI am curretnly trying to connect to the replica set through mongodb compass remotely. The main issue is I am able to connect to the replica set when I add the ips and hostname to the host file but if I remove them from the host fine I am unable to connect to the replica set. My replica set are running without any issues. I have configured relica set with the host names.\nThe error is:\ngetaddrinfo ENOTFOUND SERVER2But after adding files to host files the connection can be established using IP as well as hostname.\nCan’t we make connection with ip only and not adding to the host file?The connection string is:\nmongodb://username:[email protected]:27017,192.168.56.101:27017,192.168.56.102:27017/TestDB?authSource=admin", "username": "Ravindra_Pandey" } ]
Cannot connect to Mongodb Server without adding into Host Files
2023-02-13T03:59:58.726Z
Cannot connect to Mongodb Server without adding into Host Files
1,313
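If connecting by IP without hosts-file entries is a hard requirement, the alternative the thread stops short of is re-advertising the members by IP, so rs.conf() hands clients addresses they can already resolve. A mongosh sketch using the IPs from the thread; it reconfigures a live set, and every member and every client must be able to reach these addresses:

```js
// Run in mongosh while connected to the PRIMARY. This changes a live set - test first.
cfg = rs.conf();
cfg.members[0].host = '192.168.56.100:27017';
cfg.members[1].host = '192.168.56.101:27017';
cfg.members[2].host = '192.168.56.102:27017';
rs.reconfig(cfg); // members now advertise IPs instead of hostnames
```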
null
[ "queries" ]
[ { "code": "", "text": "Looks like that when same field is used in multiple conditions in match or find, those sonditions are treated as joinned by logical “OR”, not “AND”:\nmatch {\n{‘responseMessage’: { $exists: true }},\n{‘responseMessage’: {$ne: “No Suggestions Found”}}\n}\nreturns all documents where responseMessage exists OR responseMessage is not equal to “No Suggestions Found”, which actually returns documents where responseMessage DOES NOT EXIST.\nIf I need to have those confitions to be joined with “AND”, I need to put them like:{ $and: [\n{‘responseMessage’: { $exists: true }},\n{‘responseMessage’: {$ne: “No Suggestions Found”}}\n]}This is different from behavior when 2 conditions are using different fields - those conditions will be treated as joined by “AND”", "username": "SERGIY_MITIN" }, { "code": "db.coll.find({ responseMessage: { $exists: true, $ne: \"no suggestions found\"}})", "text": "Hi @SERGIY_MITIN. Welcome to the community.Yes, here’s the documentation when specifying the same field. Your query can be rewritten with an implicit AND, like belowdb.coll.find({ responseMessage: { $exists: true, $ne: \"no suggestions found\"}})Hope this adds clarity.", "username": "Mahi_Satyanarayana" }, { "code": "query = {\n \"responseMessage\" : \"any value\" ,\n \"responseMessage\" : { \"$exists\" : true } ,\n \"responseMessage\" : { \"$ne\" : \"No Suggestions Found\" }\n}\n{ responseMessage: { '$ne': 'No Suggestions Found' } }\n", "text": "Looks like that when same field is used in multiple conditions in match or find, those sonditions are treated as joinned by logical “OR”, not “AND”This is not what really happen.The reason for that is that in most JSON implementations when a field name is repeated only the last occurrence is kept.So in your case, responseMessage:{$exists:true} is not even seen by the server.You may experiment in mongosh with :You will see that mongosh outputs:showing that all other occurrences of responseMessage are ignored.", "username": "steevej" } ]
Same field in multiple conditions in match or find: they are treated as "OR"?
2023-02-15T23:58:38.357Z
Same field in multiple conditions in match or find: they are treated as &ldquo;OR&rdquo;?
5,066
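The last-occurrence-wins behaviour lives in the object literal itself, before anything reaches the server, which is easy to verify in plain Node.js:

```js
// Duplicate keys in a JS object literal: only the last one survives.
const query = {
  responseMessage: { $exists: true },
  responseMessage: { $ne: 'No Suggestions Found' }, // silently overwrites the line above
};
console.log(query); // { responseMessage: { '$ne': 'No Suggestions Found' } }

// Merging the operators under one key keeps both conditions (implicit AND):
const fixed = { responseMessage: { $exists: true, $ne: 'No Suggestions Found' } };
```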
null
[ "php" ]
[ { "code": "Could not establish stream for node servername:27017: [connection closed calling hello on 'servername:27017']", "text": "I’m trying to connect to Mongo Atlas using PHP and I get the following error:\nCould not establish stream for node servername:27017: [connection closed calling hello on 'servername:27017']I have searched high and low and can’t find anyone with his error exactly. I’ve tried looking at SSL issues, but nothing helped. I am able to connect using mongo cli.Centos 7 with PHP 7 on Apache 2.Thank you!", "username": "Everett_Glovier" }, { "code": "", "text": "Please post your PHP code including the mogodb uri you are using to connect BUT be sure to obscure your username and password!!", "username": "Jack_Woehr" }, { "code": "", "text": "Hello @Everett_Glovier and welcome to the MongoDB community!It would be nice to have more information, but what you are seeing might be the result of something in the earlier initialization that went undetected. I wrote an article about MongoDB PHP Error handling, that might give you checking ideas.This article shows you common mechanisms to deal with potential PHP Errors and Exceptions triggered by connection loss, temporary inability to read/write, initialization failures, and more.Let us know!\nHubert", "username": "Hubert_Nguyen1" } ]
Error using PHP: Could not establish stream for node
2023-02-10T01:51:48.309Z
Error using PHP: Could not establish stream for node
1,133
null
[ "node-js", "replication", "mongoose-odm", "connecting", "containers" ]
[ { "code": "\"mongodb://localhost:27001,localhost:27002,localhost:27003/MeshedDev?replicaSet=myReplicaSet\"\n\"mongodb://127.0.0.1:27001,127.0.0.1:27002,127.0.0.1:27003/MeshedDev?replicaSet=myReplicaSet\"\n\"mongodb://mongo1.127.0.0.1:27001,mongo2.127.0.0.1:27002,mongo3.127.0.0.1:27003/MeshedDev?replicaSet=myReplicaSet\"\n\"mongodb://mongo1:27001,mongo2:27002,mongo3:27003/MeshedDev?replicaSet=myReplicaSet\"\n\"mongodb://mongo1:27017,mongo2:27017,mongo3:27017/MeshedDev?replicaSet=myReplicaSet\"\nMongooseServerSelectionError: Server selection timed out after 30000 ms\n at NativeConnection.Connection.openUri (/Users/me/Meshed/server/node_modules/mongoose/lib/connection.js:825:32)\n at /Users/me/Meshed/server/node_modules/mongoose/lib/index.js:409:10\n at /Users/me/Meshed/server/node_modules/mongoose/lib/helpers/promiseOrCallback.js:41:5\n at new Promise (<anonymous>)\n at promiseOrCallback (/Users/me/Meshed/server/node_modules/mongoose/lib/helpers/promiseOrCallback.js:40:10)\n at Mongoose._promiseOrCallback (/Users/me/Meshed/server/node_modules/mongoose/lib/index.js:1262:10)\n at Mongoose.connect (/Users/me/Meshed/server/node_modules/mongoose/lib/index.js:408:20)\n at mongoConnect (/Users/me/Meshed/server/src/database/mongo/connection.ts:9:12)\n at Object.<anonymous> (/Users/me/Meshed/server/src/app.ts:35:13)\n at Module._compile (node:internal/modules/cjs/loader:1218:14) {\n reason: TopologyDescription {\n type: 'ReplicaSetNoPrimary',\n servers: Map(3) {\n 'mongo1:27017' => [ServerDescription],\n 'mongo2:27017' => [ServerDescription],\n 'mongo3:27017' => [ServerDescription]\n },\n stale: false,\n compatible: true,\n heartbeatFrequencyMS: 10000,\n localThresholdMS: 15,\n setName: 'myReplicaSet',\n maxElectionId: ObjectId { [Symbol(id)]: [Buffer [Uint8Array]] },\n maxSetVersion: 1,\n commonWireVersion: 0,\n logicalSessionTimeoutMinutes: null\n },\n code: undefined\n}\n{\n set: 'myReplicaSet',\n date: ISODate(\"2023-02-15T12:39:21.928Z\"),\n myState: 1,\n term: Long(\"7\"),\n syncSourceHost: '',\n syncSourceId: -1,\n heartbeatIntervalMillis: Long(\"2000\"),\n majorityVoteCount: 2,\n writeMajorityCount: 2,\n votingMembersCount: 3,\n writableVotingMembersCount: 3,\n optimes: {\n lastCommittedOpTime: { ts: Timestamp({ t: 1676464753, i: 1 }), t: Long(\"7\") },\n lastCommittedWallTime: ISODate(\"2023-02-15T12:39:13.254Z\"),\n readConcernMajorityOpTime: { ts: Timestamp({ t: 1676464753, i: 1 }), t: Long(\"7\") },\n appliedOpTime: { ts: Timestamp({ t: 1676464753, i: 1 }), t: Long(\"7\") },\n durableOpTime: { ts: Timestamp({ t: 1676464753, i: 1 }), t: Long(\"7\") },\n lastAppliedWallTime: ISODate(\"2023-02-15T12:39:13.254Z\"),\n lastDurableWallTime: ISODate(\"2023-02-15T12:39:13.254Z\")\n },\n lastStableRecoveryTimestamp: Timestamp({ t: 1676464723, i: 1 }),\n electionCandidateMetrics: {\n lastElectionReason: 'electionTimeout',\n lastElectionDate: ISODate(\"2023-02-15T11:54:12.530Z\"),\n electionTerm: Long(\"7\"),\n lastCommittedOpTimeAtElection: { ts: Timestamp({ t: 0, i: 0 }), t: Long(\"-1\") },\n lastSeenOpTimeAtElection: { ts: Timestamp({ t: 1676461982, i: 1 }), t: Long(\"6\") },\n numVotesNeeded: 2,\n priorityAtElection: 1,\n electionTimeoutMillis: Long(\"10000\"),\n numCatchUpOps: Long(\"0\"),\n newTermStartDate: ISODate(\"2023-02-15T11:54:12.584Z\"),\n wMajorityWriteAvailabilityDate: ISODate(\"2023-02-15T11:54:12.999Z\")\n },\n members: [\n {\n _id: 0,\n name: 'mongo1:27017',\n health: 1,\n state: 1,\n stateStr: 'PRIMARY',\n uptime: 2732,\n optime: { ts: Timestamp({ t: 1676464753, i: 1 }), t: 
Long(\"7\") },\n optimeDate: ISODate(\"2023-02-15T12:39:13.000Z\"),\n lastAppliedWallTime: ISODate(\"2023-02-15T12:39:13.254Z\"),\n lastDurableWallTime: ISODate(\"2023-02-15T12:39:13.254Z\"),\n syncSourceHost: '',\n syncSourceId: -1,\n infoMessage: '',\n electionTime: Timestamp({ t: 1676462052, i: 1 }),\n electionDate: ISODate(\"2023-02-15T11:54:12.000Z\"),\n configVersion: 1,\n configTerm: 7,\n self: true,\n lastHeartbeatMessage: ''\n },\n {\n _id: 1,\n name: 'mongo2:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 2719,\n optime: { ts: Timestamp({ t: 1676464753, i: 1 }), t: Long(\"7\") },\n optimeDurable: { ts: Timestamp({ t: 1676464753, i: 1 }), t: Long(\"7\") },\n optimeDate: ISODate(\"2023-02-15T12:39:13.000Z\"),\n optimeDurableDate: ISODate(\"2023-02-15T12:39:13.000Z\"),\n lastAppliedWallTime: ISODate(\"2023-02-15T12:39:13.254Z\"),\n lastDurableWallTime: ISODate(\"2023-02-15T12:39:13.254Z\"),\n lastHeartbeat: ISODate(\"2023-02-15T12:39:21.153Z\"),\n lastHeartbeatRecv: ISODate(\"2023-02-15T12:39:21.828Z\"),\n pingMs: Long(\"0\"),\n lastHeartbeatMessage: '',\n syncSourceHost: 'mongo1:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 7\n },\n {\n _id: 2,\n name: 'mongo3:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 2707,\n optime: { ts: Timestamp({ t: 1676464753, i: 1 }), t: Long(\"7\") },\n optimeDurable: { ts: Timestamp({ t: 1676464753, i: 1 }), t: Long(\"7\") },\n optimeDate: ISODate(\"2023-02-15T12:39:13.000Z\"),\n optimeDurableDate: ISODate(\"2023-02-15T12:39:13.000Z\"),\n lastAppliedWallTime: ISODate(\"2023-02-15T12:39:13.254Z\"),\n lastDurableWallTime: ISODate(\"2023-02-15T12:39:13.254Z\"),\n lastHeartbeat: ISODate(\"2023-02-15T12:39:21.153Z\"),\n lastHeartbeatRecv: ISODate(\"2023-02-15T12:39:21.342Z\"),\n pingMs: Long(\"0\"),\n lastHeartbeatMessage: '',\n syncSourceHost: 'mongo2:27017',\n syncSourceId: 1,\n infoMessage: '',\n configVersion: 1,\n configTerm: 7\n }\n ],\n ok: 1,\n '$clusterTime': {\n clusterTime: Timestamp({ t: 1676464753, i: 1 }),\n signature: {\n hash: Binary(Buffer.from(\"0000000000000000000000000000000000000000\", \"hex\"), 0),\n keyId: Long(\"0\")\n }\n },\n operationTime: Timestamp({ t: 1676464753, i: 1 })\n}\n\nCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n569f32314512 mongo:latest \"docker-entrypoint.s…\" 24 minutes ago Up 24 minutes 0.0.0.0:27003->27017/tcp mongo3\n1efd0830a5c5 mongo:latest \"docker-entrypoint.s…\" 24 minutes ago Up 24 minutes 0.0.0.0:27002->27017/tcp mongo2\n2f4ef16a31b9 mongo:latest \"docker-entrypoint.s…\" 24 minutes ago Up 24 minutes 0.0.0.0:27001->27017/tcp mongo1\n2023-02-15 21:00:33 \n{\n \"t\": { \n \"$date\" : \"2023-02-15T12:00:33.824+00:00\"\n },\n \"s\":\"I\", \n \"c\":\"NETWORK\", \n \"id\":22943, \n \"ctx\":\"listener\",\n \"msg\":\"Connection accepted\",\n \"attr\": {\n \"remote\":\"172.19.0.1:48010\",\n \"uuid\":\"c54cf73c-b5c6-480b-b6e0-1cc2caaa1298\",\n \"connectionId\":29,\n \"connectionCount\":13\n }\n\n}\n2023-02-15 21:00:33 \n{\n \"t\": {\n \"$date\":\"2023-02-15T12:00:33.834+00:00\"\n },\n \"s\":\"I\", \n \"c\":\"NETWORK\", \n \"id\":51800, \n \"ctx\":\"conn29\",\n \"msg\":\"client metadata\",\n \"attr\": {\n \"remote\":\"172.19.0.1:48010\",\n \"client\":\"conn29\",\n \"doc\":{\n \"driver\":{\n \"name\":\"nodejs|Mongoose\",\n \"version\":\"4.12.1\"\n },\n \"os\": { \n \"type\":\"Darwin\",\n \"name\":\"darwin\",\n \"architecture\":\"arm64\",\n \"version\":\"22.1.0\"\n },\n \"platform\":\"Node.js v18.13.0, LE (unified)\",\n 
\"version\":\"4.12.1|6.8.3\"\n }\n }\n}\n\n2023-02-15 21:00:33 \n{\n \"t\": {\n \"$date\":\"2023-02-15T12:00:33.842+00:00\"\n },\n \"s\":\"I\", \n \"c\":\"NETWORK\", \n \"id\":22944, \n \"ctx\":\"conn29\",\n \"msg\":\"Connection ended\",\n \"attr\":{\n \"remote\":\"172.19.0.1:48010\",\n \"uuid\":\"c54cf73c-b5c6-480b-b6e0-1cc2caaa1298\",\n \"connectionId\":29,\n \"connectionCount\":12\n }\n}\n", "text": "I made three dockerized mongo container, one primary and two secodaries. But, I can’t connect to the DBs. I get Server Selection Error either “server selection timed out” or “getaddrinfo ENOTFOUND”.I’m pretty much sure that replica set is fine, because when I use the connection string like this,“mongodb://localhost:27001/MeshedDev?directConnection=true&replicaSet=myReplicaSet”this works. However, when I restart the server and if the primary replica has been changed, this connection string will fail unless I change the port number manually.So, ideally, I want to list the members so that the server can find the primary by itself. I tried some different formats of connection string according to connection formatting doc of yours and some other posts I found about similar issues, but none of them worked for me.I feel, I’m missing something with docker, finding the right host address, but cannot figure it out.Any help with connection string uri please? Below are some logs", "username": "Sewook_Kim" }, { "code": " servers: Map(3) {\n 'mongo1:27017' => [ServerDescription],\n 'mongo2:27017' => [ServerDescription],\n 'mongo3:27017' => [ServerDescription]\n },\n stale: false,\n compatible: true,\n heartbeatFrequencyMS: 10000,\n localThresholdMS: 15,\n setName: 'myReplicaSet',\nmongodb://mongo1:27017,mongo2:27017,mongo3:27017/MeshedDev?replicaSet=myReplicaSet\n", "text": "The only connection string that corresponds to the following replica set configuration:isPlease share what you get when you use it.", "username": "steevej" }, { "code": "MongooseServerSelectionError: Server selection timed out after 30000 ms\n at NativeConnection.Connection.openUri (/Users/me/Meshed/server/node_modules/mongoose/lib/connection.js:825:32)\n at /Users/me/Meshed/server/node_modules/mongoose/lib/index.js:409:10\n at /Users/me/Meshed/server/node_modules/mongoose/lib/helpers/promiseOrCallback.js:41:5\n at new Promise (<anonymous>)\n at promiseOrCallback (/Users/me/Meshed/server/node_modules/mongoose/lib/helpers/promiseOrCallback.js:40:10)\n at Mongoose._promiseOrCallback (/Users/me/Meshed/server/node_modules/mongoose/lib/index.js:1262:10)\n at Mongoose.connect (/Users/me/Meshed/server/node_modules/mongoose/lib/index.js:408:20)\n at mongoConnect (/Users/me/Meshed/server/src/database/mongo/connection.ts:8:12)\n at Object.<anonymous> (/Users/me/Meshed/server/src/app.ts:35:13)\n at Module._compile (node:internal/modules/cjs/loader:1218:14) {\n reason: TopologyDescription {\n type: 'ReplicaSetNoPrimary',\n servers: Map(3) {\n 'mongo1:27017' => [ServerDescription],\n 'mongo2:27017' => [ServerDescription],\n 'mongo3:27017' => [ServerDescription]\n },\n stale: false,\n compatible: true,\n heartbeatFrequencyMS: 10000,\n localThresholdMS: 15,\n setName: 'myReplicaSet',\n maxElectionId: null,\n maxSetVersion: null,\n commonWireVersion: 0,\n logicalSessionTimeoutMinutes: null\n },\n code: undefined\n}\n", "text": "Thanks for your reply.I still get timed out error.", "username": "Sewook_Kim" }, { "code": "type: 'ReplicaSetNoPrimary', name: 'mongo1:27017',\n health: 1,\n state: 1,\n stateStr: 'PRIMARY',\n", "text": "It looks like your 
replica set is not configured correctly.type: 'ReplicaSetNoPrimary',Connect with the same connection string but with mongosh or Compass. It is funny that mongoose reports ReplicaSetNoPrimary yet you also sharedwhich indicates that mongo1 is PRIMARY.Note that running all the nodes of a replica set on the same hardware is not really safe. If this hardware fails you lose your data.", "username": "steevej" }, { "code": "\"mongodb://localhost:27001/MeshedDev?directConnection=true&replicaSet=myReplicaSet\"\"mongodb://localhost:27001,localhost:27002,localhost:27003/MeshedDev?replicaSet=myReplicaSet\"", "text": "Note that running all the nodes of a replica set on the same hardware is not really safe. If this hardware fails you lose your data.First, thanks for your kind advice. I’m aware of it and will have separate nodes in separate instances for the actual deployment. Your connection string works well with mongosh. This might be an awkward question, but how am I supposed to access the DB from my application then? I still can’t connect to the DB from my application with any connection string, nor send a query without the server being connected to the DB.By the way, my server is not in Docker and is just running locally. I just realized that this might have confused you and is the reason why the container names weren’t working. Still, it’s weird because I can connect to DBs and send transactions with\n\"mongodb://localhost:27001/MeshedDev?directConnection=true&replicaSet=myReplicaSet\" but not with any other listed form of connection string such as \"mongodb://localhost:27001,localhost:27002,localhost:27003/MeshedDev?replicaSet=myReplicaSet\" which I think should work.", "username": "Sewook_Kim" }, { "code": "let opt = {poolSize:1, useNewUrlParser: true, useUnifiedTopology: true}\nlet conn = await MongoClient.connect(\n 'mongodb://username:[email protected]:27017/test?authSource=admin&replicaSet=rs0',\n opt)\n", "text": "I just found this post (Mongoose unable to connect with Mongo Standalone replicaSet - #2 by kevinadi) suggesting some options to try out.However, this throws an error saying “option poolSize is not supported”, and neither can I find the other properties in MongoClientOptions in mongodb.d.ts.I thought this was because the post was using MongoDB 5.0 when I’m using 6.0, but I couldn’t find any reference to those options in the 5.0 doc (https://www.mongodb.com/docs/v5.0/reference/connection-string/#connection-options).Does anyone know what happened since last summer?", "username": "Sewook_Kim" }, { "code": "", "text": "I’m getting the following error on my website. It’s a new website.{“status”: false, “message”: \"MongoServerSelectionError:connect EADDRNOTAVAIL 127.0.0.1:27017 - Local (127.0.0.1:0) In at Timeout. 
onTimeout (/var/www/CertificationPlannerNode/node modules/mongo db/lib/sdam/topology.is:292:38)In at listOnTimeout (internal/timers.js:554:17) Inat processTimers(internal/timers.js:497:7)\", “data”:", "username": "Nomad_Family" }, { "code": "", "text": "Please do not pollute an existing thread with a question you already posted elsewhere.Thanks", "username": "steevej" }, { "code": "MongooseServerSelectionError\"mongodb://localhost:27001/MeshedDev?directConnection=true0.0.0.0:27001->27017/tcp ", "text": "Your connection string works well with mongosh.This confirms that your replica set is alive and configured correctly.Is your application running on the same machine as the one you used to connect with mongosh?Since the error isMongooseServerSelectionErrorI suspect that the mongoose version you are using is incompatible with the node driver. Can you try a different version, try with a newer otherwise try an older one.Still, it’s weird because I can connect to DBs and send transactions with\n\"mongodb://localhost:27001/MeshedDev?directConnection=trueIt is not really weird because you are specifying directConnection=true and you are doing ports forwarding0.0.0.0:27001->27017/tcp With directConnection=true, the driver does not use the replica set configuration, it connects directly.", "username": "steevej" }, { "code": "", "text": "The thread Access mongodb via External IP - #3 by Stennie should be of interest to you.", "username": "steevej" } ]
ServerSelectionError when connecting dockerized replica set
2023-02-15T12:59:51.240Z
ServerSelectionError when connecting dockerized replica set
1,873
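A quick way to see why the full seed list failed while directConnection worked: ask any one member, over a direct connection, which hosts the replica set advertises. The driver must be able to resolve exactly these names from the client machine. A Node sketch against the port mapping from the thread:

```js
const { MongoClient } = require('mongodb');

async function showAdvertisedHosts() {
  // directConnection bypasses replica-set discovery, so this works even when SDAM cannot.
  const client = new MongoClient('mongodb://localhost:27001/?directConnection=true');
  await client.connect();
  const hello = await client.db('admin').command({ hello: 1 });
  console.log(hello.hosts);   // [ 'mongo1:27017', 'mongo2:27017', 'mongo3:27017' ]
  console.log(hello.primary); // the name the driver will try to resolve and connect to
  await client.close();
}
```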
null
[ "aggregation", "dot-net" ]
[ { "code": "// Search definition (analyzer provides mql as information message)\nvar searchTitle = Builders<Movie>.Search.Wildcard(p => p.Title, \"Green D*\");\n\n// MQL is displayed for 'searchTitle' variable\nmoviesCollection.Aggregate().Search(searchTitle)\nDefaultLinqVersion", "text": "This is the general availability release for the 1.2.0 version of the analyzer.The main new features in 1.2.0 include:Default LINQ provider version is set automatically to V2 in 2.18.0 and lower versions and to V3 in 2.19.0 and higher versions.\nFor more information see DefaultLinqVersion option in Analyzer configuration.Main bug fixes in 1.2.0 include:The full list of JIRA issues resolved in this release is available here.", "username": "Boris_Dogadov" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB .NET Analyzer 1.2.0 Released
2023-02-15T23:37:30.674Z
MongoDB .NET Analyzer 1.2.0 Released
1,029
null
[ "replication" ]
[ { "code": "node1_ip,node2_ip,node_ip/?replicaSet=dev-mongo-cluster", "text": "I have a replication set which is configured using private IPs, and we are able to access inside the VPC. All are fine,But when I tried to access via the Public IP of the replica set like node1_ip,node2_ip,node_ip/?replicaSet=dev-mongo-cluster then it is not working. There is network level issues(the port is opened to our IP address)But If I try to access a single node using Public IP without mentioning the replica set then it is working.\nAny idea how to resolve this?", "username": "Bhuvanesh_R" }, { "code": "", "text": "The IP addresses used to configure a replica set must be accessible from all the members of the replica set and all the clients connecting to the replica set.When a client connect to a replica set it reads the replica set configuration and establish connections to all members.When a client connect directly to a single node, the replica set configuration is not considered.", "username": "steevej" }, { "code": "", "text": "Hi @Bhuvanesh_R ,As @steevej mentioned, with a replica set connection clients use the hostnames listed in the replica set configuration per the Server Discovery and Monitoring (SDAM) specification implemented by official MongoDB drivers.The rationale for this behaviour is to support failover and zero-downtime reconfiguration, which are key features of a replica set connection. Establishing a connection to a single replica set member (aka a “direct connection”) does not include failover or discovery of the replica set configuration, so will work if you are port forwarding via a different inbound hostname/IP/port combination.If you want to establish a remote connection to a replica set hosted on a private network and take advantage of failover and automatic reconfiguration, the most straightforward approach would be to have your clients connect using via the VPC or a VPN.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Thanks for the 2 links.", "username": "steevej" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Access mongodb via External IP
2023-02-11T12:30:54.691Z
Access mongodb via External IP
1,613
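The one-liner worth running before any remote-access debugging of this kind - it prints the exact names every client must be able to resolve (the example output is illustrative, not from the thread):

```js
// In mongosh: the addresses SDAM hands to every connecting client.
rs.conf().members.map(m => m.host)
// e.g. [ 'node1.internal:27017', 'node2.internal:27017', 'node3.internal:27017' ]
```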
null
[ "aggregation" ]
[ { "code": " {\n \"BilledTo\": {\n \"Address\": {...},\n \"Name\": \"Steve's Rad Brokerage\"\n },\n ... inconsequential fields ...\n \"Payments\": [\n {\n \"AmountBillable\": 16630.0,\n \"AmountReceivable\": 13719.75,\n \"AmountReceived\": 0,\n \"BillDate\": \"2022-02-01\",\n \"DueDate\": \"2022-02-16\",\n \"Id\": \"an id\",\n \"Notes\": null\n }\n ]\n }\n{\n \"Steve's Rad Brokerage\": [\n A payment object,\n Another payment object\n ],\n \"Some Other BilledTo.Name\": [\n Yet another payment object\n ]\n}\n", "text": "Hi all,\nI’m having trouble with an easy to explain but thus far difficult to achieve (for me) aggregation task.\nI have a Bills collection that looks like this:What I’d like to do is group all the payments with BillDate = some date by the BilledTo.Name.\nFor example for 2022-02-01 I might get something like:I have gotten a good amount of the way there with unwind and a simple match, but getting the resulting payments to group by BilledTo.Name is turning my brain inside out.Any help would be much appreciated. Thanks!", "username": "Steve_Discenza" }, { "code": "match = { \"$match\" : {\n \"Payments.BillDate\" : \"2022-01-01\"\n} }\nfilter = { \"$set\" : {\n \"Payments\" : { \"$filter\" : {\n \"input\" : \"$Payments\" ,\n \"as\" : \"payment\" ,\n \"cond\" : { \"$eq\" : [ \"$$payment.BillDate\" , \"2022-02-01\" ] }\n } }\n} }\n{ \"$unwind\" : \"$Payments\" }\n{ \"$group\" : {\n \"_id\" : \"$BilledTo.Name\" ,\n \"Payments\" : { \"$push\" : \"$Payments\" } \n} }\n", "text": "My approach would be:A match stage forBillDate = some dateThe I would filter Payments withThe I would unwind the filtered Payments array withThe data is now ready for grouping using:You might need a final $project to produce the exact format you wish.I recommend you store your dates using the Date data type. It takes less spaces, it is faster and there is rich date specific API.", "username": "steevej" }, { "code": "", "text": "Thanks @steevej! This works great and has provided a nice framework to do a few fancier things with the data. FYI I am using the native date type, just easier to show as a string here ", "username": "Steve_Discenza" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Aggregating child objects within an array by root level data
2023-02-14T22:45:22.714Z
Aggregating child objects within an array by root level data
656
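Assembled into one call, the four stages above read as follows; the collection name bills is an assumption, and billDate is shown as a native Date per the recommendation in the thread:

```js
const billDate = new Date('2022-02-01'); // stored as a Date type, as recommended above

const byBilledTo = await db.collection('bills').aggregate([
  { $match: { 'Payments.BillDate': billDate } },
  { $set: { Payments: { $filter: {
      input: '$Payments',
      as: 'payment',
      cond: { $eq: ['$$payment.BillDate', billDate] },
  } } } },
  { $unwind: '$Payments' },
  { $group: { _id: '$BilledTo.Name', Payments: { $push: '$Payments' } } },
]).toArray();
// => [ { _id: "Steve's Rad Brokerage", Payments: [ ... ] }, ... ]
```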
null
[ "queries", "crud" ]
[ { "code": "[\n {\n _id: ObjectId(\"63ea2e2e376ea74c07e96205\"),\n sections: [\n {\n questionsIds: [\n \"mg7y0aluqkyui487\",\n \"nonmatchingvaluetoremove\"\n ],\n _id: ObjectId(\"63ea566e7dc0b332cb34c4b1\")\n }\n ],\n questions: [\n {\n value: \"mg7y0aluqkyui487\",\n _id: ObjectId(\"63ea2e28376ea74c07e961e8\"),\n },\n {\n value: \"00fbk5o57p9le49q\",\n _id: ObjectId(\"63ea2e28376ea74c07e961e9\"),\n }\n ],\n }\n]\nQuestionnaire.updateMany(\n\t{\n\t\tsections: { $exists: true, $ne: [] },\n\t\tquestions: { $exists: true, $ne: [] },\n\t},\n\t{\n\t\t$pull: {\n\t\t\t'sections.$[].questionsIds': {\n\t\t\t\t$nin: '$questions.id',\n\t\t\t},\n\t\t},\n\t},\n\t{ multi: true, timestamps: false }\n)\n\t.exec();\n", "text": "Hey thereHere Is some data from a collection named Questionnaire:I am trying to remove from questionsIds only the values that are not exact matching the property “value” from the object array questions.To do so i tried this:What happens is that all the elements are being removed from questionsIds, the matching and the non matching elements… Only the non matching ones should be removed.Any suggestions would be gladly welcome.Thanks", "username": "isaacHaxATon" }, { "code": "", "text": "Here is a playground doing what i want to achieve, I need to find a way to pass $questions.value instead of hardcoded ones: Mongo playground", "username": "isaacHaxATon" }, { "code": "filter = { \"$filter\" : {\n \"input\" : \"$$section.questionsIds\" ,\n \"as\" : \"id\" ,\n \"cond\" : { \"$in\" : [ \"$$id\" , \"$questions.value\" ] }\n} }\nmap = { \"$map\" : {\n \"input\" : \"$sections\" ,\n \"as\" : \"section\" ,\n \"in\" : { \"$mergeObjects\" : [\n \"$$section\" ,\n { \"questionsIds\" : filter }\n ] }\n} }\nQuestionnaire.updateMany(\n your_existing_query ,\n [ { \"$set\" : { \"sections\" : map } } ]\n)\nQuestionnaire.aggregate( [ { \"$set\" : { \"sections\" : map } } ] )\n", "text": "Using an aggregation pipeline with a $set in your updateOne you may do it with:Test doing an aggregation first before updating. Try firstand verify you get what you want.", "username": "steevej" } ]
Remove array elements based on values from another array located in the same model
2023-02-13T20:14:33.507Z
Remove array elements based on values from another array located in the same model
1,148
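For reference, the filter and map from the answer assembled into the single pipeline update (the array form requires MongoDB 4.2+), keeping the original query:

```js
await Questionnaire.updateMany(
  { sections: { $exists: true, $ne: [] }, questions: { $exists: true, $ne: [] } },
  [ // the array form makes this an aggregation-pipeline update
    { $set: {
      sections: { $map: {
        input: '$sections',
        as: 'section',
        in: { $mergeObjects: ['$$section', {
          questionsIds: { $filter: {
            input: '$$section.questionsIds',
            as: 'id',
            cond: { $in: ['$$id', '$questions.value'] }, // keep only ids present in questions
          } },
        }] },
      } },
    } },
  ],
).exec();
```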
https://www.mongodb.com/…_2_1024x952.jpeg
[ "node-js", "data-modeling", "mongodb-shell", "server", "installation" ]
[ { "code": "", "text": "I have mongo correctly installed but and the services are up and running but I cannot get into the mongo shell and the mongo command is not found\nScreenshot 2023-02-13 at 10.00.37 PM1920×1786 175 KB\nMy configuration file and installation process are here:-", "username": "Shahima_khan" }, { "code": "mongomongoshmongomongosh", "text": "The legacy mongo shell no longer ships with MongoDB. The new shell is mongosh, which from your screenshot I see you tried to use so I assume you have it installed.\nAnything you would have done with mongo, now you should be able to do with mongosh.", "username": "Massimiliano_Marcon" }, { "code": "mongosh", "text": "@Massimiliano_Marcon Hi!\nbut the mongosh command isn’t working as well. What can I do to solve this issue?Thank You for your attention to my problem!\nRegards.", "username": "Shahima_khan" }, { "code": "mongosh", "text": "part of your installation seems broken and your OS does not let you run mongosh. try uninstalling then reinstalling it.another possible reason would be the cpu architecture difference of your mac and the installation, namely being x64 versus arm64. so check which should you install.", "username": "Yilmaz_Durmaz" }, { "code": "", "text": "@Yilmaz_Durmaz\nI have a Mac M1 that is arm64 based architecture but regardless of that i have used “Homebrew” to install MongoDB Community and It automatically installs the version best suited for the mac.\nPlus when i manaully tried to do it I installed MongoDB , arm64 based package but it did not work doing that as well.But ill try uninstalling then reinstalling it.", "username": "Shahima_khan" }, { "code": "", "text": "@Yilmaz_Durmaz I reinstalled it but I’m still getting the same issue.", "username": "Shahima_khan" }, { "code": "", "text": "Since the process is killed after t starts, I thought it would be broken, but instead, it might be a security setting preventing it to run.check the “note” part few lines below this section of the installation guide: https://www.mongodb.com/docs/manual/tutorial/install-mongodb-on-os-x/#connect-and-use-mongodb", "username": "Yilmaz_Durmaz" }, { "code": "mongoshmongosh", "text": "I just tried on my M1 following the instructions here: https://www.mongodb.com/docs/manual/tutorial/install-mongodb-on-os-x/. mongosh is installed and works well.Can you clarify what it means that mongosh is not working? It doesn’t start?", "username": "Massimiliano_Marcon" }, { "code": "mongosh", "text": "she (he?) supplied only a screenshot, yet apparently mongosh starts but is “killed” by the OS before it actually runs (even printing the version) for some reason. Does anything come to your mind?", "username": "Yilmaz_Durmaz" }, { "code": "$HOME/.mongodb/mongosh", "text": "Not really. It’s worth checking in $HOME/.mongodb/mongosh if there are any log files.", "username": "Massimiliano_Marcon" }, { "code": ".zshrc", "text": "There were two main reasons why I was getting this error.1st there was a typo in the export path of brew in .zshrc file. Due to which I could not run even some node commands like nodemon.2nd I had to download Mongosh separately.Now everything works well.", "username": "Shahima_khan" }, { "code": "", "text": "@Yilmaz_Durmaz @Massimiliano_Marcon Thank you so much for your valuable time on my problem.Regards!", "username": "Shahima_khan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Mongosh ─╯ [1] 13127 killed mongosh. mongo ─╯ zsh: command not found: mongo
2023-02-13T16:27:41.922Z
Mongosh ─╯ [1] 13127 killed mongosh. mongo ─╯ zsh: command not found: mongo
5,586
null
[ "queries", "node-js" ]
[ { "code": "class Parameters extends Realm.Object<Parameters> {\n param_id?: string;\n param_value?: string;\n param_name?: string;\n\n static primaryKey = 'param_id';\n\n constructor(realm: Realm) {\n super(realm, {});\n }\n}\nclass Tags extends Realm.Object<Tags> {\n tag_name?: string;\n\n constructor(realm: Realm) {\n super(realm, {});\n }\n}\nclass Contacts extends Realm.Object<Contacts> {\n id?: string;\n whatsapp_availability?: string;\n projectId?: string;\n is_subscribed?: boolean;\n candidate_id?: string;\n contact_name?: string;\n phoneNumber?: string;\n created_at?: Date;\n tags?: Realm.List<Tags> | [];\n parameters?: Realm.List<Parameters> | [];\n\n static primaryKey = 'id';\n\n constructor(realm: Realm) {\n super(realm, {});\n }\n}\n\nrealm.write(() => {\n realm.delete(realm.objects('Parameters'))\n const contact = realm.create(\n 'Contacts',\n {\n whatsapp_availability: props.whatsapp_availability,\n projectId: props.projectId,\n id: props.id,\n is_subscribed: props.is_subscribed,\n candidate_id: props.candidate_id,\n contact_name: props.contact_name,\n phoneNumber: props.phoneNumber,\n created_at: new Date(props.created_at),\n parameters: props?.parameters,\n tags: props?.tags,\n },\n 'modified',\n );\n", "text": "I have contacts array with nested objects.When i try to insert to contacts table its not automatically getting inserted to parameters array useing below fn.ALso please guide me on how to retrieve the contacts list along with respective parameters, tags arrayThanks", "username": "Jose_K_J" }, { "code": "export const BookSchema = {\n name:'Book',\n primaryKey: 'id',\n properties: {\n id: 'string',\n name: 'string',\n tags: 'Tags[]',\n },\n};\n\nexport const TagSchema = {\n name:'Tags',\n primaryKey: 'id',\n properties: {\n id: 'string',\n label: 'string',\n },\n};\n// create\n{\n id: 'Tags-1676464592406',\n label: 'cartoon'\n}\n// create\n{ \n id: 'Books-1676464592406',\n label: 'web',\n tags: [{\n id: 'Tags-1676464592406',\n label: 'cartoon'\n}\n ]\n}\n Attempting to create an object of type 'Tags' with an existing primary key value ''Tags-1676464592406''.\n", "text": "I have a problemI have two realm objects:Page1 Manage tag informationA tag is added to page2.Page 2 Information of management bookWhen adding a tag to tags in page1, you will be promptedWhat method should I use to solve the problem", "username": "gao_xiaosong" } ]
Example of storing one-to-many relation data
2023-02-14T05:24:41.270Z
Example of storing one-to-many relation data
545
null
[]
[ { "code": "", "text": "Hello, Currently my mongodb have 4.4 version and JDK : 1.8.0_u121_12. how do i know if my application supports the mongodb upgrade 6.0 ?", "username": "Dhwani_Parikh_59684" }, { "code": "", "text": "This page has all of the drivers and the compatibility with certain MongoDB versions.", "username": "tapiocaPENGUIN" } ]
Mongodb upgrade from 4.4 to 6.0
2023-02-15T14:58:55.634Z
Mongodb upgrade from 4.4 to 6.0
541
null
[ "aggregation", "node-js", "crud", "mongoose-odm", "transactions" ]
[ { "code": "_id{\n _id: ObjectId,\n ok: Boolean,\n myArray: [\n {\n _id: ObjectId,\n user: ObjectId,\n createdAt: Date,\n }\n ]\n}\ndb.collection.findOneAndUpdate(\n { \n _id: ObjectId('63d6fd0a14953235d7880dfd'),\n 'myArray.$._id': ObjectId('63d6fbcc01ba646ee2e2b37f') // <-- Here\n },\n { $set: { ok: true } },\n {\n $sort: { 'myArray.createdAt': -1 }\n }\n);\n", "text": "Hi guys, I’m trying to update a document only if the _id of the latest element in the array is equal to the provided, considering the transaction needs to be atomic.(I’m using mongoose)Currently, I’m trying to do this:Does anyone know how to do this atomically?Thanks!", "username": "Matias_Lopez" }, { "code": "_id_id of the latest elementmyArrayok: true", "text": "Hello @Matias_Lopez ,Welcome to The MongoDB Community Forums! I notice you haven’t had a response to this topic yet - were you able to find a solution? If not then please confirm if my understanding of your use-case is correct. I believe you want to do the findOneAndUpdate() operation atomically.As per the documentation on Atomicity and TransactionsIn MongoDB, a write operation is atomic on the level of a single document, even if the operation modifies multiple embedded documents within a single document.So as you are updating just one document while using findOneAndUpdate() hence MongoDB will make sure the operation is atomic.Additionally, is your query working as expected? if not can you share below details for me to understand your use-case better?I’m trying to update a document only if the _id of the latest element in the array is equal to the providedRegards,\nTarun", "username": "Tarun_Gaur" }, { "code": "myArray_id_idDateStringNumbermyArraycreatedAtoktruemyArray", "text": "Hi @Tarun_Gaur! Yes, I confirm.I want to update the document’s root properties only if the latest element on myArray matches the provided _id. The important thing is trying to update something if the latest element of an array match with another thing, in this case, _id, but could be Date, String, Number, etc.I think the best strategy to do this is sorting myArray based on the createdAt key, for after the update stage setting the ok root property as true, but sort myArray is not a requirement.Regards!", "username": "Matias_Lopez" }, { "code": "db.collection.findOneAndUpdate(\n { \n _id: ObjectId('63d6fd0a14953235d7880dfd'),\n \"$expr\" : { \n \"$eq\" : [ { \"$last\" : \"myArray._id\" } , ObjectId(\"63d6fbcc01ba646ee2e2b37f\") ]\n } \n },\n { $set: { ok: true } },\n {\n $sort: { 'myArray.createdAt': -1 }\n }\n);\n", "text": "Something like the following should work.", "username": "steevej" } ]
Update document only if the latest element in object array match
2023-01-29T23:07:07.820Z
Update document only if the latest element in object array match
1,059
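For reference, a small self-contained mongosh demo of the pattern from the accepted answer, using throwaway data (field names from the thread, values invented for illustration):

```js
db.test.insertOne({
  _id: 1,
  ok: false,
  myArray: [
    { _id: 100, createdAt: ISODate("2023-01-01") },
    { _id: 200, createdAt: ISODate("2023-01-02") }, // latest element
  ],
});

// Matches only when the LAST element of myArray has _id 200,
// and flips ok to true in the same single-document atomic operation.
db.test.findOneAndUpdate(
  { _id: 1, $expr: { $eq: [{ $last: "$myArray._id" }, 200] } },
  { $set: { ok: true } },
  { returnDocument: "after" }
);
```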
null
[ "aggregation", "queries" ]
[ { "code": "{ \n_id,\nsupplierId: { type: Schema.Types.ObjectId, ref: 'Supplier', required: true}\nUOMs: [\n {\n uomCode: { type: Schema.Types.ObjectId, ref: 'UOM', required: true },\n packSize: { type: Number, required: true },\n //other fields\n }\n// other fields\n],\n}\n{\n_id: UOM_id,\nisActual: { type: Boolean, required: true },\nwidth: { type: Number }\n//other fields\n}\nconst aggregateArray = [\n {\n $match: itemQuery\n },\n {\n $unwind: {\n path: '$UOMs'\n }\n },\n {\n $lookup: {\n from: 'inv_uoms',\n localField: 'UOMs.uomCode',\n foreignField: '_id',\n let: { id: '$UOMs._id' },\n pipeline: [\n {\n $match: {\n $expr: {\n $and: [ { $eq: [ '$$id', queryCode ] }, { $eq: [ '$isActual', true ] } ]\n }\n }\n }\n ],\n as: 'UOMs.uomCode'\n }\n },\n {\n $project: {\n supplierId: 1,\n UOMs: 1\n //other fields\n }\n }\n];\n\nconst items = await Item.aggregate(aggregateArray);\n", "text": "Hi everyone! I need help with writing an aggregation query.Items collection:UOM collection:What I intend to do:\nI receive a query with supplierId in a header and a queryCode as a query parameter, and filter it using $match by supplierId. Then I want to populate the uomCode field in every object in the UOMs array from the UOM collection, $unwind the items based on the UOMs array and filter the result: select only unwound items with isActual: true and UOM_id === queryCode. I hope this is possible.My current query looks like this:Currently I receive an array of items with UOMs as an array containing one populated object, but uomCode is an EMPTY array. What am I doing wrong?Thanks for your help!", "username": "Eugene_G" }, { "code": "", "text": "One error that is often made is that the query value is passed as a string, so it does not match the stored value, which is an ObjectId. You might simply need to create an ObjectId from queryCode.", "username": "steevej" } ]
Write an aggregation with $match and $unwind
2023-02-14T11:37:11.949Z
Write an aggregation with $match and $unwind
417
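Building on the answer, a hedged sketch of how that sub-pipeline usually ends up looking once both common mistakes are fixed: variables declared in let are referenced with $$, and a string queryCode is converted to an ObjectId before comparing. Names follow the thread, but the ObjectId value is a placeholder, and this is not a verified fix for the poster's data:

```js
const queryCode = ObjectId("63f0a1b2c3d4e5f6a7b8c9d0"); // hypothetical; convert the incoming string first

db.items.aggregate([
  { $unwind: "$UOMs" },
  {
    $lookup: {
      from: "inv_uoms",
      let: { uom: "$UOMs.uomCode" },
      pipeline: [
        {
          $match: {
            $expr: {
              $and: [
                { $eq: ["$_id", "$$uom"] },   // join condition; $$ for let variables
                { $eq: ["$_id", queryCode] }, // keep only the requested UOM
                { $eq: ["$isActual", true] },
              ],
            },
          },
        },
      ],
      as: "UOMs.uomCode",
    },
  },
]);
```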
null
[ "queries" ]
[ { "code": "find().min().max().hint(index)find().min({ f1: 10000, f2: 10000 })\n .max({ f1: 100000, f2: 1000000 })\n .skip(numbers have been read)\n .hint({ f1: 1, f2: 1})\nfind().min({ f1: 10000, f2: 10000 })\n .max({ f1: 100000, f2: 1000000 })\n .skip(numbers have been read)\n .hint({ f1: 1, f2: 1})\n .sort({ f1: 1, f2: 1}) // Is this sort necessary?\n", "text": "In some scenarios, we need to use find().min().max().hint(index) to specify index bounds for a query.When the data set is relatively large, the read may not complete in one pass. If a read fails partway due to an error, we need to resume the cursor without re-reading the data that was already read.A simple way is to record the number of records that have been read, then reopen the cursor and skip that many records.For this to work properly, though, MongoDB needs to return records in the exact same order for every query.\nI would like to confirm: does cursor.hint(index) always return the data in the same order, or do we need to explicitly declare a sort() to ensure the same order?", "username": "Jiabao" }, { "code": "", "text": "The only thing that guarantees the order of documents is sort().", "username": "steevej" } ]
Does cursor.hint(index) always return the data in the same order?
2023-02-14T17:28:37.158Z
Does cursor.hint(index) always return the data in the same order?
371
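Since only sort() guarantees order, a resumable scan usually combines the hinted index with an explicit sort on the same keys. Rather than counting documents and using skip(), it is often more robust to remember the last key read and resume with a range predicate, which stays on the same index. A sketch under those assumptions:

```js
// Deterministic order: sort on the same keys as the hinted index.
let last = null; // persist { f1, f2 } of the last document processed

const resumeFilter = last
  ? { $or: [{ f1: { $gt: last.f1 } }, { f1: last.f1, f2: { $gt: last.f2 } }] }
  : {};

db.coll
  .find(resumeFilter)
  .sort({ f1: 1, f2: 1 })
  .hint({ f1: 1, f2: 1 })
  .forEach((doc) => {
    // ... process doc ...
    last = { f1: doc.f1, f2: doc.f2 };
  });
// After a failure, rerun with the saved `last`; no skip() and no double reads.
```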
null
[ "indexes" ]
[ { "code": "findOneuserIduserAccessCodeuserIduserAccessCodedb.collection.createIndex({ userId: 1, userAccessCode: 1 }){\n userId: \"someid\" // string,\n userAccessCode: \"adcdsdfdf\" // string,\n date: 'currentDate',\n // Other fields...\n}\n", "text": "Hello there,I’m trying to optimize my MongoDB collection for queries made from a service using the findOne method. Specifically, I need to query based on both the userId and userAccessCode fields, where each user can have up to 32 associated codes. The userId and userAccessCode properties are both saved as strings in the database. So each user can have up to 32 documents.To improve query performance, I’ve created an index on these fields using the following code: db.collection.createIndex({ userId: 1, userAccessCode: 1 }). However, I haven’t noticed any performance improvements yet, and I’m wondering if there is a better type of index to use or if I’ve done everything correctly so far.Here’s an example of what a document in my collection might look like:Thanks in advance ", "username": "Ashvin_Pal" }, { "code": "db.collection.find({\"userId\":\"someid\", \"userAccessCode\": \"adcdsdfdf\"}).explain(\"executionStats\")\n", "text": "If you can access the MongoDB shell, run the query you are using and add .explain(\"executionStats\") on the end; posting the output here will be the best way to figure out what’s going on during the query.If you could post the results of the explain that would be great ", "username": "tapiocaPENGUIN" }, { "code": "", "text": "I haven’t noticed any performance improvements yetCompared to what? Is your query slow? How many userIds do you have? What are your collection sizes?", "username": "steevej" } ]
Confused on the type of index to use
2023-02-14T18:48:51.689Z
Confused on the type of index to use
938
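A quick mongosh sketch of the check suggested above. The interesting fields in the output are the winning plan's stage (IXSCAN rather than COLLSCAN) and how close totalDocsExamined is to nReturned:

```js
db.collection.createIndex({ userId: 1, userAccessCode: 1 });

const plan = db.collection
  .find({ userId: "someid", userAccessCode: "adcdsdfdf" })
  .limit(1)
  .explain("executionStats");

// A healthy plan for this query shape shows an IXSCAN on
// { userId: 1, userAccessCode: 1 } and examines ~1 document.
printjson(plan.executionStats);
```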
null
[ "data-modeling" ]
[ { "code": "", "text": "Hi there,\nI’m in the early stages of my journey so please feel free to fix my flows.I have a sale tracker app which will have users, and each user can generate thousands of sale documents in a sales collection. I’ve read about data modelling options on the MongoDB blogs as much as I can understand and found a possible solution, but I’m not sure if it’s applicable or not. I’m going to create a new sales collection for each user and store the name of the collection in each user’s document. I’ll generate the collection name as {userId}_sales and store it with the user. So whenever a user logs in, the application will look for that collection and call it. My app is like a cashier: users submit their sales and track the financial performance, so it’s heavy on both read and write. What do you think about this approach? Is there any better way you can suggest?", "username": "Sezen_Cetin" }, { "code": "", "text": "Generally, I don’t see any issue with this approach. But since you are not explaining your query patterns, we won’t be able to know if this is best or not.", "username": "Kobe_W" }, { "code": "", "text": "Since yesterday I’ve tried to implement this approach in my code, but it all ended in more failure. Right now it’s not possible for me to use the Node.js driver to create a collection whenever a user creates an account. The way I’ve learned it, I need the name of the collection to create the MongoDB connection and route. It’s best if I learn to create a healthy connection before overthinking whether my database could slow down after thousands of updates ", "username": "Sezen_Cetin" }, { "code": "", "text": "Hey @Sezen_Cetin,Welcome to the MongoDB Community Forums! Generally, when designing a schema in MongoDB, one follows a rule of thumb that “things that are queried together should stay together”. Some other things to consider given your use case: for instance, a massive number of collections comes with its own performance and maintenance costs. Hence, as you have noticed, it’s hard to say without more information whether the approach you described would be the best one for your use case or not. But one great thing about schema designing in MongoDB is its flexible nature. One can easily evolve their schema with time with little or almost no downtime. It would be good if you start with identifying your queries - and then based on that consider how you want to define your schema. I would suggest you try to experiment with different schema design ideas and try to simulate the expected workload, and see how the design behaves with more and more data. mgeneratejs is a great tool to create random documents for testing purposes.I am also attaching a MongoDB Blog that you can refer to MongoDB Schema Design Best Practices. You can also read about different patterns in MongoDB here: Building with Patterns. Also, if you’re new to data modelling in MongoDB, it might be worthwhile to check out our University Course: Introduction to Data ModellingPlease let us know if there are any more questions about this. Feel free to reach out for anything else as well.Regards,\nSatyam", "username": "Satyam" }, { "code": "", "text": "Massive number of Collections. Thanks a lot, the sources you refer to will address all the struggles I’m having.", "username": "Sezen_Cetin" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Suggestion for data modelling for big heavy read-write collection
2023-02-12T13:57:17.256Z
Suggestion for data modelling for big heavy read-write collection
646
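For contrast with the collection-per-user idea, the usual starting point that follows "things queried together stay together" is a single sales collection keyed by user, with a compound index covering the common query. A hedged sketch with illustrative names:

```js
// One collection for all users' sales; the index serves
// "this user's sales, newest first" without per-user collections.
db.sales.createIndex({ userId: 1, saleDate: -1 });

db.sales.insertOne({
  userId: "user123", // hypothetical id
  saleDate: new Date(),
  amount: 42.5,
  items: [{ sku: "A1", qty: 2 }],
});

db.sales.find({ userId: "user123" }).sort({ saleDate: -1 }).limit(50);
```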
null
[ "golang" ]
[ { "code": "", "text": "In our project, we ran into a problem. When the tokens collection was created it was the only type of tokens in our systems, and that didn’t cause any issues, but now we have internal tokens and external tokens (the old tokens collection), which adds confusion. We want to rename and refactor the old tokens collection to external tokens, and rename the collection during migration, not by hand. But it seems the Go driver for MongoDB does not support this functionality. Is there a way to do this?", "username": "Mantas_Silanskas" }, { "code": "renameCollectionrenameCollectionmydb.tokensmydb.external_tokens// renameCollection must be run against the \"admin\" database,\n// even to rename collections in other databases.\ndb := client.Database(\"admin\")\n\nerr := db.RunCommand(context.Background(), bson.D{\n\t{\"renameCollection\", \"mydb.tokens\"},\n\t{\"to\", \"mydb.external_tokens\"},\n}).Err()\nif err != nil {\n\tlog.Print(\"Error:\", err)\n}\nrenameCollection", "text": "Hey @Mantas_Silanskas welcome and thanks for the question. To run administrative commands like renameCollection with the Go driver, you can use the RunCommand method with a renameCollection command.E.g. to rename mydb.tokens to mydb.external_tokens:Check out the full documentation on the renameCollection command here:", "username": "Matt_Dale" }, { "code": "// renameCollection must be run against the \"admin\" database,\n// even to rename collections in other databases.\ndb := client.Database(\"admin\")\n", "text": "Hello @Matt_Dale and thank you for a fast and great answer.I just wanted to ask: is there a way to do this without the following, as most of our clients won’t have access to the admin database?", "username": "Mantas_Silanskas" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Is there possible to rename collection function for golang driver?
2023-02-14T13:04:48.621Z
Is there possible to rename collection function for golang driver?
1,097
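The follow-up about clients without admin access went unanswered in the thread. One workaround, when a client only has rights on its own database, is to copy the collection with an aggregation $out and drop the original. Unlike renameCollection this is not atomic and rewrites all the data, so it is only a sketch of an alternative, shown in mongosh (the Go driver can run the same pipeline via Collection.Aggregate):

```js
// Copy tokens -> external_tokens inside the same database.
db.tokens.aggregate([{ $out: "external_tokens" }]);

// Writes arriving between the copy and the drop would be lost,
// so quiesce writers first.
db.tokens.drop();

// $out to a new collection does not copy secondary indexes; recreate them.
```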
null
[]
[ { "code": "", "text": "Hi,I am executing a query on the primary and then on the secondary, and the order (sequence) of the results is different.\nWhen I execute the query with an _id sort, the order is the same.\nIs there any way we can get the same query result order without sorting on the _id field?", "username": "Aayushi_Mangal" }, { "code": "", "text": "What is your query like?", "username": "Kobe_W" }, { "code": "", "text": "Hi Kobe,this is my sample query:mongos> db.products.find().readPref(“primary”)\n{ “_id” : 10, “item” : “1”, “qty” : 20 }\n{ “_id” : 11, “item” : “2”, “qty” : 20 }\n{ “_id” : 12, “item” : “3”, “qty” : 20 }\nmongos> db.products.find().readPref(“secondary”)\n{ “_id” : 10, “item” : “1”, “qty” : 20 }\n{ “_id” : 12, “item” : “3”, “qty” : 20 }\n{ “_id” : 11, “item” : “2”, “qty” : 20 }Here the order is getting changed", "username": "Aayushi_Mangal" }, { "code": "", "text": "If no sort condition is specified, natural ordering is used by MongoDB.This is an internal implementation detail, so relying on this order is almost always bad practice. You should explicitly use a sort (e.g. on _id). Otherwise the order of returned docs is nondeterministic.", "username": "Kobe_W" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Reading from primary and secondary changing the order of the documents of the same query
2023-02-15T05:00:55.175Z
Reading from primary and secondary changing the order of the documents of the same query
525
null
[ "queries" ]
[ { "code": "", "text": "I inserted a record with a function.test > db.testing.insertOne({name:‘testfunc’, sayhi: function(){ console.log(“hi”) } } )I would like to execute this function, but it is not working.test> db.testing.findOne({name:‘testfunc’}).sayhi()\nTypeError: db.testing.fi … unc’}).sayhi is not a functionHow can I execute the Code(…)?", "username": "Marv" }, { "code": "", "text": "First time seeing this. Does MongoDB support such a feature?", "username": "Kobe_W" } ]
How can I execute Code(...) in a document?
2023-02-15T02:50:58.410Z
How can I execute Code(&hellip;) in a document?
644
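What got stored above is a BSON Code value; the shell and drivers return it as data, not as a callable function, which is why findOne(...).sayhi() fails. If server-side JavaScript is enabled on the deployment, one hedged alternative is to keep the logic in the query itself with the aggregation $function operator (MongoDB 4.4+) instead of storing functions in documents:

```js
db.testing.aggregate([
  { $match: { name: "testfunc" } },
  {
    $addFields: {
      greeting: {
        $function: {
          body: function (name) { return "hi " + name; }, // runs server-side
          args: ["$name"],
          lang: "js",
        },
      },
    },
  },
]);
```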
https://www.mongodb.com/…0008274d1a11.png
[ "queries" ]
[ { "code": "", "text": "Hello Team,We have started the mongodb services with this command (mongod --dbpath path…)\n\nimage671×589 42.6 KB\nPlease find below error logs for issue while uploading data through mongodb, Please help.\nConnection fails: MongoNetworkError: failed to connect to server [localhost:27017] on first connect [MongoNetworkTimeoutError: connection timed out\nat connectionFailureError (D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\connect.js:362:14)\nat Socket. (D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\connect.js:330:16)\nat Object.onceWrapper (node:events:627:28)\nat Socket.emit (node:events:513:28)\nat Socket._onTimeout (node:net:568:8)\nat listOnTimeout (node:internal/timers:564:17)\nat process.processTimers (node:internal/timers:507:7)]\nIt will be retried for the next request.\n[2023-02-09T05:21:04.908Z] ERROR: oe-logger on DESKTOP-TP7LI63: (boot-db-models, system)\nmodelDefinition.findOne name=\" User \" Error: MongoNetworkError: failed to connect to server [localhost:27017] on first connect [Error: connect ECONNREFUSED ::1:27017\nat TCPConnectWrap.afterConnect [as oncomplete] (node:net:1487:16) {\nname: ‘MongoNetworkError’\n}]\nat Pool. (D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\topologies\\server.js:441:11)\nat Pool.emit (node:events:513:28)\nat D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\pool.js:564:14\nat D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\pool.js:1000:11\nat D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\connect.js:32:7\nat callback (D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\connect.js:300:5)\nat Socket. (D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\connect.js:330:7)\nat Object.onceWrapper (node:events:628:26)\nat Socket.emit (node:events:513:28)\nat emitErrorNT (node:internal/streams/destroy:151:8)\nat emitErrorCloseNT (node:internal/streams/destroy:116:3)\nat process.processTicksAndRejections (node:internal/process/task_queues:82:21)\n(node:852) MaxListenersExceededWarning: Possible EventEmitter memory leak detected. 17 connected listeners added to [DataSource]. Use emitter.setMaxListeners() to increase limit\n(node:852) MaxListenersExceededWarning: Possible EventEmitter memory leak detected. 17 error listeners added to [DataSource]. Use emitter.setMaxListeners() to increase limit\nUnhandled rejection MongoNetworkError: failed to connect to server [localhost:27017] on first connect [Error: connect ECONNREFUSED ::1:27017\nat TCPConnectWrap.afterConnect [as oncomplete] (node:net:1487:16) {\nname: ‘MongoNetworkError’\n}]\nat Pool. (D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\topologies\\server.js:441:11)\nat Pool.emit (node:events:513:28)\nat D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\pool.js:564:14\nat D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\pool.js:1000:11\nat D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\connect.js:32:7\nat callback (D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\connect.js:300:5)\nat Socket. 
(D:\\SpeedUp\\node_modules\\mongodb\\lib\\core\\connection\\connect.js:330:7)\nat Object.onceWrapper (node:events:628:26)\nat Socket.emit (node:events:513:28)\nat emitErrorNT (node:internal/streams/destroy:151:8)\nat emitErrorCloseNT (node:internal/streams/destroy:116:3)\nat process.processTicksAndRejections (node:internal/process/task_queues:82:21)We have tried every solution given on the community forum.\nMongoDB version: 5.0.14\nOS: Windows 10Any help would be greatly appreciated.", "username": "Kratika_Dete1" }, { "code": "", "text": "If your mongod is already running as a Windows service there is no need to start another mongod\nCheck whether you can connect by shell and investigate why it is failing with your app", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Thanks Ramachandra sir for your assistance,\nWe have tried in the shell and tried every possible thing, still no luck, but it is working fine on Mac.\nIt would be greatly appreciated if you could extend your help a little bit.", "username": "Kratika_Dete1" }, { "code": "", "text": "Normally you get connection refused if no process is actively listening on that port.\nSo make sure the mongodb process is indeed listening on localhost:27017.You can use something like netstat to verify that.", "username": "Kobe_W" }, { "code": "", "text": "Show us the output of mongo/mongosh (depending on your MongoDB version) from the Windows command prompt as a screenshot", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Thanks, Ramachandra\nPlease find attached screenshots.\nimage985×594 35.4 KB\n", "username": "Kratika_Dete1" }, { "code": "", "text": "", "username": "Kratika_Dete1" }, { "code": "", "text": "I don’t see any issue\nIn your first snapshot you are connected to mongodb\nJust run some commands like\ndb\nshow dbs\nThe command you ran in the second snapshot is not correct\nEither you should use mongo or mongosh depending on what is installed/available on your machine", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Hello Team,Many thanks for your support \nIssue resolved by replacing the Node version with v16.14.2", "username": "Kratika_Dete1" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Connection refused ( Error: connect ECONNREFUSED ::1:27017 )
2023-02-09T06:12:01.934Z
Connection refused ( Error: connect ECONNREFUSED ::1:27017 )
2,759
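Worth noting for anyone landing on the same trace: connect ECONNREFUSED ::1:27017 means Node resolved localhost to the IPv6 loopback while mongod was listening on IPv4 only (newer Node versions prefer IPv6). The poster's fix was a Node downgrade, but pinning the IPv4 loopback is the usual workaround; a minimal sketch:

```js
const { MongoClient } = require("mongodb");

async function main() {
  // 127.0.0.1 instead of "localhost", which may resolve to ::1
  const client = new MongoClient("mongodb://127.0.0.1:27017/mydb");
  await client.connect();
  console.log("connected");
  await client.close();
}

main().catch(console.error);
```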
null
[ "atlas-search", "atlas" ]
[ { "code": "{\n _id: 'bson_id',\n name: 'Jon',\n groups: {\n ids: [101, 104, 105],\n names: ['Sales UK', 'Head of Sales', 'UK people'],\n count: 3\n }\n}\n{\n _id: 'bson_id',\n name: 'James',\n groups: {\n ids: [234, 105, 110],\n names: ['Sales US', 'Head of Marketing', 'US people'],\n count: 3\n }\n}\n[\n {\n '$search': {\n 'compound': {\n 'filter': [\n {\n 'text': {\n 'path': 'groups.ids',\n 'query': [101, 105, 110] # not accepted\n }\n }\n ]\n }\n }\n }\n]\nRemote error from mongot :: caused by :: \"compound.filter[0].text.query[0]\" must be a string\n{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"enrolled_school_teams\": {\n \"fields\": {\n \"names\": {\n \"analyzer\": \"lucene.keyword\",\n \"searchAnalyzer\": \"lucene.keyword\",\n \"type\": \"string\"\n }\n },\n \"type\": \"document\"\n }\n }\n }\n}\n", "text": "Hi all,\nData - Consider there is a User collection having documents as follows:-There is user information with the details of the groups they are part of. E.g. Jon is part of the Sales UK group, which has ID 101 in the Groups collection.Questions:I want to get users which belong to 101, 105, 110 groups. But it is giving an error.If I change this integer array to a string array it is working.\nSo is it not possible to query on an integer array, or is it not supported in $search?It is now not showing any data for ‘Sales’. But it is also not showing any results when the ‘Sales UK’ term is searched as well. I suppose the keyword analyzer will not work with strings having spaces.\nSo what could be the possible solution for this?\nWill any changes be required in terms of indexing or in the search query?", "username": "Viraj_Chheda" }, { "code": "enrolled_school_teams groups", "text": "Hey @Viraj_Chheda,I want to get users which belong to 101, 105, 110 groups.So you need to find all documents that have all three of these group IDs, or at least one of them? The equals operator might suit your use case. Please check and let us know if this helps or if you’re looking for something else.It is now not showing any data for ‘Sales’. But it is also not showing any results when the ‘Sales UK’ term is searched as well. I suppose the keyword analyzer will not work with strings having spaces.Can you please share your query and the output that you are expecting along with it? Also, I noticed that in the index definition you shared, it says enrolled_school_teams while in the sample document, the field name is groups. Kindly check if all field names used are correct or not.Regards,\nSatyam", "username": "Satyam" }, { "code": "groupsenrolled_school_teams", "text": "Thanks for the response, @Satyam.So you need to find all documents that have all three of these group IDs, or at least one of them? The equals operator might suit your use case. Please check and let us know if this helps or if you’re looking for something else.Yes, but if there are many ids then that many equals blocks will be required, right?Can you please share your query and the output that you are expecting along with it? Also, I noticed that in the index definition you shared, it says enrolled_school_teams while in the sample document, the field name is groups. Kindly check if all field names used are correct or not.My bad, it is groups and not enrolled_school_teams.\nIt is working as expected.", "username": "Viraj_Chheda" }, { "code": "valuecompoundequals", "text": "Hey @Viraj_Chheda,Yes, but if there are many ids then that many equals blocks will be required, right?The value field does not take an array of numbers. 
You can try using the compound with equals and see if they can help you in your use case. You can read more about these from the documentation:\nCompound\nEqualsRegards,\nSatyam", "username": "Satyam" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to query on object stored as hash in $search
2023-02-10T09:07:03.230Z
How to query on object stored as hash in $search
1,716
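If building one equals clause per id is awkward, a hedged workaround for exact numeric matches is the range operator with identical bounds, which accepts numbers; one clause per id under should acts like an OR. This assumes groups.ids is indexed as a number type (dynamic mapping does this), and it is a sketch rather than a confirmed recommendation:

```js
const ids = [101, 105, 110];

db.users.aggregate([
  {
    $search: {
      index: "fulltext", // index name from the thread
      compound: {
        should: ids.map((id) => ({
          range: { path: "groups.ids", gte: id, lte: id }, // exact match on a number
        })),
        minimumShouldMatch: 1,
      },
    },
  },
]);
```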
null
[ "spring-data-odm" ]
[ { "code": "", "text": "I am using MongoDB as the database for my Spring Boot application.I wanted to split the data into 2 collections - primary and auxiliary - just like how we do it in SQL with a primary and secondary table.Can I do the above and use them in my Spring Boot application?", "username": "Sanjay_Naik" }, { "code": "", "text": "Hi @Sanjay_Naik and welcome back to the MongoDB community forum!!I wanted to split the data into 2 collections - primary and auxiliary - just like how we do it in SQL with a primary and secondary table.To understand the requirement in more detail, it would be helpful if you could share what the primary and auxiliary tables are in SQL. Are these two identical tables created for fault tolerance, or are these two tables with data divided between them for faster retrieval?Best Regards\nAasawari", "username": "Aasawari" } ]
How to split data into 2 collections in Mongo DB and access the same in SpringBoot
2023-02-08T15:49:17.810Z
How to split data into 2 collections in Mongo DB and access the same in SpringBoot
1,135
null
[ "cluster-to-cluster-sync" ]
[ { "code": "mongosyncmongosync", "text": "Hello,I was looking for solutions to synchronise some collections from the database PRODUCTION in the cluster PRODUCTION to the database STAGING in the cluster STAGING. My research leads me to mongosync which seems to be the best solution for my case. Is it possible to exclude some collections with mongosync?Thanks in advance!", "username": "Tom_Boscher" }, { "code": "", "text": "Hi Tom,As of the most recent release of mongosync - v1.1 (current) - we support namespace filtering:https://www.mongodb.com/docs/cluster-to-cluster-sync/current/reference/collection-level-filtering/ ", "username": "Alexander_Komyagin" } ]
Exclude some collections with mongosync
2022-12-15T10:26:35.267Z
Exclude some collections with mongosync
1,832
null
[ "replication", "python", "compass" ]
[ { "code": "will not run compact on an active replica set primarydb.runCommand ( { compact: 'collection_name'} )>>> db.command({\"compact\": collection})\n...\n...\npymongo.errors.OperationFailure: will not run compact on an active replica set primary as this is a slow blocking operation. use force:true to force, full error: {'ok': 0.0, 'errmsg': 'will not run compact on an active replica set primary as this is a slow blocking operation. use force:true to force', 'operationTime': Timestamp(1676176471, 1), '$clusterTime': {'clusterTime': Timestamp(1676176471, 1), 'signature': {'hash': b'\\x7f\\xe9\\x18w\\x86\\xb41\\r[\\x8a\\x8fT;\\xe3\\x8c\\xf5\\x92\\xac\\x04\\xd1', 'keyId': 7194618855627423746}}}\n", "text": "I’m trying to run a compact command using pymongo but it returns will not run compact on an active replica set primary, even though the node I’m trying to run it on is a SECONDARY node. When I connect to the node using a db client app (MongoDB Compass), I’m able to run the command without force (db.runCommand ( { compact: 'collection_name'} )) just fine; there, this error only occurs on the primary node. So how come I am getting this error on a secondary node in pymongo?Please help me understand the root cause. This is frustrating!", "username": "Ercin_Demir" }, { "code": "directConnection=True", "text": "Hi @Ercin_Demirpymongo 4.0+ defaults to auto-discovery and connection to the replica set. For these operations you want to connect directly to the secondary. Add directConnection=True as a connection parameter and see if it starts to work for you.", "username": "chris" }, { "code": "", "text": "This worked! Thanks very much @chris", "username": "Ercin_Demir" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Can't run compact command on secondary nodes (will not run compact on an active replica set primary)
2023-02-12T14:53:50.014Z
Can&rsquo;t run compact command on secondary nodes (will not run compact on an active replica set primary)
1,658
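The same fix expressed as a connection string, which applies to any driver, not just pymongo:

```js
// directConnection pins the client to the seed host instead of
// discovering the replica set and routing operations to the primary.
const uri = "mongodb://user:pass@secondary-host:27017/?directConnection=true";

// Connected directly to the secondary (e.g. in mongosh), maintenance
// commands then run against that node:
// db.runCommand({ compact: "collection_name" })
```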
null
[]
[ { "code": "{$concat: ['$','string']}{$concat: ['\\\\$','string']}{ $replaceOne: { input: {$concat: ['\\\\$',\"$Specification.name\"]}, find: '\\\\', replacement: '' } }", "text": "Hi,\njust an observation: I needed to add a “$” character to a string in a project clause to create some self-generating code.\n{$concat: ['$','string']} does not work and I did not find a way to escape the $ character,\nbut I did find that {$concat: ['\\\\$','string']} produced ‘\\$string’ and { $replaceOne: { input: {$concat: ['\\\\$',\"$Specification.name\"]}, find: '\\\\', replacement: '' } } produced the required “$string”.\nOr am I missing a simpler way of doing this?", "username": "Ido_Lelie" }, { "code": "$literal$> db.myCollection.aggregate([\n\t{ $project: {\n\t\tmyString: {\n\t\t\t$concat: [ { $literal: \"$\"}, \"string\" ]\n\t\t}\n\t}}\n])\n[ { _id: ObjectId(\"63ec193b85a71b2dc61637c7\"), myString: '$string' } ]\n", "text": "Welcome to the MongoDB community @Ido_Lelie!You can use the $literal expression to avoid evaluating values like those containing a $:Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Working with '$' as a character
2023-02-14T22:46:48.251Z
Working with &lsquo;$&rsquo; as a character
429
null
[ "flexible-sync" ]
[ { "code": "import Foundation\nimport RealmSwift\nimport Realm\n\nclass RealmManager: ObservableObject {\n private(set) var syncedRealm: Realm?\n @ObservedResults(Cycle.self, sortDescriptor: \"startDate\") var cycles\n @ObservedResults(Day.self, sortDescriptor: \"date\") var days\n let app = App(id: \"redacted\")\n var loggedInUser: User? = nil\n \n init() {\n print(Realm.Configuration.defaultConfiguration.fileURL as Any)\n Task.init {\n try await login()\n if let user = loggedInUser {\n try await openSyncedRealm(user: user)\n }\n }\n }\n \n @MainActor\n func login() async throws {\n do{\n let user = try await app.login(credentials: Credentials.anonymous)\n print(\"Successfully logged in user: \\(user) with id : \\(user.id)\")\n self.loggedInUser = user\n } catch {\n print(\"Failed to log in user: \\(error)\")\n }\n }\n \n @MainActor\n func openSyncedRealm(user: User) async throws {\n do {\n var config = user.flexibleSyncConfiguration()\n config.objectTypes = [Cycle.self, Day.self]\n let realm = try await Realm(configuration: config, downloadBeforeOpen: .never)\n let subscriptions = realm.subscriptions\n try await subscriptions.update {\n if let currentSubscription = subscriptions.first(named: \"cycles\") {\n print(\"Replacing subscription for cycles\")\n currentSubscription.updateQuery(toType: Cycle.self)\n } else {\n print(\"Appending subscription for cycles\")\n subscriptions.append(QuerySubscription<Cycle>(name: \"cycles\"))\n }\n if let currentSubscription = subscriptions.first(named: \"days\") {\n print(\"Replacing subscription for days\")\n currentSubscription.updateQuery(toType: Day.self) { day in\n day.ownerID == user.id\n }\n } else {\n print(\"Appending subscription for days\")\n subscriptions.append(QuerySubscription<Day>(name: \"days\") { day in\n day.ownerID == user.id\n })\n }\n }\n self.syncedRealm = realm\n print(\"Successfully opened realm\")\n } catch {\n print(\"Error opening realm: \\(error.localizedDescription)\")\n }\n }\n", "text": "I’m having some trouble integrating my Realm-based iOS app with Device Sync. I’ve confirmed that I’m writing to the synced realm successfully as I can see the results on the Mongo DB dashboard, but my subscriptions don’t seem to be working. cycles is always empty, even immediately after adding a cycle to the realm. Not sure if my schema or something else on the backend could be a problem but any help would be greatly appreciated as I’m at my wit’s end.syncedRealm.subscriptions.count is 2 as expected.\npermissions are set such that all users can read and write all data.", "username": "Violeta_Druga" }, { "code": "https://realm.mongodb.com/groups/${}/apps/${}", "text": "Hi, can you send the link to your app. It will look like https://realm.mongodb.com/groups/${}/apps/${}? I can see if there is anything interesting going on. Is there data in your cluster that you would expect to be send back for cycles?", "username": "Tyler_Kaye" }, { "code": "", "text": "Hi Tyler, here’s the link: App ServicesYes, in Collections under the cluster I can see cycle objects written by the client that should be getting sent back", "username": "Violeta_Druga" }, { "code": "@ObservedResults", "text": "Turns out @ObservedResults only works inside views in SwiftUI. 
Would have been tremendously helpful if this was mentioned in the documentation.### How frequently does the bug occur?\n\nAll the time\n\n### Description\n\nI would like to put my `@ObservedResults` variable in my view model (which is an `ObservableObject`) instead of putting it directly inside the SwiftUI view. When I do this, my variable is correctly populated but the view is not updated anymore.\n\n### Stacktrace & log output\n\n_No response_\n\n### Can you reproduce the bug?\n\nYes, always\n\n### Reproduction Steps\n\nPut the `@ObservedResults` inside a view model instead of the view directly:\n```swift\n@MainActor class ViewModel: ObservableObject {\n // Implicitly use the default realm's objects(Group.self)\n @ObservedResults(Group.self) var groups\n}\n\n/// The main content view if not using Sync.\nstruct LocalOnlyContentView: View {\n // My view model\n @ObservedObject var viewModel = ViewModel()\n \n var body: some View {\n if let group = viewModel.groups.first {\n // Pass the Group objects to a view further\n // down the hierarchy\n ItemsView(group: group)\n } else {\n // For this small app, we only want one group in the realm.\n // You can expand this app to support multiple groups.\n // For now, if there is no group, add one here.\n ProgressView().onAppear {\n viewModel.$groups.append(Group())\n }\n }\n }\n}\n```\n\nYou will see that the view is not updating anymore.\n\n### Version\n\n10.24.0\n\n### What SDK flavour are you using?\n\nLocal Database only\n\n### Are you using encryption?\n\nNo, not using encryption\n\n### Platform OS and version(s)\n\niPhone 13 - iOS 15.4 - Simulator\n\n### Build environment\n\nXcode version: 13.3\nDependency manager and version: SPM", "username": "Violeta_Druga" }, { "code": "", "text": "My apologies for not getting back to you. Must have slipped through the cracks. I have just filed a documentation ticket to make this clearer.Best,\nTyler", "username": "Tyler_Kaye" }, { "code": "@ObservedResults", "text": "Hey @Violeta_Druga - sorry our docs let you down! I’m thinking about where to make this clearer, and am wondering where you encountered documentation for @ObservedResults. Was it the SwiftUI React to Changes page that was unclear, or the autogenerated API reference, or somewhere else?", "username": "Dachary_Carey" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Subscriptions to Flexible Sync not updating @ObservedResults
2023-01-24T02:49:06.615Z
Subscriptions to Flexible Sync not updating @ObservedResults
1,624
null
[ "atlas-search" ]
[ { "code": "{\n \"_id\":{\n \"$oid\":\"6368ca3fcb0c042cbc5b198a\"\n },\n \"articleid\":\"159447148\",\n \"headline\":\"News of the day!\",\n \"subtitle\":\"\",\n \"fulltext\":\"sony will partnership with Honda in making cars\"\n}\n", "text": "How can I build a query to search with multiple operators? For example, I need to search: Honda and car, or mobile and iPhone; MongoDB Atlas and fulltext, or elasticsearch and fulltext.Based on the above 2 queries, the AND terms need to go in must and the OR alternatives need to go in should, but how will these relate with the OR between them? I mean if it finds Honda and car or mobile and iPhone then it needs to show both results, right?How will I build a query to search for the above-mentioned situation?\nI have an index named fulltext, where the fields are headline, fulltext, and subtitle, as I need to search keywords within these fields.\nDocument structure -", "username": "Utsav_Upadhyay2" }, { "code": " \"mappings\": {\n \"dynamic\": true\n }\n}\n[\n {\n \"$search\": {\n \"index\": \"default\",\n \"text\": {\n \"query\": \"string\",\n \"path\": {\n \"wildcard\": \"*\"\n }\n }\n }\n }\n]\n", "text": "Hi @Utsav_Upadhyay2 , can you share your index definition? Have you tried using an index with dynamic mapping and searching with a wildcard query path?\nIndex definition:Query:", "username": "amyjian" }, { "code": "", "text": "I already have a dynamic index named fulltext; my question was, how will you search with this combination below: Honda and car or mobile and iPhone. This means either a document has text containing Honda and car together, or mobile and iPhone.", "username": "Utsav_Upadhyay2" }, { "code": "[\n {\n headline: 'News of the day!',\n subtitle: '',\n fulltext: 'sony will partnership with Honda in making cars'\n },\n {\n headline: 'News of the day!',\n subtitle: 'mobile iPhones test',\n fulltext: 'this is just random text '\n },\n/// the one document below this contains only honda and mobile\n {\n headline: 'nothing',\n subtitle: 'mobile only',\n fulltext: 'honda only'\n },\n {\n headline: 'cars here',\n subtitle: 'honda here',\n fulltext: 'test only'\n },\n {\n headline: 'iphones here',\n subtitle: 'mobile here',\n fulltext: 'test 2 only'\n }\n]\n$search {\n '$search': {\n index: 'compoundindex',\n compound: {\n should: [\n {\n compound: {\n must: [\n { text: { query: 'honda', path: { wildcard: '*' } } },\n { text: { query: 'cars', path: { wildcard: '*' } } }\n ]\n }\n },\n {\n compound: {\n must: [\n { text: { query: 'iphones', path: { wildcard: '*' } } },\n { text: { query: 'mobile', path: { wildcard: '*' } } }\n ]\n }\n }\n ]\n }\n }\n }\n[\n/// The document contains \"car\" AND \"honda\"\n {\n headline: 'cars here',\n subtitle: 'honda here',\n fulltext: 'test only'\n },\n/// The document contains \"iphone\" AND \"mobile\"\n {\n headline: 'iphones here',\n subtitle: 'mobile here',\n fulltext: 'test 2 only'\n },\n/// The document contains \"car\" AND \"honda\"\n {\n headline: 'News of the day!',\n subtitle: '',\n fulltext: 'sony will partnership with Honda in making cars'\n },\n/// The document contains \"iphone\" AND \"mobile\"\n {\n headline: 'News of the day!',\n subtitle: 'mobile iPhones test',\n fulltext: 'this is just random text '\n }\n]\ncompound", "text": "Based on the above 2 queries, the AND terms need to go in must and the OR alternatives need to go in should, but how will these relate with the OR between them? 
I mean if it finds Honda and car or mobile and iPhone then it needs to show both results, right?This means either a document has text containing Honda and car together, or mobile and iPhone.Please see the example below which I believe matches what you have described above. 5 sample documents in which 4 match the above description:The $search stage used:Output:I have only tested this on the 5 sample documents mentioned above, and if you believe it suits your use case please test thoroughly and adjust the search query accordingly to verify it meets your requirements.You can refer to the nested example as well for the compound operator.Regards,\nJason", "username": "Jason_Tran" } ]
How to perform search with multiple operators?
2023-02-10T06:12:21.349Z
How to perform search with multiple operators?
1,007
null
[ "transactions" ]
[ { "code": "", "text": "I have two collections - collection A and collection B. Collection A has a trigger which is invoked when a new record is inserted into this collection and updates a record in collection B.By default, a transaction in MongoDB is scoped to a single document. My question is: will the trigger on collection A and the associated change made in collection B by this trigger be executed as part of the same transaction whenever a new document is inserted into collection A? If not, is there any other way to achieve this without triggering a multi-document transaction from client code?", "username": "shailendra" }, { "code": "", "text": "Hi @shailendra – Today all Triggers will be fired on operations after they have been fully committed to MongoDB, so a Trigger’s response to an operation will always be out of scope of the original operation. I think the best way to accomplish this might be with a field that indicates whether a corresponding Trigger had acted on the document after the insertion (but as you point out, a multi-document transaction from the client should also be able to accomplish this).", "username": "Drew_DiPalma" }, { "code": "", "text": "Hi @Drew_DiPalma , Thanks for your reply.The purpose of using such a trigger is to enforce referential integrity so that parent documents in collection B can’t be deleted when one or more child documents are present in collection A. I plan to use a field called childrenCount in collection B and then validate that the value of this field is 0 before deleting a document in collection B. However, if the trigger is not running in the same transaction then I have no option other than using a multi-document transaction at the app level.Does MongoDB have any plans to support referential/data integrity in the near future? If not, what are the possible ways to achieve it other than doing it at the app level (where it can be too difficult to implement if the entity/object graph has complex relationships)?", "username": "shailendra" } ]
Transaction scope and triggers
2023-02-07T10:50:06.818Z
Transaction scope and triggers
971
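For the childrenCount plan, the deletion guard itself can stay within single-document atomicity by making the delete conditional, so no multi-document transaction is needed at delete time. A hedged sketch (the _id value is a placeholder; maintaining the counter on child insert/delete is the part where trigger lag still matters):

```js
const parentId = ObjectId("63f000000000000000000001"); // hypothetical

// Deletes the parent only if its counter is zero; atomic on one document.
const res = db.parents.deleteOne({ _id: parentId, childrenCount: 0 });

if (res.deletedCount === 0) {
  // Parent still has children (or does not exist): the delete was refused.
}
```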
null
[ "aggregation", "atlas-search" ]
[ { "code": "", "text": "So, we are building full-text search and filtering for a furniture marketplace. Given a search query we can have over 100 products returned. Is there a way we can extract all the filters that can be applied to the results without specifying them in the aggregation pipeline?For example:\nIf I am searching for a product by the name bed, I want the result to return all the beds along with all the facets that can be applied to them, like sizes, colours, materials, etc., without specifying these facets beforehand.", "username": "Syed_Umair" }, { "code": "facet_attributesfacet_attributes=[\"Size\", \"Colour\", \"Material\"]facet_attributes=[\"Length\", \"Width\", \"Height\", \"Material\", \"Colour\"]$searchMetafacet_attributes$search", "text": "One trick for this is a two-pass query technique. First, adjust your documents to contain another field, say facet_attributes. This field will be an array of strings, each value representing the attributes for that particular product. A couch would have facet_attributes=[\"Size\", \"Colour\", \"Material\"] whereas a table document would have facet_attributes=[\"Length\", \"Width\", \"Height\", \"Material\", \"Colour\"] and so on, where you’d have facet fields on each document with those respective attributes.An initial query, which could only be for facets via $searchMeta, would facet on the field facet_attributes for a given query. The facet values returned will give you the attributes represented in the subset of matching products. Using those returned values, issue a second query ($search this time) using the desired attributes returned from the first query.", "username": "Erik_Hatcher" }, { "code": "$searchMeta{\n \"pricing\": {\n \"discount\": {\n \"value\": 990,\n \"displayValue\": \"₹ 990\"\n },\n \"discountPercent\": {\n \"value\": 0.23,\n \"displayValue\": \"-23%\"\n },\n \"strikePrice\": {\n \"value\": 4289,\n \"displayValue\": \"₹ 4,289\"\n },\n \"basePrice\": {\n \"value\": 3299,\n \"displayValue\": \"₹ 3,299\"\n }\n },\n \"filters\": {\n \"collectionFilters\": [\n {\n \"collectionName\": \"living_room_buy\",\n \"collectionLabel\": \"Living Room\",\n \"collectionType\": \"CATEGORY_BUY\",\n \"collectionRank\": 26\n }\n ],\n \"attributeFilters\": [\n {\n \"attributeName\": \"primary_material\",\n \"attributeLabel\": \"Primary Material\",\n \"attributeValue\": \"Engineered Wood\",\n \"attributeRank\": 3\n },\n {\n \"attributeName\": \"finish\",\n \"attributeLabel\": \"Finish\",\n \"attributeValue\": \"Slate Grey\",\n \"attributeRank\": 4\n },\n {\n \"attributeName\": \"config_type\",\n \"attributeLabel\": \"Config type\",\n \"attributeValue\": \"Side Unit\",\n \"attributeRank\": 7\n }\n ],\n \"subCollectionFilters\": [\n {\n \"collectionName\": \"chest_of_drawers_buy\",\n \"collectionLabel\": \"Chest of Drawers\",\n \"collectionType\": \"CATEGORY_BUY\",\n \"collectionRank\": 34\n }\n ]\n }\n}\n", "text": "Let’s say I update my document to have facet_attributes=[“Size”,“Colour”…]. Now I want to fetch all the possible filter options with their values, for example Colour: Grey, Blue, Purple, etc… and so on for every field. How can I accomplish this using the $searchMeta stage?My current structure is like this:", "username": "Syed_Umair" }, { "code": "", "text": "I think you’ll need to restructure your document layout such that each attribute/value has its own field name, such as Colour and Size. 
At that point you can do an initial query with faceting on the facet_attributes field to get the attributes in a result set, and then make another request to facet on all of those attributes and get their unique values. As it is, each of your attribute values is in an attributeValue field that can’t be distinguished from any other attribute.", "username": "Erik_Hatcher" }, { "code": "\"filters\" : [\n {\n \"primary_material\" : {\n \"value\" : \"Engineered Wood\",\n }, \n \"finish\" : {\n \"value\" : \"Slate Grey\",\n },\n \"config_type\" : {\n \"value\" : \"Side Unit\",\n },\n \"collection\" : {\n \"value\" : \"living_room_buy\",\n },\n \"sub_collection\" : {\n \"value\" : \"chest_of_drawers_buy\",\n }\n \n }\n]\ngroupunwindaddToSet$search$search", "text": "I could either restructure the documents like how you have laid it out, which would look like this:Or I can just use a Mongo pipeline which does things like group, unwind, addToSet.Is there any significant advantage if I use $searchMeta, or can I just do it the native MongoDB way? Will there be a huge performance impact?I am thinking I will run two queries at runtime simultaneously. The first one will search for all the products and return them, and will only have the $search stage.\nThe second query, which will run in parallel, will first perform a $search; then I will project out only the filters field and pass it to a Mongo pipeline which will perform the group, unwind, addToSet, etc. to extract all the available filters.\nIs there a better approach for this?Also, I am an M20 Dedicated customer. Is there a way I can get better support?", "username": "Syed_Umair" } ]
Atlas Search to return all the applicable filters for a given search query without specifying
2023-01-03T10:27:10.360Z
Atlas Search to return all the applicable filters for a given search query without specifying
1,911
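A hedged sketch of pass one of the two-pass technique described above, assuming facet_attributes is indexed as a stringFacet field and the index is named default; collection name and query are illustrative:

```js
// Pass 1: which attributes appear among the matching products?
db.products.aggregate([
  {
    $searchMeta: {
      index: "default",
      facet: {
        operator: { text: { query: "bed", path: "name" } },
        facets: {
          attrs: { type: "string", path: "facet_attributes" },
        },
      },
    },
  },
]);
// Returns buckets such as "Size", "Colour", "Material"; pass 2 then runs
// $search/$searchMeta again, faceting on exactly those attribute fields
// to get their distinct values.
```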
https://www.mongodb.com/…00017da6dcea.png
[ "installation" ]
[ { "code": "", "text": "Hi, I followed the steps outlined here and I even did the steps in the knowledge article - but it still won't install. I tried to re-install after updating security & privacy, but I get the errors (attached pics - the 1st pic asked for the updates and I fixed it! However, it always gives an installation failed error). Please help!\n\nScreen Shot 2023-02-01 at 1.54.38 PM1280×912 123 KB\n", "username": "sowmya_swaminathan" }, { "code": "", "text": "Are you using mac os M1 or mac os M2?", "username": "linupy_chiang" }, { "code": "", "text": "updating security & privacyNo, my mac os is not M1. See below:\n", "username": "sowmya_swaminathan" }, { "code": "", "text": "Did you try restarting the system or reinstalling VirtualBox, or other tips like spctl disable or allow-all access?\nCheck this link. It may help, though it is for a lower version of VBox", "username": "Ramachandra_Tummala" }, { "code": "", "text": "OK, I just did the spctl disable/enable via recovery mode and reinstalled; it still gave an installation failure message, but I am able to open the VirtualBox application:\nimage1840×1090 88.7 KB\nDoes that mean that it is installed? I’m not sure!", "username": "sowmya_swaminathan" }, { "code": "", "text": "It does seem to have failed:\nI got this error when I did a vagrant up:\nimage1598×482 84 KB\nI see this error inside VirtualBox as well:\nimage1858×1058 81.4 KB\n", "username": "sowmya_swaminathan" }, { "code": "", "text": "Check these links. Issues with VBox install on macOS are discussed and some fixes are given", "username": "Ramachandra_Tummala" }, { "code": "", "text": "OK, installing the latest version of VirtualBox worked for me! Thanks", "username": "sowmya_swaminathan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Unable to install VirtualBox for M312
2023-02-01T18:55:18.948Z
Unable to install VirtualBox for M312
1,303
null
[ "aggregation", "queries", "transactions" ]
[ { "code": "[\n {\n \"_id\": {\n \"$binary\": {\n \"base64\": \"9JzexK9uSO241gskRnnifg==\",\n \"subType\": \"04\"\n }\n },\n \"TransactionDate\": {\n \"$date\": {\n \"$numberLong\": \"1647993600000\"\n }\n },\n \"Value\": {\n \"$numberDecimal\": \"-59.00\"\n },\n \"CategoryId\": {\n \"$binary\": {\n \"base64\": \"qT1+Iu7QQ2uguFh0I/+JUw==\",\n \"subType\": \"04\"\n }\n }\n },\n {\n \"_id\": {\n \"$binary\": {\n \"base64\": \"88+SyjhBT3+wHyg021UccA==\",\n \"subType\": \"04\"\n }\n },\n \"TransactionDate\": {\n \"$date\": {\n \"$numberLong\": \"1646438400000\"\n }\n },\n \"Value\": {\n \"$numberDecimal\": \"-9.60\"\n },\n \"CategoryId\": {\n \"$binary\": {\n \"base64\": \"zMHauQ8CSvKYFNOQ39+dwQ==\",\n \"subType\": \"04\"\n }\n }\n },\n {\n \"_id\": {\n \"$binary\": {\n \"base64\": \"wOOxv6KCT0KNpuhWaXs5Mg==\",\n \"subType\": \"04\"\n }\n },\n \"TransactionDate\": {\n \"$date\": {\n \"$numberLong\": \"1646956800000\"\n }\n },\n \"Value\": {\n \"$numberDecimal\": \"-45.00\"\n },\n \"CategoryId\": {\n \"$binary\": {\n \"base64\": \"qT1+Iu7QQ2uguFh0I/+JUw==\",\n \"subType\": \"04\"\n }\n }\n },\n {\n \"_id\": {\n \"$binary\": {\n \"base64\": \"vRQ6SgAdQCaWdUa1EtNaQg==\",\n \"subType\": \"04\"\n }\n },\n \"TransactionDate\": {\n \"$date\": {\n \"$numberLong\": \"1675123200000\"\n }\n },\n \"Value\": {\n \"$numberDecimal\": \"-18.69\"\n },\n \"CategoryId\": {\n \"$binary\": {\n \"base64\": \"seHoLjnmSjqeMqzSXk+Dpw==\",\n \"subType\": \"04\"\n }\n }\n },\n {\n \"_id\": {\n \"$binary\": {\n \"base64\": \"UPQwlqV6QvGc2NO8SHexWQ==\",\n \"subType\": \"04\"\n }\n },\n \"TransactionDate\": {\n \"$date\": {\n \"$numberLong\": \"1675123200000\"\n }\n },\n \"Value\": {\n \"$numberDecimal\": \"-15.69\"\n },\n \"CategoryId\": {\n \"$binary\": {\n \"base64\": \"seHoLjnmSjqeMqzSXk+Dpw==\",\n \"subType\": \"04\"\n }\n }\n },\n {\n \"_id\": {\n \"$binary\": {\n \"base64\": \"iEee/gYlSmKhjYi09CtoAQ==\",\n \"subType\": \"04\"\n }\n },\n \"TransactionDate\": {\n \"$date\": {\n \"$numberLong\": \"1675123200000\"\n }\n },\n \"Value\": {\n \"$numberDecimal\": \"253.81\"\n },\n \"CategoryId\": {\n \"$binary\": {\n \"base64\": \"WCDMVi7iRPKiPD6Oaqbhhw==\",\n \"subType\": \"04\"\n }\n }\n },\n {\n \"_id\": {\n \"$binary\": {\n \"base64\": \"C+O/awJWSjWhA57QCdvjhA==\",\n \"subType\": \"04\"\n }\n },\n \"TransactionDate\": {\n \"$date\": {\n \"$numberLong\": \"1675123200000\"\n }\n },\n \"Value\": {\n \"$numberDecimal\": \"-15.69\"\n },\n \"CategoryId\": {\n \"$binary\": {\n \"base64\": \"seHoLjnmSjqeMqzSXk+Dpw==\",\n \"subType\": \"04\"\n }\n }\n }\n]\n[\n {\n \"_id\": {\n \"month\": 3,\n \"year\": 2022\n }\n \"Transactions\": [\n {\n \"CategoryId\": BinData(4, \"qT1+Iu7QQ2uguFh0I/+JUw==\"),\n \"Transactions\": [\n {\n \"CategoryId\": BinData(4, \"qT1+Iu7QQ2uguFh0I/+JUw==\"),\n \"TransactionDate\": ISODate(\"2022-03-11T00:00:00Z\"),\n \"Value\": NumberDecimal(\"-45.00\"),\n \"_id\": BinData(4, \"wOOxv6KCT0KNpuhWaXs5Mg==\")\n },\n {\n \"CategoryId\": BinData(4, \"qT1+Iu7QQ2uguFh0I/+JUw==\"),\n \"TransactionDate\": ISODate(\"2022-03-23T00:00:00Z\"),\n \"Value\": NumberDecimal(\"-59.00\"),\n \"_id\": BinData(4, \"9JzexK9uSO241gskRnnifg==\")\n },\n ]\n },\n {\n \"CategoryId\": BinData(4, \"zMHauQ8CSvKYFNOQ39+dwQ==\"),\n \"Transactions\": [\n {\n \"CategoryId\": BinData(4, \"zMHauQ8CSvKYFNOQ39+dwQ==\"),\n \"TransactionDate\": ISODate(\"2022-03-05T00:00:00Z\"),\n \"Value\": NumberDecimal(\"-9.60\"),\n \"_id\": BinData(4, \"88+SyjhBT3+wHyg021UccA==\")\n }\n ]\n },\n ],\n },\n {\n \"_id\": {\n \"month\": 1,\n \"year\": 2023\n }\n 
\"Transactions\": [\n {\n \"CategoryId\": BinData(4, \"seHoLjnmSjqeMqzSXk+Dpw==\"),\n \"Transactions\": [\n {\n \"CategoryId\": BinData(4, \"seHoLjnmSjqeMqzSXk+Dpw==\"),\n \"TransactionDate\": ISODate(\"2023-01-31T00:00:00Z\"),\n \"Value\": NumberDecimal(\"-18.69\"),\n \"_id\": BinData(4, \"vRQ6SgAdQCaWdUa1EtNaQg==\")\n },\n {\n \"CategoryId\": BinData(4, \"seHoLjnmSjqeMqzSXk+Dpw==\"),\n \"TransactionDate\": ISODate(\"2023-01-31T00:00:00Z\"),\n \"Value\": NumberDecimal(\"-15.69\"),\n \"_id\": BinData(4, \"UPQwlqV6QvGc2NO8SHexWQ==\")\n },\n {\n \"CategoryId\": BinData(4, \"seHoLjnmSjqeMqzSXk+Dpw==\"),\n \"TransactionDate\": ISODate(\"2023-01-31T00:00:00Z\"),\n \"Value\": NumberDecimal(\"-15.69\"),\n \"_id\": BinData(4, \"C+O/awJWSjWhA57QCdvjhA==\")\n }\n ]\n },\n {\n \"CategoryId\": BinData(4, \"WCDMVi7iRPKiPD6Oaqbhhw==\"),\n \"Transactions\": [\n {\n \"CategoryId\": BinData(4, \"WCDMVi7iRPKiPD6Oaqbhhw==\"),\n \"TransactionDate\": ISODate(\"2023-01-31T00:00:00Z\"),\n \"Value\": NumberDecimal(\"253.81\"),\n \"_id\": BinData(4, \"iEee/gYlSmKhjYi09CtoAQ==\")\n },\n ]\n },\n ],\n }\n]\n $group: {\n _id: {\n year: {\n $year: \"$TransactionDate\",\n \n },\n month: {\n $month: \"$TransactionDate\",\n \n },\n \n },\n Transactions: {\n $push: \"$$ROOT\",\n }\n }\n", "text": "Hello!,I have problems with writing aggregation with multiple groupings. I have collection:Using this collection I want result to be grouped by year and month of TransactionDate and then grouped by CategoryId. I have prepared the expected result:First step is easy for me, because it’s simple group aggregation:But I don’t know, how to group inner Transactions list by CategoryId. I tried to use unwind and then group by CategoryId, but the result is different that I need.Mongo playground:Mongo playground: a simple sandbox to test and share MongoDB queries online", "username": "Mateusz_Wroblewski" }, { "code": "\"_id\" : {\n \"year\" : { \"$year\" : \"$TransactionDate\" } ,\n \"month\" : { \"$month\" : \"$TransactionDate\" } ,\n \"category\" : \"$CategoryId\"\n} ,\n\"Transactions\" : { \"$push\" : \"$$ROOT\" }\n\"_id\" : { \"year\" : \"$_id.year\" , \"month\" : \"$_id.month\" }\n\"transactions\" : { \"$push\" : {\n \"category\" : \"$_id.category\" ,\n \"transactions\" : \"$Transactions\"\n} }\n", "text": "You could try to first $group on year/month/category first withA second $group will then be used to make your year/month group with something like:", "username": "steevej" }, { "code": "", "text": "Thanks @steevej! It was simpler than I thought ", "username": "Mateusz_Wroblewski" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to group result of different group aggregation
2023-02-12T15:49:43.556Z
How to group result of different group aggregation
800
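Putting the two fragments from the answer together into one runnable pipeline:

```js
db.transactions.aggregate([
  // First group: one bucket per (year, month, category)
  {
    $group: {
      _id: {
        year: { $year: "$TransactionDate" },
        month: { $month: "$TransactionDate" },
        category: "$CategoryId",
      },
      Transactions: { $push: "$$ROOT" },
    },
  },
  // Second group: fold the category buckets under each (year, month)
  {
    $group: {
      _id: { year: "$_id.year", month: "$_id.month" },
      Transactions: {
        $push: { CategoryId: "$_id.category", Transactions: "$Transactions" },
      },
    },
  },
]);
```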
null
[ "serverless" ]
[ { "code": "", "text": "I have successfully added my GitHub Student promo code to my organization.When I try to create a serverless database and add my credit card, it rejects it, saying that it has insufficient funds.I thought the credit card payment part was just for verification, like AWS and Google Cloud, right?", "username": "Ibrahim_Rouis" }, { "code": "", "text": "Hi Ibrahim,Sorry you’re experiencing this issue. Do you mind attempting to contact MongoDB Support through the support portal? I think they’ll be better able to help you since they are more knowledgeable on payments. If you still have concerns please feel free to post here again or DM me!", "username": "Aiyana_McConnell" } ]
MongoDB Atlas still asking for credit card fund after adding Github Student 50$ promo code
2023-02-14T10:58:58.750Z
MongoDB Atlas still asking for credit card fund after adding Github Student 50$ promo code
1,582
null
[ "queries", "java", "crud", "performance" ]
[ { "code": "import com.mongodb.client.MongoCollection;\nimport com.mongodb.client.model.FindOneAndUpdateOptions;\nimport com.mongodb.client.model.ReturnDocument;\nimport org.bson.Document;\n\nimport java.util.Date;\n\npublic class EventProcessor {\n private static final int LEASE_TIMEOUT_SECONDS = 60;\n\n private final MongoCollection<Document> collection;\n private final String instanceId;\n\n public EventProcessor(MongoCollection<Document> collection, String instanceId) {\n this.collection = collection;\n this.instanceId = instanceId;\n }\n\n public void processEvents() {\n while (true) {\n // Acquire a lease on the next available document\n Document document = acquireLease();\n if (document == null) {\n // No documents available for processing; sleep and try again later\n try {\n Thread.sleep(1000);\n } catch (InterruptedException e) {\n Thread.currentThread().interrupt();\n return;\n }\n continue;\n }\n\n try {\n // Process the document\n processDocument(document);\n } finally {\n // Release the lease on the document\n releaseLease(document);\n }\n }\n }\n\n private Document acquireLease() {\n Document updatedDocument = collection.findOneAndUpdate(\n new Document(\"locked_by\", null)\n .append(\"lease_expires_at\", new Document(\"$lt\", new Date())),\n new Document(\"$set\", new Document(\"locked_by\", instanceId)\n .append(\"lease_expires_at\", new Date(new Date().getTime() + (LEASE_TIMEOUT_SECONDS * 1000)))),\n new FindOneAndUpdateOptions()\n .returnDocument(ReturnDocument.AFTER)\n );\n return updatedDocument;\n }\n\n private void releaseLease(Document document) {\n collection.updateOne(\n new Document(\"_id\", document.getObjectId(\"_id\"))\n .append(\"locked_by\", instanceId),\n new Document(\"$set\", new Document(\"locked_by\", null)\n .append(\"lease_expires_at\", new Date()))\n );\n }\n\n private void processDocument(Document document) {\n // TODO: Implement your document processing logic here\n System.out.println(\"Processing document: \" + document.toJson());\n }\n}\n\n", "text": "We store certain events in a Mongo collection that must be fetched and processed. We want to ensure each record is processed once (at least once is also acceptable) and have some parallelism while processing the documents by having multiple instances, with each instance running multiple threads.To achieve this, I plan on using a lease-based approach with locking to ensure each record is processed only once. Another method I can think of is to create a change stream and publish events to Kafka, then consume and process the Kafka events, but this seems a bit too much for this. I am sharing a sample code for the lease-based approach below. In the lease-based approach, I am fetching a record that is not locked using the findOneAndUpdate method and updating the record with locked_by and lease_expires_at so other threads/processes don’t process the same message.Does using findOneAndUpdate in multiple instances with multiple threads to fetch records in parallel affect performance? Is findOneAndUpdate thread-safe for multiple threads to call in parallel? Is it possible that two calls find the same record and update it, resulting in two threads processing the same request? What type of locking is used, and even though numerous threads are querying in parallel, will MongoDB execute them sequentially due to locking? What other options can be explored in this case?Also, I am currently not planning on sharding the database, and records will be deleted once processed successfully. 
Another service will be inserting new records but there won’t be any other queries on this collection apart from those mentioned above.", "username": "Reddington" }, { "code": "", "text": "Generally databases implement this kind of thing with similar/same approach. When selectAndUpdate with same condition are performed by multiple clients, only one client request can succeed, and all the others will fail. This is the guarantee by “compare and swap” , an atomic operation.Locking is definitely needed since multiple threads are trying to update the same resources, be it a database item or not. All the other threads will have to to wait until the first one to finish as all those should run atomically.In my project i was also using a lease like mechanism so that only one thread works on one particular document.", "username": "Kobe_W" }, { "code": "", "text": "What I wanted to know is if there are 10 records available that meet the query criteria and if 5 threads run the findOneAndUpdate operation at the same time, will all 5 calls succeed with each thread getting one record of the available 10 or will all 5 threads initially pick the same record and whichever threads update it first will succeed and other fail to update the record? In case the other threads fail to update the record, will they try to find if there are any other records that match the query criteria and update it or just propagate the initial failure upstream?@Kobe_W you are saying the other threads fail to update and propagate the error upstream. I guess this is kind of expected to fail and not retry. Is there any documentation for this?", "username": "Reddington" }, { "code": "", "text": "i believe the final result would be , 5 threads all succeed (assume no failure to update happens) with each successfully updating different document. However, it’s not clear if they initially try to pick up the same or will automatically pick up different records. Probably it also depends on the underlying locking type.No error should be returned just because 4 threads fail to compete with the first thread, since this is not an error . Rather it’s an expected behaviour.this explains atomic updates in mongodb, but all general purpose database are supposed to have similar/same semantics.", "username": "Kobe_W" }, { "code": "When selectAndUpdate with same condition are performed by multiple clients, only one client request can succeed, and all the others will fail", "text": "When selectAndUpdate with same condition are performed by multiple clients, only one client request can succeed, and all the others will failThis is in contrast to your example, this will be the case where only one such item exists and multiple threads try to compete on it (Failed threads will return “no such document”). If more than one item exists, then multiple thread should succeed as well.", "username": "Kobe_W" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
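The compare-and-swap guarantee discussed above is easy to see in shell form. Below is a minimal mongosh sketch of the lease claim, mirroring the Java acquireLease(); the collection name `events` and the instance id are assumptions, not from the thread:

```js
// Minimal sketch of the lease claim in mongosh. The filter and the update are
// applied as one atomic operation per document, so two concurrent callers can
// never both claim the same _id; a loser simply gets a different match or null.
const now = new Date();
const claimed = db.events.findOneAndUpdate(
  { locked_by: null, lease_expires_at: { $lt: now } },
  { $set: { locked_by: "instance-1",
            lease_expires_at: new Date(now.getTime() + 60 * 1000) } },
  { returnNewDocument: true } // mongosh's flavor of ReturnDocument.AFTER
);
// claimed is null when no unlocked document matched.
```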
FindOneAndUpdate internal workings, locking mechanism, and performance impact of multiple parallel calls on the same collection
2023-02-13T13:33:15.430Z
FindOneAndUpdate internal workings, locking mechanism, and performance impact of multiple parallel calls on the same collection
1,904
null
[ "dot-net", "atlas-functions", "atlas-triggers" ]
[ { "code": "", "text": "Azure Functions don’t have a trigger for MongoDB right out of the box. Is there some custom MongoDB trigger out there that will allow for me to take advantage of Change Streams in MongoDB? Ideally, I would like to find a MongoDB trigger equivalent to the “CosmosDB” trigger in Azure Functions that takes advantage of Change Feed. If that doesn’t exist, is there some other way that I can take advantage of MongoDB change stream with Azure Functions? We use Azure Functions extensively and need some way to incorporate it with MongoDB. Specifically, we need a trigger for database changes in MongoDB. I’ve seen examples of using Azure Functions with MongoDB with a Http Trigger, but we need a trigger that makes use of the MongoDB change stream.", "username": "Sam_Lanza" }, { "code": "", "text": "Hi Sam – Have you looked at Atlas Triggers? These are essentially serverless functions attached to Change Streams and will allow you to call into an Azure Function or simply write your business logic within the function in Atlas.", "username": "Drew_DiPalma" }, { "code": "", "text": "Yeah, I’ve been looking into the Atlas Functions. That’s my backup plan. I was just hoping that there was some MongoDB trigger for Azure Functions out there somewhere to make our lives easier since we’re extensively using Azure Functions in our application.", "username": "Sam_Lanza" }, { "code": "", "text": "What is your Azure Function written in? Many mongodb drivers have change stream features. And there is one for .NET. Listening to a change stream isn’t really serverless, but it could be a nice option if you want to stay in your language. Otherwise you need to create an Atlas Trigger and function that will call your HTTP trigger for an azure function or something like that.", "username": "Lukas_deConantseszn1" }, { "code": "", "text": "It’s written in c#. I suspect I will have to use Atlas Functions since Microsoft only cares about CosmosDB triggers.", "username": "Sam_Lanza" } ]
MongoDB Trigger for Azure Functions
2023-02-11T20:33:57.088Z
MongoDB Trigger for Azure Functions
2,066
null
[ "queries", "transactions", "storage" ]
[ { "code": "", "text": "oldest timestamp: (1676311295, 1) , meta checkpoint timestamp: (1676311295, 1) base write gen: 16310\"}}\n{“t”:{“$date”:“2023-02-14T10:01:46.724+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22430, “ctx”:“WTCheckpointThread”,“msg”:“WiredTiger message”,“attr”:{“message”:“[1676349106:724557][9612:140730506107696], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1268, snapshot max: 1268 snapshot count: 0, oldest timestamp: (1676311295, 1) , meta checkpoint timestamp: (1676311295, 1) base write gen: 16310”}}\n{“t”:{“$date”:“2023-02-14T10:02:46.739+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22430, “ctx”:“WTCheckpointThread”,“msg”:“WiredTiger message”,“attr”:{“message”:“[1676349166:739466][9612:140730506107696], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1270, snapshot max: 1270 snapshot count: 0, oldest timestamp: (1676311295, 1) , meta checkpoint timestamp: (1676311295, 1) base write gen: 16310”}}\n{“t”:{“$date”:“2023-02-14T10:03:46.755+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22430, “ctx”:“WTCheckpointThread”,“msg”:“WiredTiger message”,“attr”:{“message”:“[1676349226:755064][9612:140730506107696], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1272, snapshot max: 1272 snapshot count: 0, oldest timestamp: (1676311295, 1) , meta checkpoint timestamp: (1676311295, 1) base write gen: 16310”}}\n{“t”:{“$date”:“2023-02-14T10:04:46.767+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22430, “ctx”:“WTCheckpointThread”,“msg”:“WiredTiger message”,“attr”:{“message”:“[1676349286:766988][9612:140730506107696], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1274, snapshot max: 1274 snapshot count: 0, oldest timestamp: (1676311295, 1) , meta checkpoint timestamp: (1676311295, 1) base write gen: 16310”}}\n{“t”:{“$date”:“2023-02-14T10:05:04.477+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:23315, “ctx”:“serviceShutdown”,“msg”:“got {controlCodeName} request from Windows Service Control Manager, {globalInShutdownDeprecated_already_in_shutdown_will_terminate_after_current_cmd_ends}”,“attr”:{“controlCodeName”:“SERVICE_CONTROL_STOP”,“globalInShutdownDeprecated_already_in_shutdown_will_terminate_after_current_cmd_ends”:“will terminate after current cmd ends”}}\n{“t”:{“$date”:“2023-02-14T10:05:04.479+05:30”},“s”:“I”, “c”:“REPL”, “id”:4784900, “ctx”:“serviceShutdown”,“msg”:“Stepping down the ReplicationCoordinator for shutdown”,“attr”:{“waitTimeMillis”:10000}}\n{“t”:{“$date”:“2023-02-14T10:05:04.481+05:30”},“s”:“I”, “c”:“COMMAND”, “id”:4784901, “ctx”:“serviceShutdown”,“msg”:“Shutting down the MirrorMaestro”}\n{“t”:{“$date”:“2023-02-14T10:05:04.481+05:30”},“s”:“I”, “c”:“SHARDING”, “id”:4784902, “ctx”:“serviceShutdown”,“msg”:“Shutting down the WaitForMajorityService”}\n{“t”:{“$date”:“2023-02-14T10:05:04.484+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:4784903, “ctx”:“serviceShutdown”,“msg”:“Shutting down the LogicalSessionCache”}\n{“t”:{“$date”:“2023-02-14T10:05:04.485+05:30”},“s”:“I”, “c”:“NETWORK”, “id”:20562, “ctx”:“serviceShutdown”,“msg”:“Shutdown: going to close listening sockets”}\n{“t”:{“$date”:“2023-02-14T10:05:04.486+05:30”},“s”:“I”, “c”:“NETWORK”, “id”:4784905, “ctx”:“serviceShutdown”,“msg”:“Shutting down the global connection pool”}\n{“t”:{“$date”:“2023-02-14T10:05:04.486+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:4784906, “ctx”:“serviceShutdown”,“msg”:“Shutting down the FlowControlTicketholder”}\n{“t”:{“$date”:“2023-02-14T10:05:04.486+05:30”},“s”:“I”, “c”:“-”, “id”:20520, 
“ctx”:“serviceShutdown”,“msg”:“Stopping further Flow Control ticket acquisitions.”}\n{“t”:{“$date”:“2023-02-14T10:05:04.487+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:4784908, “ctx”:“serviceShutdown”,“msg”:“Shutting down the PeriodicThreadToAbortExpiredTransactions”}\n{“t”:{“$date”:“2023-02-14T10:05:04.487+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:4784934, “ctx”:“serviceShutdown”,“msg”:“Shutting down the PeriodicThreadToDecreaseSnapshotHistoryCachePressure”}\n{“t”:{“$date”:“2023-02-14T10:05:04.487+05:30”},“s”:“I”, “c”:“REPL”, “id”:4784909, “ctx”:“serviceShutdown”,“msg”:“Shutting down the ReplicationCoordinator”}\n{“t”:{“$date”:“2023-02-14T10:05:04.487+05:30”},“s”:“I”, “c”:“SHARDING”, “id”:4784910, “ctx”:“serviceShutdown”,“msg”:“Shutting down the ShardingInitializationMongoD”}\n{“t”:{“$date”:“2023-02-14T10:05:04.487+05:30”},“s”:“I”, “c”:“REPL”, “id”:4784911, “ctx”:“serviceShutdown”,“msg”:“Enqueuing the ReplicationStateTransitionLock for shutdown”}\n{“t”:{“$date”:“2023-02-14T10:05:04.488+05:30”},“s”:“I”, “c”:“-”, “id”:4784912, “ctx”:“serviceShutdown”,“msg”:“Killing all operations for shutdown”}\n{“t”:{“$date”:“2023-02-14T10:05:04.488+05:30”},“s”:“I”, “c”:“-”, “id”:4695300, “ctx”:“serviceShutdown”,“msg”:“Interrupted all currently running operations”,“attr”:{“opsKilled”:3}}\n{“t”:{“$date”:“2023-02-14T10:05:04.488+05:30”},“s”:“I”, “c”:“COMMAND”, “id”:4784913, “ctx”:“serviceShutdown”,“msg”:“Shutting down all open transactions”}\n{“t”:{“$date”:“2023-02-14T10:05:04.488+05:30”},“s”:“I”, “c”:“REPL”, “id”:4784914, “ctx”:“serviceShutdown”,“msg”:“Acquiring the ReplicationStateTransitionLock for shutdown”}\n{“t”:{“$date”:“2023-02-14T10:05:04.488+05:30”},“s”:“I”, “c”:“INDEX”, “id”:4784915, “ctx”:“serviceShutdown”,“msg”:“Shutting down the IndexBuildsCoordinator”}\n{“t”:{“$date”:“2023-02-14T10:05:04.488+05:30”},“s”:“I”, “c”:“REPL”, “id”:4784916, “ctx”:“serviceShutdown”,“msg”:“Reacquiring the ReplicationStateTransitionLock for shutdown”}\n{“t”:{“$date”:“2023-02-14T10:05:04.488+05:30”},“s”:“I”, “c”:“REPL”, “id”:4784917, “ctx”:“serviceShutdown”,“msg”:“Attempting to mark clean shutdown”}\n{“t”:{“$date”:“2023-02-14T10:05:04.488+05:30”},“s”:“I”, “c”:“NETWORK”, “id”:4784918, “ctx”:“serviceShutdown”,“msg”:“Shutting down the ReplicaSetMonitor”}\n{“t”:{“$date”:“2023-02-14T10:05:04.489+05:30”},“s”:“I”, “c”:“SHARDING”, “id”:4784921, “ctx”:“serviceShutdown”,“msg”:“Shutting down the MigrationUtilExecutor”}\n{“t”:{“$date”:“2023-02-14T10:05:04.490+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:4784925, “ctx”:“serviceShutdown”,“msg”:“Shutting down free monitoring”}\n{“t”:{“$date”:“2023-02-14T10:05:04.490+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:20609, “ctx”:“serviceShutdown”,“msg”:“Shutting down free monitoring”}\n{“t”:{“$date”:“2023-02-14T10:05:04.490+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:4784927, “ctx”:“serviceShutdown”,“msg”:“Shutting down the HealthLog”}\n{“t”:{“$date”:“2023-02-14T10:05:04.491+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:4784929, “ctx”:“serviceShutdown”,“msg”:“Acquiring the global lock for shutdown”}\n{“t”:{“$date”:“2023-02-14T10:05:04.491+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:4784930, “ctx”:“serviceShutdown”,“msg”:“Shutting down the storage engine”}\n{“t”:{“$date”:“2023-02-14T10:05:04.491+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22320, “ctx”:“serviceShutdown”,“msg”:“Shutting down journal flusher thread”}\n{“t”:{“$date”:“2023-02-14T10:05:04.491+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22321, “ctx”:“serviceShutdown”,“msg”:“Finished shutting down journal flusher thread”}\n{“t”:{“$date”:“2023-02-14T10:05:04.492+05:30”},“s”:“I”, “c”:“STORAGE”, 
“id”:20282, “ctx”:“serviceShutdown”,“msg”:“Deregistering all the collections”}\n{“t”:{“$date”:“2023-02-14T10:05:04.495+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22372, “ctx”:“OplogVisibilityThread”,“msg”:“Oplog visibility thread shutting down.”}\n{“t”:{“$date”:“2023-02-14T10:05:04.497+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22261, “ctx”:“serviceShutdown”,“msg”:“Timestamp monitor shutting down”}\n{“t”:{“$date”:“2023-02-14T10:05:04.498+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22317, “ctx”:“serviceShutdown”,“msg”:“WiredTigerKVEngine shutting down”}\n{“t”:{“$date”:“2023-02-14T10:05:04.498+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22318, “ctx”:“serviceShutdown”,“msg”:“Shutting down session sweeper thread”}\n{“t”:{“$date”:“2023-02-14T10:05:04.498+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22319, “ctx”:“serviceShutdown”,“msg”:“Finished shutting down session sweeper thread”}\n{“t”:{“$date”:“2023-02-14T10:05:04.498+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22322, “ctx”:“serviceShutdown”,“msg”:“Shutting down checkpoint thread”}\n{“t”:{“$date”:“2023-02-14T10:05:04.498+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22323, “ctx”:“serviceShutdown”,“msg”:“Finished shutting down checkpoint thread”}\n{“t”:{“$date”:“2023-02-14T10:05:04.498+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:4795902, “ctx”:“serviceShutdown”,“msg”:“Closing WiredTiger”,“attr”:{“closeConfig”:“leak_memory=true,”}}\n{“t”:{“$date”:“2023-02-14T10:05:04.501+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22430, “ctx”:“serviceShutdown”,“msg”:“WiredTiger message”,“attr”:{“message”:“[1676349304:501092][9612:140730506107696], close_ckpt: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 1276, snapshot max: 1276 snapshot count: 0, oldest timestamp: (1676311295, 1) , meta checkpoint timestamp: (1676311295, 1) base write gen: 16310”}}\n{“t”:{“$date”:“2023-02-14T10:05:04.570+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:4795901, “ctx”:“serviceShutdown”,“msg”:“WiredTiger closed”,“attr”:{“durationMillis”:72}}\n{“t”:{“$date”:“2023-02-14T10:05:04.571+05:30”},“s”:“I”, “c”:“STORAGE”, “id”:22281, “ctx”:“serviceShutdown”,“msg”:“shutdown: removing fs lock…”}\n{“t”:{“$date”:“2023-02-14T10:05:04.572+05:30”},“s”:“I”, “c”:“-”, “id”:4784931, “ctx”:“serviceShutdown”,“msg”:“Dropping the scope cache for shutdown”}\n{“t”:{“$date”:“2023-02-14T10:05:04.573+05:30”},“s”:“I”, “c”:“FTDC”, “id”:4784926, “ctx”:“serviceShutdown”,“msg”:“Shutting down full-time data capture”}\n{“t”:{“$date”:“2023-02-14T10:05:04.573+05:30”},“s”:“I”, “c”:“FTDC”, “id”:20626, “ctx”:“serviceShutdown”,“msg”:“Shutting down full-time diagnostic data capture”}\n{“t”:{“$date”:“2023-02-14T10:05:04.582+05:30”},“s”:“I”, “c”:“CONTROL”, “id”:20565, “ctx”:“serviceShutdown”,“msg”:“Now exiting”}", "username": "Hemanth_Perepi_1" }, { "code": "", "text": "Are you getting the same error 1503?\nCan you share your config file\nCould be some invalid param causing the issue", "username": "Ramachandra_Tummala" }, { "code": "", "text": "yes. 1503 error\n\nme1139×623 18.1 KB\n\n\nmongo error1245×623 56.8 KB\n", "username": "Hemanth_Perepi_1" }, { "code": "", "text": "For internal member authentication you need a keyfile in replica setup\nPlease check mongodb documentation and other threads in our forum", "username": "Ramachandra_Tummala" } ]
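The keyfile setup the last reply points to is small. Here is a hedged sketch of what each member's config needs, assuming a Windows file layout and a replica set named rs0 (both are placeholders):

```yaml
# Hedged sketch: internal member authentication for a 3-node replica set.
# Generate one keyfile and copy the identical file to every member, e.g.:
#   openssl rand -base64 756 > C:\mongodb\mongod.key
# Then add to each member's mongod.cfg (paths and set name are examples):
security:
  authorization: enabled
  keyFile: C:\mongodb\mongod.key
replication:
  replSetName: rs0
```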
Why does the MongoDB service not start once security is enabled in the config file? (MongoDB 3-node replication on Windows)
2023-02-14T07:07:13.571Z
Why does the MongoDB service not start once security is enabled in the config file? (MongoDB 3-node replication on Windows)
967
null
[ "atlas-device-sync", "graphql" ]
[ { "code": "", "text": "I would like to use Atlas Device Sync in our reactJS web app. My use-case is our trading app & I would like to not manage my own frontend APIs + graphql servers.I can use the mongo APIs in the realm SDK, but using the watch() function falls under the Request pricing which is cost prohibitive. The DeviceSync pricing structure is not.Are there plans to support Device Sync in Web? What is the limitation of the browser?", "username": "Darren_Furr" }, { "code": "", "text": "We’ve been exploring what it would take to bring Device Sync to Web - the issue is that Realm is traditionally stored on disk and we don’t have access to that in a browser. We’ve been experimenting but it would be a long-term project. Your best bet now is to use the Data API, GraphQL, or HTTPS endpoints.", "username": "Ian_Ward" }, { "code": "", "text": "Device Sync doesn’t use the indexedb OR localstorage equivalent in react-native or electron?", "username": "Darren_Furr" }, { "code": "", "text": "No - for Electron and React Native we commit data on disk in the realm file format. We are exploring IndexedDB and localstorage - they are not too amenable to an object database file format but we are researching options.", "username": "Ian_Ward" }, { "code": "", "text": "OK. Makes sense. Thanks for the update.", "username": "Darren_Furr" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Device Sync: Web support?
2023-02-07T16:11:09.674Z
Device Sync: Web support?
1,253
null
[ "aggregation", "queries", "python", "crud" ]
[ { "code": "{\n\t\"_id\": {\"$oid\": \"63e1c33027033952dc767b8a\"},\n\t\"User_id\": \"[email protected]\",\n\t\"User_Name\": \"Aditya Mishra\",\n\t\"country\": [\n\t\t{\"Country_Code\": \"MY\",\"Country_Name\": \"Malaysia\",\n\t\t \"Content\": [{\"Content_Type\": \"Service6\",\"Publication\": [{\"Publication_Name\": \"ACFM\"},{\"Publication_Name\": \"MPF\"},{\"Publication_Name\": \"CAM\"}]},\n\t\t {\"Content_Type\": \"Journal\",\"Publication\": [{\"Publication_Name\": \"MLJA\"},{\"Publication_Name\": \"SHLRA\"}]},\n\t\t {\"Content_Type\": \"HAP\",\"Publication\": [{\"Publication_Name\": \"HLM\"}]},\n\t\t {\"Content_Type\": \"Cases\",\"Publication\": [{\"Publication_Name\": \"MLJ\"},{\"Publication_Name\": \"MLJU\"},{\"Publication_Name\": \"SHLR\"}]}\n\t\t ]\n\t\t},\n\t\t{\"Country_Code\": \"AU\",\"Country_Name\": \"Australia\",\n \"Content\": [{\"Content_Type\": \"Service5\",\"Publication\": [{\"Publication_Name\": \"ABCE\"},{\"Publication_Name\": \"CLSA\"},{\"Publication_Name\": \"FS\"}]},\n {\"Content_Type\": \"HAP\",\"Publication\": [{\"Publication_Name\": \"HLA\"}]},\n {\"Content_Type\": \"Cases\",\"Publication\": [{\"Publication_Name\": \"ABR\"}]},\n {\"Content_Type\": \"Periodical\",\"Publication\": [{\"Publication_Name\": \"ELBA\"},{\"Publication_Name\": \"CLNQ\"}]}\n ]\n },\n\t\t{\"Country_Code\": \"HK\",\"Country_Name\": \"HongKong\",\n \"Content\": [{\"Content_Type\": \"Service6\",\"Publication\": [{\"Publication_Name\": \"ABCE\"},{\"Publication_Name\": \"CLSA\"}]},\n {\"Content_Type\": \"HAP\",\"Publication\": [{\"Publication_Name\": \"HLA\"}]},\n {\"Content_Type\": \"Cases\",\"Publication\": [{\"Publication_Name\": \"ABR\"},{\"Publication_Name\": \"HKC\"},{\"Publication_Name\": \"HKCU\"}]},\n {\"Content_Type\": \"Periodical\",\"Publication\": [{\"Publication_Name\": \"ELBA\"},{\"Publication_Name\": \"CLNQ\"}]}\n ]\n\n\t\t}\n\n\t]\n\n}\nEmp_col.update_one({\"country.Content[0].Publication.Publication_Name\":\"MPF\"},{'$set':{\"country.Content[0].Publication.Publication_Name\":\"MRF\"}})\n", "text": "Hi mates, I am trying to update the nested array. This is the sample arrayI tried in following way, didn’t get any exception but haven’t reflected the expected changes in DB.\nI want to update the Publication_name from “MPF” to “MRF” for “Country_Code”: “MY” and “Content_Type”: “Service6”If anyone can help me out and provide me solutions, it will be really grateful", "username": "Shivam_Modi2" }, { "code": "", "text": "You need to pass arrayFilters for your Country_Code to specify which element of the country you want to update.", "username": "steevej" } ]
Update the nested array data
2023-02-10T05:35:00.068Z
Update the nested array data
685
https://www.mongodb.com/…756cd2622556.png
[ "node-js", "mongoose-odm" ]
[ { "code": "const mongoose = require('mongoose');\n\nconst userSchema = new mongoose.Schema({\n username: {\n type: String,\n },\n email: {\n type: String,\n unique: true,\n lowercase: true\n },\n password: {\n type: String,\n },\n});\n\nconst User = mongoose.model('User', userSchema);\n\nmodule.exports = User;\nconst User = require('../../models/userModel');\nconst bcrypt = require('bcryptjs');\n\nconst signup = async (req, res) => {\n try {\n const { username, email, password } = req.body;\n\n // Check if user exists\n const user = await User.exists({ email });\n if (user) {\n return res.status(409).send('This email is already taken!')\n }\n\n // Encrypt password\n const encryptedPassword = await bcrypt.hash( password, 10 );\n\n // create the user document\n const newUser = await User.create({\n username,\n email,\n password: encryptedPassword,\n })\n\n // create JWT token\n const token = 'Some token data here'\n\n res.status(201).json({\n userData: {\n username: newUser.username,\n email: newUser.email,\n token,\n }\n })\n\n } catch (error) {\n return res.status(500).send('Something went wrong, please try again')\n }\n};\n\nmodule.exports = signup;\n", "text": "I’m trying to persist data in my users’ collection that is inside a database that I created on MongoDB Atlas, the problem is every time I create a new user using my nodejs code and postman a new “test” database created with my users collection inside below is my sample code:\nmodel:signup:I attached a screenshot from MongoDB Atlas account that shows what I explained above:\n", "username": "Moez_Ben_Rebah" }, { "code": "...mongodb.net/your_db_name?retryWrites...", "text": "Probably you need to specify database name in the connection string (...mongodb.net/your_db_name?retryWrites... )", "username": "menteuse" }, { "code": "", "text": "Thank you @menteuse this is exactly what I needed to add, thank you buddy!", "username": "Moez_Ben_Rebah" }, { "code": "", "text": "Not really menteuse if you are providing a true solution. B-)", "username": "steevej" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MongoDB creates a test database
2023-02-02T06:58:59.178Z
MongoDB creates a test database
3,650
null
[ "queries" ]
[ { "code": "[\n {\n Title:\"hello\",\n keyPoints: [\n {\n text: \"Line 1\"\n },\n [\n {\n text: \"Line 2\"\n },\n {\n text: \"Line 2 version 2\"\n }\n ],\n {\n text: \"Line 3\"\n }\n ]\n }\n]\n", "text": "Hi, I’m trying to build a projection that can help me with versioning. I’ve taken guidance from Building with Patterns: The Polymorphic Pattern | MongoDB this leaves me with a document which has an array of objects, some of these objects may also be arrays. I’ve abstracted an example document below:I aspire to have an output of:Title:“hello”,\ntext: “Line 1”,\ntext: “Line 2 version 2”,\ntext: “Line 3”I’ve been trying so far (unsuccessfully with the arrayElemAt and the slice operators) I haven’t been able to test the $last operator as its not supported on my db ( I plan to upgrade).", "username": "Mi_B" }, { "code": "", "text": "unsuccessfully with the arrayElemAt and the slice operatorsPlease share what you tried because intuitively it would be the way I would go.I would have a $map on keyPoints that would check if I have an object or an array. In the case of an object the mapped value would be $$this and for array I the mapped value would be $slice with -1 position.", "username": "steevej" }, { "code": "$getField$last", "text": "HI,\nI’ve attached screenshots of the errors I got:\nScreenshot 2023-02-11 at 11.53.09820×952 192 KB\nIn the meantime I’ve upgraded from mdb:4.04 to 6.0.4.\nI’m hoping this means I can use $getField and $last in my query.", "username": "Mi_B" }, { "code": "[\n {\n Title:\"hello\",\n keyPoints: [\n {\n text: \"Line 1\"\n },\n [\n {\n text: \"Line 2\"\n },\n {\n text: \"Line 2 version 2\"\n }\n ],\n {\n text: \"Line 3\"\n }\n ]\n }\n]\ndb.collection.aggregate([\n {\n $project: {\n Title: 1,\n keyPoints: {\n $map: {\n input: \"$keyPoints\",\n in: {\n $arrayElemAt: [\n \"$$this\",\n {\n $subtract: [\n {\n $size: \"$$this\"\n },\n 1\n ]\n }\n ]\n }\n }\n }\n }\n }\n])\n", "text": "I used ChatGPT to create a mongo query to achieve this. I think it could be done neater using the operators available in 6.0+\n\nScreenshot 2023-02-11 at 15.40.201786×1860 176 KB\nsee this working mongoplayground", "username": "Mi_B" }, { "code": "{ \"$map\" : {\n \"input\" : \"$keyPoints\" ,\n in : { '$slice': [ \"$$this\" , -1 ] }\n} }\n", "text": "The document you have in your playground is different from the one you shared in your first post.The one in your playground presents an easier problem since all elements of keyPoints in an array. In your original document keyPoints had a mix of objects and arrays confirmed by your screenshot.With an array of arrays you do not have tocheck if I have an object or an arrayYou simply$map on keyPoints … the mapped value would be $slice with -1 positionThe above give the following trivial $map for your $project.As forChatGPT to create a mongo queryChatGPT 0 - steevej 2I will not tell you where I scored my other point.", "username": "steevej" } ]
Project Last element in nested array
2023-02-02T21:30:33.125Z
Project Last element in nested array
736
null
[ "queries", "next-js" ]
[ { "code": "query Products {\n products {\n category\n code\n id\n name\n sku\n }\n}\nproducts: async () => {\n return await Product.find().lean(true)\n },\n type Product {\n id: ID!\n name: String\n code: String\n sku: String\n category: String\n stores:[ProductStore]\n vendors: [ProductVendor]\n }\n\n type ProductStore {\n name: String\n price: Float\n onHand: Int\n onOrder: Float\n sales: Float\n }\n\n type ProductVendor {\n name: String\n cost: Float\n quantity: Float\n }\n", "text": "Yesterday while working on a project that includes a next.js front end and an express backend/api.I have 4 collections in my DB. I am simply trying to query the products collection and get all products. There are ~15k documents in the collection. I did not change anything to do with the query itself, just changes to the font end. One second the query was working fine, the next it was taking minutes to finish.I have no clue where it went wrong. If anyone has had this happen before let me know.here is my query:here is my resolver:here is my shcema:I went back and tested the queries in Apollo Server and ran into the same issue. All other calls to my other collections are working normally.Any help would be appreciated.Thanks", "username": "Brendan_Kersey" }, { "code": "", "text": "Found a work around by exporting all the data, then terminating the cluster. Starting a new cluster and then importing everything.This fixed it for about 12 hours, and then it happened again. None of the server code was touched. It occurred only after I ran queries to test my table.I feel like mongo is delaying the query for some reason but i cant find out why. Let me know if i can provide more info.", "username": "Brendan_Kersey" }, { "code": "", "text": "Found a work around by exporting all the data, then terminating the cluster. Starting a new cluster and then importing everything.This fixed it for about 12 hours, and then it happened again. None of the server code was touched. It occurred only after I ran queries to test my table.The above scenario make me feel like you are hitting some operational limits of shared clusters.", "username": "steevej" } ]
Query that was taking 14s now takes 13 mins
2023-02-09T15:19:47.503Z
Query that was taking 14s now takes 13 mins
1,172
null
[ "swift" ]
[ { "code": "", "text": "Hello, we need to migrate our Realm records to CoreData.Are there any good practices for doing this? We don’t have a lot of records in the Realm database, ideal thing would be completely removing Real framework and still migrating data to the CoreData.Thank you in advance.", "username": "Erekle_Meskhi" }, { "code": "", "text": "That seems fairly straight forward: read in the Realm Objects, instantiate CoreData objects from that data and then write them out?Do you have some code you’re having difficulty with or perhaps some other issue?Why move from Realm to Core Data - that may give up some functionality.", "username": "Jay" }, { "code": "", "text": "@Jay We want to remove Realm framework and migrate without it, if possible, that’s the complicated part.", "username": "Erekle_Meskhi" }, { "code": "", "text": "While describing the issue is helpful, including details about why you’re having difficulty is equally as important.Is there some code that’s not working? What’s preventing that process? What’s your coding environment, and version of XCode, Realm, Cocoapods?Are you getting an error?Also, the question is a bit broad - is the issue removing Realm or how to migrate data? You should migrate your data first and then remove Realm if that’s the case but please keep questions to one question per post so we can focus our attention on in.Provide more details and we’ll take a look.", "username": "Jay" } ]
Migrating to CoreData
2023-02-13T06:09:35.098Z
Migrating to CoreData
1,066
null
[ "queries", "java" ]
[ { "code": "BasicDBObject getQuery = new BasicDBObject();\n\t\t\tgetQuery.put(pkfield, pkvalue.longValue());\n\t\t\tFindIterable<Document> cursor = collection.find(getQuery).projection(Projections.fields(Projections.include(fieldvalue), Projections.excludeId()));\n\t\t\t\n\t\t\tIterator it = cursor.iterator();\n\t\t\tDocument doc=(Document)it.next();\n\t\t\tBigInteger rs=new BigInteger(String.valueOf(doc.get(fieldvalue)));\n", "text": "Java is taking 4000 ms to get one record where as from command prompt it took 25 ms. Why?\nMy code is;", "username": "Sridhar_v" }, { "code": "", "text": "From both JAVA and command prompt i am connecting to Atlas cluster (shared) running on AWS mumbai.Please help", "username": "Sridhar_v" }, { "code": "", "text": "Share what you do the command prompt. We need to be sure you are doing the same thing. Share the document you are querying. Share the query. Details about the size of your collections and installation are important.Note that the same query performs twice in a row may leads to different execution time if the index used in the query and/or the documents queried need to be fetch from disk.", "username": "steevej" } ]
Java is taking 4000 ms to get one record whereas from the command prompt it took 25 ms. Why?
2023-02-10T10:11:07.816Z
Java is taking 4000 ms to get one record whereas from the command prompt it took 25 ms. Why?
587
https://www.mongodb.com/…2_1019x1024.jpeg
[ "node-js" ]
[ { "code": "", "text": "My JavaScript Code is this (app.js)const MongoClient = require(‘mongodb’).MongoClient;\nconst assert = require(‘assert’);// Connection URL\nconst url = “mongodb://localhost:27017”;// Database Name\nconst dbName = ‘shopDB’;// Create a new MongoClient\nconst client = new MongoClient(url);// Use connect method to connect to the Server\nclient.connect(function(err) {\nassert.equal(null, err);\nconsole.log(“Connected successfully to server”);const db = client.db(dbName);client.close();\n});I run mongod and in other tab when i run node app.js then i am getiing this below errorCcn Any one please fix this or help me in fixing this\nMongoDB1915×1923 186 KB\nI am unable to connect to DataBase.\nPlease Help !!", "username": "Abhishek_Jain4" }, { "code": "", "text": "Either mongod is not running or you have it configured in such a way that it does not answer on the localhost loopback interface.", "username": "Jack_Woehr" }, { "code": "ECONNREFUSED ::1::27017\"mongodb://127.0.0.1:27017\"replSetReconfigc = rs.conf();\nc.members[0].host = \"127.0.0.1:27017\";\nrs.reconfig(c);\n", "text": "Hi @Abhishek_Jain4,It looks like it’s trying to connect on an IPv6 interface, but this is failing (ECONNREFUSED ::1::27017). The driver won’t fall back to IPv4 in these cases (see NODE-4678) so a quick fix is to change your connection string to \"mongodb://127.0.0.1:27017\" and reconfigure your replicaset (replSetReconfig) so the host details also reference the IP address.For example (from the shell):The above code sample assumes you only have a single replica set node. Make sure you check your config is correct before making any changes ", "username": "alexbevi" }, { "code": "", "text": "Still not able to connect. Can you help more. Please!!", "username": "Abhishek_Jain4" } ]
Can anyone help fix this error and explain why I am getting it?
2023-02-13T18:48:41.919Z
Can anyone help fix this error and explain why I am getting it?
772
null
[]
[ { "code": "tenantIddb.records.deleteMany({ tenantId: \"abc\" })\n", "text": "I am storing multi tenanted data in collections. Each collection has a tenantId. Sometimes, I will need to delete a tenant and that means I will need to run a query such asHowever, the number of records for a tenant in some collections can reach even half a billion records and this is very slow, I cannot easily get progress too. tenantId is indexed. How best should I delete data in such instances?Thinking if some of these are recommended to try?", "username": "jiewmeng" }, { "code": "", "text": "Hi, it could be worth looking at the bulk operation for removing documents, and here’s a discussion about it on StackOverflow.", "username": "Wayne_Smallman" }, { "code": "bulk.find(queryDoc).remove()db.collection.remove(queryDoc)", "text": "Will try this. But the docs doesnt say much about how this works under the hood or hows its different apart from doing it with less load on the DB?Bulk operations might be of help here. An unordered bulk.find(queryDoc).remove() basically is a version of db.collection.remove(queryDoc) optimized for large numbers of operations. It’s usage is pretty straightforward:The idea behind this approach is not to speed up the removal, but to produce less load. In my tests, the load was reduced by half and took slightly less time than a db.collection.remove(query).", "username": "jiewmeng" }, { "code": "", "text": "Another thing to keep in mind is that doing a large amount of delete may cause high network bandwidth usage and longer replication lag. (many deployments have alert on the replication lags)", "username": "Kobe_W" }, { "code": "", "text": "Hmm but how should I do this instead?", "username": "jiewmeng" }, { "code": "", "text": "", "username": "Kobe_W" }, { "code": "", "text": "How do I add a rate limit tho? I dont see a limit option?For off peak window guess thats also manually?", "username": "jiewmeng" } ]
How best to delete a huge amount of data from a collection?
2023-02-10T23:21:43.145Z
How best to delete a huge amount of data from a collection?
3,608
null
[ "flutter" ]
[ { "code": "Exception has occurred.\nArgumentError (Invalid argument(s): Failed to load dynamic library 'realm_dart': dlopen(realm_dart, 0x0001): tried: '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Library/Developer/CoreSimulator/Profiles/Runtimes/iOS.simruntime/Contents/Resources/RuntimeRootrealm_dart' (errno=2), '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Library/Developer/CoreSimulator/Profiles/Runtimes/iOS.simruntime/Contents/Resources/RuntimeRoot/usr/lib/swift/realm_dart' (errno=2), '/usr/lib/swift/realm_dart' (errno=2, not in dyld cache), '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Library/Developer/CoreSimulator/Profiles/Runtimes/iOS.simruntime/Contents/Resources/RuntimeRoot/usr/lib/swift/realm_dart' (errno=2), '/usr/lib/swift/realm_dart' (errno=2, not in dyld cache), '/Users/taco/Library/Developer/CoreSimulator/Devices/777429D3-82C2-4EEE-B4B1-E3822E51B9FA/data/Containers/Bundle/Application/69CD69C9-C345-439F-A1BD-2A7815A6EC4E/Runner.app/Frameworks/realm_dart' (errno=2), '/Users/taco/Library/Developer/CoreSimulator/Devices/777429D3-82C2-4EEE-B4B1-E3822E51B9FA/data/Containers/Bundle/Application/69CD69C9-C345-439F-A1BD-2A7815A6EC4E/Runner.app/Frameworks/realm_dart' (errno=2), '/Users/taco/Library/Developer/CoreSimulator/Devices/777429D3-82C2-4EEE-B4B1-E3822E51B9FA/data/Containers/Bundle/Application/69CD69C9-C345-439F-A1BD-2A7815A6EC4E/Runner.app/Frameworks/realm_dart' (errno=2), '/usr/lib/realm_dart' (errno=2, not in dyld cache), 'realm_dart' (errno=2), '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Library/Developer/CoreSimulator/Profiles/Runtimes/iOS.simruntime/Contents/Resources/RuntimeRoot/usr/lib/realm_dart' (errno=2))\n\n\nException has occurred.\nArgumentError (Invalid argument(s): Failed to load dynamic library '/Users/taco/Library/Developer/CoreSimulator/Devices/777429D3-82C2-4EEE-B4B1-E3822E51B9FA/data/Containers/Bundle/Application/69CD69C9-C345-439F-A1BD-2A7815A6EC4E/Runner.app/realm_dart': dlopen(/Users/taco/Library/Developer/CoreSimulator/Devices/777429D3-82C2-4EEE-B4B1-E3822E51B9FA/data/Containers/Bundle/Application/69CD69C9-C345-439F-A1BD-2A7815A6EC4E/Runner.app/realm_dart, 0x0001): tried: '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Library/Developer/CoreSimulator/Profiles/Runtimes/iOS.simruntime/Contents/Resources/RuntimeRoot/Users/taco/Library/Developer/CoreSimulator/Devices/777429D3-82C2-4EEE-B4B1-E3822E51B9FA/data/Containers/Bundle/Application/69CD69C9-C345-439F-A1BD-2A7815A6EC4E/Runner.app/realm_dart' (errno=2), '/Users/taco/Library/Developer/CoreSimulator/Devices/777429D3-82C2-4EEE-B4B1-E3822E51B9FA/data/Containers/Bundle/Application/69CD69C9-C345-439F-A1BD-2A7815A6EC4E/Runner.app/realm_dart' (errno=2))\n", "text": "When I start using Sync I always receive the following Exceptions displayed below. These exceptions only occur when using visual studio code. No exceptions seem to occur when running directly from the command line or from within Xcode. Despite this warning, everything seems to work fine. 
Any help to avoid this problem is appreciated.Flutter doctor -v does not display any problems.Realm version: ^0.11.0+rc\nFlutter version: 3.7.0", "username": "Taco_Kind" }, { "code": "realm_dart", "text": "Hi @Taco_Kind,\nWhich package do you use ‘realm_dart’ or ‘realm’?\nIf it is realm_dart, you have to make sure that you have run ‘dart run realm_dart install’.", "username": "Desislava_St_Stefanova" }, { "code": "flutter pub run realm install", "text": "@Taco_Kind,\nif you are trying to run the tests from Visual studio code you have to run flutter pub run realm install first.The official Realm SDK for Flutter. Realm is a mobile database - an alternative to SQLite and key-value stores.", "username": "Desislava_St_Stefanova" }, { "code": "", "text": "I’ve got the exact same problem. Running from the terminal works fine. Manually skipping the errors works too. I’ve using the ‘realm’ package. I did try switching everything over to ‘realm_dart’ and got the same problem again. I didn’t have this issue always. I can’t really think of anything I changed in my project that would have caused this error to start popping up.", "username": "Dylan_Smith" }, { "code": "", "text": "I’ve figured that the error occurs specifically when creating a configuration object.", "username": "Dylan_Smith" }, { "code": "", "text": "I think this issue might have something to do with the init.dart file from the realm package. Check out this comment on a github issue I created it might help! GitHub Issue", "username": "Dylan_Smith" }, { "code": "", "text": "a somewhat rigorous solution for me was to regenerate the iOS files. It also seems that changes were made with 0.10rc that fixes it after the regeneration. Also, with creating a new project the problem does not occur anymore. What exactly causes the problem remains unknown to me…", "username": "Taco_Kind" }, { "code": "", "text": "Hi all,\nThanks for reporting!\nI answered to the relevant issue in GitHub Error: Failed to load dynamic library 'realm_dart' on ios · Issue #1157 · realm/realm-dart · GitHub\nYou can follow the link.", "username": "Desislava_St_Stefanova" }, { "code": "", "text": "", "username": "henna.s" } ]
Failed to load dynamic library 'realm_dart': dlopen(realm_dart, 0x0001)
2023-02-03T09:34:38.929Z
Failed to load dynamic library 'realm_dart': dlopen(realm_dart, 0x0001)
2,025
null
[ "aggregation", "queries", "data-modeling", "indexes" ]
[ { "code": "", "text": "I am building an APP. Suppose that it will haveThis is assuming worst case scenario without any indexes.Is this APP feasible?", "username": "Big_Cat_Public_Safety_Act" }, { "code": "username// MongoDB Playground\n// Select the database to use.\nuse('mongodbVSCodePlaygroundDB');\n\n// The drop() command destroys all data from a collection.\n// Make sure you run it against the correct database and collection.\ndb.sales.drop();\n\nconst docs = (new Array(10**5)).fill(0)\ndocs.map((_, index) => {\n return {\n '_id': index,\n 'username': 'user' + index,\n 'item': 'abc',\n }\n })\n// Insert documents into the sales collection.\ndb.sales.insertMany(docs);\ndb.sales.find({username:'user1000'}).limit(1).explain('executionStats');\n\n\n{\n//...\n \"winningPlan\": {\n \"stage\": \"LIMIT\",\n \"limitAmount\": 1,\n \"inputStage\": {\n \"stage\": \"COLLSCAN\",\n \"filter\": {\n \"username\": {\n \"$eq\": \"user1000\"\n }\n },\n \"direction\": \"forward\"\n }\n },\n \"rejectedPlans\": []\n },\n \"executionStats\": {\n \"executionSuccess\": true,\n \"nReturned\": 0,\n \"executionTimeMillis\": 988, // <---- i.e about 1 second\n \"totalKeysExamined\": 0,\n \"totalDocsExamined\": 100000,\n \"executionStages\": {\n \"stage\": \"LIMIT\",\n \"nReturned\": 0,\n \"executionTimeMillisEstimate\": 543,\n \"isEOF\": 1,\n \"limitAmount\": 1,\n \"inputStage\": {\n \"stage\": \"COLLSCAN\",\n \"filter\": {\n \"username\": {\n \"$eq\": \"user1000\"\n }\n },\n \"nReturned\": 0,\n \"isEOF\": 1,\n \"docsExamined\": 100000\n }\n }\n },\n//...\n\n_idusername{ _id: username }", "text": "It seems unfeasible without querying with indexes. By the way, a username is easily indexable, and you can probably run the query while the user is still typing the password.I run this experiment in a mongo playground, against a free Atlas cluster:This is just a hundred thousand docs, and I’d indeed expect it to scale at least a factor of ten in your case. Also mind that the query is very simple.So I think the answer is “no” in the most crappy cluster, but remember that upgrading hardware you may increase this several orders of magnitude.I wonder whether it is possible to use _id field as your username field, I mean { _id: username }. It would make sense and you would still have an index to query.", "username": "santimir" }, { "code": "", "text": "You will be screwed by disk use without any indexing.", "username": "Kobe_W" } ]
Is a collection scan of a million documents feasible?
2022-12-22T22:05:53.823Z
Is a collection scan of a million documents feasible?
1,939
null
[ "schema-validation" ]
[ { "code": "db.createCollection(\"collect\", {\n validator: {\n $jsonSchema: {\n bsonType: \"object\",\n additionalProperties: true,\n required: [\"component\", \"path\"],\n properties: {\n component: {\n bsonType: \"string\"\n },\n path: {\n bsonType: \"string\",\n description: \"Set to default value\"\n }\n }\n }\n)\n", "text": "Hello Enthusiast,\nI am implementing data schema. Here i have to mention default value to a field of document.is there any possible way to set default value to a field?Best Regards,\nM Jagadeesh.", "username": "Manepalli_Jagadeesh" }, { "code": "", "text": "Hi @Manepalli_Jagadeesh,The schema validation is only intend to enforce/validate a schema definition and not populate values.The server does not have an ability to set default values and we recommend users to set it in their data access application layer or use ODM’s to set those values on the application side.If you use Atlas you can consider having a trigger on the collection to update every inserted document with a specific filed value.Best regards,\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Hi @Pavel_Duchovny,Thank you for information and providing hints to implement default values.Best Regards,\nM Jagadeesh.", "username": "Manepalli_Jagadeesh" }, { "code": "", "text": "Heyyo @Pavel_Duchovny,Just wanted to confirm, this is still accurate. Collection schemas have no mechanism to provide default field values for inference on new records? Instead one can enforce defaults at the client or API level.", "username": "Llama_D_Attore" } ]
Setting default values in data schema
2020-08-27T05:48:56.127Z
Setting default values in data schema
27,093
null
[ "serverless" ]
[ { "code": "getLastRequestStatistics", "text": "Is it possible to, either before executing or immediately afterwards, understand the RPU/WPU impact (cost) of a single query?Azure MongoDB has getLastRequestStatistics but this command does not exist on Atlas instances.From reading other topics on the forum it does seem there is a theme of not entirely understanding how RPU is calculated, or indeed how and when to optimise for it. If the response returned to clients using SDKs included the cost it would allow us to shorten the development cycle.My first usage of Mongo Atlas Serverless resulted in a $60 bill because of a missing index, I wish it was easiest to spot this during development rather than once it hit production under full load. The cost isn’t important, but the lack of tooling and quicker visibility can make this risky business. I’m currently considering alternative options.", "username": "JamesSherlock" }, { "code": "", "text": "Hey @JamesSherlock - Welcome to the community and thanks for providing your feedback regarding Serverless usage.Is it possible to, either before executing or immediately afterwards, understand the RPU/WPU impact (cost) of a single query?Unfortunately this type of feature isn’t available in Atlas. In saying so, I’ve created a post on the MongoDB Feedback engine for this feature in which yourself and others can vote for. If you believe theres some information missing please feel free to comment on the post.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Serverless Query Cost
2023-02-05T22:20:18.671Z
Serverless Query Cost
1,249
null
[ "replication" ]
[ { "code": "", "text": "Mongo Version: 4.4.1 in ReplicaSet with 3 nodes\nDisk Size: 750GBThe collection oplog.rs is much larger than the default limit of 35GB. Right now It has reached the size of over 300GB which is more than 10 times the limit. it’s a normal thing? How can I do to reduce its size?I tried the compact command but it did not reduce the size. Thanks", "username": "Francesco_Rossi" }, { "code": "majority commit point", "text": "Hi @Francesco_Rossi welcome to the community!The oplog is a special collection that is the lifeblood of a replica set, and thus can grow beyond the configured size as mentioned in https://www.mongodb.com/docs/manual/core/replica-set-oplog/Unlike other capped collections, the oplog can grow past its configured size limit to avoid deleting the majority commit point.A majority commit point is:Information, from the viewpoint of this member, regarding the most recent operation that has been written to a majority of replica set members.If the oplog grew to 10x the configured size, you might want to double check that all is well with the replica set. A non-moving majority commit point is typically due to the use of an Arbiter, and one of the data bearing node is down.The typical scenario is: writes keep coming in during this downtime (usually w:1 writes), which is possible due to the Arbiter keeping a Primary available, even though the replica set is in an unhealthy state. The Primary is unable to send the oplog entries to the offline Secondary, and thus the “majority commit point” of the data is held back to the most recently committed entry of the offline Secondary (which keeps falling behind as time goes on and it stays offline). At this point, the Primary’s oplog will keep growing so the offline Secondary does not fall off the oplog, which typically require an expensive maintenance procedure to fix.Having said that, this is only a typical scenario. There are other possible scenario as well, usually involving network issues. Another possibility is that you hit a fixed issue, since you’re using an older version.I recommend you to upgrade to the latest in the 4.4 series (currently 4.4.18 is the latest). I counted 7 mentions of oplog improvements between 4.4.1 and 4.4.18, so you might want to upgrade and see if this persists.Best regards\nKevin", "username": "kevinadi" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
The oplog.rs is much larger than the default limit
2023-02-13T09:36:24.993Z
The oplog.rs is much larger than the default limit
1,052
null
[ "aggregation", "atlas-search" ]
[ { "code": "{\n \"_id\": {\n \"$oid\": \"6368ca3fcb0c042cbc5b198a\"\n },\n \"articleid\": \"159447148\",\n \"headline\": \"T20 World Cup: Zomato’s Epic \",\n \"subtitle\": \"Response To ‘Cheat Day’ Remark Is Unmissable\",\n \"fulltext\": \"The trade began on October 21\",\n \"article_type\": \"online\",\n \"pubdate\": \"2022-11-07\"\n}\n[\n {\n \"$search\": {\n \"index\": \"fulltext\",\n \"compound\": {\n \"must\": [\n {\n \"text\": {\n \"query\": \"AI\",\n \"path\": [\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n },\n {\n \"text\": {\n \"query\": \"OPENAI\",\n \"path\": [\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n }\n ]\n }\n }\n },\n {\n \"$match\": {\n \"pubdate\": {\n \"$gte\": \"2023-01-28\",\n \"$lte\": \"2023-01-28\"\n }\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"articleid\": 1\n }\n }\n]\n[\n {\n \"$search\": {\n \"index\": \"fulltext\",\n \"compound\": {\n \"should\": [\n {\n \"text\": {\n \"query\": \"AI\",\n \"path\": [\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n },\n {\n \"text\": {\n \"query\": \"OPENAI\",\n \"path\": [\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n }\n ]\n }\n }\n },\n {\n \"$match\": {\n \"pubdate\": {\n \"$gte\": \"2023-01-28\",\n \"$lte\": \"2023-01-28\"\n }\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"articleid\": 1\n }\n }\n]\n[\n {\n \"$search\": {\n \"index\": \"fulltext\",\n \"text\": {\n \"query\": \"OPENAI\",\n \"path\": [\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n }\n },\n {\n \"$match\": {\n \"pubdate\": {\n \"$gte\": \"2023-01-28\",\n \"$lte\": \"2023-01-28\"\n }\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"articleid\": 1\n }\n }\n]\n", "text": "I am trying to implement Atlas search on the collection which has data of around 300 million, collection structure is like this -Now I am using MUST, SHOULD, and simple search together but the problem is MUST works really really fast than SHOULD and follow by a simple searchHere is the query -MUST -SHOULD -SIMPLE Search -As you can see all three query I have 2 questions about the above statement.", "username": "Utsav_Upadhyay2" }, { "code": "pubdate", "text": "Hi @Utsav_Upadhyay2 and welcome to the MongoDB community forum!!Why must is faster than should and followed by simple(single search), is anything wrong with my single search (simple search) query?The MUST operator in Atlas search works on the concept of boolean AND operator where as the SHOULD operator uses the concept for boolean OR operator.For more details, you could visit the documentation on Compounds in Atlas search.\nIn saying that, could you help me understand on how did you calculate the execution time for the above three operators without the $match and $project stage.However, based on the above sample data, I tried to run the three queries against it which did not yield any results. For further help, could you share the index definition for the above collection you are using?Before searching for the word using Atlas search, why can’t we filter data first i.e. - pubdate in the above query, and then run the search in this way it will search on fewer data and get data faster, rather than searching on all data first and then do a match/filter of pubdate?For this, you could use $filter with $range for the query. 
Please make note that, $range works with the ISODate() format so you would need to change the pubdate to ISODate() format.Let us know if you have any further questions.Best Regards\nAasawari", "username": "Aasawari" }, { "code": "{\n \"_id\": {\n \"$oid\": \"6368ca3fcb0c042cbc5b198a\"\n },\n \"articleid\": \"159447148\",\n \"headline\": \"T20 World Cup: Zomato’s Epic \",\n \"subtitle\": \"Response To ‘Cheat Day’ Remark Is Unmissable\",\n \"fulltext\": \"The trade began on October 21\",\n \"article_type\": \"online\",\n \"pubdate\": \"2022-11-07\"\n}\n", "text": "@Aasawari, Thank you so much for your quick response! My goal is to search keywords based on user input, I mean I need to search AND, OR, and single-term searches, I tried to find some example and implement it in the search but not much help as the query is slow,I tried executionstats() and explain() to check the execution timeCould you please give 3 examples based on the below document on AND which is MUST, OR which is SHOULD, and single-term search separate queries, as I found it a little confusing while seeing some examples in the docs?I am using fulltext index (atlas search index), I am trying to search words from 3 fields which are the headline, fulltext, subtitle", "username": "Utsav_Upadhyay2" }, { "code": "\n{\n \"mappings\": {\n \"dynamic\": true,\n \"fields\": {\n \"fulltext\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"autocomplete\"\n }\n ],\n \"headline\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"autocomplete\"\n }\n ],\n \"subtitle\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"autocomplete\"\n }\n ]\n }\n }\n}\nfulltextt[\n {\n '$search': {\n 'index': 'logical', \n 'compound': {\n 'must': [\n {\n 'text': {\n 'query': 'Response', \n 'path': 'subtitle'\n }\n }, {\n 'text': {\n 'query': 'trad', \n 'path': 'fulltext'\n }\n }\n ]\n }\n }\n }\n]\n[\n {\n '$search': {\n 'index': 'logical', \n 'compound': {\n 'should': [\n {\n 'text': {\n 'query': 'Response', \n 'path': 'subtitle'\n }\n }, {\n 'text': {\n 'query': 'trad', \n 'path': 'fulltext'\n }\n }\n ]\n }\n }\n }\n] \n[\n {\n '$search': {\n 'index': 'logical', \n 'text': {\n 'query': 'trade', \n 'path': 'fulltext'\n }\n }\n }\n]\nexecutionStats$search", "text": "Hi @Utsav_Upadhyay2 and thank you for sharing the above information.Could you please give 3 examples based on the below document on AND which is MUST, OR which is SHOULD, and single-term search separate queries, as I found it a little confusing while seeing some examples in the docs?I tried the following queries for the above compound operator.\nHere is how the search index looks like:Please note that above index is created with mingrams as 2, hence trying the below query for MUST and SHOULD:Since MUST comprises the logical AND operator, in the below query, the query responds with 0 documents as for the field fulltext, the search query t is not tokenised.\nTherefore, the below query responds with 0 documents:Using the similar query with SHOULD with is OR operator, responds with the documents when one of the two conditions is true.Now, lastly for simple search:responds with the document, when the condition is satisfied.Lastly, could you confirm if the executionStats mentioned involves for each stage of the pipeline or only for the $search stage?Let us know if you have any further queries.Best Regards\nAasawari", "username": "Aasawari" }, { "code": "", "text": "@Aasawari thank you so much for this solution, I was wondering if we can use Date with a simple search and also with must, 
should, so if we add filter date or must date with lte & gte in the first stage of the compound and then query the text does this makes query faster as compared to normal search with text only, could you please share a syntax of date, lte & gte with must where we search a text after filtering by date?", "username": "Utsav_Upadhyay2" }, { "code": "pubdatepubdate/Date", "text": "Hi @Utsav_Upadhyay2Could you help me a few more details on what you are trying to achieve:I was wondering if we can use Date with a simple search and also with must, should, so if we add filter date or must date with lte & gte in the first stage of the compoundRegards\nAasawari", "username": "Aasawari" }, { "code": "{\n \"_id\" : ObjectId(\"63cf39df1d7798a846b2eb0e\"),\n \"articleid\" : \"9d23e3ab-9b7d-11ed-a650-b0227af59807\",\n \"headline\" : \"Microsoft to Put $10b More in ChatGPT Maker OpenAI\",\n \"subtitle\" : \"OpenAI needs Microsoft’s funding and cloud-computing power to run increasingly complex models\",\n \"fulltext\" : \"\\nMS Adds $1()B to Investment in ChatGPT Maker\",\n \"pubdate\" : \"2023-01-24\",\n \"article_type\" : \"print\",\n \"date\" : ISODate(\"2023-01-24T00:00:00.000+0000\")\n}\ndb.getCollection(\"article_fulltext\").aggregate([\n {\n \"$search\":{\n \"index\":\"fulltext\",\n \"compound\":{\n \"filter\":[\n {\n \"range\":{\n \"path\":\"date\",\n \"gte\":\"ISODate(\"\"2023-01-01T00:00:00.000Z\"\")\",\n \"lte\":\"ISODate(\"\"2023-01-31T00:00:00.000Z\"\")\"\n }\n }\n ],\n \"should\":[\n {\n \"text\":{\n \"query\":\"CHATGPT\",\n \"path\":[\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n },\n {\n \"text\":{\n \"query\":\"OPENAI\",\n \"path\":[\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n }\n ],\"minimumShouldMatch\": 1\n }\n }\n }\n])\ndb.getCollection(\"article_fulltext\").aggregate([{\n $search:{\n index:\"fulltext\",\n text:{\n query:\"Microsoft\",\n path:[\"headline\", \"fulltext\", \"subtitle\"]\n }\n }\n}])\n{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"articleid\": {\n \"type\": \"string\"\n },\n \"fulltext\": {\n \"type\": \"string\"\n },\n \"headline\": {\n \"type\": \"string\"\n },\n \"subtitle\": {\n \"type\": \"string\"\n }\n }\n }\n}\n", "text": "I have modified the document schema for using data -I am facing 2 Major performance issues.I have 10 Million records in the collection, in the field - headline, subtitle, fulltext I am trying to find the words with Operator like - must, should, and simple search.Now Whenever I run a must query it is always faster and return the result within 10 seconds no matter how big the query is, but in the case of a should or a simple search of a single word it takes more than 50 seconds why?Now, I was thinking due to large data like 10 Million I need to filter data based on date first and then search full text search, in this way I can get result much faster.Below are my sample queries -Should -simple search -Atlas search Index -I am facing issues with the above queries, as I am getting really good performance with must search!", "username": "Utsav_Upadhyay2" }, { "code": "\"must\"\"must\"\"must\": [\n {\n \"text\": {\n \"query\": \"CHATGPT\",\n \"path\": [\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n },\n {\n \"text\": {\n \"query\": \"OPENAI\",\n \"path\": [\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n }\n]\nmustANDpathmustdb.collection.aggregate([\n {\n \"$search\": {\n \"text\": {\n \"query\": [\"OPENAI\", \"CHATGPT\"],\n \"path\": [\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n 
}\n }\n])\n/// OUTPUT (All 5 documents):\n[\n {\n headline: 'CHATGPT and OPENAI is amazing',\n subtitle: 'OPENAI details here',\n fulltext: 'OPENAI is going to exist'\n },\n {\n headline: 'CHATGPT',\n subtitle: 'OPENAI',\n fulltext: 'nothing'\n },\n {\n headline: 'CHATGPT and OPENAI technology is amazing',\n subtitle: 'CHATGPT subtitle details here',\n fulltext: 'OPENAI is going to exist'\n },\n {\n headline: 'OPENAI only is amazing',\n subtitle: 'OPENAI details here',\n fulltext: 'OPENAI is going to exist'\n },\n {\n headline: 'CHATGPT only technology is amazing',\n subtitle: 'CHATGPT details here',\n fulltext: 'CHATGPT is going to exist'\n }\n]\nmustdb.collection.aggregate([\n {\n \"$search\": {\n \"compound\": {\n \"must\":[\n {\n \"text\": {\n \"query\": \"CHATGPT\",\n \"path\": [\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n },\n {\n \"text\": {\n \"query\": \"OPENAI\",\n \"path\": [\n \"headline\",\n \"fulltext\",\n \"subtitle\"\n ]\n }\n }\n ]\n }\n }\n }\n])\n/// OUTPUT (Only 3 documents containing both \"OPENAI\" AND \"CHATGPT\" in the specified paths.\n[\n {\n headline: 'CHATGPT and OPENAI is amazing',\n subtitle: 'OPENAI details here',\n fulltext: 'OPENAI is going to exist'\n },\n {\n headline: 'CHATGPT',\n subtitle: 'OPENAI',\n fulltext: 'nothing'\n },\n {\n headline: 'CHATGPT and OPENAI technology is amazing',\n subtitle: 'CHATGPT subtitle details here',\n fulltext: 'OPENAI is going to exist'\n }\n]\nmust\"date\"\"Date\"filter\"date\"{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"articleid\": {\n \"type\": \"string\"\n },\n \"date\": { /// Date field added to index definition\n \"type\": \"date\"\n },\n \"fulltext\": {\n \"type\": \"string\"\n },\n \"headline\": {\n \"type\": \"string\"\n },\n \"subtitle\": {\n \"type\": \"string\"\n }\n }\n }\n}\ndb.collection.stats()", "text": "Hi @Utsav_Upadhyay2,I am facing 2 Major performance issues.In the above, it seems like 1. could be caused by your environment and use case rather than the performance issue itself. In saying this, It seems your concern is with performance, more specifically the performance comparisons between the “Simple Search” versus the usage of the \"must\" clause.I noted that in your initial post, you are doing a “simple search” on a single search term and comparing this performance to the \"must\" clause operator with two search terms. I do not think this is a fair comparison as it would generally result in a different result set. I.e. different amount of documents being returned (or even different documents being returned).As @Aasawari had mentioned, the must clause maps to the AND boolean operator. In the above, documents must have “CHATGPT” AND “OPENAI” in the the specified path’s to be returned. If all the fields in a document only contain 1 of the 2 terms, then it would not be returned. Below is comparison of a simple search and the must clause being used on 5 sample documents:Using “simple search”:Compared to must operator:In this case, the must example is returning less documents than the “simple search” example I had provided above.Lastly, I noticed your most recent index definition did not include the \"date\" field with the \"Date\" data type. Is this expected or a typo? I presume this is for the usage of filter against the \"date\" field. I.e.:Apologies if my understanding is incorrect. 
However if it is incorrect, could you perform a similar test and help us with more information to reproduce what you’re seeing in our local testing environment, such as:Regards,\nJason", "username": "Jason_Tran" }, { "code": "{\n \"ns\" : \"impact.article_fulltext\",\n \"size\" : NumberLong(19509585533),\n \"count\" : 6393475.0,\n \"avgObjSize\" : 3051.0,\n \"storageSize\" : NumberLong(11533303808),\n \"capped\" : false,\n \"wiredTiger\" : {\n \"metadata\" : {\n \"formatVersion\" : 1.0\n },\n \"creationString\" : \"access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=1),assert=(commit_timestamp=none,durable_timestamp=none,read_timestamp=none),block_allocation=best,block_compressor=snappy,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=4KB,key_format=q,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,leaf_value_max=64MB,log=(enabled=false),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_custom=(prefix=,start_generation=0,suffix=),merge_max=15,merge_min=0),memory_page_image_max=0,memory_page_max=10m,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u\",\n \"type\" : \"file\",\n \"uri\" : \"statistics:table:collection-6-9203085737631202376\",\n \"LSM\" : {\n \"bloom filter false positives\" : 0.0,\n \"bloom filter hits\" : 0.0,\n \"bloom filter misses\" : 0.0,\n \"bloom filter pages evicted from cache\" : 0.0,\n \"bloom filter pages read into cache\" : 0.0,\n \"bloom filters in the LSM tree\" : 0.0,\n \"chunks in the LSM tree\" : 0.0,\n \"highest merge generation in the LSM tree\" : 0.0,\n \"queries that could have benefited from a Bloom filter that did not exist\" : 0.0,\n \"sleep for LSM checkpoint throttle\" : 0.0,\n \"sleep for LSM merge throttle\" : 0.0,\n \"total size of bloom filters\" : 0.0\n },\n \"block-manager\" : {\n \"allocations requiring file extension\" : 108850.0,\n \"blocks allocated\" : 2487158.0,\n \"blocks freed\" : 2260525.0,\n \"checkpoint size\" : NumberLong(11488280576),\n \"file allocation unit size\" : 4096.0,\n \"file bytes available for reuse\" : 44888064.0,\n \"file magic number\" : 120897.0,\n \"file major version number\" : 1.0,\n \"file size in bytes\" : NumberLong(11533303808),\n \"minor version number\" : 0.0\n },\n \"btree\" : {\n \"btree checkpoint generation\" : 24811.0,\n \"btree clean tree checkpoint expiration time\" : NumberLong(9223372036854775807),\n \"column-store fixed-size leaf pages\" : 0.0,\n \"column-store internal pages\" : 0.0,\n \"column-store variable-size RLE encoded values\" : 0.0,\n \"column-store variable-size deleted values\" : 0.0,\n \"column-store variable-size leaf pages\" : 0.0,\n \"fixed-record size\" : 0.0,\n \"maximum internal page key size\" : 368.0,\n \"maximum internal page size\" : 4096.0,\n \"maximum leaf page key size\" : 2867.0,\n \"maximum leaf page size\" : 32768.0,\n \"maximum leaf page value size\" : 67108864.0,\n \"maximum tree depth\" : 5.0,\n \"number of key/value pairs\" : 0.0,\n \"overflow pages\" : 0.0,\n \"pages rewritten by compaction\" : 0.0,\n \"row-store empty values\" : 0.0,\n 
\"row-store internal pages\" : 0.0,\n \"row-store leaf pages\" : 0.0\n },\n \"cache\" : {\n \"bytes currently in the cache\" : 32718145.0,\n \"bytes dirty in the cache cumulative\" : NumberLong(106693214626),\n \"bytes read into cache\" : NumberLong(517832151735),\n \"bytes written from cache\" : NumberLong(92076373337),\n \"checkpoint blocked page eviction\" : 5947.0,\n \"data source pages selected for eviction unable to be evicted\" : 25887.0,\n \"eviction walk passes of a file\" : 3056723.0,\n \"eviction walk target pages histogram - 0-9\" : 2586282.0,\n \"eviction walk target pages histogram - 10-31\" : 290118.0,\n \"eviction walk target pages histogram - 128 and higher\" : 0.0,\n \"eviction walk target pages histogram - 32-63\" : 51760.0,\n \"eviction walk target pages histogram - 64-128\" : 128563.0,\n \"eviction walks abandoned\" : 54482.0,\n \"eviction walks gave up because they restarted their walk twice\" : 2535193.0,\n \"eviction walks gave up because they saw too many pages and found no candidates\" : 50588.0,\n \"eviction walks gave up because they saw too many pages and found too few candidates\" : 5148.0,\n \"eviction walks reached end of tree\" : 5268235.0,\n \"eviction walks started from root of tree\" : 2645976.0,\n \"eviction walks started from saved location in tree\" : 410747.0,\n \"hazard pointer blocked page eviction\" : 846.0,\n \"in-memory page passed criteria to be split\" : 574.0,\n \"in-memory page splits\" : 287.0,\n \"internal pages evicted\" : 114326.0,\n \"internal pages split during eviction\" : 3.0,\n \"leaf pages split during eviction\" : 110274.0,\n \"modified pages evicted\" : 2179959.0,\n \"overflow pages read into cache\" : 0.0,\n \"page split during eviction deepened the tree\" : 0.0,\n \"page written requiring cache overflow records\" : 135.0,\n \"pages read into cache\" : 11598349.0,\n \"pages read into cache after truncate\" : 0.0,\n \"pages read into cache after truncate in prepare state\" : 0.0,\n \"pages read into cache requiring cache overflow entries\" : 104.0,\n \"pages requested from the cache\" : 234609621.0,\n \"pages seen by eviction walk\" : 83939149.0,\n \"pages written from cache\" : 2471972.0,\n \"pages written requiring in-memory restoration\" : 288515.0,\n \"tracked dirty bytes in the cache\" : 4504688.0,\n \"unmodified pages evicted\" : 10935710.0\n },\n \"cache_walk\" : {\n \"Average difference between current eviction generation when the page was last considered\" : 0.0,\n \"Average on-disk page image size seen\" : 0.0,\n \"Average time in cache for pages that have been visited by the eviction server\" : 0.0,\n \"Average time in cache for pages that have not been visited by the eviction server\" : 0.0,\n \"Clean pages currently in cache\" : 0.0,\n \"Current eviction generation\" : 0.0,\n \"Dirty pages currently in cache\" : 0.0,\n \"Entries in the root page\" : 0.0,\n \"Internal pages currently in cache\" : 0.0,\n \"Leaf pages currently in cache\" : 0.0,\n \"Maximum difference between current eviction generation when the page was last considered\" : 0.0,\n \"Maximum page size seen\" : 0.0,\n \"Minimum on-disk page image size seen\" : 0.0,\n \"Number of pages never visited by eviction server\" : 0.0,\n \"On-disk page image sizes smaller than a single allocation unit\" : 0.0,\n \"Pages created in memory and never written\" : 0.0,\n \"Pages currently queued for eviction\" : 0.0,\n \"Pages that could not be queued for eviction\" : 0.0,\n \"Refs skipped during cache traversal\" : 0.0,\n \"Size of the root page\" : 0.0,\n \"Total 
number of pages currently in cache\" : 0.0\n },\n \"compression\" : {\n \"compressed page maximum internal page size prior to compression\" : 4096.0,\n \"compressed page maximum leaf page size prior to compression \" : 32768.0,\n \"compressed pages read\" : 11478869.0,\n \"compressed pages written\" : 2421005.0,\n \"page written failed to compress\" : 998.0,\n \"page written was too small to compress\" : 49969.0\n },\n \"cursor\" : {\n \"bulk loaded cursor insert calls\" : 0.0,\n \"cache cursors reuse count\" : 3060833.0,\n \"close calls that result in cache\" : 0.0,\n \"create calls\" : 2331.0,\n \"insert calls\" : 5473321.0,\n \"insert key and value bytes\" : NumberLong(5985885968),\n \"modify\" : 17575771.0,\n \"modify key and value bytes affected\" : NumberLong(64607112871),\n \"modify value bytes modified\" : 216058125.0,\n \"next calls\" : 40669361.0,\n \"open cursor count\" : 0.0,\n \"operation restarted\" : 10.0,\n \"prev calls\" : 1.0,\n \"remove calls\" : 0.0,\n \"remove key bytes removed\" : 0.0,\n \"reserve calls\" : 0.0,\n \"reset calls\" : 74299813.0,\n \"search calls\" : 77818541.0,\n \"search near calls\" : 24160712.0,\n \"truncate calls\" : 0.0,\n \"update calls\" : 0.0,\n \"update key and value bytes\" : 0.0,\n \"update value size change\" : 280870968.0\n },\n \"reconciliation\" : {\n \"dictionary matches\" : 0.0,\n \"fast-path pages deleted\" : 0.0,\n \"internal page key bytes discarded using suffix compression\" : 2170919.0,\n \"internal page multi-block writes\" : 6852.0,\n \"internal-page overflow keys\" : 0.0,\n \"leaf page key bytes discarded using prefix compression\" : 0.0,\n \"leaf page multi-block writes\" : 114718.0,\n \"leaf-page overflow keys\" : 0.0,\n \"maximum blocks required for a page\" : 1.0,\n \"overflow values written\" : 0.0,\n \"page checksum matches\" : 38383.0,\n \"page reconciliation calls\" : 2236972.0,\n \"page reconciliation calls for eviction\" : 1443506.0,\n \"pages deleted\" : 0.0\n },\n \"session\" : {\n \"object compaction\" : 0.0\n },\n \"transaction\" : {\n \"update conflicts\" : 0.0\n }\n },\n \"nindexes\" : 3.0,\n \"indexBuilds\" : [\n\n ],\n \"totalIndexSize\" : 207925248.0,\n \"indexSizes\" : {\n \"_id_\" : 76206080.0,\n \"pubdate_1\" : 36888576.0,\n \"articleid_1\" : 94830592.0\n },\n \"scaleFactor\" : 1.0,\n \"ok\" : 1.0,\n \"$clusterTime\" : {\n \"clusterTime\" : Timestamp(1676273747, 38),\n \"signature\" : {\n \"hash\" : BinData(0, \"xBHdkvvZezlmwL1Bh0naumwr0kM=\"),\n \"keyId\" : NumberLong(7155578345936125954)\n }\n },\n \"operationTime\" : Timestamp(1676273747, 38)\n}\n", "text": "@Jason_Tran thank you so much for this answer I do understand everything. 
As you asked 3 questions, below are the answers. For the above two questions, I am thinking about a solution: using the date filter first and then searching the term. You are right, I need to index the date too and change the date format too. But could you please share the correct syntax for using a date with the search query, in simple search and in must and should too?", "username": "Utsav_Upadhyay2" }, { "code": "filterdatedb.getCollection(\"article_fulltext\").aggregate([ {\n $search:{\n index:\"fulltext\",\n text:{\n query:\"Microsoft\",\n path:[\"headline\", \"fulltext\", \"subtitle\"]\n }\n }\n}])\ndatecompoundmustdb.getCollection(\"article_fulltext\").aggregate([\n {\n \"$search\":{\n \"index\":\"fulltext\",\n \"compound\":{\n \"filter\":[\n {\n \"range\":{\n \"path\":\"date\",\n \"gte\": ISODate(\"2023-01-01T00:00:00.000Z\"),\n \"lte\": ISODate(\"2023-01-31T00:00:00.000Z\")\n }\n }\n ],\n \"must\":[\n { \n text: {\n query: \"Microsoft\",\n path: [\"headline\", \"fulltext\", \"subtitle\"]\n }\n }\n ]\n }\n }\n }\n])\ntextcompoundfilterrange", "text": "But could you please share the correct syntax for using a date with the search query, in simple search and in must and should too?\nIt depends on the terms you’re searching for and the output expected. Aasawari had provided some examples of this earlier. However, based off your previous “simple search” provided, take a look at the example below, which adds a filter on the date ranges provided to that “simple search”:\nsimple search -\nA similar search with date filtering (used from your other example) will be possible with compound and the must clause:\nI haven’t tested the above, so try it out and see how you go. Again, it’ll probably depend on the search terms used and your expected output, so I am unable to provide the full exact query(s) you’re after. You’ll need to adjust these accordingly based off your use case / search terms.\nPlease refer to the following documentation for more examples:\nRegards,\nJason", "username": "Jason_Tran" } ]
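The corrected queries above pair the date filter with must; the original question also asked about should. A minimal, untested sketch of that variant, assuming the date field has been added to the search index with type "date" as discussed earlier. minimumShouldMatch: 1 keeps the at-least-one-term requirement from the original query, and filter clauses do not contribute to the relevance score:

```javascript
// Untested sketch (mongosh syntax): the date filter combined with the
// original "should" clauses. Documents must fall inside the date range and
// match at least one of the two terms; matching both terms scores higher.
db.getCollection("article_fulltext").aggregate([
  {
    "$search": {
      "index": "fulltext",
      "compound": {
        "filter": [
          {
            "range": {
              "path": "date",
              "gte": ISODate("2023-01-01T00:00:00.000Z"),
              "lte": ISODate("2023-01-31T00:00:00.000Z")
            }
          }
        ],
        "should": [
          { "text": { "query": "CHATGPT", "path": ["headline", "fulltext", "subtitle"] } },
          { "text": { "query": "OPENAI", "path": ["headline", "fulltext", "subtitle"] } }
        ],
        "minimumShouldMatch": 1
      }
    }
  }
])
```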
Atlas search with conditions and why MUST works faster than SHOULD and followed by other?
2023-01-28T15:51:09.483Z
Atlas search with conditions and why MUST works faster than SHOULD and followed by other?
2387
null
[ "node-js" ]
[ { "code": "Server selection timed out after ${serverSelectionTimeoutMS} ms", "text": "PS D:\\Coding\\node-tut> node index.js\nD:\\Coding\\node-tut\\node_modules\\mongodb\\lib\\sdam\\topology.js:284\nconst timeoutError = new error_1.MongoServerSelectionError(Server selection timed out after ${serverSelectionTimeoutMS} ms, this.description);\n^MongoServerSelectionError: connect ECONNREFUSED ::1:27017\nat Timeout._onTimeout (D:\\Coding\\node-tut\\node_modules\\mongodb\\lib\\sdam\\topology.js:284:38)\nat listOnTimeout (node:internal/timers:564:17)\nat process.processTimers (node:internal/timers:507:7) {\nreason: TopologyDescription {\ntype: ‘Single’,\nservers: Map(1) {\n‘localhost:27017’ => ServerDescription {\naddress: ‘localhost:27017’,\ntype: ‘Unknown’,\nhosts: ,\npassives: ,\narbiters: ,\ntags: {},\nminWireVersion: 0,\nmaxWireVersion: 0,\nroundTripTime: -1,\nlastUpdateTime: 486019323,\nlastWriteDate: 0,\nerror: MongoNetworkError: connect ECONNREFUSED ::1:27017\nat connectionFailureError (D:\\Coding\\node-tut\\node_modules\\mongodb\\lib\\cmap\\connect.js:383:20)\nat Socket. (D:\\Coding\\node-tut\\node_modules\\mongodb\\lib\\cmap\\connect.js:307:22)\nat Object.onceWrapper (node:events:628:26)\nat Socket.emit (node:events:513:28)\nat emitErrorNT (node:internal/streams/destroy:151:8)\nat emitErrorCloseNT (node:internal/streams/destroy:116:3)\nat process.processTicksAndRejections (node:internal/process/task_queues:82:21) {\ncause: Error: connect ECONNREFUSED ::1:27017\nat TCPConnectWrap.afterConnect [as oncomplete] (node:net:1300:16) {\nerrno: -4078,\ncode: ‘ECONNREFUSED’,\nsyscall: ‘connect’,\naddress: ‘::1’,\nport: 27017\n},\n[Symbol(errorLabels)]: Set(1) { ‘ResetPool’ }\n},\ntopologyVersion: null,\nsetName: null,\nsetVersion: null,\nelectionId: null,\nlogicalSessionTimeoutMinutes: null,\nprimary: null,\nme: null,\n‘$clusterTime’: null\n}\n},\nstale: false,\ncompatible: true,\nheartbeatFrequencyMS: 10000,\nlocalThresholdMS: 15,\nsetName: null,\nmaxElectionId: null,\nmaxSetVersion: null,\ncommonWireVersion: 0,\nlogicalSessionTimeoutMinutes: null\n},\ncode: undefined,\n[Symbol(errorLabels)]: Set(0) {}\n}Node.js v18.12.1 Facing this issue when I run the node.", "username": "Ameer_Hamza" }, { "code": "", "text": "Your mongod isn’t configured correctly to answer on the localhost interface. Check your configuration.", "username": "Jack_Woehr" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to Connect Locally Installed MongoDB with node?
2023-02-13T21:29:51.812Z
How to Connect Locally Installed MongoDB with node?
711
null
[]
[ { "code": "", "text": "there are 4 documents in my mongodb, i am using realm flexible sync, the four documents are", "username": "Suman_Chakravarty" }, { "code": "", "text": "Hi. We currently allow for you to either set table-specific roles or default roles (applied to any table without a table-specific role). See here for an example of how this is done: https://www.mongodb.com/docs/atlas/app-services/sync/configure/permissions/#type-specific-and-default-rolesNote, that we will soon actually remove the Permissions section in the Sync UI and it will just become a part of the “Rules” page which should be easier to navigate.", "username": "Tyler_Kaye" } ]
Realm device sync permission on document
2023-02-09T10:00:32.235Z
Realm device sync permission on document
409
null
[ "react-native" ]
[ { "code": "realm object invalidatedUserProvider<UserProvider fallback={OfflineAppThatCrashesWhenMigratingLocal/SyncedRealm}>\ncustomDatauseQueryRealm ObjectcustomData...userPaidState\n...checkUserPaid\n\nif (!checkUserPaid) {\n return (\n <PaymentDataRealm.RealmProvider sync={someSyncConfig}>\n {childrenToCheckPaymentAndSetCheckUserPaidAndUserPaidState}\n </PaymentDataRealm.RealmProvider>\n )\n}\n\nif (!userPaid) {\n return (\n <AppDataRealm.RealmProvider>\n <App />\n </AppDataRealm.RealmProvider>\n )\n}\n\nreturn (\n <AppDataRealm.RealmProvider sync={someSyncConfig}>\n <App />\n </AppDataRealm.RealmProvider>\n)\ncustomDatauseQuery// Realms defined elsewhere\nconst UserMetadataRealm = createRealmContext({schema: [userMetadataSchema, paymentDataSchema]});\nconst AppDataRealm = createRealmContext({schema: [...appDataSchema]});\n\n// returned JSX Elements from render function\n<UserMetadataRealm.RealmProvider sync={syncConfig}>\n {\n userPaid\n ? (\n <AppDataRealm.RealmProvider sync={someSyncConfig}>\n <App />\n </AppDataRealm.RealmProvider>\n )\n : (\n <AppDataRealm.RealmProvider>\n <App />\n </AppDataRealm.RealmProvider>\n )\n }\n</UserMetadataRealm.RealmProvider>\nuseQueryuseRealmAppDataRealm.useRealmUserMetadataRealm.useRealmError: Exception in HostFunction: \nObject type 'schemaObjectInUserMetadataRealm' not found in schema.\n", "text": "Hello, I’m building an application where the intended behavior/cases are as such:My misunderstanding/ignorance of Realm and React component lifecycles has made this impossible for me to implement (a lot of app crashes with realm object invalidated errors). Instead, I have almost completed an implementation of this alternative behavior:The major issue I am facing now, however, is that any time I want to access or update a user’s payment data or metadata (customData), I need access to a synced realm. More precisely, I cannot useQuery to find the corresponding Realm Object that represents the user’s customData or payment metadata, which is important if I want to update that object. Moreover, payment requires access to sync so that I can update the Atlas backend upon user payment.My previous setup was as such:This setup let me check a user’s payment data on load to determine whether to provide a synced or local realm. However, this setup does not let me update a user’s customData or payment metadata, as I cannot useQuery or access synced data from a local realm.I’ve tried another setup, namely:However, trying to access the Realm data, even after meticulously separating the useQuery and useRealm functions (instead using AppDataRealm.useRealm or UserMetadataRealm.useRealm), I encounter thise error:Is it possible to use nested Realm providers like this? If not, is there a recommended solution to this problem of needing to access synced metadata while limiting the client’s main app to a local realm? (Do I need to manually write data to the local realm, and then copy that data to the synced realm? :((((( )", "username": "Alexander_Ye" }, { "code": "syncnot found in schemaError: Exception in HostFunction: Cannot access realm that has been closed.\nUserMetadataRealm", "text": "So I have provided all the schemas to both RealmProviders but kept the sync configurations as shown above the same. 
I am tentatively optimistic: I am not running into the not found in schema error, and the functionality (of local vs synced realms) is working properly.\nIt seems the naive solution is to provide all schemas to all providers, which… feels iffy to me…\nUPDATE:\nI’ve discovered that the reverse situation now happens; the app no longer crashes when switching from a local realm to a synced realm. However, when “unsubscribing” (switching from a synced to a local realm), I encounter this issue:\nThe stack trace suggests that the issue comes from a component that calls the UserMetadataRealm.", "username": "Alexander_Ye" } ]
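A condensed, untested sketch of the workaround described above: all model classes are passed to both contexts, so whichever provider a component reads from can resolve every object type. The schema variable names are the placeholders used earlier in the thread:

```javascript
import { createRealmContext } from "@realm/react";

// Both contexts get the full schema list; the sync configuration passed to
// each RealmProvider still decides whether that realm is synced or local.
const allSchemas = [userMetadataSchema, paymentDataSchema, ...appDataSchemas];

const UserMetadataRealm = createRealmContext({ schema: allSchemas });
const AppDataRealm = createRealmContext({ schema: allSchemas });
```

For the closed-realm crash in the UPDATE, one common mitigation (untested here) is to force the provider subtree to remount when switching between synced and local configurations, for example by giving the RealmProvider a React key derived from the sync state, so children cannot hold references into the realm that was just closed.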
[Realm React] Nested/Multiple Realm Providers?
2023-02-13T18:57:42.558Z
[Realm React] Nested/Multiple Realm Providers?
981
null
[ "node-js", "react-native", "schema-validation", "typescript" ]
[ { "code": "Error: Exception in HostFunction: Schema validation failed due to the following errors:\n\n- Property 'SomeObject.testKey' of type 'object' has unknown object type 'Array' \nexport class SomeObject extends Realm.Object<SomeObject> {\n _id!: Realm.BSON.UUID = new Realm.BSON.UUID();\n userId!: string;\n name!: string;\n createdAt!: Date = new Date();\n lastUpdated!: Date = new Date();\n testObject: EmbeddedObjectClass;\n testKey?: Array<SomeClass>; \n // Realm.List leads to the same error\n // except with \"List\" instead of \"Array\" as the unknown object type\n\n static generate({name, userId=NO_SYNC_USER_ID} testObject, testKey=[]}: {\n name: string,\n userId: string | undefined,\n testObject: EmbeddedObjectClass,\n testKey: Array<SomeClass>\n }) {\n return {\n name,\n userId: userId,\n testObject: testObject\n testKey: testKey,\n }\n }\n}\nlistsArraytestKey: anytestKey: SomeClass[]SomeClassstatic schema = {...}[Error: TransformError <path_to_file_defining_schema_class>: \nClasses extending Realm.Object cannot define their own `schema` static, \nall properties must be defined using TypeScript syntax]\n", "text": "Hello, I am trying to define a schema (using TypeScript) and I am encountering the following error:This is vaguely what the schema looks like, as I’ve defined it:So I’ve somehow gotten embedded objects to work, and I can somewhat simulate an array using an embedded object (arrays in documents are meant to be limited, so having predefined keys isn’t the worst thing in the world), but I can’t seem to get lists or JavaScript Arrays to work—which would be much easier (and perhaps optimal?) to work with.For me, the big question is: How do I get lists/arrays to work in Realm—how do I write lists to documents?Note, also, that if I set the type, testKey: any or testKey: SomeClass[], and pass an array of SomeClass objects in the constructor, I get a silent failure: the application runs, nothing crashes, I don’t get an error message, but the array never gets written to the resultant Realm object/document.I also cannot define a separate, static schema = {...}, as this leads to the following error:", "username": "Alexander_Ye" }, { "code": "publisher!: Realm.List<Publisher>static schema@realm/reactstatic generateconstructor", "text": "Hi again, @Alexander_Ye! I really appreciate you bringing your concerns here. I think the Embedded Objects page I linked you to in the other post may have lead you astray. Apologies! There are a few things we need to look at in your schema:Here’s a complex schema from a personal project. I wouldn’t necessarily consider this best practice (I’m a TypeScript novice), but hopefully it helps illustrate the embedded object and array syntax.I’ll work on making this clearer in the new React Native SDK docs. All of this was a personal pain point for me when I joined the team a few months ago.", "username": "Kyle_Rollins" }, { "code": "List", "text": "Re: my first bullet, it sounds like you tried the correct syntax, but List is an unknown type. Assuming you’ve imported things correctly, it could be an issue with how you’ve configured your realm. You need to pass all of your schema models into your realm config, including your embedded object schemas. 
See here in my personal project.", "username": "Kyle_Rollins" }, { "code": "Realm.ListListtest: Realm.List<ObjectName> = new Realm.List()TypeError: Illegal constructor\ntest: Realm.List<ObjectName> = []typeType 'undefined[]' is missing the following properties from type \n'List<ObjectName>': type, optional, toJSON, description, and 12 more.\ntest: ObjectName[] = []typeType 'ObjectName[]' is missing the following properties from type \n'List<RecentSpeaker>': type, optional, toJSON, description, and 12 more.\n", "text": "Hey @Kyle_Rollins, thanks for the assist.When I use Realm.List instead of List, I no longer see the error. (I have passed all my schema models, including the embedded object schemas, into the realm config, but that’s a great reminder—thanks.)My next question is how do I use a Realm List? More specifically:How do I initialize a Realm List?", "username": "Alexander_Ye" }, { "code": "realm.write() realm.write(() => {\n // New instance of your SomeObject class/realm object\n const newListParentObject = new SomeObject(\n \trealm,\n \t// other things you asserted in your SomeObject model would exist\n );\n \n // New instance of realm object that you're adding to your Realm List\n const newListItemObject = new SomeClass(\n realm,\n // other things you asserted in your SomeClass model would exist\n )\n \n // Push your new list item to your Realm List\n newListParentObject.testKey.push(newListItemObject);\n });\n", "text": "Apologies, @Alexander_Ye. Work and life got busy. If you’ve worked this out already, please disregard!In your data model, you don’t initialize the Realm List. That happens when you create a new instance of your object class. You can only write data to a realm inside of realm.write().So, a contrived example using some of the wording from your example:", "username": "Kyle_Rollins" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
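Pulling the accepted pattern together: a short, untested sketch of the property declarations implied by the answers above, assuming the @realm/babel-plugin setup indicated by the earlier TransformError (properties declared in TypeScript syntax, no static schema). The class names mirror the placeholders used in this thread:

```typescript
import Realm from "realm";

class SomeClass extends Realm.Object<SomeClass> {
  name!: string;
}

class SomeObject extends Realm.Object<SomeObject> {
  // With the babel plugin, an initializer supplies a default value,
  // so the definite-assignment "!" is not combined with "= ...".
  _id: Realm.BSON.UUID = new Realm.BSON.UUID();
  // Declare the list type only; no "= new Realm.List()" and no "= []".
  // Realm creates the List itself when the object is created in a write block.
  testKey!: Realm.List<SomeClass>;
}
```

The list is then populated inside realm.write() with push, as in the example above.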
[Realm React: Arrays] Schema Validation Error: Property 'Object.key' of type 'object' has unknown object type 'Array'
2023-02-09T15:22:09.129Z
[Realm React: Arrays] Schema Validation Error: Property ‘Object.key’ of type ‘object’ has unknown object type ‘Array’
2585
null
[ "swift", "transactions" ]
[ { "code": "subscriptions.append(\n\t\t\t\t\t\t\tQuerySubscription<TransactionObject>(name: RealmSyncSubscription.transactions.rawValue) {\n\t\t\t\t\t\t\t\t$0.ownerId == user.id\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t)\nsubscriptions.append(\n\t\t\t\tQuerySubscription<TransactionObject>(name: RealmSyncSubscription.transactions.rawValue) { object in\n\t\t\t\t\t\t\t\t// refresh custom data\n\t\t\t\t\t\t\t\tvar syncingEnabled = false\n\t\t\t\t\t\t\t\tuser.refreshCustomData { result in\n\t\t\t\t\t\t\t\t\tswitch result {\n\t\t\t\t\t\t\t\t\tcase .success(let customData):\n\t\t\t\t\t\t\t\t\t\tsyncingEnabled = customData[\"syncingEnabled\"]\n\t\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tcase .failure(let failure):\n\t\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\treturn object.ownerId == user.id && syncingEnabled ⚠️ // Cannot convert value of type 'Bool' to expected argument type 'Query<Bool>'\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t)\nuser.refreshCustomData { }", "text": "Please what is the best way to control flexible sync for a particular Realm Object based on User Settings in Realm Swift.I have subscription setup for a Realm Object and want to add a query based on a flag on the client side.The subscription in the code above shows the query that determines if a TransactionObject should be synced or not but now I want users to be able to turn on/off syncing of this particular object alone with a flag and I’m not sure the best way to achieve this.The initial approach I took is add a new property to the user object I used to setup RLMUser with custom data. The problem is trying to refresh the user custom data when the new property changes to set new flexible sync rule.The first problem I have is that user.refreshCustomData { } is asynchronous and don’t know if that would affect the performance of the app or the functionality of the subscription since the refresh would have to complete before returning whether the property is true or false.Secondly, I’m not sure how to construct a valid Query for the subscription since this property is not part of the TransactionObject which I’m trying to setup subscription for so I get the error: “Cannot convert value of type ‘Bool’ to expected argument type ‘Query’”", "username": "tobitech" }, { "code": "", "text": "Synced realms do not have a concept of “not syncing” a specific object. If you want the users to be able to “turn off Sync” for a specific object type, you would need to manage that particular object type with a non-synced realm. So your app logic would be something like “if customerSyncsTransaction, open a synced realm for that object type, else open a non-synced realm for that object type.” There are docs here on using configuration.objectTypes to manage the object types you’re using with a realm.So with this in mind, the Flexible Sync query subscription would not need to care about the custom user data. It would be something you would check before determining whether to open a synced realm or a non-synced realm.This gets more complex because the Swift SDK does not currently have an API that supports copying data between a Flexible synced realm and non-synced realm. 
So if you’d want to make existing transactions sync or not-sync as the user enables or disables this setting, you’d need to write your own logic to iterate over and copy those transactions between the synced and the non-synced realm.", "username": "Dachary_Carey" }, { "code": "", "text": "Okay, thanks for the explanation for the synced realm and non-synced realm.What I mean by not syncing is just checking against a value that the subscription query doesn’t satisfy, similar to how I compare if an object’s ownerId matches the id of the currently logged in user to sync specific user’s data with their device.The approach of copying data from one realm to another seems like a complex solution for the feature schedule as at this time.Could you please suggest a way to write a query for the subscription based on an external boolean value that’s not a property of the object type? Basically I’m trying to work my way around the error: “Cannot convert value of type ‘Bool’ to expected argument type ‘Query’.Thank you for your time.", "username": "tobitech" }, { "code": "", "text": "The query is specific to the object type so I don’t believe you can query against a field that is not in the object from within the query. A better approach would be to check the external object field’s value before adding the query and then add or don’t add the query based on the value you’re checking. i.e. if customData[“syncingEnabled”] then append a subscription for the Transaction Object, else break or return or do something else instead.", "username": "Dachary_Carey" }, { "code": "", "text": "That makes a lot of sense. Thank you.", "username": "tobitech" } ]
Controlling Realm Flexible Sync Subscription based on User Settings
2023-02-13T14:54:54.255Z
Controlling Realm Flexible Sync Subscription based on User Settings
859
null
[ "production", "golang" ]
[ { "code": "bson.Raw", "text": "The MongoDB Go Driver Team is pleased to release version 1.11.2 of the MongoDB Go Driver.This release includes various bug fixes, including a fix for incorrect timestamp formats when converting a bson.Raw to Extended JSON and a fix for a retryable read error that can happen when using read concern “linearizable” or “available”. For more information please see the 1.11.2 release notes.You can obtain the driver source from GitHub under the v1.11.2 tag.Documentation for the Go driver can be found on pkg.go.dev and the MongoDB documentation site. BSON library documentation is also available on pkg.go.dev. Questions and inquiries can be asked on the MongoDB Developer Community. Bugs can be reported in the Go Driver project in the MongoDB JIRA where a list of current issues can be found. Your feedback on the Go driver is greatly appreciated!Thank you,\nThe Go Driver Team", "username": "Matt_Dale" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB Go Driver 1.11.2 Released
2023-02-13T18:55:24.640Z
MongoDB Go Driver 1.11.2 Released
1046