Columns: image_url (string, 113-131 chars) | tags (list) | discussion (list) | title (string, 8-254 chars) | created_at (string, 24 chars) | fancy_title (string, 8-396 chars) | views (int64, 73-422k)
null
[ "node-js" ]
[ { "code": "", "text": "exports.createService = async (client, serviceName, clusterName) => {\ntry {\nconst response = await client.service.create({\n‘name’: serviceName, // this can be anything\n‘type’: ‘mongodb-atlas’,\n‘config’: {\n‘clusterName’: clusterName\n}\n});\nconsole.log(response._id);\nreturn response._id;\n} catch (e) {\nconsole.log('createService → error → ', e);\nthrow new Error(e);\n}\n}this function call from index.jsif(serviceId === ‘’){\nconsole.log('Creating Service ’ + serviceName + ‘…’);\nserviceId = await createService(client, serviceName, clusterName);\nconsole.log('Created Service - ', serviceId);\n}const client = getMongodbRealmApiClient({\n‘publicKey’: publicKey,\n‘privateKey’: privateKey,\n‘baseUrl’: ‘https://stitch.mongodb.com/api/admin/v3.0’,\n‘appId’: appId, // Optional for first run when you are creating an app.\n‘projectId’: projectId // You can get this from mongodb console url by selecting project.\n});", "username": "Purshottam_Patidar" }, { "code": "", "text": "This topic was automatically closed after 180 days. New replies are no longer allowed.", "username": "system" } ]
Unable to create service and trigger but application is created and error occurs: 404 not found
2022-08-24T11:47:14.895Z
Unable to create service and trigger but application is created and error occurs: 404 not found
1,153
null
[ "atlas-search", "text-search" ]
[ { "code": "-,/", "text": "Hello!\nI have a data file which contains texts that have -,/\nIs there an analyzer that I can use for wildcard or phrase searches for such texts?Thanks,\nSupriya", "username": "Supriya_Bansal" }, { "code": "", "text": "Any ideas? I have analyzer set as lucene.keyword.\nWhen I try searching 20-22 there are no results even when the string exists in the file.\nI tried both regex and wildcard search.\n@Karen_Huaulme", "username": "Supriya_Bansal" }, { "code": "", "text": "@Supriya_Bansal could you share the following?", "username": "Marcus" }, { "code": "{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"Text\": {\n \"analyzer\": \"lucene.standard\",\n \"multi\": {\n \"englishAnalyzer\": {\n \"analyzer\": \"lucene.english\",\n \"type\": \"string\"\n },\n \"wildcardAnalyzer\": {\n \"analyzer\": \"lucene.keyword\",\n \"type\": \"string\"\n }\n },\n \"type\": \"string\"\n }\n }\n }\n}\n[{$search: {\n index: 'fts',\n wildcard: {\n query: '20?des*',\n path: 'Text',\n allowAnalyzedField:true\n }\n}}]\nCode:\"10000020\"\nLevel:\"PT\"\nText:\"22,20-desmolase deficiency\"\nCurrent:null\n\nCode:\"10000013\"\nLevel:\"PT\"\nText:\"17,20-desmolase deficiency\"\nCurrent:null\n\n", "text": "@MarcusIndex Definition:Search Query:Example docs:Thank you for helping me with it.Best,\nSupriya", "username": "Supriya_Bansal" }, { "code": "", "text": "Hi @Marcus,\nFollowing up on the thread.\nIs there a better way to do such searches?Best,\nSupriya", "username": "Supriya_Bansal" }, { "code": " {\n wildcard: {\n \"query\": \"*22-des*\",\n \"path\": {\"value\" : \"Text\", \"multi\" : \"wildcardAnalyzer\" },\n allowAnalyzedField:true\n }\n }\n", "text": "Got this to work. I had to mention the type of analyzer in my path construction.", "username": "Supriya_Bansal" }, { "code": "lucene.whitespace", "text": "@Supriya_Bansal, sorry for the late reply. Answering for posterity. You probably want the lucene.whitespace analyzer.", "username": "Marcus" } ]
Search for texts with hyphens, commas and slashes
2021-04-15T20:25:53.916Z
Search for texts with hyphens, commas and slashes
6,831
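For posterity, a minimal sketch of Marcus' lucene.whitespace suggestion as an index definition, reusing the Text field from this thread; this is not the poster's final index. With whitespace tokenization a value like 22,20-desmolase stays one token that wildcard queries can match, but note the whitespace analyzer does not lowercase, so queries become case-sensitive:

  {
    "mappings": {
      "dynamic": false,
      "fields": {
        "Text": {
          "type": "string",
          "analyzer": "lucene.whitespace",
          "searchAnalyzer": "lucene.whitespace"
        }
      }
    }
  }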
null
[ "atlas-search" ]
[ { "code": "", "text": "I have a requirement to be able to search various collections together with one query, is this possible with MongoDB Atlas Search?", "username": "Steven_Romero1" }, { "code": "", "text": "@Steven_Romero1 Have you tried using materialized views? It may work for your use case.", "username": "Marcus" }, { "code": "", "text": "Hello Steven, I’m facing the same problem. Did you find a solution for this case within Atlas Search?\nIf not, how did you solve it?Thanks!", "username": "Ignacio_Montero" }, { "code": "$unionWith$lookup", "text": "In MongoDB 6.0.0, we introduced native cross-collection search without the need to maintain materialized views.@Ignacio_Montero here you will find tutorials for using $unionWith, $lookup, and materialized views referenced above. The other two options require less work.", "username": "Marcus" } ]
Does MongoDB Atlas Search support searches on multiple collections?
2020-10-23T15:21:50.399Z
Does MongoDB Atlas Search support searches on multiple collections?
3,839
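A hedged sketch of the MongoDB 6.0 cross-collection pattern Marcus references, using $unionWith to run $search over two collections in one query. The collection names (movies, series) and the query terms are hypothetical, not from the thread:

  db.movies.aggregate([
    { $search: { text: { query: "adventure", path: "title" } } },
    { $set: { source: "movies" } },   // tag each result so callers know its origin
    { $unionWith: {
        coll: "series",
        pipeline: [
          { $search: { text: { query: "adventure", path: "title" } } },
          { $set: { source: "series" } }
        ]
    } }
  ])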
null
[ "queries", "dot-net" ]
[ { "code": "", "text": "I have a collection of documents, each with a unique key “AccountNum”Inside of each of these account documents is an embedded array of documents called “Items” and each item document has a unique ItemIdI need to 1) find the specific document matching AccountNum then 2) page through the embedded items array and sort them by field name (e.g. sort by ItemId ascending or descending) and return only a “page” of that embedded array (e.g. return 50 items at a time out of an array of 1000, paging by batches of 50).Is there a way to do this? I have tried different methods and just haven’t gotten it to work. I would prefer that Mongo does all the work and returns the data to the code (I’m working on a C# application).", "username": "S_E" }, { "code": "", "text": "I think I found my own solution:db.getCollection(‘catalogs’).aggregate({$match: {“AccountNum”:“99999999”}},{$project:{“Items”:{$slice:[\"$Items\",20,20]},\"_id\":false}})", "username": "S_E" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Paging and sorting an embedded array in a single document
2022-08-24T19:06:22.443Z
Paging and sorting an embedded array in a single document
1,610
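The accepted $slice answer pages the embedded array but never sorts it. On MongoDB 5.2+, one way to add the missing sort is $sortArray; a sketch assuming the thread's field names, with the slice position computed as pageNumber * pageSize:

  db.catalogs.aggregate([
    { $match: { AccountNum: "99999999" } },
    { $project: {
        _id: false,
        Items: {
          $slice: [
            { $sortArray: { input: "$Items", sortBy: { ItemId: 1 } } }, // sort the embedded array first
            50,  // position: pageNumber * pageSize
            50   // n: pageSize
          ]
        }
    } }
  ])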
https://www.mongodb.com/…_2_1024x424.jpeg
[ "server" ]
[ { "code": "", "text": "I brew installed Mongo version 6.0 and can run \" brew services restart [email protected]\" fine in the terminal. However, when I type “mongo” it shows me “command not found”. Here’s the output when I type in “mongod\" when I didn’t put in anything in the command line. But even if I do “mongod --config /usr/local/etc/mongod.conf” where my desired configuration file is, it still doesn’t launch the mongo shell. Any help will be appreciated!\n\nScreen Shot 2022-08-24 at 3.32.32 AM1920×796 316 KB\n", "username": "xttt" }, { "code": "", "text": "I am able to run mongosh though. How different have people found mongosh and mongo to be? I’m following an outdated tutorial on mongo and wonder if I should stick to mongo", "username": "xttt" }, { "code": "", "text": "Older mongo shell is deprecated\nYou can continue using latest shell mongosh", "username": "Ramachandra_Tummala" }, { "code": "", "text": "I see, thanks! Can I launch it by “mongod”? I’ve seen some people do that this month still, or is that also not recommended? I saw mongosh supports a subset of mongo’s commands, so I don’t want to miss out on some commands", "username": "xttt" }, { "code": "", "text": "mongod is used to start mongod daemon\nOnce it is up you can connect using mongo or mongosh depending on the shell you have installed.I suggest you go with latest shell\nIf you still want to use older shell i think you can install it(just shell only) on separate path and use both", "username": "Ramachandra_Tummala" }, { "code": "mongoshmongomongobinmongosh", "text": "I saw mongosh supports a subset of mongo’s commands, so I don’t want to miss out on some commandsYou are not going to miss out on much functionality using mongosh as the team has the commonly used commands implemented. There are some things that are still missing, but I wouldn’t worry about those.If you want to use mongo still for whatever reason, you can get a download a copy of the 5.0.x archive. Once you uncompress the archive you should see mongo in the bin directory. Note that by doing this, you could be missing out on functionality that the mongosh tool has added.", "username": "Doug_Duncan" }, { "code": "", "text": "Sounds good, I’ll just use mongosh then", "username": "xttt" }, { "code": "", "text": "Thanks for your response! I’ll go ahead and just use mongosh", "username": "xttt" } ]
"Mongo command not found" command lin
2022-08-24T10:35:56.584Z
“Mongo command not found” command line
6,270
null
[ "queries", "dot-net" ]
[ { "code": "", "text": "i’m using C# driver\ni want to implement Soft Delete, like global query filter in EF Core\nto automatically add some filters to fetching data commands\nanyway?another thing is important is changing modificationDate property when the document is updated", "username": "ali_yeganeh" }, { "code": "IRepository<T>IQueryable<T>coll.AsQueryable()coll.AsQueryable().Where(x => x.Deleted == false)modificationDateModificationDateINotifyPropertyChangedRepository<T>modificationDate", "text": "Hi, @ali_yeganeh,Welcome to the MongoDB Community Forums. I understand that you have some questions about implementing soft deletes with MongoDB.You could implement soft deletes via a data layer abstraction. You could expose an IRepository<T> implementation in your data layer that returns an IQueryable<T> to your application. Rather than returning coll.AsQueryable(), you would return coll.AsQueryable().Where(x => x.Deleted == false).If you are running on MongoDB Atlas, another way to implement soft deletes would be to leverage Atlas App Services. You could implement the soft delete logic using query filters:Regarding your second requirement of implementing a modificationDate, that would be easiest to implement in your application logic whereby you would modify a ModificationDate property on your base entity when the entity is modified. This could be done manually in every property or using an INotifyPropertyChanged implementation. Other options include implementing it in your Repository<T> implementation on update/delete. Yet another would be using Atlas App Services rule to set the modificationDate when updating or (soft) deleting an entity.Hopefully this provides you with some ideas for how to accomplish your design goals.Sincerely,\nJames", "username": "James_Kovacs" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to implement Soft Delete In C#
2022-08-24T07:26:07.581Z
How to implement Soft Delete In C#
2,477
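The repository idea James describes, sketched in Node.js rather than C# for brevity. The deleted and modificationDate field names are assumptions; the point is that reads always append the soft-delete filter and deletes become stamped updates:

  // Minimal soft-delete wrapper around a driver collection (a sketch, not a full IRepository).
  function softDeleteRepository(coll) {
    return {
      // every read silently excludes soft-deleted documents
      find: (filter = {}) => coll.find({ ...filter, deleted: { $ne: true } }),
      // "delete" is an update that stamps the document instead of removing it
      deleteOne: (filter) =>
        coll.updateOne(filter, { $set: { deleted: true, modificationDate: new Date() } }),
      // updates also refresh the modification date
      updateOne: (filter, set) =>
        coll.updateOne(filter, { $set: { ...set, modificationDate: new Date() } })
    };
  }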
https://www.mongodb.com/…c9eff8e9cffc.png
[ "database-tools" ]
[ { "code": "", "text": "I need to have two separate databases (currently have one called “worker”) for the purpose of backing/restoring them up separately from each other. They also do contain very different data, however there’s one single app that will connect to both databases.I’m unsure how to achieve this second database within the same existing M10 cluster (which is very underutilized). Could you please advise?Also, I’ve created a second database locally (Octopussy) but where do I find it? I’ve searched for that specific name and even for the two collections names but can’t find them:\n\nScreenshot 2022-08-24 at 16.35.42948×678 54.6 KB\nEDIT: was able to locate the local MongoDb files with this answer: macos - Location of the mongodb database on mac - Stack Overflow but I can’t identify the format here whereas from mongodumps taken from Atlas the individual collections are all in easy to identify (and copy) files. Seems that isn’t the same for the local db.The reason I’m asking for such a specific file that holds the database is that I need to share it with a freelancer who works with me. I don’t want to share both databases, just one of them.Thanks in advance!", "username": "Andreas_West" }, { "code": "", "text": "Your screenshot shows that you are connected to localhost , not to Atlas.", "username": "steevej" }, { "code": "", "text": "Yes, that’s because it was related to the last part of my question (which I could answer myself).I have been in contact with MongoDb CS in the meantime and they explained to me that one can have up to 100 databases under a cluster. So that solves my problem too, I have already created the second database with the DbCompass app (on Atlas itself).", "username": "Andreas_West" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Creating two separate databases in the same Atlas cluster (M10)
2022-08-24T08:36:44.981Z
Creating two separate databases in the same Atlas cluster (M10)
2,029
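For the backup/sharing part of this question, mongodump can be scoped to a single database by naming it in the connection string, so each database can be dumped and restored independently. The URI and database name below are placeholders:

  # dump only the Octopussy database from the cluster
  mongodump --uri="mongodb+srv://user:pass@cluster0.example.mongodb.net/Octopussy" --out=./octopussy-dump

  # restore just that database elsewhere, leaving the other database untouched
  mongorestore --uri="mongodb+srv://user:pass@other-cluster.example.mongodb.net" --nsInclude="Octopussy.*" ./octopussy-dump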
null
[ "aggregation", "queries" ]
[ { "code": "const suggestedFriends = await this.usersModel\n .aggregate<SuggestedFriends>()\n .search(\n {\n index: 'default',\n wildcard: {\n query: `${friendsQuery.q}*`,\n path: 'email',\n allowAnalyzedField: true,\n },\n }\n )\n", "text": "Hi all! Im having an issue that I cant find by email using Atlas Search.\nThis is what I built at the moment:What im sending on the variable friendsQuery.q is “[email protected]” but not found nothing. I tried to use another collection that I have names and works properly, I mean the search index its working but not works on strings of emails :/. Someone faced this thing too?Thank you so much!", "username": "tomas_ruffa" }, { "code": "", "text": "Can you share your index definition?We have an email tokenizer, which helps with this.", "username": "Elle_Shwer" }, { "code": "", "text": "Thanks for quickly response here its my index definition\n\nScreen Shot 2022-08-23 at 12.11.191920×777 76.9 KB\n", "username": "tomas_ruffa" }, { "code": "lucene.simple@.{\n \"analyzer\": \"lucene.simple\",\n \"mappings\": {\n \"dynamic\": true,\n \"fields\": {\n \"email\": {\n \"analyzer\": \"emailUrlExtractor\",\n \"searchAnalyzer\": \"emailUrlExtractor\",\n \"type\": \"string\"\n }\n }\n },\n \"analyzers\": [\n {\n \"charFilters\": [],\n \"name\": \"emailUrlExtractor\",\n \"tokenFilters\": [\n {\n \"type\": \"lowercase\"\n }\n ],\n \"tokenizer\": {\n \"maxTokenLength\": 200,\n \"type\": \"uaxUrlEmail\"\n }\n }\n ]\n}\n", "text": "@tomas_ruffa That analyzer won’t work because the lucene.simple analyzer will strip the @ symbol and the . from the email.In the visual editor, we do not support the custom analyzer you should build to support this use case.It would look something like this:", "username": "Marcus" }, { "code": "", "text": "Thank you so much Marcus I did this and added new refactors to that query by the Atlas UI and now works. Thanks @Elle_Shwer too for the help, both are the solution that I need ", "username": "tomas_ruffa" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to search by Email on Atlas Search
2022-08-22T21:26:24.958Z
How to search by Email on Atlas Search
3,788
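A usage sketch against the custom emailUrlExtractor index Marcus posted above: because the uaxUrlEmail tokenizer keeps the whole address as a single token, a plain text query can match the full email. The collection name and address are hypothetical:

  db.users.aggregate([
    { $search: {
        index: "default",
        text: { query: "jane.doe@example.com", path: "email" }
    } }
  ])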
null
[]
[ { "code": "", "text": "Hey,\nI need to figure out which IP my connection to MongoDB is coming from. Current set access to 0.0.0.0/0, but cannot find out where to see what IP is inbound and limit this horrible filter to something more sane.I’m using FREE / shared version, and it seems like Logs are not available here, I tried to enable some logs, but cannot find them anywhere. There are no connection import in the built-in logs in projects and organisation, and I cannot find logs for the cluster.Can I get a tip or a link towards where to look for incoming IP information? I keep pulling blanks.Søren", "username": "Soren_Staun" }, { "code": "", "text": "Most likely it is the result ofhttps://whatismyipaddress.com/", "username": "steevej" }, { "code": "", "text": "Yeah, if only. It’s a server and I cannot find the IP. There’s no way for me to go to that URL from the server.I need to find my external IP from the data center and not my own. ", "username": "Soren_Staun" }, { "code": "", "text": "You may use cURL or wget from the server with the link I provided.", "username": "steevej" }, { "code": "", "text": "Surprisingly little editing of posts here!It’s an OutSystems (low-code) server, so no direct shell access to run curl or other things.\nBut I found this extension that can help find the external IP of the server in their AWS cloud:The OutSystems Forge is a repository of reusable, open code modules, connectors, and UI components. Add your own, or collaborate with others to deliver apps faster.\n(Server IP extension)Thanks for your help Steeve!", "username": "Soren_Staun" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Find IPs of incoming connections
2022-08-23T20:45:30.855Z
Find IPs of incoming connections
1,511
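When there is application code but no shell access, the same lookup can be done in-process. A Node.js sketch using AWS's public IP echo endpoint (any equivalent service works):

  const https = require('https');

  https.get('https://checkip.amazonaws.com', (res) => {
    let body = '';
    res.on('data', (chunk) => (body += chunk));
    res.on('end', () => console.log('outbound IP:', body.trim())); // add this to the Atlas IP access list
  });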
https://www.mongodb.com/…f_2_1024x576.png
[ "node-js", "connecting" ]
[ { "code": " const {MongoClient}=require('mongodb');\n const url='mongodb://localhost:27017';\n const database='local'\n const client=new MongoClient(url);\n\n async function getData()\n {\n let result=await client.connect();\n let db=result.db(database);\n let collection=db.collection('startup_log');\n let response=await collection.find({}).toArray();\n console.log(response);\n }\n getData();\n\n\n\n\n", "text": "\nScreenshot 2022-08-23 18.47.421920×1080 157 KB\nI have written the code to connect the database with node js with no mistakes and tried to execute the code but the error I have got is unable to understand, I have tried every solution that has been posted on youtube and google but none of them have helped me\nI also reinstalled the MongoDB several times still getting the same problem‘mongo’ is not recognized as an internal or external command,\noperable program or batch file.\nI am getting this when I have tried to check the version of MongoDB through cmd even the path in the system variable is clear\ncan anyone give the solution for it", "username": "bha_Nu" }, { "code": "mongomongoshmongoshconst url='mongodb://localhost:27017'\nconst url='mongodb://127.0.0.1:27017'\n", "text": "‘mongo’ is not recognized as an internal or external command,The mongo command line tool is no longer installed with version 6.0 of MongoDB. If you used an installer it may have installed mongosh which is the newer replacement tool. If you don’t have mongosh, you can download it.As for the error you’re getting, it looks like your app is trying to connect over IPv6 address. Change your connection code fromtoThat should hopefully help resolve that issue.", "username": "Doug_Duncan" }, { "code": "", "text": "it’s working . Thank you very much for your help sir", "username": "bha_Nu" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MongoServerSelectionError
2022-08-23T13:42:51.330Z
MongoServerSelectionError
2,224
null
[ "java", "connecting" ]
[ { "code": "# SSL options\n# Enable SSL on normal ports\nsslOnNormalPorts = true\n# SSL Key file and password\nsslPEMKeyFile = /etc/ssl/mongodb.pem\nsslPEMKeyPassword = PASSWORD\nmongo --ssl --sslCAFile /etc/ssl/rootCA.pem --sslPEMKeyFile /etc/ssl/mongodb.pem --host localhost\n\nmongodb://USER:PASSWORD@localhost:27017/?ssl=true&sslAllowInvalidCertificates=true&sslPEMKeyFile=/etc/ssl/mongodb.pem\n\nThe connection string contains an invalid host 'localhost:27017/?ssl=true&sslAllowInvalidCertificates=true&sslPEMKeyFile=/etc/ssl'. The port '27017/?ssl=true&sslAllowInvalidCertificates=true&sslPEMKeyFile=/etc/ssl' is not a valid, it must be an integer between 0 and 65535\n\nurl=mongodb://USER:PASSWORD@localhost:27017/?ssl=true\ncom.mongodb.MongoSocketWriteException: Exception sending message\n at com.mongodb.connection.InternalStreamConnection.translateWriteException(InternalStreamConnection.java:445) ~[mongo-java-driver-3.5.0.jar:?]\n.\n...\nCaused by: javax.net.ssl.SSLHandshakeException: PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target\n.\n...\n", "text": "Hi, I’m new to this community. Not sure if posting such a big description is okay. So here I go:I’m working on JDK8 and using mongo-java-driver(v3.5.0) to connect MongoDB(v3.6.3) .I’ve enabled SSL by following this article. I don’t have /etc/mongod.conf file, instead I’ve /etc/mongodb.conf file; so I’ve updated the SSL settings in that file:I’m able to access mongo via mongo shell using:I want to connect MongoDB using Java driver. I initially tried the following JDBC connection string:but as per documentation, there are no such options available. Also, I get error:And when I try with the following connection string:I get following error:Could someone help me with what JDBC connection string I need to configure to connect successfully. Thank you.", "username": "Jitin_Dominic" }, { "code": "", "text": "Hi @Jitin_DominicHave you had a look at the Enable TLS/SSL on a Connection documentation? It should explain the basics of setting up a TLS connection to MongoDB from a Java application.Regards,\nJeff", "username": "Jeffrey_Yemin" }, { "code": "", "text": "@Jeffrey_YeminThe documentation that you’ve shared is related to v4.7. And I’m using v3.6, so I’ve followed the this documentation", "username": "Jitin_Dominic" }, { "code": "", "text": "There is Java driver-specific TLS docs for 3.6 at TLS/SSL. As all the configuration is delegate to the JVM, it hasn’t really changed since then.", "username": "Jeffrey_Yemin" }, { "code": "mongodb://localhost:27017/?ssl=true&tlsCertificateKeyFile=/etc/ssl/mongodb.pem&tlsCAFile=/etc/ssl/rootCA.pem", "text": "@Jeffrey_Yemin\nSorry, I was out for couple of weeks. So I setup a MongoDB v4.4. Tried with new Java driver v3.12.11. Updated my connection URI to mongodb://localhost:27017/?ssl=true&tlsCertificateKeyFile=/etc/ssl/mongodb.pem&tlsCAFile=/etc/ssl/rootCA.pemAlso imported pem files using keytool but I still get the same error.", "username": "Jitin_Dominic" } ]
MongoDB connection URI for SSL
2022-08-05T06:03:39.911Z
MongoDB connection URI for SSL
5,052
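The PKIX error above usually means the JVM truststore does not contain the root CA: the Java driver reads certificates from the JVM, not from URI file options like tlsCAFile. A hedged sketch of the usual keytool import; the alias is arbitrary, the cacerts path varies by JDK layout (shown here for JDK 8), and changeit is only the default truststore password:

  keytool -importcert -trustcacerts \
    -alias mongodbRootCA \
    -file /etc/ssl/rootCA.pem \
    -keystore "$JAVA_HOME/jre/lib/security/cacerts" \
    -storepass changeit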
null
[]
[ { "code": "db.a.updateMany(condition,{$set:{\n\"productMinQty\":collection b field value,\n})\n", "text": "I want to update collection A on based on Collection B field.My query like this,Thnaks.", "username": "Seekex_youtube" }, { "code": "", "text": "I would take a look at https://docs.mongodb.com/manual/tutorial/update-documents-with-aggregation-pipeline/.I am not too sure if you can $lookup. Please share any findings.", "username": "steevej" }, { "code": "", "text": "@Seekex_youtube, any followup on this?", "username": "steevej" }, { "code": "", "text": "Yes I am having the same issue could anyone please help in this", "username": "adamvinh_zi" }, { "code": "", "text": "Adding $lookup to the update pipeline would be a great add. Increasingly no-code solutions that plug directly into Mongodb are missing any ability to do collection level security, something that is usually happened in API middleware.$lookup makes it easy to reference a single user list in a collection and protect all collections for read operations. Without a $lookup update pipeline capability we can’t easily protect writes based on a single user list collection.", "username": "John_Armstrong" }, { "code": "db.a.aggregate([CONDITION, LOOKUP value, ADJUST DOCUMENT, MERGE]) \n\n// Collection collA\n\ndb.collA.find()\n{ _id: ObjectId(\"62138110d18675354ee41fc7\"), a: 1, b: 'id 1' }\n{ _id: ObjectId(\"62138115d18675354ee41fc8\"), a: 2, b: 'id 2' }\n\n\ndb.collB.find()\n{ _id: ObjectId(\"62137e41d18675354ee41fc5\"), id: 'id 1', v: 10 }\n{ _id: ObjectId(\"62137e41d18675354ee41fc6\"), id: 'id 2', v: 20 }\n\n\ndb.collA.aggregate([{$match: {\n a: {\n $in: [\n 1,\n 2\n ]\n }\n}}, {$lookup: {\n from: 'collB',\n localField: 'b',\n foreignField: 'id',\n as: 'productMinQty'\n}}, {$addFields: {\n productMinQty: {\n $first: '$productMinQty.v'\n }\n}}, {$merge: {\n into: 'collA',\n whenMatched: 'replace',\n whenNotMatched: 'discard'\n}}]\n)\n\n\ndb.collA.find()\n{ _id: ObjectId(\"62138110d18675354ee41fc7\"),\n a: 1,\n b: 'id 1',\n productMinQty: 10 }\n{ _id: ObjectId(\"62138115d18675354ee41fc8\"),\n a: 2,\n b: 'id 2',\n productMinQty: 20 }\n\n", "text": "Hi @John_Armstrong and @Seekex_youtube ,So MongoDB does not currently allow lookup stages in an update command, however, it does have the $merge pipeline.Technically you cna achieve the same update with a following pipeline:For example consider the following data:Now to update collA with productMinQty as “v” from collection collB the following merge could be used:Thanks", "username": "Pavel_Duchovny" }, { "code": "", "text": "Thanks for the suggested workaround @Pavel_Duchovny. I tried this but I’m getting “$merge is not supported when the output collection is the same as the aggregation collection” is this now supported in a newer version of mongodb? Thanks.", "username": "Oscar_Bernal" }, { "code": "", "text": "Hi @Oscar_Bernal ,Oh, yes this is available starting 5.0…Would it be ok to do a $out to a temp collection and then run the $merge from that collection to the original collection as a workaround?Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "@Pavel_Duchovny That would work for now, yes. 
Thank you!", "username": "Oscar_Bernal" }, { "code": "", "text": "To merge into the aggregation collection is actually available since MongoDB 4.4, see: https://www.mongodb.com/docs/manual/reference/operator/aggregation/merge/#std-label-merge-behavior-same-collection.I tripped over that because we are locally on version 4.4 and on production on 4.2 where that error appeared. We had some time pressure when we noticed it’s not working on production.", "username": "Sinan_Birbalta" } ]
How Can I update data with lookup
2021-12-10T14:02:34.914Z
How Can I update data with lookup
12,447
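Pavel's pre-4.4 workaround ($out to a temp collection, then $merge back) was only described in prose; here is a sketch of it. It uses $arrayElemAt because the $first expression shown earlier requires MongoDB 4.4:

  // step 1: build the enriched documents into a temp collection (works on 4.2)
  db.collA.aggregate([
    { $lookup: { from: "collB", localField: "b", foreignField: "id", as: "productMinQty" } },
    { $addFields: { productMinQty: { $arrayElemAt: ["$productMinQty.v", 0] } } },
    { $out: "collA_tmp" }
  ])

  // step 2: merge the temp collection back over the original
  db.collA_tmp.aggregate([
    { $merge: { into: "collA", whenMatched: "replace", whenNotMatched: "discard" } }
  ])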
null
[ "atlas-functions", "atlas-triggers" ]
[ { "code": "", "text": "Recently watched an awesome video by Michael Lynn on youtube Create a Data Enabled API in 10 Minutes with MongoDB Realm - YouTube, but cant find him here in this community.I have some questions hope someone can help.Thank you.", "username": "Rishi_uttam" }, { "code": "", "text": "Hi @Rishi_uttam, I’ll try to answer your questions…", "username": "Andrew_Morgan" }, { "code": "", "text": "Hi Andrew, thanks that does clear up a little.I have set my realm app set to Global, but writes are restricted to one location (currently Singapore) . Does this mean that all reads from my app (including functions & HTTP functions) are read form the database that is served from atlas edge locations? thats great, then they dont need to route back the write origin. Are realm locations the same as Atlas locations?My mongob database with atlas is hosted on Azure (Hong Kong region) but realm database writes are in Singapore… ??. but my app is using the same collection in the same database? if you could point me to a document to explain why the realm database is different form the atlas database that may help to claify things? i know many of us are confused about this point.Thanks.", "username": "Rishi_uttam" }, { "code": "", "text": "Great Questions!I hope this helps!", "username": "Michael_Lynn" }, { "code": "", "text": "Whoops - looks like @Andrew_Morgan already answered.", "username": "Michael_Lynn" }, { "code": "", "text": "Nice.in my client side app, i am making fetch calls to realm http 3rd party http triggers so the user does see the xhr origin in the browser status bar, was hoping to put it under our domain (like you do in lambdas)Realms global reads i believe are in 4 locations currently, and writes are in 1. I seek low latency like everyone else hence its good that reads are automatically routed to the closet destination out of the 4 current locations.noted on the pricing thanks.Im still struggling with using a function to create a new user based on a incoming post request body’s username and password. I know realm functions get a context object, but where is the context.emailPasswordAuth.registerUser(email, password); do i need to import the realm node sdk in to the function? seems like i shouldn’t have to just like i dont need to import the mongodb module., its available in the context. coulnd’t find much in the documentation on this… thanks for your help.", "username": "Rishi_uttam" }, { "code": "", "text": "How can we trigger an realm client with the mongodb atlas account", "username": "Devops_Team" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Mongodb Realm Functions Vs Atlas Triggers? what's the difference?
2021-07-09T07:47:59.127Z
Mongodb Realm Functions Vs Atlas Triggers? what's the difference?
6,396
null
[ "node-js", "crud" ]
[ { "code": "{\n Company: 'Alpha ',\n Products: [\n {\n ProductName: ' Bike',\n Release_Date: '',\n Sales :\n {\n Today_Sales: [{Date: '...', Sales : 12},{Date: '...', Sales: 34}]\n Total_Sales:[{Date: '...', Sales : 12},{Date: '...', Sales: 34}]\n },\n {\n ...\n }\n ]\n}\ndb.collection('collection').updateMany({},[\n{ \n$set: { \"Products.totalSales\" : \"$Products.Sales.Total_Sales.Sales\"\n}}\n]\n$last: {Products.Sales.Total_Sales.Sales}\nProducts.Sales.Total_Sales.0.Sales\n", "text": "Hello Mongo Community,\nI have the following document structure;what I want to do is setting a new field at each product with using the values of Sales from Total Sales Array.I have tried as followWhat I need help is I don’t know how to fix the array indexs in the above synatxProducts.Sales.Total_Sales.SalesFor Products : I need to fix it only to the current path of that Products.totalSales will be; now is giving from every products,\nFor Total_Sales : I want the last objects from its array,I have tried thewhich is giving me the last product\n&and giving me nothing as it is searching 0 key at the Sales I think.", "username": "Kyaw_Zayar_Tun" }, { "code": "db.collection('').updateMany({},\n [ {\n $set: {\n 'Albums.totalSales': {\n $arrayElemAt:[{\n $map: {\n input:'$Albums',\n in: { $arrayElemAt: [ '$$this.Sales.Total_Sales.Sales', -1 ] } \n \n }\n },0]}\n }\n }]\n )```\nNo idea how to set the Products only to the current path of **totalSales** will be placed.", "text": "I have managed to set the last element of Total_Sales by this", "username": "Kyaw_Zayar_Tun" }, { "code": "{\n Today_Sales: [{Date: '...', Sales : 12},{Date: '...', Sales: 34}]\n Total_Sales:[{Date: '...', Sales : 12},{Date: '...', Sales: 34}]\n },\n", "text": "I am not sure of what you want to achieve.Sometimes you refer to Products.totalSales and at other times you write about Albums.totalSales. It is 2 different fields?The objectis not valid. You cannot have 2 fields within the same object that have the same key. Well, actually you can, but only the last occurrence is kept my most JSON implementation. It looks like you have introduced errors while redacting the documents you published.Please include real sample non-redacted documents and the exact result that you want.", "username": "steevej" }, { "code": "", "text": "Sorry my bad Albums is an actually a typo, I meant Products. 
I copied some codes from forums and tested so there was a messup.\nWhat I want to achieve is I want to add a new Field named totalSales in every element inside Products Array and its value will be last Sales Object of that product’s Total_Sales.\nThe problem I am facing is I am getting the array of every products’ last sales object in each products.", "username": "Kyaw_Zayar_Tun" }, { "code": "", "text": "As already mentioned, pleaseinclude real sample non-redacted documents and the exact result that you want.", "username": "steevej" }, { "code": "db.collection('collection').updateMany({},[{\n $set: { \"Products\" : {$map:{\n \"input\": \"$Products\",\n \"in\": {$mergeObjects:[ \n \"$$this\",\n { \"totalSales\": \"$$this.Sales.Total_Sales.Sales\"}\n ]}\n }}}\n])\nProductstotalSalesSalesTotal_Sales$sum{ \"totalSales\": {$sum:\"$$this.Sales.Total_Sales.Sales\"}}", "text": "what I want to do is setting a new field at each product with using the values of Sales from Total Sales Array .Sounds like you want to transform the Products array by doing something like this:Now, this will set in each Products subdocument a new field called totalSales which will have an array of Sales values from Total_Sales array. If you actually wanted a sum of these values, just add $sum operator like this: { \"totalSales\": {$sum:\"$$this.Sales.Total_Sales.Sales\"}}.Asya", "username": "Asya_Kamsky" }, { "code": "db.collection.updateMany({},\n[\n {\n $set: {\n Products: {\n $map: {\n input: \"$Products\",\n in: {\n $mergeObjects: [\n \"$$this\",\n {\n totalSales: {\n $arrayElemAt: [\n \"$$this.Sales.Total_Sales.Sales\",\n -1\n ]\n },\n \n }\n ]\n }\n }\n }\n }\n }\n])\n", "text": "Thanks , this solved the problem by a little modification.", "username": "Kyaw_Zayar_Tun" }, { "code": "“$$this”“$map”", "text": "It’s always “$$this” - it’s a builtin variable which points to each element of array you iterate over with “$map”.", "username": "Asya_Kamsky" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Fixing the array index of two nested arrays
2022-08-23T11:09:40.231Z
Fixing the array index of two nested arrays
2,609
null
[ "sharding" ]
[ { "code": "mongos> db.dropDatabase()\nMongoServerError: This is not the primary shard for db unfied_dashboard expected: shard2set shardId: shard1set\n[direct: mongos] unfied_dashboard> sh.status()\nshardingVersion\n{\n _id: 1,\n minCompatibleVersion: 5,\n currentVersion: 6,\n clusterId: ObjectId(\"62f397da4459adfb1985fe6a\")\n}\n---\nshards\n[\n {\n _id: 'shard1set',\n host: 'shard1set/host01-01-:1111,host01-02-:1111',\n state: 1,\n topologyTime: Timestamp({ t: 1660138052, i: 4 })\n },\n {\n _id: 'shard2set',\n host: 'shard2set/host02-01-:1111,host02-02-:1111',\n state: 1,\n topologyTime: Timestamp({ t: 1660782013, i: 3 })\n },\n {\n _id: 'shard3set',\n host: 'shard3set/host03-01-:1111,host03-02-:1111',\n state: 1,\n topologyTime: Timestamp({ t: 1660782222, i: 1 })\n }\n]\n---\nactive mongoses\n[ { '6.0.0': 3 } ]\n---\nautosplit\n{ 'Currently enabled': 'yes' }\n---\nbalancer\n{\n 'Currently enabled': 'yes',\n 'Currently running': 'no',\n 'Failed balancer rounds in last 5 attempts': 0,\n 'Migration Results for the last 24 hours': 'No recent migrations'\n}\n---\ndatabases\n[\n {\n database: { _id: 'config', primary: 'config', partitioned: true },\n collections: {\n 'config.system.sessions': {\n shardKey: { _id: 1 },\n unique: false,\n balancing: true,\n chunkMetadata: [\n { shard: 'shard1set', nChunks: 342 },\n { shard: 'shard2set', nChunks: 341 },\n { shard: 'shard3set', nChunks: 341 }\n ],\n chunks: [\n 'too many chunks to print, use verbose if you want to force print'\n ],\n tags: []\n }\n }\n },\n {\n database: {\n _id: 'test',\n primary: 'shard2set',\n partitioned: false,\n version: {\n uuid: UUID(\"f9e8580c-0a3d-4fc6-98a5-a80745ce406a\"),\n timestamp: Timestamp({ t: 1660899990, i: 1 }),\n lastMod: 1\n }\n },\n collections: {}\n },\n {\n database: {\n _id: 'test1',\n primary: 'shard2set',\n partitioned: false,\n version: {\n uuid: UUID(\"b4ea4074-b609-49d1-9753-8747f35b6153\"),\n timestamp: Timestamp({ t: 1660901440, i: 1 }),\n lastMod: 1\n }\n },\n collections: {}\n },\n {\n database: {\n _id: 'test2',\n primary: 'shard2set',\n partitioned: false,\n version: {\n uuid: UUID(\"da4dcf6f-99e7-4589-ab08-ea70b817a62c\"),\n timestamp: Timestamp({ t: 1660902134, i: 3 }),\n lastMod: 2\n }\n },\n collections: {}\n },\n {\n database: {\n _id: 'unfied_dashboard',\n primary: 'shard2set',\n partitioned: false,\n version: {\n uuid: UUID(\"f6a4e39f-dea0-49e0-bf90-80e5f41a88bf\"),\n timestamp: Timestamp({ t: 1660885444, i: 1 }),\n lastMod: 1\n }\n },\n collections: {}\n },\n {\n database: {\n _id: 'unified_dashboard',\n primary: 'shard3set',\n partitioned: false,\n version: {\n uuid: UUID(\"57b5e1bf-ae1b-4096-8b75-eb50a9cb6462\"),\n timestamp: Timestamp({ t: 1660882030, i: 2 }),\n lastMod: 1\n }\n },\n collections: {}\n }\n]\n[direct: mongos] unfied_dashboard> db.dropDatabase()\nMongoServerError: This is not the primary shard for db unfied_dashboard expected: shard2set shardId: shard1set\n", "text": "I have 3 shard cluster\nconnect to mongos and I tried to drop an un-sharded database\nwhen I dropped database I faced a strange error belowhow can I change the shard set via mongos?", "username": "turbostar777" }, { "code": "MongoServerError: This is not the primary shard for db unfied_dashboard expected: shard2set shardId: shard1set\nmongosmongosdb.adminCommand(\"flushRouterConfig\")\n", "text": "Hello @turbostar777,Welcome to the MongoDB Community forums I suspect this error because mongos might have stale information, possibly the primary shard actually moved but mongos is still having stale 
information. So I’ll suggest you try flushRouterConfig to flush the cache for all databases and their collections using the command:And, try to drop the database again.I hope it helps!Thanks,\nKushagra", "username": "Kushagra_Kesav" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Dropping an un-sharded database fails with a strange error
2022-08-19T10:20:08.936Z
Dropping an un-sharded database fails with a strange error
1,952
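For the unanswered follow-up ("How can I change the shard set via mongos?"): after ruling out stale routing with flushRouterConfig, the primary shard of a database can be relocated explicitly. A sketch using names from this thread; movePrimary migrates the database's un-sharded collections, so run it in a maintenance window:

  // run against mongos
  use admin
  db.adminCommand({ flushRouterConfig: 1 })                              // clear stale routing first
  db.adminCommand({ movePrimary: "unfied_dashboard", to: "shard1set" })  // then move the primary shard if actually desired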
null
[ "aggregation" ]
[ { "code": " var results = database.GetCollection<TestCollection>(\"TestCollection\")\n .Aggregate(new AggregateOptions { BatchSize = 1000 })\n .ToCursor().ForEachAsync(\n x =>\n {\n x.Name = \"dummy value\";\n });\n", "text": "I have 1 million records in the Mongo collection, and I will manipulate the few results after fetching them from the database. In this case, the code does not want to wait for entire records to change the value; instead, it can process the value on the batch results. Is there any way we can use the ToCursor with batch size?Tried the below code.", "username": "Sudhesh_Gnanasekaran" }, { "code": "Collection", "text": "Hi @Sudhesh_Gnanasekaran and welcome to the community!!If I understand correctly, your goal is to process the result set in a per-document basis, instead of waiting for the whole result set to arrive. Is this correct?If yes, then typically the Collection construct in most official drivers return a cursor, with which you can iterate and process. Thus, I believe you’re on the right track with the code example you posted. However, in most drivers (e.g. Pymongo), setting batchSize do not really have a user-visible effect, since the driver will do the batching for you and return results to you in terms of documents. See python - PyMongo cursor batch_size - Stack Overflow for an example of how this is done in Pymongo.Having said that, are there any specific reason why you want to manually set batchSize? Could you elaborate more on your use case and provide some examples?Thanks\nAasawari", "username": "Aasawari" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to use ToCursor to iterate values on batch size?
2022-08-11T05:12:16.008Z
How to use ToCursor to iterate values on batch size?
3,019
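A rough Node.js analogue of the same pattern, for readers not on the C# driver; names mirror the thread's example. The driver fetches in batches behind the scenes, but the loop body sees one document at a time, so processing starts long before all 1 million documents arrive:

  const cursor = db.collection('TestCollection').find({}).batchSize(1000);
  for await (const doc of cursor) {
    doc.Name = 'dummy value'; // handled as each batch streams in, not after the whole result set
  }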
null
[ "aggregation", "queries" ]
[ { "code": " {\n \"$out\": {\n \"s3\": {\n \"bucket\": \"mongodb-s3\",\n \"region\": \"us-east-1\",\n \"filename\": {\"$concat\": [\"users/\",{\"$toString\" :Date.now()},\"/\",{\"$toString\" : \"$_id\"}]},\n \"format\": {\n \"name\": \"json\",\n \"maxFileSize\": \"10MiB\",\n \n }\n }\n }\n }\n", "text": "Hi All,\nI have created a trigger that sends the newly added data to s3 bucket.\nNow if the document is updated I need to send it. But I am not sure how do I send it.\nWhen I set this database trigger it sends the entire collection instead of a single document. How do I do that sending just the updated document and not the entire collection?exports = function (changeEvent) {const datalake = context.services.get(“FDI”);\nconst db = datalake.db(“users”);\nconst events = db.collection(“users”);const pipeline = [];return events.aggregate(pipeline);\n};", "username": "Nirmal_Patil" }, { "code": "changeEvent.documentKey._id", "text": "Hi @Nirmal_Patil ,I would add a $match stage in the beginning getting only the _id of the effected changeEvent.documentKey._id I’d.Look at the documentation of trigger event documents and let me know if you have trouble to code this.Thanks\nPavel", "username": "Pavel_Duchovny" } ]
Sending the updated document to S3 using triggers
2022-08-23T21:54:34.095Z
Sending the updated document to S3 using triggers
1,290
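Pavel's suggestion rendered as code, as a sketch: prepend a $match on the change event's document key so only the document that fired the trigger is exported, reusing the service and bucket settings from the original function:

  exports = function (changeEvent) {
    const datalake = context.services.get("FDI");
    const events = datalake.db("users").collection("users");

    const pipeline = [
      { $match: { _id: changeEvent.documentKey._id } },   // only the changed document
      { $out: { s3: { /* same bucket/region/filename/format options as above */ } } }
    ];
    return events.aggregate(pipeline);
  };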
null
[ "aggregation" ]
[ { "code": "function external_func(field_value) { // field_value is a string\n return field_value;\n}\n$project: {\n 'new_field': {\n $where: external_func(***)\n }\n}\n\"\"Unrecognized expression '$where'", "text": "Hi everyone,I’m struggling with what looks like a basic operation. I’m trying to pass a field value into a function that is defined in another script in a project stage.I’ve read about the $function operator but I’m running a Mongodb version < 4.4 (4.2.13). Then, it looks like (on the Web) $where and mapReduce() could be an alternative.Unfortunately, even after reading the documentatio, I’m not sure what the synthax should be. Let’s consider a simple example :External function (another script):What I’m trying to get (aggregation pipeline):I only would like to project the value of a field name after making some conversion in the external function.Unfortunately, I tried a couple of things as an argument: external_func(this.field_name), external_func(’$field_name’), etc… but the only response I get is \"\"Unrecognized expression '$where'Obviously, I’m still not very good at it If anyone has an idea, thanks a lot !", "username": "Dr_Sim" }, { "code": "\"\"Unrecognized expression '$where'$whereAggregation Operations$project$where$function", "text": "Hello @Dr_Sim ,Welcome to The MongoDB Community! he only response I get is \"\"Unrecognized expression '$where'The reason you are getting this error is because one cannot use $where in Aggregation Operations such as $project as it is not supported, $where can only be used in a find query.Regarding Map-Reduce, it was deprecated in MongoDB v5.0 and Aggregation Pipeline is generally a better alternative to that.Alternatively, if you really must use the Javascript function and cannot upgrade to MongoDB 4.4, you might be able to do this operation in the application side. If you’re not mandated to use the Javascript function, is it possible to translate the function into aggregation operators instead?Lastly, $function was introduced in MongoDB v4.4 and I believe this is the functionality you’re looking for. However this would require you to upgrade to MongoDB 4.4, so this may or may not work for you.Note:\nExecuting JavaScript inside an aggregation expression may decrease performance. Only use the $function operator if the provided pipeline operators cannot fulfill your application’s needs.If you need further help, Can you please explain what this external function does?Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Pass a field value to an external function
2022-08-15T12:41:46.811Z
Pass a field value to an external function
1,297
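For readers who can upgrade, the $function shape Tarun mentions (MongoDB 4.4+), sketched with the thread's toy external_func inlined, since server-side $function cannot call out to another script:

  db.coll.aggregate([
    { $project: {
        new_field: {
          $function: {
            body: function (fieldValue) { return fieldValue; }, // inline stand-in for external_func
            args: ["$field_name"],
            lang: "js"
          }
        }
    } }
  ])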
null
[ "atlas-search" ]
[ { "code": "", "text": "Greetings,\nDocuments returned from $search stage are sorted by their search score descending.\nIn case there are multiple documents with the same score - is there a secondary sort by their object ids?e.g. if I run a query on a static collection that returns multiple documents with the same search score over and over again - will the results be always sorted in the same manner?Thanks a lot,\nOfer.", "username": "Ofer_Chacham" }, { "code": "$sort$search", "text": "@Ofer_Chacham unfortunately we cannot guarantee the order of the results of a static collection. We have not tested for this use case because of the probabilistic nature of the underlying subsystem, Lucene.You can enforce a deterministic sort order a few ways if need be. The simplest way is to use $sort after $search or other features.Is the collection sharded?", "username": "Marcus" }, { "code": "", "text": "Thanks for the reply @Marcus.\nCurrently we are using sort stage after search stage but the queries run very slow.We wanted to leverage the near operator in order to sort date and numeric fields but we must to have consistency between queries, because we are also using pagination with skip and limit so we don’t want to display same document in two different pages.Is there something else we can use to achieve the consistency without a sort stage?Currently we don’t use sharded collection.Thanks,\nOfer", "username": "Ofer_Chacham" }, { "code": "path$addField", "text": "If you have a field that you create that holds the sort order for all the fields, you can use function score and the path option. It should return consistent ordering and be fast. Since it is a static collection this should not be a problem. You can add this field with Atlas Triggers and an $addField operation. 
Does that make sense?", "username": "Marcus" }, { "code": " \"compound\": {\n \"filter\": [\n {\n \"text\": {\n \"query\": [\n \"abc\"\n ],\n \"path\": \"string_field\"\n }\n }\n ],\n \"should\": [\n {\n \"near\": {\n \"path\": \"numeric_field\",\n \"origin\": 0,\n \"pivot\": 1\n }\n }\n \n ],\n }\n", "text": "Thanks @Marcus - if I use the function score with a field as you describe it means I always sort the documents according to this field and this is not what I want to achieve.Maybe I’ll explain my problem with an example:let’s assume I have 5 documents in a collection and the collection is not changing:object_id, numeric_field, string_field\nA, 1, ‘abc’\nB, 2, ‘abc’\nC, 2, ‘abc’\nD, 3, ‘abc’\nE, 3, ‘def’I want to display the documents that contain ‘abc’ in their string_field and sort them by their numeric_value, so I’ll use this search stage:I also want to display the documents in pages of 2.\nSo, for getting the first page a stage of limit:2 will be added to the pipe, and for getting the second page two stages of skip:2,limit:2 will be added to the pipe.If I perform the two queries for getting the two pages over and over I want to always get A,B in first page and C,D in second page.Because B,C have the same numeric field value they will both get the same search score and as far as I understand it is possible that I will get a first page of A,C or second page of B,D.\nSo is there something I can do to guarantee the consistency?BTW I tried to check consistency of the same query with real data and it do seems like i always get the same order of results even when I have documents with same search score.\nMight it work this way because the collection is not sharded so mongot always scan the documents in same order and so it outputs results in same order?Thanks,\nOfer.", "username": "Ofer_Chacham" }, { "code": "$sort$search", "text": "The only way to guarantee the sort order at this time is to have the unique values for all the numeric fields or two use the blocking $sort stage after the $search stage. Some customers, use storedSource to speed up results of this query pattern.", "username": "Marcus" } ]
Documents with same search score
2022-07-28T15:11:14.129Z
Documents with same search score
2,574
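A sketch of the deterministic-pagination pattern discussed above: keep the relevance ordering but break score ties on _id, which is unique, so skip/limit pages never overlap. This still uses a $sort stage, which the thread notes can be slow; storedSource is the suggested mitigation:

  db.coll.aggregate([
    { $search: { /* the compound query from this thread */ } },
    { $addFields: { _score: { $meta: "searchScore" } } },
    { $sort: { _score: -1, _id: 1 } },   // _id acts as a unique tiebreaker
    { $skip: 2 },                        // page 2 of size 2, per the example
    { $limit: 2 }
  ])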
null
[ "aggregation" ]
[ { "code": "stage_group_month = {\n \"$group\" : { \n \"_id\": { \n \"year\": \"$_id.year\", \n \"month\": \"$_id.month\"\n },\n---> \"total_project_cost\" : { \"$sum\": \"$project_cost\" },\n \"total_hours\": { \"$sum\": \"$project_durationNumber\"},\n \"total_salaries\": {\"$sum\": \"$salaries\"},\n \"projects\": { \n \"$push\": {\n \"_id\": \"$_id.projectId\",\n \"name\": \"$projectName\",\n---> \"cost\": \"$project_cost\",\n \"duration\": \"$project_durationNumber\",\n \"salaries\" : \"$salaries\",\n \"gross_profit\": {\"$subtract\": [ \"$project_cost\", \"$salaries\" ]}\n }\n }\n }\n }\nstage_add_percentage_revenue = {\n \"$addFields\":\n {\n \"projects.percentage_revenue\" : {\"$divide\": [\"$projects.cost\", \"$total_project_cost\" ]}\n }\n }\n", "text": "Hello,so I have an aggregation pipeline, with this group as a stage.I’ve put arrows at lines of interestand I just want to doIf I replace “$projects.cost” by some number like 100 I have a result. But I can’t access project.cost.How can I do that ?", "username": "Timothee_Wright" }, { "code": "projectspercentage_revenue$mappercentage_revenue", "text": "If I replace “$projects.cost” by some number like 100 I have a result. But I can’t access project.cost.How can I do that ?The field projects is an array, and you want to calculate percentage_revenue for each of the projects. For this you need to use the $map aggregate array operator. This will allow iterate over the array field and calculate the percentage_revenue for each of the projects, i.e., array element’s.", "username": "Prasad_Saya" }, { "code": "", "text": "@Prasad_Saya was right, $map was the way to go. More information to https://stackoverflow.com/questions/73465964/mongodb-aggregation-divide-array-element-by-non-array-elementThis topic can be deleted as it a dupplicate", "username": "Timothee_Wright" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MongoDB aggregation divide array element by non-array element
2022-08-23T23:36:50.661Z
MongoDB aggregation divide array element by non-array element
1,443
null
[ "aggregation", "queries" ]
[ { "code": "{\n \"_id\": \"62fdfd7518da050007f035c5\",\n \"expiryDate\": \"2022-08-18T23:59:59+05:30\",\n \"arrayField\" : ['abc', 'def', 'ghi', 'jkl']\n},\n{\n \"_id\": \"62fdfd7518da050007f035c6\",\n \"expiryDate\": null,\n \"arrayField\" : ['abc','jkl']\n},\n{\n \"_id\": \"62fdfd7518da050007f035c7\",\n \"arrayField\" : []\n},\n{\n \"_id\": \"62fdfd7518da050007f035c8\",\n \"expiryDate\": null\n}\nexpiryDateDatearrayFieldArrayexpiryDateexpiryDateexpiryDate{\n 'compound' : {\n 'should' : [\n {\n 'compound' : {\n 'mustNot' : [{\n \"exists\": {\n \"path\": \"expiryDate\",\n }\n }]\n }\n },\n {\n \"range\": {\n \"path\": \"expiryDate\",\n 'gte': new Date()\n }\n }\n ],\n 'minimumShouldMatch' : 1\n }\n}\nexpiryDatenullexpiryDateexpiryDatenull", "text": "I am using mongodb atlas for full text search. My sample collection looks like this :expiryDate is a Date type field and arrayField is an Array type field.\nMy goal is to get all documents where either :My current atlas aggregation looks like :This is not returning all documents where the expiryDate field have null value and it is only matching one clause of should where expiryDate is greater than or equal to current time. I want it to return all those documents too where the expiryDate is null .Though i know having a null field is bad db design but is there a solution to this problem ?", "username": "pawan_saxena1" }, { "code": "\"expiryDate\"$search\"expiryDate\"db.collection.find({\n $or: [\n {expiryDate:{$exists:false}},\n {expiryDate:null},\n {expiryDate:{$gte:ISODate(\"2022-01-01\")}} /// <--- Assuming the current time is ISODate(\"2022-01-01\")\n ]\n})\n[\n {\n _id: '62fdfd7518da050007f035c6',\n expiryDate: null,\n arrayField: [ 'abc', 'jkl' ]\n },\n { _id: '62fdfd7518da050007f035c7', arrayField: [] },\n { _id: '62fdfd7518da050007f035c8', expiryDate: null },\n {\n _id: '62fdfd7518da050007f035c5',\n expiryDate: ISODate(\"2022-08-18T23:59:59.000Z\"), /// <---- NOTE: My example has this as a date value\n arrayField: [ 'abc', 'def', 'ghi', 'jkl' ]\n }\n]\n\"expiryDate\"\"expiryDate\"", "text": "Hi @pawan_saxena1 - Welcome to the community.Can you advise what your goal is here? Is it perhaps to view all documents with an non-existing or invalid \"expiryDate\"?In saying so, maybe utilising $search may not be required here. You could create an index on the \"expiryDate\" field and then possibly use the following if it suits your use case:Please note the first document in your sample data has the \"expiryDate\" as a string value rather than date value used in the example above.If this isn’t what you’re after, please advise the following:Regards,\nJason", "username": "Jason_Tran" }, { "code": "expiryDateexpiryDateexpiryDate", "text": "Hi @Jason_Tran I have already put the requirements in the question. Please go through the question again.\nI will put requirements again :\nMy goal is to get all documents:", "username": "pawan_saxena1" }, { "code": "$search$searchDB> var a = \n{\n '$search': {\n index: 'default',\n compound: {\n should: [\n {\n range: {\n path: 'expiryDate',\n gte: ISODate(\"2022-01-01T00:00:00.000Z\") /// <--- Assuming this is the current date for example purposes. 
Modify as required.\n }\n },\n {\n compound: {\n mustNot: [ { exists: { path: 'expiryDate' } } ]\n }\n },\n {\n compound: {\n must: [ { exists: { path: 'expiryDate' } } ],\n mustNot: [\n {\n range: {\n path: 'expiryDate',\n lt: 1 /// <---- null is less than 1 based off the below mongosh output\n }\n }\n ]\n }\n }\n ]\n }\n }\n}\nnull\"expiryDate\"nullmongoshnullDB> null < 1\ntrue\n$match.find()var b =\n{\n '$match': {\n '$or': [\n { expiryDate: { '$exists': false } },\n { expiryDate: null },\n { expiryDate: { '$gte': ISODate(\"2022-01-01T00:00:00.000Z\") } } /// <--- Assuming this is the current date for example purposes. Modify as required.\n ]\n }\n}\nDB> db.collection.aggregate(b)\n[\n {\n _id: '62fdfd7518da050007f035c6',\n expiryDate: null,\n arrayField: [ 'abc', 'jkl' ]\n },\n { _id: '62fdfd7518da050007f035c7', arrayField: [] },\n { _id: '62fdfd7518da050007f035c8', expiryDate: null },\n {\n _id: '62fdfd7518da050007f035c5',\n expiryDate: ISODate(\"2022-08-18T23:59:59.000Z\"),\n arrayField: [ 'abc', 'def', 'ghi', 'jkl' ]\n }\n]\n$search$match\"expiryDate\"null$match\"expiryDate\"", "text": "I understand those are your requirements. The alternate I have suggested runs in both in Atlas and non-Atlas deployments of MongoDB. To be more specific, i’m curious to what the 3 conditions equate to as a goal - I.e. Do those 3 requirements equate to documents that have an invalid expiry date from your application’s perspective?In saying so, there a few ways that should be able to get the document(s) you’re after although i’m not sure if you’re wanting to use $search ONLY, whether additional stages are allowed, etc.For example, if you’re only wanting to use the $search stage and nothing else, you may be able to get these documents using the below example:The oddity here is that i’ve used a range of less than 1 to try and retrieve null values on \"expiryDate\". However, i’d recommend also voting for the following feedback related to indexing null data types. The mongosh output for testing if null is greater than 1:Similar to the above example I provided earlier, you can also obtain the same document(s) using a $match stage equivalent to my .find() example:Output:You could also do a combination of a $search stage that gets the path if it exists or not followed by a $match stage for documents that have an \"expiryDate\" field value that is greater than the specified date or equal to null. 
Although there would be no index usage here for the $match stage that follows.Please note:Regards,\nJason", "username": "Jason_Tran" }, { "code": "{\n \"_id\": \"62fdfd7518da050007f035c5\",\n \"expiryDate\": \"2022-08-18T23:59:59+05:30\",\n \"arrayField\" : ['abc', 'def', 'ghi', 'jkl']\n},\n{\n \"_id\": \"62fdfd7518da050007f035c6\",\n \"expiryDate\": null,\n \"arrayField\" : ['abc','jkl']\n},\n{\n \"_id\": \"62fdfd7518da050007f035c7\",\n \"arrayField\" : []\n \"status\" : \"\"\n},\n{\n \"_id\": \"62fdfd7518da050007f035c8\",\n \"expiryDate\": null,\n \"status\" : null\n}\narrayFieldarrayFieldarrayField{\n \"should\" : [\n {\n \"compound\": {\n \"mustNot\": [{\n 'exists': {\n 'path': 'arrayField'\n }\n }]\n }\n },\n { // <--- i assume it checks for empty array like []\n \"compound\": {\n \"must\": [{\n 'exists': {\n 'path': 'arrayField'\n }\n }],\n \"mustNot\": [\n {\n \"exists\": {\n \"path\": \"arrayField.0\"\n }\n }]\n }\n },\n {\n \"equals\": {\n \"value\": mongoose.Types.ObjectId(someExampleValue),\n \"path\": \"arrayField\"\n }\n }\n ],\n \"minimumShouldMatch\" : 1\n}\n", "text": "Hi @Jason_Tran .\nThank you for taking out the time.\nI went through the docs of atlas and found that we should avoid using $match along with $search : * Query Performance , maybe in future the product gets evolved so much that it wouldn’t matter but for now the $match impacts the performance a lot.I did a example benchmark on the collection, it have more than 70 million records and :Now that being tested, i am planning to stick with only $search and above resolution works : execution time : 90msThe above resolution depicts that we used range operator (which can be used for numbers and date values).\nI have one other use case where the types of fields are String and Array.How can i achieve same thing for string and array types also ?arrayField is an array of objectIds : [ObjectId(123…), ObjectId(456…)]My Goal is :My current query for arrayField is :Problem with above query is it checks for empty array , but not for null.\nSame case for string type also.", "username": "pawan_saxena1" }, { "code": "$match$search$match$match$search", "text": "Thanks for getting back to me. You’ve stated the performance differences without $match and ($search with $match). However, I am curious as to why you are not using just a $match by itself (i.e. no $search) as shown in my previous example? This should be able to utilise indexes as well and would most likely be able work for you string and array types question.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "Hi @Jason_Tran i cannot use $match alone as it won’t uitlise the index mappings defined in atlas. The FTS mappings can only be leveraged using $search and $searchMeta :Learn how MongoDB Atlas can perform advanced text searches in your application using Atlas Search aggregation pipeline stages, MongoDB aggregation pipeline stages, and score-based results ranking.", "username": "pawan_saxena1" }, { "code": "expiryDate$matchdb.collection.aggregate({\n '$match': {\n '$or': [\n { expiryDate: { '$exists': false } },\n { expiryDate: null },\n { expiryDate: { '$gte': ISODate(\"2022-01-01T00:00:00.000Z\") } } /// <--- Assuming this is the current date for example purposes. Modify as required.\n ]\n }\n})\n$match$search", "text": "i cannot use $match alone as it won’t uitlise the index mappings defined in atlas.You could create an index on the expiryDate field for the example on this post. 
You can then do the same for your array and string type fields.The documents can then be found using $match alone. E.g. (from previous reply of mine):Have you compared the performance of a standard index and $match alone versus the Atlas Search index w/ $search alone to see if it suits your use case? I've not yet tested this myself, but if your concern is utilisation of indexes then you can create the index mentioned and run a db.collection.explain("executionStats") to see the results, where there should be some level of index usage.Regards,\nJason", "username": "Jason_Tran" }, { "code": "$match", "text": "The documents can then be found using $match alone. E.g. (from previous reply of mine):Yes, I can do that, but $match doesn't support full text search like $search does. The query is a small part of a very big query which also includes searches on text, date, string, number, and objectId types, and uses custom analyzers and combinations of $must, $mustNot, $should, $filter, etc. I can use compound in $search but not in $match.I also agree that I can replicate the whole query (and I did it earlier), but the query then spans 4000 lines and is very difficult to maintain. Using $search I can write a much cleaner and self-explanatory query (without having to deal with deep nesting of $or and $and).But above all, I was curious: since the $search syntax is promoted so much in the Atlas docs, there must be a way which I might be missing. I tried and failed, so can you please check once more for the arrayField, as I cannot use range or equals there.", "username": "pawan_saxena1" }, { "code": "null", "text": "The query is a small part of a very big query which also includes searches on text, date, string, number, and objectId types, and uses custom analyzers and combinations of $must, $mustNot, $should, $filter, etc. I can use compound in $search but not in $match.Thanks for providing those details and clarifying the use case, although unfortunately I'm not aware of how this can be done for array fields containing null value(s) off the top of my head, or if it can be done at all.I can also see you have raised another post in regards to this, so I will go ahead and close this post off since we have discussed a few possible solutions / workarounds specific to this post.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "", "username": "Jason_Tran" } ]
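For reference, a minimal mongosh sketch of the plain-MQL counterpart of the array-field conditions left open in the thread above. This does not use the Atlas Search index; the collection name and ObjectId value are assumptions for illustration.

```js
db.collection.aggregate([
  {
    $match: {
      $or: [
        { arrayField: { $exists: false } }, // field missing entirely
        { arrayField: null },               // note: in MQL this also matches missing fields
        { arrayField: { $size: 0 } },       // empty array []
        { arrayField: ObjectId("62fdfd7518da050007f035c5") } // array contains the value
      ]
    }
  }
])
```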
Check for missing field or null value in mongoDB atlas
2022-08-18T20:27:03.787Z
Check for missing field or null value in mongoDB atlas
5,159
null
[ "security" ]
[ { "code": "", "text": "Hi Everyone!!\nI have a specific requirement where I want to restrict a user to read the entire database except only a single collection. How can I proceed with this, does anyone have an idea about this?Thanks!", "username": "Hemendra_Chaudhary" }, { "code": "", "text": "It is not possible\nPlease check this link", "username": "Ramachandra_Tummala" }, { "code": "", "text": "While you might not be able to explicitly restrict 1 collection you might be able to explicitly give access to the other collections.See https://www.mongodb.com/docs/manual/tutorial/manage-users-and-roles/ for more details.", "username": "steevej" } ]
Role or Privilege to deny a user access to a single collection
2022-08-23T10:27:01.381Z
Role or Privilege to deny a user access to a single collection
2,761
null
[ "aggregation", "spark-connector" ]
[ { "code": "spark.read.format(\"mongo\").option(\"uri\",_uri).option(\"database\",_database) \\\n .option(\"collection\", _collection).option(\"pipeline\",\"{$match:{flag:0}}\").load()\nspark.read.format(\"mongodb\").option(\"spark.mongodb.connection.uri\",_uri) \\\n .option(\"spark.mongodb.database\",_database) \\\n .option(\"spark.mongodb.collection\", _collection)\\\n .option(\"pipeline\",\"{$match:{flag:0}}\")\\\n .load()\n", "text": "I am trying to filter data using aggregation pipeline. In spark mongo connector v3.0, it is working and in V10, the filter is not applied and all documents from collection is returnedv3.0 code: (filter is working)v10 code : (not working)Can you anyone please help me on this?", "username": "Amos_Decruz" }, { "code": "spark.read.format(“mongodb”).option(“spark.mongodb.connection.uri”,_uri)\n.option(“spark.mongodb.database”,_database)\n.option(“spark.mongodb.collection”, _collection)\n.option(“aggregation.pipeline”,\"{$match:{flag:0}}\")\n.load()\n", "text": "It worked. I have used", "username": "Amos_Decruz" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Aggregation pipeline not working in spark mongo connector v10
2022-08-23T11:10:33.350Z
Aggregation pipeline not working in spark mongo connector v10
2,668
null
[ "atlas-functions" ]
[ { "code": "", "text": "Is there a way that I can read/access the realm app id from within a realm function?", "username": "ankit.mhn" }, { "code": "", "text": "I just contacted a MongoDB Support agent with this same question, and they informed me that as of now, this is not possible. The support agent put in a feature request to add this functionality, but in the meantime we can work around this by setting the realm app id as an environment value.", "username": "Elias_Heffan1" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Get realm app id within a realm function?
2021-11-29T10:27:26.227Z
Get realm app id within a realm function?
2,946
https://www.mongodb.com/…0314107430cf.png
[ "aggregation", "data-modeling", "views" ]
[ { "code": "location: {\n type: \"Point\",\n coordinates: [-73.856077, 40.848447]\n}\n},{\n $project: {\n _id: 0, \n location: {\n type: \"Point\",\n coordinates: ['$longitude', '$latitude']\n },\n country: { $arrayElemAt: [ '$location.country_name', 0 ] } ,\n region: { $arrayElemAt: [ '$location.subdivision_1_name', 0 ] } ,\n device: changeEvent.fullDocument.sn\n }\n },{ \n $merge: { \n into: {\n \"db\": \"myDB\",\n \"coll\": \"myColl\"\n }, \n on: [ \"myKey\"],\n whenMatched: \"merge\",\n whenNotMatched: \"discard\"\n } \n }\n", "text": "I have an issue that chats is not detecting the type “corodinates/location”. I always get back an array of Objects. I am following the docs to specify a geoPoint.Here is the actual code:The resulting structure looks like this (mind the Array after ‘location’!)\nFor some reason location becomes an array and this seems to fool chats, so that the coordinates are not detected as coordinates/location but as an array:\nIt should look like this\n\n(Example taken from the airbnb sample data)How can I get the coordinates detected in charts so that I can make use of them??Regards,\nMichael", "username": "michael_hoeller" }, { "code": "$unwind", "text": "Hi Michael -Your location data is in GeoJSON format, which is something that Charts is able to deal with directly. However unfortunately we can’t handle arrays of GeoJSON objects, which is what you have. The best solution would be to use $unwind in the query bar to get rid of the array, and then it should detect the location data correctly.Tom", "username": "tomhollander" }, { "code": "\"location\"$arrayElemAtmyColl", "text": "Note that if \"location\" is an array in the original document (as I suspect it is because you’re using $arrayElemAt to get a single country name from it) then the output will also make it an array.Can you provide the original document format in source collection as well as document format in myColl that you are outputting to? Both projection and updates will traverse arrays and preserve their “array-ness”…Asya", "username": "Asya_Kamsky" }, { "code": "\"location\"$arrayElemAtas: 'location'location\"location_new_field_name\": {\n \"type\": \"Point\",\n \"coordinates\": [\n 22.9414,\n 39.3616\n ]\n },\n \"country\": \"Greece\",\n \"region\": \"Thessaly\",\n \"devicemac\": \"01:02:03:04:05:06\"\n }\n,{\n $lookup: {\n from: 'geoip_location',\n localField: 'geoname_id',\n foreignField: 'geoname_id',\n as: 'location'\n }\n },{\n $project: {\n _id: 0, \n location: {\n type: \"Point\",\n coordinates: ['$longitude', '$latitude']\n },\n country: { $arrayElemAt: [ '$location.country_name', 0 ] } ,\n region: { $arrayElemAt: [ '$location.subdivision_1_name', 0 ] } ,\n devicemac: changeEvent.fullDocument.sn\n }\n }\n", "text": "Hello @Asya_KamskyNote that if \"location\" is an array in the original document (as I suspect it is because you’re using $arrayElemAt to get a single country name from it) then the output will also make it an array.thanks a lot, next time we meet the dink is on me…This is a stupid mistake I made. Before I $project I have a $lookup stage where I use as: 'location' this is an array. For whatever reason I used location again in the $project to name the projected Object. 
So in $project the array is picked up from the $lookup and the aggregation does what it should do.\nSimply changing the name of the field fixes my mistake and the $project returns an object instead of an array.Guess this is a further entry on the “don’t do this” list…The new output:The (incorrect) aggregation → in $project the objectname should NOT be location since this conflicts with the location in the $lookup:Cheers, Michael", "username": "michael_hoeller" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
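To illustrate Tom's alternative from earlier in the thread: if the GeoJSON point does end up wrapped in an array, an $unwind in the Charts query bar flattens it so the point type can be detected. The field name matches the thread; this is a sketch, not the fix Michael ultimately used.

```js
[ { $unwind: "$location" } ]
```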
Charts fails to detect coordinates, when following the docs.. Where is the glitch?
2022-08-19T19:25:23.776Z
Charts fails to detect coordinates, when following the docs.. Where is the glitch?
2,515
null
[ "aggregation", "queries" ]
[ { "code": "", "text": "How write stage $match {} (all documents of collection )else user: ‘an email’ by condition? somewhat like: if (isAdmin==true) {$match:{}} else { $match:{user:‘an email’}\nit is 1st stage of aggregation query, result for next stage all docs or docs match only belong / created that user.\nisAdmin – var boolean outside of aggregation", "username": "Andrei" }, { "code": "", "text": "Please provide sample documents and the expected result.It is not clear if isAdmin is a field of the documents from your collection or a variable in your programming language.", "username": "steevej" }, { "code": "", "text": "thanks for answer,\n[\n{\n“_id”: {\n“$oid”: “630423495c8abda60dad5b47”\n},\n“title”: “e”,\n“description”: “e”,\n“user”: “[email protected]”\n},\n{\n“_id”: {\n“$oid”: “630423615c8abda60dad5b70”\n},\n“title”: “eee”,\n“description”: “eee”,\n“user”: “[email protected]”\n},\n{\n“_id”: {\n“$oid”: “630423ef5c8abda60daea1d3”\n},\n“title”: \"Moncton \",\n“description”: “Moncton’s Modality Shift”,\n“user”: “[email protected]”\n},\n{\n“_id”: {\n“$oid”: “630423ef5c8abda60daea1d5”\n},\n“title”: “New town”,\n“description”: “Founded in 2021, Downtown town is the new hotness”,\n“user”: “[email protected]”\n},\n{\n“_id”: {\n“$oid”: “630423ef5c8abda60daea1d7”\n},\n“title”: “Market: Johner Fest 2022”,\n“description”: “vv”,\n“user”: “[email protected]”\n}\n]", "username": "Andrei" }, { "code": "", "text": "isAdmin – var boolean outside of aggregation", "username": "Andrei" }, { "code": "", "text": "it is 1st stage of aggregation query, result for next stage all docs or docs match only belong / created that user. isAdmin – var boolean outside of aggregation", "username": "Andrei" }, { "code": "if ( isAdmin ) {\n match_stage = { \"$match\" : {} }\n}\nelse {\n match_stage = { \"$match\" : { \"user\" : email } }\n}\n\npipeline = [ match_stage , /* other stages */ ]\n\n", "text": "", "username": "steevej" }, { "code": "", "text": "thanks, I’m sorry for stupid question, what is syntax to add that into like: db.collectionName.aggregate()? So, I’ve used to just syntax db.collectionName.aggregate([\n{$match:…},\n{$project:…},\n{$group:…}\n…\n])", "username": "Andrei" }, { "code": "match_stage = { \"$match\" : { } }pipeline = [ ]\n\nif ( ! isAdmin ) {\n pipeline.push( { \"$match\" : { \"user\" : email } } )\n}\n\npipeline.push( { \"$group\" : { \"_id\" : null , count : { \"$sum\" : 1 } } } )\n\ndb.collectionName.aggregate( pipeline ) ;\n", "text": "An aggregation pipeline is simply an array of JSON object. Each object being a stage. You can manipulate your pipeline with any logic and array operation you wish.For example, by default, all documents are matched so havingmatch_stage = { \"$match\" : { } }is useless and might even be detrimental to performances.So alternatively you could do:", "username": "steevej" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Aggregation $match
2022-08-23T15:56:11.825Z
Aggregation $match
1,727
null
[ "aggregation" ]
[ { "code": "{\n \"time\": ISODate(\"2022-08-12T11:05:00Z\"),\n \"nodeName\": \"selipcnsgsnmme174\",\n \"cpuload\": {\n \"1_11(FSB)\": 0.04,\n \"1_15(NCB)\": 0.04,\n \"1_9(NCB)\": 0.01,\n \"2_1(AP/LC/SS7_SCTP_DP)\": 0.13,\n \"2_2(AP/LC/SS7_SCTP_DP)\": 0.12,\n \"2_1(LC)\": 0,\n \"2_2(LC)\": 0\n }\n}\n{\n \"time\": ISODate(\"2022-08-12T11:10:00Z\"),\n \"nodeName\": \"selipcnsgsnmme174\",\n \"cpuload\": {\n \"1_11(FSB)\": 0.04,\n \"1_15(NCB)\": 0.04,\n \"1_9(NCB)\": 0,\n \"2_1(AP/LC/SS7_SCTP_DP)\": 0.13,\n \"2_2(AP/LC/SS7_SCTP_DP)\": 0.12,\n \"2_1(LC)\": 0.01,\n \"2_2(LC)\": 0\n }\n}\n{\n \"time\": ISODate(\"2022-08-12T11:05:00Z\"),\n \"nodeName\": \"NODE2\",\n \"2_11(FSB)\": 0.04,\n \"2_15(NCB)\": 0.04,\n \"2_9(NCB)\": 0.01,\n \"2_1(AP/LC/SS7_SCTP_DP)\": 0.13,\n \"2_2(AP/LC/SS7_SCTP_DP)\": 0.12,\n \"2_1(LC)\": 0,\n \"2_2(LC)\": 0\n}\n{\n \"time\": ISODate(\"2022-08-12T11:10:00Z\"),\n \"nodeName\": \"NODE1\",\n \"_11(FSB)\": 0.04,\n \"1_15(NCB)\": 0.04,\n \"1_9(NCB)\": 0,\n \"1_1(AP/LC/SS7_SCTP_DP)\": 0.13,\n \"1_2(AP/LC/SS7_SCTP_DP)\": 0.12,\n \"1_1(LC)\": 0.01,\n \"1_2(LC)\": 0\n}\n", "text": "Hi Experts, I am new to MongoDB.\nI need help to query and project nested documents.Consider the following sample documents of a collection, CPU load is a nested document its field names vary in different records.I would like to project is like the following:", "username": "Shashikant_Saxena" }, { "code": "db.collection('').updateMany({},$rename{ \"cpuLoad.1_11(FSB)\" : \"1_11(FSB)\", ...}db.collection('').aggregate([{\n$project:{\ntime: 1,\nnodeName: 1,\n\"1_11(FSB)\": \"$cpuLoad.1_11(FSB)\" :\n}}])\n", "text": "I am not sure what you mean to project is like following so let me guess two scenarioif you want to change the documents format in the database,\nyou can do it by renaming the fields with following operator,db.collection('').updateMany({},$rename{ \"cpuLoad.1_11(FSB)\" : \"1_11(FSB)\", ...}or if you want to display the data after query you can do it throught this", "username": "Kyaw_Zayar_Tun" }, { "code": "", "text": "Thankyou for the reply,I do not want to change the document format in the database , only need the formatting in projection query.1_11(FSB) is not common field for all the documents , it will change in documents. Can we have a generic query.", "username": "Shashikant_Saxena" }, { "code": "pipeline = [\n { \"$replaceRoot\" : {\n newRoot : { \"$mergeObjects\" : [ \"$$ROOT\" , \"$cpuload\" ] } }\n },\n { \"$unset\" : [ \"cpuload\"] }\n]\n{ \"cpuload.1_11(FSB}\" : 0.04 }\n", "text": "This type of data cosmetic is better done on the application side. The operation is easier to scale since only the application making the query is impacted.Try with the pipelineTake a look at Building with Patterns: The Attribute Pattern | MongoDB Blog. The way your data is structure you cannot benefit indexing for queries like:without indexing each and every cpuload fields.", "username": "steevej" }, { "code": "", "text": "This type of data cosmetic is better done on the application side. The operation is easier to scale since only the application making the query is impacted.Doing it in aggregation is not doing it application side - your pipeline in fact correctly does it server-side where only the desired format is returned…Asya", "username": "Asya_Kamsky" }, { "code": "", "text": "Doing it in aggregation is not doing it application sideThanks, but I really know that. 
It was not clear that the first paragraph was my recommendation, and that the pipeline was not my recommendation but the answer to the question.", "username": "steevej" } ]
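Alongside the $replaceRoot pipeline above, one generic way to handle the varying cpuload keys is $objectToArray, which is the basis of the Attribute Pattern steevej links to. A mongosh sketch; the collection name is an assumption, field names match the thread.

```js
db.metrics.aggregate([
  { $project: {
      time: 1,
      nodeName: 1,
      // Turns the variable keys into a queryable, indexable k/v array:
      // [ { k: "1_11(FSB)", v: 0.04 }, { k: "1_15(NCB)", v: 0.04 }, ... ]
      load: { $objectToArray: "$cpuload" }
  } }
])
```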
Displaying Nested Documents
2022-08-23T11:21:54.016Z
Displaying Nested Documents
1,442
null
[]
[ { "code": "{w:0}majoritymajority", "text": "Can anyone answer these questions regarding read/write concerns. I’ve read the documentation, but it’s so vast I struggle to find specific answers to what I don’t understand.After issuing an unacknowledged write {w:0} do reads from the primary (or any other members) wait for the operation to finish?After issuing a majority write to the primary, could a majority read from a secondary come from one of the members that has not received the update? or does it wait? or does the driver select a suitable member that has received the update?", "username": "timw" }, { "code": "\"majority\"secondary\"majority\"", "text": "Update: I think I found the answer to Q2 here.These guarantees hold across all members of the MongoDB deployment. For example, if, in a causally consistent session, you issue a write with \"majority\" write concern followed by a read that reads from a secondary (i.e. read preference secondary ) with \"majority\" read concern, the read operation will reflect the state of the database after the write operation.I don’t know how the magic happens, but good enough for me.", "username": "timw" }, { "code": "\"local\"\"available\"", "text": "Update: I think I found the answer to Q1 here.Regardless of a write’s write concern, other clients using \"local\" or \"available\" read concern can see the result of a write operation before the write operation is acknowledged to the issuing client.This example further clarifies the timeline, so it seems reads do not wait.", "username": "timw" }, { "code": "", "text": "Looks like you found your own answers, and in fact those are correct - local (aka default) reads don’t wait for data to be acknowledged or replicated to read it, and the same goes for majority readConcern - it will read the latest majority committed data, but it will not necessarily be the same data that previous write operation waited for.", "username": "Asya_Kamsky" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Understanding read/write concerns
2022-08-23T09:58:58.101Z
Understanding read/write concerns
1,010
null
[ "atlas-device-sync", "atlas-functions", "schema-validation" ]
[ { "code": "nullnull{\n someKey: {\n \"bsonType\": \"object\",\n \"properties\": {\n \"url\": {\n \"bsonType\": \"string\"\n }\n },\n \"required\": [],\n \"title\": \"FileObject\"\n }\n}\nclass FileObject: RealmObject {\n .....\n var url: String?\n}\nlet file = FileObject()\nfile.url = nil // maybe I am editing an existing file and removing the url\nsomeObject = file\n{\n ...,\n someKey: {\n url: null \n }\n}\nnull", "text": "Synced unset Keys from device yield null in database. And null fails validation if you attempt to edit this object later using cloud functions.iOS client codein database (after SYNC from iOS Client)Then if I attempt to edit this object in cloud-function or if I run schema validation in Atlas, I get an error “InvalidTypeError” on url, because it suppose to not exist, but it is null which is not a string.So JSON validation should accept null and consider the field non existent for primitive types", "username": "Georges_Jamous" }, { "code": "", "text": "Found the solution in AppSetting, Null type check can be disabled", "username": "Georges_Jamous" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
[BUG] null values do no pass validation
2022-08-21T18:38:32.432Z
[BUG] null values do no pass validation
2,280
https://www.mongodb.com/…f_2_1024x211.png
[ "queries", "kotlin", "realm-studio", "many-to-many-relationship", "one-to-one-relationship" ]
[ { "code": "class Product: RealmObject {\n @PrimaryKey\n var _id: ObjectId = ObjectId.create()\n var name: String? = null\n var quantity: Int? = null\n var price: Float? = null\n var image: String? = null\n var shortDescritpion: String? = null\n var longDescritpion: String? = null\n var barcode: Long? = null\n var category: Category? = null\n}\n\nclass Category: RealmObject {\n @PrimaryKey\n var _id: ObjectId = ObjectId.create()\n var name: String = \"\"\n var image: String = \"\"\n var products: RealmList<Product> = realmListOf<Product>()\n var company_id: Company? = null\n}\n{\n \"title\": \"Product\",\n \"bsonType\": \"object\",\n \"required\": [\n \"_id\",\n \"_partition\"\n ],\n \"properties\": {\n \"_id\": {\n \"bsonType\": \"objectId\"\n },\n \"_partition\": {\n \"bsonType\": \"string\"\n },\n \"name\": {\n \"bsonType\": \"string\"\n },\n \"quantity\": {\n \"bsonType\": \"long\"\n },\n \"price\": {\n \"bsonType\": \"float\"\n },\n \"image\": {\n \"bsonType\": \"string\"\n },\n \"shortDescritpion\": {\n \"bsonType\": \"string\"\n },\n \"longDescritpion\": {\n \"bsonType\": \"string\"\n },\n \"barcode\": {\n \"bsonType\": \"long\"\n },\n \"category\": {\n \"bsonType\": \"objectId\"\n }\n }\n}\n\n{\n \"company_id\": {\n \"ref\": \"#/relationship/mongodb-atlas/bestock-database/Company\",\n \"foreignKey\": \"_id\",\n \"isList\": false\n },\n \"products\": {\n \"ref\": \"#/relationship/mongodb-atlas/bestock-database/Product\",\n \"foreignKey\": \"_id\",\n \"isList\": true\n }\n}\n{\n \"title\": \"Category\",\n \"bsonType\": \"object\",\n \"required\": [\n \"_id\",\n \"_partition\",\n \"name\",\n \"image\"\n ],\n \"properties\": {\n \"_id\": {\n \"bsonType\": \"objectId\"\n },\n \"_partition\": {\n \"bsonType\": \"string\"\n },\n \"name\": {\n \"bsonType\": \"string\"\n },\n \"image\": {\n \"bsonType\": \"string\"\n },\n \"products\": {\n \"bsonType\": \"array\",\n \"items\": {\n \"bsonType\": \"objectId\"\n }\n },\n \"company_id\": {\n \"bsonType\": \"objectId\"\n }\n }\n}\n", "text": "I have created to RealmObjects: Category and Product, and I have created a one-to-many relationship, where one category has multiple products.\nWhen I insert a product to Atlas database the category is not showing up. 
The products are not showing up in the Category collection either.These are my Realm objects:This is the product schema:This is the category schema:Relationship:Body:The database looks like this after I insert a category and a product:\n\n(Screenshot 2022-08-22 at 11.39.11 AM)\n\n(Screenshot 2022-08-22 at 11.39.30 AM)\n", "username": "Besart_H" }, { "code": "_partition", "text": "Hi @Besart_H ,It’s not clear from the information you provide, but: are you using Partition-based Sync, or Flexible Sync?If the first is the case, the 2 records you report will never end up in the same partition, as they have different partition values, so the relationship won’t work.If you’re using Flexible Sync instead, then the _partition field is redundant, of course", "username": "Paolo_Manna" }, { "code": "", "text": "Hi @Paolo_Manna I am using partition-based Sync", "username": "Besart_H" }, { "code": "productcategory", "text": "I am using partition-based SyncThen, as explained before, the relationship can’t possibly work, as the Product document is in the product partition, while the Category is in the category partition, and relationships don’t work cross-partition.To have it working, both objects should be in the same partition.You may want to re-evaluate your data structure accordingly: some docs about different Sync modes, and Partition-based Sync may help here.", "username": "Paolo_Manna" } ]
How to insert objects to Atlas using Relationships with Realm in Kotlin?
2022-08-22T09:42:18.919Z
How to insert objects to Atlas using Relationships with Realm in Kotlin?
2,701
null
[ "aggregation" ]
[ { "code": "backend only proj", "text": "my search\n‘path’: ‘name’,\n‘query’: backend only proj,\nthis is result =>\n[\n{\n“name”: “backend only project”,\n“objCode”: “PROJECT”\n},\n{\n“name”: “Backend Template - Project”,\n“objCode”: “PROJECT”\n},\n{\n“name”: “Backend Template - Project”,\n“objCode”: “PROJECT”\n},\n{\n“name”: “Backend Template - Project”,\n“objCode”: “PROJECT”\n}\n]\nbut i need exact data like this =>\n[\n{\n“name”: “backend only project”,\n“objCode”: “PROJECT”\n}\n]", "username": "ansil_tm" }, { "code": "", "text": "Hi @ansil_tm, check out this blog we recently posted about achieving exact matches. Let me know if you have any questions upon review.", "username": "Elle_Shwer" }, { "code": "", "text": "thanks . but i did not get it and its like similar to this one The Phrase Operator\nactually this function i want to change and create altas search\ncollection.find({name: new RegExp(“backend only pro”, “i”)} ,{_id: 1,name:1,objCode: “PROJECT”}, { limit: limit })\noutput=>\n{\n“name”: “backend only project”,\n“objCode”: “PROJECT”\n}\ni need this exact in altas search", "username": "ansil_tm" }, { "code": "${filter}", "text": "Hai\ni found its solution\n‘autocomplete’: {\n‘path’: ‘name’,\n‘query’: ${filter},\n‘tokenOrder’: ‘sequential’,\n}\n‘tokenOrder’: ‘sequential’, // this line make it solved\nThanks ", "username": "ansil_tm" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to get exact search data in Atlas Search
2022-08-22T13:07:16.085Z
How to get exact search data in Atlas Search
1,158
https://www.mongodb.com/…e_2_1024x512.png
[ "queries", "replication" ]
[ { "code": "sudo mongod --port 27017 --dbpath /var/lib/mongo/ --replSet rs0 --bind_ip localhost,0.0.0.0{\"t\":{\"$date\":\"2022-08-19T12:36:48.700+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23285, \"ctx\":\"main\",\"msg\":\"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.703+00:00\"},\"s\":\"W\", \"c\":\"ASIO\", \"id\":22601, \"ctx\":\"main\",\"msg\":\"No TransportLayer configured during NetworkInterface startup\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.704+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4648601, \"ctx\":\"main\",\"msg\":\"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize.\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.704+00:00\"},\"s\":\"W\", \"c\":\"ASIO\", \"id\":22601, \"ctx\":\"main\",\"msg\":\"No TransportLayer configured during NetworkInterface startup\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.704+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4615611, \"ctx\":\"initandlisten\",\"msg\":\"MongoDB starting\",\"attr\":{\"pid\":4426,\"port\":27017,\"dbPath\":\"/var/lib/mongo/\",\"architecture\":\"64-bit\",\"host\":\"ip-172-31-35-40.ap-south-1.compute.internal\"}}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.704+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23403, \"ctx\":\"initandlisten\",\"msg\":\"Build Info\",\"attr\":{\"buildInfo\":{\"version\":\"4.4.15\",\"gitVersion\":\"bc17cf2c788c5dda2801a090ea79da5ff7d5fac9\",\"openSSLVersion\":\"OpenSSL 1.0.2k-fips 26 Jan 2017\",\"modules\":[],\"allocator\":\"tcmalloc\",\"environment\":{\"distmod\":\"amazon2\",\"distarch\":\"x86_64\",\"target_arch\":\"x86_64\"}}}}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.704+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":51765, \"ctx\":\"initandlisten\",\"msg\":\"Operating System\",\"attr\":{\"os\":{\"name\":\"Amazon Linux release 2 (Karoo)\",\"version\":\"Kernel 4.14.256-197.484.amzn2.x86_64\"}}}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.704+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":21951, \"ctx\":\"initandlisten\",\"msg\":\"Options set by command line\",\"attr\":{\"options\":{\"net\":{\"bindIp\":\"localhost,0.0.0.0\",\"port\":27017},\"replication\":{\"replSet\":\"rs0\"},\"storage\":{\"dbPath\":\"/var/lib/mongo/\"}}}}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.705+00:00\"},\"s\":\"E\", \"c\":\"STORAGE\", \"id\":20568, \"ctx\":\"initandlisten\",\"msg\":\"Error setting up listener\",\"attr\":{\"error\":{\"code\":9001,\"codeName\":\"SocketException\",\"errmsg\":\"Address already in use\"}}}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.705+00:00\"},\"s\":\"I\", \"c\":\"REPL\", \"id\":4784900, \"ctx\":\"initandlisten\",\"msg\":\"Stepping down the ReplicationCoordinator for shutdown\",\"attr\":{\"waitTimeMillis\":10000}}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.705+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":4784901, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the MirrorMaestro\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.705+00:00\"},\"s\":\"I\", \"c\":\"SHARDING\", \"id\":4784902, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the WaitForMajorityService\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4784905, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the global connection pool\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"REPL\", \"id\":4784907, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the replica set node 
executor\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4784918, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the ReplicaSetMonitor\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"SHARDING\", \"id\":4784921, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the MigrationUtilExecutor\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":4784925, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down free monitoring\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4784927, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the HealthLog\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4784929, \"ctx\":\"initandlisten\",\"msg\":\"Acquiring the global lock for shutdown\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"-\", \"id\":4784931, \"ctx\":\"initandlisten\",\"msg\":\"Dropping the scope cache for shutdown\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"FTDC\", \"id\":4784926, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down full-time data capture\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":20565, \"ctx\":\"initandlisten\",\"msg\":\"Now exiting\"}\n{\"t\":{\"$date\":\"2022-08-19T12:36:48.706+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23138, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down\",\"attr\":{\"exitCode\":48}}\n", "text": "Hii Evenryone,I’m trying to migrate my MongoDB Standalone database into MongoDB Altas using Live Migration Service but it requires replicaset of it and I’m faing issues while converting stanalone databse into replicaset. I’m refering following documentation of MongoDB:From this documentation I run following command:sudo mongod --port 27017 --dbpath /var/lib/mongo/ --replSet rs0 --bind_ip localhost,0.0.0.0This is gving me folowwing error:Can anyone tell me what I’m suppose to do next. My ultimate goal is to migrate my Standalone Database to MongoDB Atlas.", "username": "Hemendra_Chaudhary" }, { "code": "\"errmsg\":\"Address already in use\"mongod", "text": "If you look at line 9 of your output you will see the following:\"errmsg\":\"Address already in use\"This means that there is something already listening on the port in question. It looks like you have another mongod instance running on that server. You need to stop that one first.", "username": "Doug_Duncan" }, { "code": "sudo", "text": "Yes. Step 1 is shutdown mongod.I recommend you change this(the replset argument) in the configuration fie. 
If your host restarts you'll be back to standalone, which will impact your migration.Additionally, running sudo without specifying the mongodb user could/will create files that will prevent the systemd unit automatically starting at a later date.If it is a one-off, very quick migration, the way you are running it is probably fine.", "username": "chris" }, { "code": "", "text": "I don't know why it is giving this error, but I'm running only one mongod on my system.", "username": "Hemendra_Chaudhary" }, { "code": "", "text": "You can check with ps -ef | grep mongod if another mongod is running.\nI suspect an issue with your bindIp parameter.\nI think it should be localhost,private IP for a replica.", "username": "Ramachandra_Tummala" }, { "code": "", "text": "thanks, this worked!!!", "username": "Hemendra_Chaudhary" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
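Following chris's advice, once replication.replSetName (e.g. rs0) is set in the config file and mongod is restarted, the replica set is initiated once from mongosh. A sketch; the hostname is an assumption.

```js
rs.initiate({
  _id: "rs0",
  members: [ { _id: 0, host: "localhost:27017" } ]
})
rs.status() // should report this member as PRIMARY once initiated
```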
Unable to convert a Standalone mongodb Database on AWS to a Replica Set
2022-08-19T12:56:40.031Z
Unable to convert a Standalone mongodb Database on AWS to a Replica Set
2,180
null
[ "aggregation", "node-js", "mongoose-odm" ]
[ { "code": " const userDataList = await models.User.aggregate([\n {\n $match: {\n _id: new mongoose.Types.ObjectId(_id),\n },\n },\n\n {\n $set: {\n userData: { $first: \"$userData\" },\n },\n },\n\n {\n $unset: [\"user\"],\n },\n\n {\n $lookup: {\n from: \"cards\",\n localField: \"_id\",\n foreignField: \"creator.id\",\n as: \"cards\",\n },\n },\n\n {\n $lookup: {\n from: \"cards\",\n let: { userId: \"$_id\" },\n pipeline: [\n {\n $facet: {\n count: [\n {\n $match: {\n// need help here\n},\n },\n ],\n },\n },\n {\n $count: \"totafdafd\",\n },\n ],\n as: \"totalCards\",\n },\n },\n\n ]);\n", "text": "Hey there, I want to fetch the user details by passing _id and I also want the total no. of posts made by him/her should also be fetched. So please help me in getting it done.This is an example of how I am doing it.", "username": "Taranpreet_Singh1" }, { "code": "", "text": "Hi @Taranpreet_Singh1,Welcome to the MongoDB Community forums Could you please share the sample data set of each of the collections (being used here) for a better understanding of the schema of your application?Regards,\nKushagra", "username": "Kushagra_Kesav" } ]
Please help in fetching the data of user with posts info
2022-08-21T15:09:11.844Z
Please help in fetching the data of user with posts info
1,759
null
[ "database-tools", "containers", "backup", "storage" ]
[ { "code": "2022-08-21T19:53:32.158+0000\tusing write concern: &{0 false 0}\n2022-08-21T19:53:32.173+0000\tchecking options\n2022-08-21T19:53:32.173+0000\t\tdumping with object check disabled\n2022-08-21T19:53:32.173+0000\twill listen for SIGTERM, SIGINT, and SIGKILL\n2022-08-21T19:53:32.174+0000\tconnected to node type: replset\n{\"t\":{\"$date\":\"2022-08-21T20:16:41.294+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"WTCheckpointThread\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1661113001:294663][1:0x7fe711343700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 484, snapshot max: 484 snapshot count: 0, oldest timestamp: (1661112996, 1) , meta checkpoint timestamp: (1661113001, 1) base write gen: 43869\"}}\n{\"t\":{\"$date\":\"2022-08-21T20:17:08.490+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"127.0.0.1:57968\",\"connectionId\":14,\"connectionCount\":5}}\n{\"t\":{\"$date\":\"2022-08-21T20:17:08.490+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"127.0.0.1:57978\",\"connectionId\":15,\"connectionCount\":6}}\n{\"t\":{\"$date\":\"2022-08-21T20:17:08.493+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn15\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"127.0.0.1:57978\",\"client\":\"conn15\",\"doc\":{\"driver\":{\"name\":\"mongo-go-driver\",\"version\":\"v1.10.0\"},\"os\":{\"type\":\"linux\",\"architecture\":\"amd64\"},\"platform\":\"go1.17.10\",\"application\":{\"name\":\"mongorestore\"}}}}\n{\"t\":{\"$date\":\"2022-08-21T20:17:08.494+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn14\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"127.0.0.1:57968\",\"client\":\"conn14\",\"doc\":{\"driver\":{\"name\":\"mongo-go-driver\",\"version\":\"v1.10.0\"},\"os\":{\"type\":\"linux\",\"architecture\":\"amd64\"},\"platform\":\"go1.17.10\",\"application\":{\"name\":\"mongorestore\"}}}}\n{\"t\":{\"$date\":\"2022-08-21T20:17:08.495+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"127.0.0.1:57984\",\"connectionId\":16,\"connectionCount\":7}}\n{\"t\":{\"$date\":\"2022-08-21T20:17:08.496+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn16\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"127.0.0.1:57984\",\"client\":\"conn16\",\"doc\":{\"driver\":{\"name\":\"mongo-go-driver\",\"version\":\"v1.10.0\"},\"os\":{\"type\":\"linux\",\"architecture\":\"amd64\"},\"platform\":\"go1.17.10\",\"application\":{\"name\":\"mongorestore\"}}}}\n{\"t\":{\"$date\":\"2022-08-21T20:17:08.503+00:00\"},\"s\":\"I\", \"c\":\"ACCESS\", \"id\":20250, \"ctx\":\"conn16\",\"msg\":\"Authentication succeeded\",\"attr\":{\"mechanism\":\"SCRAM-SHA-256\",\"speculative\":true,\"principalName\":\"root\",\"authenticationDatabase\":\"admin\",\"remote\":\"127.0.0.1:57984\",\"extraInfo\":{}}}\n{\"t\":{\"$date\":\"2022-08-21T20:17:41.299+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"WTCheckpointThread\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1661113061:299260][1:0x7fe711343700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 499, snapshot max: 499 snapshot count: 0, oldest timestamp: (1661113056, 1) , meta checkpoint timestamp: (1661113061, 1) base write gen: 43869\"}}\n", "text": "Hi, I 
am trying to dump and restore a mongodb database (from version 5.0.10 to 4.4.14) running in docker container.The mongodump works without a problem and I am getting a 1.1GB file:docker exec -i mongodb-5 mongodump --ssl --sslAllowInvalidHostnames --port 12005 --gzip --db rocketchat -u root -p <mongodb pw für local user> --authenticationDatabase admin --host 127.0.0.1 --archive > .gzWhen I try to restore (have tried with 4 and 5) I get only the output for starting the restore but there is no indicator starting (I have let it run for about 3h max) and there was also no additonal output in the logfile (I have copied the backupfile to the container):docker exec -i mongodb-4 mongorestore -vvvv --ssl --tlsInsecure --port 12004 --gzip -u root -p <mongodb pw für local user> --authenticationDatabase admin --drop --host 127.0.0.1 --preserveUUID --convertLegacyIndexes --noIndexRestore --archive /opt/.gzThe output:In the docker log I am getting this output:I am new to mongodb, and would be grateful for any help.Best\nRobert", "username": "Robert_Reihs" }, { "code": "", "text": "Did it dump successfully?\nAny log from your terminal\nYou have give a name to archive file\nIs the archive you are passing as input to your mongorestore correct?", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Yes the dump was successful.\nOnly the messages shown, for the export it shows the progress of the dump.\nAh there is a formatting issue with the code, yes I have named the archive files.\nNo I have tested it multiple times, I will check the archive file.", "username": "Robert_Reihs" }, { "code": "--archive=filename--archive filename", "text": "the form is --archive=filename and you have --archive filenameIts waiting for stdin in your invocation.", "username": "chris" }, { "code": "", "text": "Ah thank you that was the problem.", "username": "Robert_Reihs" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Restore database not starting restore process
2022-08-21T20:19:43.299Z
Restore database not starting restore process
3,443
null
[ "sharding", "security" ]
[ { "code": "db.createRole(\n {\n role: \"ADMON_USRS\",\n privileges: [],\n roles: [\n { role: \"userAdminAnyDatabase\", db: \"admin\" }\n ]\n }\n)\n\nMongoDB Enterprise mongos> db.getUser(\"user\")\n{\n \"_id\" : \"$external.user\",\n \"userId\" : UUID(\"8d1ca4a0-3560-46c3-9ef6-d126092be54c\"),\n \"user\" : \"user\",\n \"db\" : \"$external\",\n \"roles\" : [\n {\n \"role\" : \"ADMON_USRS\",\n \"db\" : \"admin\"\n }\n ],\n \"mechanisms\" : [\n \"external\"\n ]\n}\nMongoDB Enterprise mongos>\n", "text": "Hi guys, I’m trying to create a role able to admin users but only that, I did it like this but I still can create databases and collections, etc, should I revoke specific permissions?", "username": "Oscar_Cervantes" }, { "code": "userAdminAnyDatabaseuserAdminuserAdminuserAdminMongoDB", "text": "Hi @Oscar_Cervantes and welcome to the community!!The userAdminAnyDatabase is an extension of userAdmin with applies to all the databases.\nThe userAdmin provides the ability to create and modify roles and users on the current database and the following actions are provided with the userAdmin roles.However, note that neither userAdmin nor userAdminAnyDatabase roles have permission to create/drop collections (which is available on dbAdmin/dbAdminAnyDatabase roles) so I think you’re on the right track by using the userAdminAnyDatabase role.The scenario you mentioned (userAdmin role can create/drop collections) is a possibility when the mongod process was not started with the --auth parameter. Without the --auth parameter, users can be created and can login, but the authorisation are not enforced.Can you please confirm that the MongoDB process is started with --auth being enabled?Please refer to the documentations on Built-in Roles to understand further.The Enable Access Control may be useful as well for details into enabling auth. If you need further help, could you post:Let us know if you have any further questions.Thanks\nAasawari", "username": "Aasawari" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Create a role to admin users and just that
2022-08-19T16:45:30.140Z
Create a role to admin users and just that
2,041
null
[]
[ { "code": "", "text": "All resources under domain “mongodb.com” cannot be accessed.\nAtlas cloud is available, but it takes a long long time to open the atlas portal.Is it something that often happens?\nIt will seriously affect our choice of MongoDB.Thanks.", "username": "E_Y" }, { "code": "", "text": "Hey @E_Y welcome to the community.Is the website working for you now, or are you still having issues with it?Kevin", "username": "kevinadi" }, { "code": "", "text": "Ok now.\nThanks for your reply.\nDo you know why it was broken that day?", "username": "E_Y" }, { "code": "", "text": "Hi @E_YFor MongoDB Atlas & Cloud resources, you can monitor their current status in https://status.cloud.mongodb.com/ . The page also lists past incidents.Do you know why it was broken that day?Not that particular incident, unfortunately. It’s also not listed in the status page above, so it may not be due to Atlas infrastructure. Sorry I don’t have more details than that Best regards\nKevin", "username": "kevinadi" }, { "code": "", "text": "so it may not be due to Atlas infrastructure. Sorry I don’t have more details than that The broken site was the main site “mongodb.com”, and Atlas cloud was slow.\nAll right. Thanks all the same.", "username": "E_Y" }, { "code": "", "text": "", "username": "Stennie_X" } ]
MongoDB Website not available yesterday, is it something that often happens?
2022-07-19T02:31:35.908Z
MongoDB Website not available yesterday, is it something that often happens?
1,690
null
[ "dot-net", "atlas-device-sync" ]
[ { "code": "", "text": "Hi. I have a trigger on my atlas database that creates a few documents in a few collections when a new user registers. These collections are to be synced to the new user because they define the layouts of certain screens in a MAUI app.\nI found that the first screen would not have received that data when I do my first realm.all for these collections. This is hardly a surprise because the request for all docs is done as soon as I go to the first screen after registering and logging in, so I assume it is before the database has had time to populate the collections. To get over this, I have a simple loop with a 500ms delay in it that attempts to get the data 20 times. This works quite well but I’m not sure if it is a good strategy.\nI then stumbled across something in the React SDK usage examples that talks about syncing in the background by making use ofconst OpenRealmBehaviorConfiguration = {\ntype: “openImmediately”,\n};which is used in the Realm.open configuration. I then looked for this in the .net SDK but there is no mention of it. I tried to use it in my .Net MAUI app but it does not know about OpenRealmBehaviorConfigurationI have a few questions.\nIs it correct that this is not available in .Net and will it be available soon?\nIf I can’t use that, is there another way in .Net to achieve the same so I don’t need to perform a realm.all\nIf none of that is available, is my loop a good workaround or have I missed the proper way to solve this problem?I also have an unrelated question. Currently I have 1 realm per model (collection). Is that correct or should I have 1 realm for all my models / collections? That is a pretty basic question so I think I need to do some more reading!John.", "username": "John_Atkins" }, { "code": "OpenRealmBehaviourConfiguration await GetInstanceAsync(config)GetInstanceawait realm.SyncSession.WaitForDownloadAsync()", "text": "Hi @John_Atkins, thanks for your questions So… there is no OpenRealmBehaviourConfiguration in the .NET SDK, but you will get the same effect if you use await GetInstanceAsync(config) instead of GetInstance when retrieving the realm.\nAlso, if you already opened the realm you can use await realm.SyncSession.WaitForDownloadAsync() to get the latest values locally. Please note that if you’re doing this too soon it could still be that the server didn’t have time to correctly process the documents you just added with the trigger.Regarding the other question, usually it’s an overkill to have one realm per model. How many realms you need and what models you want to sync in each of them really depends on the structure of your data, of the application and how you want to manage all of it. That said, you can also just have one realm for all your models if that works for you.I hope this helps ", "username": "papafe" }, { "code": "", "text": "Hi Ferdinando. Thank you for a very useful reply. The more I learn, the more I realise I don’t know!I was already using realm = await Realm.GetInstanceAsync(syncConfig); and have addedawait realm.SyncSession.WaitForDownloadAsync();but that returns immediately. Presumably, it s too soon for the trigger and function to have started to create collections. This means I need to stick with my loop or find an alternative. I have 5 small docs in the collection in Atlas, and on one occasion, I had only received 2 docs, so my loop exited before the other 3 had been received. 
I tried dropping my delay from 500ms to 100 ms expecting to receive part of the collection much more often, but weirdly, I keep receiving all 5 now. I expect this is always a possibility.However, I noticed something else, which almost certainly means I need to change my strategy. After receiving the 5 docs and showing them in my UI, I then added a new doc in Atlas and expected to see it be added to the UI automatically. The reason I expected that was because if I change a doc in Atlas, the UI does update to show that in my app. I expect this is because the realm contains records that I update but does not contain docs that I add in Atlas. I did expect sync to cope with that.Can you please confirm that to handle adding docs in Atlas (or on another device), I need to subscribe to changes using a realm change listener and when that event fires, I need to do a realm.All?After your previous reply, I think I understand more about realms (I’ve done some more reading as well). It looks like for various small collections, it would be simpler for me to have 1 realm for them.", "username": "John_Atkins" }, { "code": "", "text": "I tried “subscribe to changes using a realm change listener and when that event fires, I need to do a realm.All”but that event never fires. Possibly because it is for changes to the local DB and not for syncing of Inserted records in Atlas database.Is there a way the app can be informed that records have been inserted in the Atlas database so I can get them with realm.all?", "username": "John_Atkins" }, { "code": "", "text": "Can you show a piece of code where you do “subscribe to changes using a realm change listener and when that event fires, I need to do a realm.All”? I am a little unsure of what you are doing here.\nIf you have your code in a public repository you can also just drop a link and I can take a look if you prefer.The realm listener should fire independently from the origin of the new objects/documents. If the objects have been added locally or in Atlas the listener should fire anyways. It could be that the objects that you’re adding to Atlas don’t follow the schema you’ve defined, and so they do not get synced back to the application or maybe they are not in the set of the results for the subscription queries you’ve defined (I suppose you are using flexible sync, am I correct?).", "username": "papafe" }, { "code": "", "text": "As an additional note, it’ll be useful if you show how you are populating the UI. I’ve seen that you’re using MAUI in another thread, so you should be able to use realm collections with your bindings directly, without the need to use realm listeners directly.", "username": "papafe" }, { "code": "", "text": "I’m using partition sync at present because I had too many problems trying to get flexible to work. I think partition is all I need for this app but I may change to flexible once I’ve learnt more about it.I can’t upload this to a public repository because it is for the company I work for. I may be able to make a simple app that shows the same problem.I don’t want to get into subscribing tp events unless I really need to. I thought I read that is only necessary for background threads. This is simple UI updating that I’m hoping normal sync looks after. If I really need to subscribe to events, then I’ll create a cut-down app that you can look at.The object added in Atlas definitely follows the schema because all I did was clone a document and change a few fields in it. 
When I force a realm.all in the MAUI app, then I do receive the new record. The only query I have defined is a realm.all so it should receive any inserted records at Atlas. The UI definitely updates automatically if I change fields in a doc in Atlas.I am making use of MAUI data binding and that is working perfectly for updates. I’ll tidy up some code and paste some snippets here soon.You seem to have confirmed that insertions at Atlas should automatically be added to the MAUI UI, so that is good to know. When you say“maybe they are not in the set of the results for the queries you’ve defined”perhaps I have not defined the query properly such that Realm knows about it? This is all I have doneReminders = new ObservableCollection(realm.All().OrderBy(r => r.IndexNo).ToList());where Reminders is the collection that is bound to my UI. Is that sufficient for Realm to know to monitor the query? As I said, it definitely works for updates.", "username": "John_Atkins" }, { "code": "Reminders = realm.All<>().OrderBy(....)INotifyCollectionChangedToList()INotifyPropertyChanged", "text": "With “maybe they are not in the set of the results for the queries you’ve defined” I meant the subscription queries, the ones you define if you use flexible sync. I’ve corrected the text in my previous post to make it clearer, but you can disregard it in your case as you’re using partition sync.Regarding the UI not updating… I can see what’s the problem here. You are wrapping the realm collection in an ObservableCollection, but this way the realm collection doesn’t fire all the notifications that the UI binding framework uses to update the UI.\nYou should simply do: Reminders = realm.All<>().OrderBy(....). Realm collections are live and implement INotifyCollectionChanged that is used by MAUI to update the UI accordingly. As a general rule you should always try to use Realm collections “as they are”, without wrapping them in other things, as you could lose some of the advantages of live collections. That’s also the reason why you should also avoid using ToList(), as the collections that you obtain is not live anymore.\nAs an additional note, the reason why updates to single fields still works is that even though your collection is not live anymore, the single objects that are part of the collections are still Realm Objects, and as such they are still live and implement INotifyPropertyChanged and so the UI can change when their content changes.I’ve seen that in another thread my colleague already recommended an article that I wrote about How to Use Realm Effectively in a Xamarin.Forms App. Maybe that’s a little bit long for an article, but if you want to see some simple code you can take a look at the example we have in our repo, QuickJournal. Here you can see how we define Entries and how we just bind to it in the UI here.Sorry for the wall of text but I hope this clarifies some things.", "username": "papafe" }, { "code": "", "text": "Don’t be sorry for the length of text! It sounds like you’ve almost certainly solved the problems so I’ll make the changes. The article you wrote is open in another browser tab, along with about 100 other tabs about MongoDB I have open Thank you for such a good answer. I’ll make the changes and read your article.", "username": "John_Atkins" }, { "code": "", "text": "Perfect answer, and I’ve learnt a lot. 
I’ll definitely read your article and apologise for not reading it before.\nThank you\nJohn.", "username": "John_Atkins" }, { "code": "", "text": "This also explains and fixes the other problems I was having. I no longer need to loop around waiting for the first sync of the data being created in Atlas due to a trigger.Just by defining the query, my UI updates whenever the data arrives, which is excellent and, to be honest, the way I hoped it would work.", "username": "John_Atkins" }, { "code": "", "text": "I’m glad to hear that! And yes that is the way it should work in the majority of cases, with the realm collections/objects updating the UI “automatically”, without the need to define additional data flows in the application.", "username": "papafe" }, { "code": "", "text": "I just finished reading your very good article on Xamarin Forms and Realm. Do you know when part 2 will be available? It would be great if it used Flexible sync.", "username": "John_Atkins" }, { "code": "", "text": "I’m glad you liked the article Unfortunately I have no timeline on when the new article will be released, but for sure it will use flexible sync.I understand a longer article can probably explain better how to work with flexible sync, but feel free to open new posts on the forum here if you need additional help.", "username": "papafe" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Strategy for sync after a new user registers
2022-08-22T07:33:31.663Z
Strategy for sync after a new user registers
3,006
null
[]
[ { "code": "", "text": "Hi All,My requirement is to load dummy data in new MongoDB Atlas Cluster and I am getting error when I try to run the script mentioned in the below link. Could you please suggest on how to overcome this and seed the DB.How to Seed a MongoDB Database with Fake Data | MongoDBError:\n… Uncaught:\nError: Cannot find module ‘faker’\nRequire stack:", "username": "Hendry_Ding" }, { "code": "", "text": "Have you installed the required module faker?The blog does not seem to mentioned that you have to install it. But like any .js module it must be installed.You could also take a look at mgeneratejs - npm", "username": "steevej" }, { "code": "npm install [email protected]", "text": "Hello @Hendry_Ding and welcome to the MongoDB community forums.I’m not a Node.js person, but that error usually means that you haven’t installed the Node module. Did you run npm install [email protected] yet? Note that this Node package was purposely broken by its creator a while back so you don’t want the latest version. Once you install the module you should have better luck in running things,", "username": "Doug_Duncan" }, { "code": "", "text": "Thank You Doug & Steeve.\nI am running the script from mongosh (V1.5.4) from my windows laptop. So how do i do “npm install”.Also is there any other ways to perform the dummy data loading. Say i require a data of 5 to 10GB.", "username": "Hendry_Ding" }, { "code": "npm installmongoshnode <scriptname>", "text": "am running the script from mongosh (V1.5.4) from my windows laptop. So how do i do “npm install”.npm install is ran from your operating system prompt and not from inside mongosh. Once you install the package then you would run node <scriptname> from the command line, You need to make sure that the script has a valid connection string for your MongoDB instance in it for it to run.Also is there any other ways to perform the dummy data loading. Say i require a data of 5 to 10GB.Steeve mentions mjeneratejs in his post which is anohter NodeJS package that generates JSON data that could be imported into MongoDB.", "username": "Doug_Duncan" }, { "code": "", "text": "Thank You Doug.Was able to overcome all the hiccups and successfully run the below script. Thank you once again for the support.[How to Seed a MongoDB Database with Fake Data | MongoDB]", "username": "Hendry_Ding" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
To Seed a MongoDB Database with Dummy Data
2022-08-22T13:29:25.001Z
To Seed a MongoDB Database with Dummy Data
2,589
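A minimal sketch of the seeding approach discussed in the thread above, assuming the pinned [email protected] package and a local connection string; the database, collection, and field names are illustrative, not from the original thread:

```js
// seed.js — run with `node seed.js` after `npm install mongodb [email protected]`
const { MongoClient } = require("mongodb");
const faker = require("faker"); // pinned 5.5.3, per the thread above

async function seed() {
  const client = new MongoClient("mongodb://localhost:27017"); // placeholder URI
  try {
    // Build a batch of fake people; insertMany is far faster than one-by-one inserts
    const docs = Array.from({ length: 1000 }, () => ({
      name: faker.name.findName(),
      email: faker.internet.email(),
      age: 18 + Math.floor(Math.random() * 60),
    }));
    const { insertedCount } = await client.db("test").collection("people").insertMany(docs);
    console.log(`Inserted ${insertedCount} documents`);
  } finally {
    await client.close();
  }
}

seed().catch(console.error);
```

For multi-gigabyte volumes, looping this batch insert (or piping mgeneratejs output into mongoimport, as suggested above) scales better than generating everything in memory at once.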
null
[]
[ { "code": "", "text": "We’re running v4.2.2 where the option to set/unset the padding factor is removed. What is the default padding? How do we manage that?For background info, we have a collection that is relatively big (100G), and we suspect the padding is causing it to balloon quickly.", "username": "Fawn" }, { "code": "Padding", "text": "Hello @Fawn ,Welcome to MongoDB Community! Are you referring to Padding as mentioned in this doc?\nIf yes, then this is specific to MMAPv1 (which was removed in MongoDB 4.2). Modern versions of MongoDB uses WiredTiger as the default storage engine . WiredTiger is different from MMAPv1 and does not use padding.Apart from the many benefits of a modern storage engine, WiredTiger enables the use of compression. If you are concerned about disk size. By default, it is compressed using snappy. Additionally, you can use alternate compression algorithms such aswe suspect the padding is causing it to balloon quickly.Can you elaborate on what you mean by “balloon quickly”? Is there any metric that you can share?Lastly, MongoDB v4.2 will be out of support by April 2023 so it is recommended to upgrade to the latest that is MongoDB v6.0.1, please check this documentation for reference. MongoDB v4.4.22 is latest in 4.2 series and 4.2.2 was released in December 2019, so upgrade to v4.2.22 is at least recommended.Let me know if you have more questions. Happy to Help! Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to deal with padding factor in documents since the option has been removed?
2022-08-17T22:33:54.585Z
How to deal with padding factor in documents since the option has been removed?
1,222
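A minimal mongosh sketch of the per-collection compression option mentioned above; the collection name is hypothetical. WiredTiger compresses collections with snappy by default, while zlib and zstd trade more CPU for a smaller on-disk footprint:

```js
// Create a collection whose blocks are compressed with zstd instead of snappy
db.createCollection("events", {
  storageEngine: {
    wiredTiger: { configString: "block_compressor=zstd" }
  }
});

// Compare the compressed on-disk size with the uncompressed data size
const s = db.events.stats();
printjson({ dataSize: s.size, storageSize: s.storageSize });
```

Note this only affects newly written data in that collection; existing collections keep their compressor until rebuilt (e.g., via an initial sync or dump/restore).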
null
[ "queries", "cxx" ]
[ { "code": "", "text": "Lets say i have a network of mongoDB servers. I want to write a record with\nname: XFaon and age: 14how does mongodb choose which srver its stored on. also if i have a group of mongodb servers, which one of them exactly am i supposed to connect to from c++. I beleive mongodb is decenteralized so there shouldnt be a centeral access point.One more thing, when I search for a record, like all records that have age 14, and age is a primary key, how dos mongodb say, hmm this db might have this record.I dont think every single db is asked to search its tables for this record because that would practically be dddosing ur own infastructure.Thankyou!", "username": "Rayyan_Khan" }, { "code": "mongodmongosmongos", "text": "Hi @Rayyan_Khan,In a standalone deployment (generally for development) or a replica set deployment (minimum recommended for production environments) each mongod process stores the same data.If your use case merits a larger or more distributed deployment, sharded clusters allow you to define a per-collection shard key index to distribute data. Sharded clusters have additional infrastructure components including config servers which keep track of the sharded cluster metadata and mongos processes which route queries to the correct member(s) of the sharded cluster based on the query and collection’s sharding metadata.A database in a sharded cluster can have both unsharded and sharded collections. Unsharded collections live on primary shard for each database; data in sharded collections is partitioned across available shards based on a chosen shard key for the collection. For more information, please review mongos Routing and Results Process in the MongoDB documentation.From an application/driver point of view, you generally do not have to keep track of the cluster topology. The driver API includes discovery of changes in configuration and availability for replica sets and sharded clusters. However, you can specify read preferences if you want to route queries to specific members of a replica set and set up up zones for sharded clusters for more control over read and write distribution of sharded data.If you would like a more detailed introduction to MongoDB deployments and configuration, there are free online courses for DBAs at MongoDB University including M103: Basic Cluster Administration.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How does MongoDB make decisions for records
2022-08-23T02:44:07.111Z
How does MongoDB make decisions for records
1,841
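A mongosh sketch of the placement and routing behaviour described above, using the thread's name/age example; the database name and hashed key choice are illustrative assumptions. With a shard key defined, mongos (not the client) picks the shard that owns the key's range, so the application only ever connects to a mongos:

```js
// Distribute the "people" collection across shards by a hash of name
sh.enableSharding("mydb");
sh.shardCollection("mydb.people", { name: "hashed" });

// mongos routes this write to the one shard owning hash("XFaon")
db.people.insertOne({ name: "XFaon", age: 14 });

// A query on the shard key is targeted to a single shard...
db.people.find({ name: "XFaon" });
// ...while a query on a non-shard-key field is broadcast to all shards
// (scatter-gather) — exactly the "ask every server" case the question worries about
db.people.find({ age: 14 });
```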
null
[ "connecting" ]
[ { "code": "", "text": "I’m getting the same ‘ETIMEOUT’ error as well. It was working at one time but I haven’t been able to connect for weeks now and cannot find a solution.", "username": "Joseph_Brown" }, { "code": "ping/// example\nping cluster0-shard-00-00.ze4xc.mongodb.net\ntelnet27017/// example\ntelnet cluster0-shard-00-00.ze4cx.mongodb.net 27017\n", "text": "Hi @Joseph_Brown - Welcome to the community.Has this been resolved yet?If not, firstly ensure that the client’s IP is on the Network Access List.It was working at one timeIt could be that the ISP has changed the outgoing IP address for your network although this is just one thing that may have stopped it working. You can also try adding 0.0.0.0/0 temporarily for troubleshooting purposes and see if you can connect after.If you believe this is already done, please try performing the initial basic network connectivity tests and provide the output for the cluster you are having trouble connecting to:Note: You can find the hostname in the metrics page of your clusterAdditionally, I would recommend to review the Troubleshoot Connection Issues documentation and verify some configurations, such as adding the client’s IP (or IP ranges) to the Network Access List. You may also find the following blog post regarding tips for atlas connectivity useful.Regards,\nJason Tran", "username": "Jason_Tran" }, { "code": "", "text": "", "username": "Jason_Tran" } ]
‘ETIMEOUT’ error connecting to Atlas
2022-08-14T20:06:51.761Z
‘ETIMEOUT’ error connecting to Atlas
2,478
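Beyond ping/telnet, a quick way to probe connectivity from the application side is a driver-level ping — a minimal Node.js sketch with a placeholder URI. A short serverSelectionTimeoutMS makes failures surface fast instead of hanging for the default 30 seconds:

```js
const { MongoClient } = require("mongodb");

async function probe() {
  const client = new MongoClient(
    "mongodb+srv://user:[email protected]/", // placeholder
    { serverSelectionTimeoutMS: 5000 }
  );
  try {
    // Operations auto-connect in driver 4.x; ping round-trips to the server
    await client.db("admin").command({ ping: 1 });
    console.log("Connected OK");
  } catch (err) {
    // An ETIMEDOUT here usually points at the Network Access List or a firewall
    console.error("Connection failed:", err.message);
  } finally {
    await client.close();
  }
}

probe();
```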
null
[ "backup" ]
[ { "code": "", "text": "we have mongo 5.0.5 CE installed on linux redhat 7. I created a backup with dump command.now our rpm db is corrupted and I received a new server redhat 8. I installed latest version 6.0 of mongo. Is the restore supporting a dump from lower version ? I get all msg … skipping file…or do we have to install first the same version 5.0.5 - restore - upgrade mongo to 6.0thanks for all help, best regards, Guy", "username": "Guy_Przytula" }, { "code": "mongorestoremongorestoremongodump4.4.x4.4.xmongorestoremongodumpmongodump100.5.4mongorestore100.5.4", "text": "Hi @Guy_Przytula - Welcome to the community.As per the mongorestore documentation:When using mongorestore to load data files created by mongodump , be sure that you are restoring to the same major version of the MongoDB Server that the files were created from. For example, if your dump was created from a MongoDB Server running version 4.4.x , be sure that the MongoDB Server you are restoring to is also running version 4.4.x .In addition, ensure that you are using the same version of mongorestore to load the data files as the version of mongodump that you used to create them. For example, if you used mongodump version 100.5.4 to create the dump, use mongorestore version 100.5.4 to restore it.In short specific to this scenario you’ve described; restore to the same MongoDB version and then upgrade to 6.0. See the documentation for more information regarding the Upgrade of a standalone to 6.0 (You can find the associated Replica Set and Sharded Cluster upgrade procedures and notes on the left hand side of the linked documentation).Hope this helps.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Use backup restore and upgrade at same time
2022-08-08T15:16:48.130Z
Use backup restore and upgrade at same time
1,618
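A mongosh sketch of the version-staging step that the upgrade docs linked above walk through: after restoring into a matching 5.0.x server and swapping in 6.0 binaries, the feature compatibility version is raised explicitly. This is only an outline of the documented procedure, not a replacement for it:

```js
// On the restored 5.0.x server, confirm the current FCV before touching binaries
db.adminCommand({ getParameter: 1, featureCompatibilityVersion: 1 }); // expect "5.0"

// ...stop mongod, replace the binaries with 6.0, restart, verify the deployment...

// Then opt in to 6.0 features; until this runs, the server stays backward compatible
db.adminCommand({ setFeatureCompatibilityVersion: "6.0" });
```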
null
[ "php" ]
[ { "code": "", "text": "Hello. I am running into some significant challenges during the extension’s installation process.I am following alongside the article series (Getting Set Up to Run PHP with MongoDB | MongoDB) for PHP drier setup. I was able to install all prerequisite packages (PHP, Apache) through Homebrew and was able to successfully install the Mongodb Driver extension through pecl. I also added the “extension=mongodb.so” line to both php.ini files.However, when I subsequently run the command “php -i | grep mongodb”, I receive an output of:\nmongodb\nmongodb.debug => no value => no valueI have read through both Mongodb’s documentation and PHP’s documentation but have not yet been able to locate a solution.I am operating on macOS Monterey (12.2.1). When I tried to uninstall the Mongodb driver with pecl to retry the process, I received a “permission denied” error and the uninstall failed. Not sure if this is related to the issue at hand.", "username": "michael_demiceli" }, { "code": "sudo", "text": "Well, you may have to go sudo to delete the driver.\nAlso, did you reboot after you modified php.ini? Not a bad idea.", "username": "Jack_Woehr" }, { "code": "", "text": "Thanks Jack.For others’ benefit, the issue was caused by missing prerequisite packages libbson and libmongoc, which I had to install separately and subsequently rerun the process.", "username": "michael_demiceli" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
PHP driver extension installation problems
2022-08-22T16:52:35.987Z
PHP driver extension installation problems
2,791
null
[]
[ { "code": "\"_id\": \"62be0271d373b2f2fc1826a2\",\n \"condition\": \"new\",\n \"variants\": [\n {\n \"color_name\": \"Green\",\n \"items\": [\n {\n \"size\": \"S\",\n \"price\": 100,\n \"quantity\": 1,\n \"_id\": \"62be0271d373b2f2fc1826a4\"\n },\n {\n \"size\": \"M\",\n \"price\": 100,\n \"quantity\": 2,\n \"_id\": \"62be0271d373b2f2fc1826a5\"\n }\n ],\n \"_id\": \"62be0271d373b2f2fc1826a3\"\n },\n {\n \"color_name\": \"Blue\",\n \"items\": [\n {\n \"size\": \"S\",\n \"price\": 100,\n \"quantity\": 1,\n \"_id\": \"62be0271d373b2f2fc1826a7\"\n },\n {\n \"size\": \"S\",\n \"price\": 100,\n \"quantity\": 1,\n \"_id\": \"62be0271d373b2f2fc1826a8\"\n }\n ],\n \"_id\": \"62be0271d373b2f2fc1826a6\"\n }\n ],\n \"featured\": true\n \"_id\": \"62be0271d373b2f2fc1826a2\",\n \"condition\": \"new\",\n \"variants\": [\n {\n \"color_name\": \"Green\",\n \"items\": [\n {\n \"size\": \"S\",\n \"price\": 100,\n \"quantity\": 1,\n \"_id\": \"62be0271d373b2f2fc1826a4\"\n },\n {\n \"size\": \"M\",\n \"price\": 100,\n \"quantity\": 2,\n \"_id\": \"62be0271d373b2f2fc1826a5\"\n }\n ],\n \"_id\": \"62be0271d373b2f2fc1826a3\"\n },\n {\n \"color_name\": \"Blue\",\n \"items\": [\n {\n \"size\": \"S\",\n \"price\": 100,\n \"quantity\": 1,\n \"_id\": \"62be0271d373b2f2fc1826a7\"\n },\n {\n \"size\": \"S\",\n \"price\": 100,\n \"quantity\": 1,\n \"_id\": \"62be0271d373b2f2fc1826a8\"\n }\n ],\n \"_id\": \"62be0271d373b2f2fc1826a6\"\n }\n ],\n \"featured\": true\n", "text": "I have this in a collection:I have this document in my mongodb collection:I want to update only the quantity of the items field with _id = “62be0271d373b2f2fc1826a8”NOTE: there may be more variants", "username": "Philip_Enaohwo" }, { "code": "db.collection.updateOne(\n {\"variants.items._id\" : \"62be0271d373b2f2fc1826a8\"},\n {$set: {\n 'variants.$[].items.$[xxx].quantity': 999\n }},\n {arrayFilters: [\n {\"xxx._id\": '62be0271d373b2f2fc1826a8'}\n ]}\n)\n$[<identifier>]arrayFiltersarrayFiltersarrayFilters", "text": "Helllo @Philip_Enaohwo,Welcome to the community! I notice you haven’t had a response to this topic yet - were you able to find a solution?\nIf not, then you can try below.Your particular use case includes Array within an Array, below is the explanation of the query.Let me know if you have any more questions. Happy to help! Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Updating nested array of objects
2022-07-08T17:00:15.392Z
Updating nested array of objects
16,995
null
[ "performance" ]
[ { "code": "", "text": "I wonder if it’s better to use monotonically increasing _id because of problems like “page split” in RDB.", "username": "KD_K" }, { "code": "", "text": "Doesn’t anyone know?", "username": "KD_K" }, { "code": "\"_id\"\"_id\"", "text": "Hello @KD_K ,Welcome to MongoDB community! \"_id\" has no connection to how documents are stored physically on the disk. It is the primary key of a collection, (see _id), and as far as I am aware, it does not influence how the WiredTiger Storage Engine stores documents.In fact, monotonically increasing \"_id\" can be detrimental in some applications, like Sharding (if _id is used as shard key). For reference, please check this documentation on Monotonically Changing Shard Keys.However, are you facing any issue in your deployment that you feel may or may not be caused by page splits?Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Is there a performance penalty for _id that does not increase monotonically?
2022-07-22T08:10:21.308Z
Is there a performance penalty for _id that does not increase monotonically?
2,251
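Two mongosh sketches illustrating the points above; the namespace is hypothetical. Default ObjectIds are already roughly monotonic (they begin with a 4-byte timestamp), which is harmless for WiredTiger storage but matters if _id becomes a shard key — a hashed shard key sidesteps the hot "max" chunk:

```js
// ObjectIds are time-prefixed, so insertion order roughly follows _id order
const id = new ObjectId();
id.getTimestamp(); // ISODate of creation, recovered from the first 4 bytes

// When sharding on a monotonically increasing _id, hash it so inserts
// spread across chunks instead of all landing on the highest-range shard
sh.shardCollection("app.events", { _id: "hashed" });
```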
null
[]
[ { "code": "", "text": "Hi Team,I am trying to update embedded doc only when matching condition is true but its inserting even condition not seems true. May be I am missing something here.Sample mongo playgroundMongo playground: a simple sandbox to test and share MongoDB queries onlineI dont want to insert role if matching role is already there.", "username": "Shyam_Sohane" }, { "code": "", "text": "It is not clear if this issue is the same as", "username": "steevej" }, { "code": "", "text": "That I have done with aggregation pipeline. But here I was trying with simple update with elemmatch filter for another scenario and trying to understand why it’s not working in this case.", "username": "Shyam_Sohane" }, { "code": "mongosh> c.find( { _id : 1 })\n{ _id: 1, roles: null }\nmongosh> c.updateOne( { _id : 1 } , { \"$push\" : { \"roles\" : [ { _id : 101 , name : 201 }]}})\nMongoServerError: The field 'roles' must be an array but is of type null in document {_id: 1}\nroles:nullroles:[ ]{ \"$nor\": [\n {\n \"roles\": {\n \"$ne\": null,\n \"$not\": { \"$size\": 0 }\n }\n }\n ]\n }\n", "text": "In case you still need some insight about this playground.1 - if roles is null, $push will failStarting with 4.2 you could handle this case using a $set stage with $cond to replace roles:null with roles:[ ].2 - I do not quite understand the purpose of", "username": "steevej" } ]
Trying to update embedded doc only when matching condition is true but its inserting even condition not seems true
2022-08-19T01:17:36.178Z
Trying to update embedded doc only when matching condition is true but its inserting even condition not seems true
1,618
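A mongosh sketch of the 4.2+ pipeline-update approach Steeve mentions above, with illustrative values from the playground: $ifNull coerces a missing or null roles field to an empty array before appending, so the update no longer fails with "The field 'roles' must be an array but is of type null":

```js
db.c.updateOne(
  { _id: 1 },
  [
    {
      $set: {
        roles: {
          $concatArrays: [
            { $ifNull: ["$roles", []] },   // null/missing roles becomes []
            [{ _id: 101, name: 201 }]      // the element to append
          ]
        }
      }
    }
  ]
);
```

Because this is an aggregation-pipeline update, $push is not available; $concatArrays plays the same role of appending to the array.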
null
[ "aggregation", "node-js", "atlas-search" ]
[ { "code": "rangenear[\n {\n $search: {\n compound: {\n must: [\n {\n text: {\n query: ['shopping'],\n path: 'tags',\n },\n },\n {\n near: {\n path: 'savedAt',\n origin: new Date(2022, 0, 1),\n pivot: 2629800000,\n },\n },\n ],\n },\n },\n },\n];\nshoppingrangenew Date(2022, 0, 1)", "text": "Hello all, I’m trying out different Atlas search options using the aggregate api. The text search has been working great and now i’ve started querying my date fields. The range operator seems to function as I expect but the near one doesn’t seem to make a difference in my results.I’m using the Node driver, and my collection documents are stored as the Date type.An aggregate pipeline I’m trying:My understanding is that this query will look for documents with “shopping” in their tag field, and that have a savedAt date within ~1 month of January 1st, 2022. I have 19 documents that have shopping in their “tags” field. Only one of them has a saved at date in 2022 (Jan 5th). My aggregate result however returns all 19 documents.What am I missing? I would only expect 1 result since a month before and after Jan 1 2022 should only result in the 1 document present? Using the range operator and defining a “greater than” date of new Date(2022, 0, 1) returns just that one document.", "username": "zacharykane" }, { "code": "nearpivotpivotnear1/20.5pivotoriginrangenew Date(2022, 0, 1)rangenearnearrangenearrangeDB> var f =\n[\n {\n '$search': {\n compound: {\n must: [\n { text: { query: [ 'shopping' ], path: 'tags' } },\n {\n range: {\n path: 'savedAt',\n gte: ISODate(\"2021-12-01T00:00:00.000Z\"),\n lte: ISODate(\"2022-02-01T00:00:00.000Z\")\n }\n },\n {\n near: {\n path: 'savedAt',\n origin: ISODate(\"2022-01-01T00:00:00.000Z\"),\n pivot: 2592000000\n }\n }\n ]\n }\n }\n },\n { '$project': { savedAt: 1, score: { '$meta': 'searchScore' } } }\n]\nDB> db.collection.aggregate(f)\n[\n {\n _id: ObjectId(\"63030ce08ed303b32008b26f\"),\n savedAt: ISODate(\"2022-01-05T00:00:00.000Z\"),\n score: 1.9016982316970825\n },\n {\n _id: ObjectId(\"63030f768ed303b32008b278\"),\n savedAt: ISODate(\"2021-12-01T00:00:00.000Z\"),\n score: 1.5111485719680786\n },\n {\n _id: ObjectId(\"63030f768ed303b32008b275\"),\n savedAt: ISODate(\"2022-02-01T00:00:00.000Z\"),\n score: 1.5111485719680786\n }\n]\nnearscore", "text": "Hi @zacharykane,My aggregate result however returns all 19 documents.My understanding is that the near operator utilises the pivot to calculate scores but will still return documents outside of the pivot “range” as you have stated. The below is also from the pivot section of the near documentation:Results have a score equal to 1/2 (or 0.5 ) when their indexed field value is pivot units away from origin.What am I missing? I would only expect 1 result since a month before and after Jan 1 2022 should only result in the 1 document present? Using the range operator and defining a “greater than” date of new Date(2022, 0, 1) returns just that one document.It sounds like range works for you but please correct me if i’m wrong here. I’m curious to understand if there is a need for near to filter out for ±1 month from a specified date? 
Or, is it for scoring / sorting purposes (nearest to furthest)?If you could explain the context further here for use of near or possibly if range doesn’t suit all requirements, that would help greatly.In saying so, please see the following example for both near and range (~±1 month from 2022-01-01) usage:Output:Without use of near the score of the above documents in the output were all the same in my test environment.Hope this helps in some manner.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Aggregate search near a date
2022-06-18T23:45:22.472Z
Aggregate search near a date
2,187
null
[ "app-services-user-auth" ]
[ { "code": "21:27:19:58\n2021-05-21T18:27:20.131Z\tc792ac33-3292-44c3-a1e7-b0dd21aaca00\tERROR\t{\n message: \"cannot find app using Client App ID 'timeless-is-***'\",\n code: -1\n}\n const realmApp = new Realm.App({ 'id': 'timeless-is-***' })\n const user = await realmApp.logIn(Realm.Credentials.anonymous())\n const db = user.mongoClient('mongodb-atlas').db('timeless-is')\[email protected]", "text": "This happens to me right now for some reason.Some clients report that they got this error:But, other clients can access our app with no problems.Here’s the code that we use:Versions:No idea how can I fix this ", "username": "111522" }, { "code": "", "text": "Hello Nikita,Welcome to the MongoDB Realm forums! I would love to help you with this, I just have a few clarifying questions.Thank you,Brock.Also note:\nMoya kirillitsa ne samaya luchshayaTem ne meneye, ya ne protiv pomoch’ vam lyubym sposobom, kotoryy vam budet proshche vsego.", "username": "Brock_GL" }, { "code": "", "text": "Hello Nikita,Other question for follow up, are you able to gain access to realm console via http://cloud.mongodb.com/ ?Thank you,Brock", "username": "Brock_GL" }, { "code": "cannot find app using Client App IDhttps://realm.mongodb.com/api/client/v2.0/app/<my-app-id>/auth/providers/local-userpass/login", "text": "I’m having the same issue where it says\ncannot find app using Client App IDi created another app to check and replace that id still same issue.I also used url authentication https://realm.mongodb.com/api/client/v2.0/app/<my-app-id>/auth/providers/local-userpass/login.\nbut still the same result.", "username": "Safik_Momin" }, { "code": "", "text": "Hey guys, I’m facing the same issue, and haven’t found any solution on the internet. @Safik_Momin @111522 Did any of you found a solution?", "username": "James_Hasan" }, { "code": "", "text": "I have same problem too. Any one have a solution?", "username": "Muhammed_Ali_Cinar" }, { "code": "", "text": "It looks like this is not fixed yet, right? I just created an App and enabled the Data API … but, am getting the same exact error when trying a sample POST from postman{\n“error”: “cannot find app using Client App ID ‘the-app-id-here’”\n}Can anyone from Mongo reply please? Thank you.", "username": "Dima" }, { "code": "", "text": "So … disabling drafts under Deployment->Configuration solves the problem … kind of. Certain clients can connect while others continue to get the error …", "username": "Dima" }, { "code": "", "text": "Also, possibly having App Settings → Deployment Region global works better than local (though, I am not sure that you can make this change after the app is created). Glad this is all “GA” …", "username": "Dima" }, { "code": "", "text": "Hello Mongo,It looks like there are a ton of views on this issue and it looks like it has been out there for a while. Is there anyone looking into this issue? Is there a bug number that we could follow to track any progress?Also, it would be nice to understand how Mongo reviews these communities and why something like this might sit for this long, without any comments. Am I just missing them?Thanks,\nSteve G", "username": "Steve_Gaylord" }, { "code": "", "text": "Same here. But if you continually try to log in, it works. This means you can try to keep login at least twice, interesting,", "username": "Haikun_Huang" }, { "code": "", "text": "G’day Folks @Dima, @Steve_Gaylord , @Haikun_Huang, Welcome to MongoDB Community I appreciate you raising your concerns and apologies for the delay. 
The replies to an old topic are sometimes not visible in the queue.There is not a single reason for this error to happen. Deployment Region, internal SDK URI mapping, etc could be some possible reasons. Unfortunately, on June 02, 2022, during our regular release cycle, a bug got introduced that caused locally deployed apps not to be found. The bug was isolated to a limited number of apps in a subset of regions. This has been fixed since.@Dima, could you please let me know if this is happening in your prod or dev environment and what is the app name it is happening for?@Haikun_Huang could you please share the app name where this is happening? Your app logs are not showing the error. Could you confirm if this is fixed for you?Best,\nHenna", "username": "henna.s" }, { "code": "", "text": "Hello,First of all, thank you @henna.s for replying and shedding some light both on this issue and your process for responding to these threads.I hope that you all change the way you monitor the queue because old issues, especially popular ones, are sometimes the most troubling.This was happening in dev, but we resolved the issue the way I described above by changing the deployment region to global.Was there visibility into this bug? Is there a good place to search for open bugs?Thank you!", "username": "Dima" }, { "code": "", "text": "Hello Dima,Thank you for your reply and appreciate your feedback. It’s always helpful to know areas where we can improve.This was happening in dev, but we resolved the issue the way I described above by changing the deployment region to global.Could you please share the time period when this was happening? What SDK are you using? What local region was your app deployed and what is the cluster location? There may not have been a need to change the deployment model but glad to know this was resolved for you.The SDK issues are available to search on Github based on the language stack you are using but the cloud issues are internal unless there is an outage that gets highlighted on the respective status pages.Kind Regards,\nHenna", "username": "henna.s" }, { "code": "", "text": "This was at the time of the reply to the original post, around June 16th, 2022.No SDK; just postman POST to the Data API endpoint.The app region initially was the one it defaults to … I believe in our case that is AWS us-east-1 (same as the cluster location)As far as not needing to change the deployment model, I disagree. It simply did not work (as per the post) when the deployment region was local instead of global.It would be great to see cloud issues somewhere; this way, if anyone runs into a problem, you can at least see if it is a known issue.Thank you.", "username": "Dima" }, { "code": "", "text": "Hello Dima,Thank you for your patience and apologies for the delay in getting back to you.As far as not needing to change the deployment model, I disagree. It simply did not work (as per the post) when the deployment region was local instead of global.This definitely should be a one-off, perhaps a residual effect of the bug that unfortunately was introduced. 
Could you try again with a test application with local deployment?It would be great to see cloud issues somewhere; this way, if anyone runs into a problem, you can at least see if it is a known issue.Your feedback is definitely noted, but since this was an internal issue and was fixed rather immediately, this was not highlighted.Please let us know if you would have any more questions or if it is fine to close this topic.Happy Coding ", "username": "henna.s" }, { "code": "", "text": "This does work now. Thanks.", "username": "Dima" }, { "code": "", "text": "", "username": "henna.s" } ]
Some clients reporting "cannot find app using Client App ID"
2021-05-21T18:53:02.050Z
Some clients reporting &ldquo;cannot find app using Client App ID&rdquo;
11,651
https://www.mongodb.com/…b_2_1023x201.png
[ "android", "kotlin" ]
[ { "code": "implementation \"org.jetbrains.kotlinx:kotlinx-coroutines-core:1.6.0-native-mt\"\nimplementation \"io.realm.kotlin:library-sync:1.0.2\"\nid 'io.realm.kotlin'id 'io.realm.kotlin' version '1.0.2' apply false", "text": "I’m trying to use Mongo DB Realm in my Android app. I’ve added those two dependencies:Along with the plugin:id 'io.realm.kotlin'andid 'io.realm.kotlin' version '1.0.2' apply falseBut I’m getting this strange error, and not sure why exactly…Caused by: org.gradle.api.GradleException: Cannot find a version of ‘org.jetbrains.kotlinx:kotlinx-coroutines-core’ that satisfies the version constraints. It’s just a fresh Android Studio project, and I’m trying to setup the Mongo DB realm.\nScreenshot_11919×377 46.9 KB\n", "username": "111757" }, { "code": "", "text": "Can someone help me out with this please?", "username": "111757" }, { "code": "build.gradleimplementation (\"org.jetbrains.kotlinx:kotlinx-coroutines-core:1.6.0-native-mt\") {\n version {\n strictly(\"1.6.0-native-mt\")\n }\n }\n", "text": "Hi @111757 ,This is typically due to a conflict between library versions: try to modify the build.gradle with something like", "username": "Paolo_Manna" }, { "code": "implementation (\"org.jetbrains.kotlinx:kotlinx-coroutines-core:1.6.0-native-mt\") {\n version {\n strictly(\"1.6.0-native-mt\")\n }\n }\n", "text": "Wow that actually worked! Thanks!", "username": "111757" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Mongo DB Realm + Kotlin Coroutines - Cannot find a version of 'org.jetbrains.kotlinx:kotlinx-coroutines-core' that satisfies the version constraints
2022-08-21T11:03:31.978Z
Mongo DB Realm + Kotlin Coroutines - Cannot find a version of &lsquo;org.jetbrains.kotlinx:kotlinx-coroutines-core&rsquo; that satisfies the version constraints
3,296
null
[ "atlas-device-sync" ]
[ { "code": " {\n \"title\": \"Log\",\n \"bsonType\": \"object\",\n \"properties\": {\n \"_id\": {\n \"bsonType\": \"objectId\"\n },\n \"__partition\": {\n \"bsonType\": \"string\"\n },\n \"type\": {\n \"bsonType\": \"int\"\n },\n \"result\": {\n \"bsonType\": \"bool\"\n },\n \"message\": {\n \"bsonType\": \"string\"\n },\n \"authorId\": {\n \"bsonType\": \"string\"\n },\n \"createdOn\": {\n \"bsonType\": \"date\"\n }\n },\n \"required\": [\n \"_id\",\n \"result\",\n \"type\",\n \"message\",\n \"authorId\",\n \"createdOn\"\n ]\n }\n", "text": "Hi there,I’m trying to build an app using Realm Sync feature but I face an issue that i don’t understand.\nIn Realm schema, I define an class with some fields marked as required.\nOn the app side, an object is instantiated.\nWhat i noticed :\nif a mandatory int field is set to 0, a string field is set to “” or a boolean is set to false, at creation time, the matching field are not created in the atlas Document.no big trouble until then, BUT , if you stop the Sync ( change in the schema or in sync config ), then it is not possible to turn it on again cleanly as object won’t get re synchronized.Log shows message complaining about missing required field ( which is true as they’re missing in atlas documents) .here is my class schema definition:an instance of Log object is created with “type” field set to 0 and message set to an empty string.\nHere is what i get in the atlas collection{\"_id\":{\"$oid\":“604b7c401fcf488cfeec9f51”},\"__partition\":“xxx”,“authorId”:“id”,“createdOn”:{\"$date\":“2021-03-12T14:35:44.105Z”},“result”:true}A small precision: if these fields are set to a different value then set back to the wanted one (0, false or “” ), then the field remain present in the atlas document.This look like a bug.Am I missing something in my schema declaration or in Atlas configuration?Thanks in advance.", "username": "bruno_levx" }, { "code": "", "text": "Hi @bruno_levx welcome to the community!Which SDK are you using?Could you share the Object definitions from your app?Thanks.", "username": "Andrew_Morgan" }, { "code": "public class Log: PartitionObject {\n \n @objc public dynamic var type:Int = 0\n @objc public dynamic var result:Int = 0\n @objc public dynamic var objectClass:String = \"\"\n @objc public dynamic var objectId:String = \"\"\n @objc public dynamic var identity:String?\n @objc public dynamic var message:String?\n @objc public dynamic var createdOn:Date = Date()\n \n public override class func primaryKey() -> String {\n return \"_id\"\n }\n \n public override var description: String{\n return \"log\"\n }\n}\npublic protocol KItemStoring {\n var _id:ObjectId {get set}\n var __partition:String? {get set}\n}\n\npublic class PartitionObject:Object, ObjectKeyIdentifiable, KItemStoring {\n @objc public dynamic var _id:ObjectId = ObjectId.generate()\n @objc public dynamic var __partition:String?\n}\n", "text": "Hi Andrew,I don’t think there is anything related to the SDK We’re using => swiftUI SDK 10.7.2.\nThis seems to be purely an issue between realm sync and Atlas Db on the MongodB servers.here is the object definition:and the interfaces :", "username": "bruno_levx" }, { "code": "", "text": "@bruno_levx thanks for the extra details.Can you confirm that the Object in the iOS has the correct data (even after sync?)One way to easily check is using Realm Studio - as described here.", "username": "Andrew_Morgan" }, { "code": "", "text": "Hi andrew,We already did that check when we noticed the issue. 
Yes, the value is correctly inserted in the object.\nJust the atlas DB doesn’t reflect it.regards,", "username": "bruno_levx" }, { "code": "", "text": "Thanks - that narrows down who I can ask to look deeper into this. I’ll reach out to the translator team", "username": "Andrew_Morgan" }, { "code": "\"\"false", "text": "@bruno_levx Could you please provide an example of how you’re creating one of these Objects where \"\"/false are being mapped to a missing attribute?", "username": "Andrew_Morgan" }, { "code": "public class License: EmbeddedObject {\n\n @objc public dynamic var _id:ObjectId = ObjectId.generate()\n @objc public dynamic var __partition:String?\n @objc public dynamic var isEnabled:Bool = false\n @objc public dynamic var comment:String = \"\"\n @objc public dynamic var extensions:Int = 524287\n @objc public dynamic var maxRegistrationDisplay:Int = 0\n @objc public dynamic var maxDisplayController:Int = 0\n @objc public dynamic var maxCamera:Int = 0\n @objc public dynamic var validUntil:Date?\n\n public convenience required init(partition:String) {\n self.init()\n self.__partition = partition\n }\n}\nLicense(partition: partition)\n_id: 6051cc7091942e269dc03b47\n__partition: \"603a72fd6afac24e45e2be84\"\nextensions: 524287\n@objc public dynamic var maxCamera:Int = 1\n_id: 60527aa8ee5e8115a733b3f4\n__partition: \"603a72fd6afac24e45e2be84\"\nextensions: 524287\nmaxCamera: 1\n{\n \"title\": \"License\",\n \"bsonType\": \"object\",\n \"properties\": {\n \"_id\": {\n \"bsonType\": \"objectId\"\n },\n \"__partition\": {\n \"bsonType\": \"string\"\n },\n \"comment\": {\n \"bsonType\": \"string\"\n },\n \"validUntil\": {\n \"bsonType\": \"date\"\n },\n \"extensions\": {\n \"bsonType\": \"int\"\n },\n \"isEnabled\": {\n \"bsonType\": \"bool\"\n },\n \"maxCamera\": {\n \"bsonType\": \"int\"\n },\n \"maxDisplayController\": {\n \"bsonType\": \"int\"\n },\n \"maxRegistrationDisplay\": {\n \"bsonType\": \"int\"\n }\n },\n \"required\": [\n \"_id\",\n \"comment\",\n \"extensions\",\n \"isEnabled\",\n \"maxCamera\",\n \"maxDisplayController\",\n \"maxRegistrationDisplay\"\n ]\n }", "text": "here is code sample in swift for another classInstantiated like this:here is the document on atlas:As you can see, isEnabled, comment, maxRegistrationDisplay, MaxDisplayController, maxCamera, validUntil are missing in atlasI we change the definition of maxCamera as followed :then the document look like:here is the realm schema on mongoDB realm interface:", "username": "bruno_levx" }, { "code": "Objectinit", "text": "Hi @bruno_levx it appears that there’s an issue when you rely on the default values when creating Objects. Try updating your init to explicitly set the values of all of the attributes to work around that issue.", "username": "Andrew_Morgan" }, { "code": "", "text": "hi andrew,That’s a bit annoying if you’re true. I guess we’re not the only ones who rely on properties default values. 
in that case there will be incomplete documents in the Atlas DB that will have to be manually corrected.We’ll run a test ASAP and keep you posted.bruno.", "username": "bruno_levx" }, { "code": "import Foundation
import RealmSwift

public class License: EmbeddedObject {
    
    @objc public dynamic var _id:ObjectId = ObjectId.generate()
    @objc public dynamic var __partition:String?
    @objc public dynamic var isEnabled:Bool = true
    @objc public dynamic var comment:String = \"Comment\"
    @objc public dynamic var extensions:Int = 524287
    @objc public dynamic var maxRegistrationDisplay:Int = 0
    @objc public dynamic var maxDisplayController:Int = 0
    @objc public dynamic var maxCamera:Int = 0
    @objc public dynamic var validUntil:Date?

    public convenience required init(partition:String, validUntil:Date? = nil) {
        self.init()
        self.__partition = partition
        self.validUntil = validUntil
        self.maxRegistrationDisplay = 0
        self.maxDisplayController = 0
        self.maxCamera = 0
    }
}
_id: 60531edafc2b58fd7cd33bc1
__partition: \"603a72fd6afac24e45e2be84\"
isEnabled: true
comment: \"Comment\"
extensions: 524287
", "text": "Hi Andrew,Unfortunately we got the same results assigning values explicitly in init:Result in Atlas:best regards,\nBruno", "username": "bruno_levx" }, { "code": "", "text": "Hi there,Already 7 days since my last post; any updates, or tests you would like us to run?Bruno,", "username": "bruno_levx" }, { "code": "", "text": "@Ian_Ward am I missing something?", "username": "Andrew_Morgan" }, { "code": "", "text": "@bruno_levx Hi can you please provide a reproduction case and file it here please -Realm is a mobile database: a replacement for Core Data & SQLite - GitHub - realm/realm-swift: Realm is a mobile database: a replacement for Core Data & SQLite", "username": "Ian_Ward" }, { "code": "int = 0bool = falseunsynced_documentsunsynced_documents", "text": "This is the same behavior with the dotnet SDK as well. When you leave the defaults like int = 0 or bool = false, the field never propagates to Atlas.Then in my _realm_sync database I start to accumulate documents in the collection unsynced_documents@bruno_levx can you connect directly to your Atlas instance and check your unsynced_documents? The two-way sync seems to break as well and I think you will have documents in here.One more note of interest: if you edit the document directly via the web interface, the document will simply disappear from the client. This seems to be because of the document not matching the schema due to missing fields.", "username": "Michael_Phillips" }, { "code": "", "text": "Hi Michael,You probably have unsynced documents because, as you mentioned, you’re modifying them from Atlas. As we didn’t do such a thing we didn’t face this issue, but the root cause is the same as what we pinpointed: due to missing fields, Atlas documents can never be synced to a realm from Atlas.\nWe noticed this when turning off the Sync: we are then unable to turn it on again.I hope you’re not in the production phase! Otherwise I warmly advise you to not turn off the sync unless you manually add all the missing fields in the Atlas documents.\nThis means no partition key or permissions changes (sync off/on) nor destructive schema changes (which will lead to the same result => sync off then on).@Ian_Ward : I still didn’t open the GitHub issue. We were quite overwhelmed these past few days and we want to try downgrading the Realm Cocoa pod to check if the behavior was already present. 
(we don’t remember noticing such an issue before we updated to the latest version)regards\nBruno", "username": "bruno_levx" }, { "code": "", "text": "Hi,I’ve made a small C# app to test and I confirm that the behavior is almost the same, except that in my test empty strings are correctly synced. Int values set to 0 are still missing.I’ll open an issue on GitHub right now and paste the link here for those, like Martin, who are interested in that issue.Regards,\nBruno", "username": "bruno_levx" }, { "code": "", "text": "Here is the link to the GitHub issue:\nMissing mandatory realm fields in atlas when set to “base” value. · Issue #7192 · realm/realm-cocoa (github.com)Regards,\nBruno", "username": "bruno_levx" }, { "code": "", "text": "@Andrew_Morgan\nJust a quick additional question regarding this issue:We tried to use the workaround mentioned above ( removing the .modified parameter). This works great but brings another issue.\nIf the inserted object refers to an already existing one, then a duplicate objectId exception is raised.Is there any way to prevent the realm.add() function from also adding objects declared in a link?(Maybe we missed something obvious once again…)regards,\nBruno", "username": "bruno_levx" }, { "code": "", "text": "Hey All - just as an update we’ve fixed this behavior on the server and should have this out in the next release, likely end of next week. Thank you for the repro cases-Ian", "username": "Ian_Ward" } ]
Missing mandatory realm fields in atlas
2021-03-12T14:54:07.938Z
Missing mandatory realm fields in atlas
7,364
null
[]
[ { "code": "", "text": "I wanted to chime in again as I have switched from importing the “AWS SDK” package back to using the deprecated 3rd party service.I conducted testing about a month ago to test the difference in speed for simply signing a URL with the AWS service. When importing the AWS SDK package, it would take anywhere from 3-15 seconds for the function to return. Nearly all of this time was spent on the module being loaded which I tested by removing all of the signing function.Now having moved the same signing function to use the 3rd Party Service, the function consistently returns in hundreds of milliseconds or less.If the import of AWS-SDK speed is not fixes, and the AWS 3rd Party Service is discontinued, I will have to move away from realm functions because the speed is too unreliable.", "username": "Tyler_Collins" }, { "code": "aws-sdkgetSignedUrlputObjectaws-sdk", "text": "Hey @Tyler_CollinsI’ve been doing some work converting the functions to the aws-sdk and faced the same issues uploading a raw image via the function, in terms of 20+ seconds for a simple image. I think it’s likely to do with memory or something with the package being loaded in.However… Is there any reason you don’t just use the getSignedUrl function from S3 to generate a presigned URL for a putObject command? That way the user uploads directly so S3 and upon the success you could call another function to save the key and bucket to mongo?Take a look here - How to Generate Signed Amazon S3 URLs in [email protected] @Drew_DiPalma - Moving functions over to the aws-sdk package was pretty painless aside from the issues that @Jason_Tulloch is running into, but I think the presigned URL will help there. I haven’t yet put the code into production as I’m currently traveling. However, I’m definitely less stressed about the move to npm packages now Thanks!", "username": "Adam_Holt" }, { "code": "", "text": "getSignedUrlHi @Adam_Holt apologies ahead of time I’m on mobile.I was using getSignedUrl to get a url and upload the image directly from the browser. I was not sending the image itself to the realm function.This action of just signing the url was still taking upwards of 20 seconds. However to narrow down the time drain I removed all code and only imported the Aws SDK module. Even with this the function times were 5-20 seconds.", "username": "Tyler_Collins" }, { "code": "", "text": "@Tyler_Collins , @Adam_Holt ,Thank you for raising the concerns. I have separated this issue from the image loading issue and let’s continue the discussion on getSignedUrl issue here.The engineering team is aware of this and is working on performance improvements.I will keep updates on this topic as soon as there is more information available.Appreciate your patience with us.Best,\nHenna ", "username": "henna.s" }, { "code": "getSignedUrlPromise", "text": "Just linking this post here, as I’m not seeing any issues with the getSignedUrl function. However, it might be because I’m using getSignedUrlPromise from the AWS S3 SDK. Example is in the post below.Cheers", "username": "Adam_Holt" }, { "code": "", "text": "", "username": "henna.s" } ]
Issues with getSignedUrl AWS S3 function
2022-07-26T15:49:08.891Z
Issues with getSignedUrl AWS S3 function
2,891
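A sketch of the presigned-URL pattern discussed above, as an Atlas App Services (Realm) function using the aws-sdk v2 npm package. The bucket, region, and value names are placeholders, not from the thread; the point is that the browser uploads directly to S3, so the function only signs:

```js
// App Services function: returns a presigned PUT URL for a given object key
exports = async function (fileKey) {
  const AWS = require("aws-sdk");
  const s3 = new AWS.S3({
    region: "us-east-1", // placeholder
    accessKeyId: context.values.get("awsAccessKeyId"),       // hypothetical value names
    secretAccessKey: context.values.get("awsSecretAccessKey"),
  });

  // getSignedUrlPromise is the promise-returning variant Adam mentions above
  return s3.getSignedUrlPromise("putObject", {
    Bucket: "my-upload-bucket", // placeholder
    Key: fileKey,
    ContentType: "image/jpeg",
    Expires: 300, // URL validity in seconds
  });
};
```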
null
[ "data-modeling", "crud" ]
[ { "code": "Parent:\n{\nchilds:[ \nsubchilds:[]]\n}\n", "text": "I have a schema of this shapeHow can I create the childs and subschilds if not exists\nand\nadd to subchilds if it exists?\nI tried running array filters from here but i don’t think it is the correct way to do it", "username": "Majd_Khasib" }, { "code": "", "text": "How can I also remove both of them when the subchilds has one item to be deleted?", "username": "Majd_Khasib" }, { "code": "", "text": "Please provide real sample input and resulting documents for all the cases you want.While it is possible for us to create input documents from your schema, it is a very tedious process for us but should be easy for you since you most likely already have some data.", "username": "steevej" }, { "code": "", "text": "Imagine you want to save offers from different supermarkets\nchilds-> supermarket\nsubchilds->offers per supermarketYou save an offer you liked and want to see all saved offers from all supermarkets later.What I am thinking of:\n1-query to check shop existence thenin case shop doesn’t exist:\n1-Create it\n2-Create an offer inside it\ntotal operations:3\nin case shop does exist:\n1-Check if offer exists in it\n2- Update it\ntotal operations:3how to reduce the number of operations to do this? Or what is the best approach?", "username": "Majd_Khasib" } ]
How to create a nested arrays if not exist or add to nested
2022-08-22T03:57:18.281Z
How to create a nested arrays if not exist or add to nested
2,093
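One way to cut the round-trips asked about above to at most two targeted updates, sketched in mongosh with hypothetical shop/offer field names (the thread never fixes a schema). Step 1 adds the shop only when it is absent; step 2 adds the offer to that shop, deduplicated:

```js
// Step 1: create the shop sub-document only if no shop with that name exists yet
db.users.updateOne(
  { _id: userId, "shops.name": { $ne: "MartX" } },
  { $push: { shops: { name: "MartX", offers: [] } } }
);

// Step 2: append the offer to the matched shop; $addToSet skips duplicates
db.users.updateOne(
  { _id: userId, "shops.name": "MartX" },
  { $addToSet: { "shops.$.offers": { title: "2-for-1 apples" } } }
);
```

Removal can mirror this: $pull the offer first, then $pull any shop whose offers array has become empty.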
null
[ "react-native", "flexible-sync" ]
[ { "code": " const config: Realm.Configuration = {\n sync: {\n error: (e, r) => console.log(e, r),\n user,\n flexible: true,\n initialSubscriptions: {\n update: realm => {\n console.log(realm);\n realm.add(\n realm.objects('user').filtered('account_id', user.id),\n // This is a named subscription, so will replace any existing subscription with the same name\n {name: 'LoggedInUserData'},\n );\n },\n rerunOnStartup: true,\n },\n } as FlexibleSyncConfiguration,\n };\n Realm.open(config)\n .then(e => {\n realmRef.current = e;\n console.log(e);\n })\n .catch(er => console.error(er));\n", "text": "Hello,I am trying out flexible sync for react native and am having some trouble in my initial setup. I have hooked up everything on the Realm UI and have a schema for the user table. The trouble is when I try to write to this realm, I get Cannot write to class user when no flexible sync subscription has been created. Didn’t my initialSubscriptions set up the subscription? Also note that realm.subscriptions is null for some reason.Thanks!", "username": "Luke_Walz" }, { "code": "initialSubscriptionssubs.append( ... )", "text": "Having the same issue: using initialSubscriptions does somehow not avoid the error * Cannot write to class user when no flexible sync subscription has been created*\nI did solve this by adding adding an explicit subs.append( ... ) but in my opinion the initial subscription should suffice. Maybe this is a bug?", "username": "David_Kessler" }, { "code": "update: realm => {\nrealm.add(...)\nupdate: (subs, realm) => {\n subs.add(...)\n", "text": "G’Day @Luke_Walz and @David_Kessler,@Luke_Walz I hope you were able to get past this error. If not, you may need to modify the section of your codeto thisYou can find out more on Bootstraping with Initial Subscriptions.I hope provided information is helpful.Please let me know if you have more questions.Cheers, ", "username": "henna.s" }, { "code": "", "text": "", "username": "henna.s" } ]
Subscriptions object is null on realm flexible sync
2022-06-03T22:53:08.054Z
Subscriptions object is null on realm flexible sync
3,782
null
[ "queries" ]
[ { "code": "$indb.collection.find({_id: {$in: [...arrayOf1000000Items]}})\ndb.collection.find({_id: {$in: [...arrayOf1000000Items]}}).limit(1000000)\nlimit", "text": "Hello community!\nWe’re having a debate on which approach is more performant regarding querying over a unique indexed field using $in operator.\nWhich is more performant?orThe explain plan displays a new LIMIT stage (besides IXSCAN FETCH), but performance difference was very nullable as we tried the comparison over a smaller set of documents.Another question would be:\nWhat would happen if we introduce sorting to the query? Would limit be recommended here?", "username": "Jean-Paul_Saade" }, { "code": "", "text": "The limit will not make a difference because _id has a unique index so you cannot have more elements than what you have in your arrayOf1000000Items. If he limit would be smaller that the array then it is another story.If you sort on _id, it should not make a difference because the field is indexed. If you sort on another field that is not indexed you will experiment much more latency as the sort will be in memory.Passing an array of 1_000_000 elements is probably very slow by it self, I would encourage you to find another way of specifying your query. While it is not advised to do multiple round-trip to the database to implement a use-case, may be this one is an exception. One query to find the 1_000_000’th id and then query with $lt and $gt to get the 1_000_000 documents.The question is what criteria is used to determine the documents to put in the array at first. May be this criteria can be the query.The other question is what ever you do with your 1_000_000 documents, can you do it with the aggregation framework instead?", "username": "steevej" }, { "code": "limit(1_000_000)limit()", "text": "For sure 1_000_000 elements in an array is very bad in terms of performance, what I wanted to know is that using limit(1_000_000) along this filter would cause in slower performance than without it.\nMy given example might be irrealistic, usually our filter arrays would contain a maximum of 20 elements.So long story short, since adding limit() would add an extra step “LIMIT” to the EXPLAIN plan, it nonetheless doesn’t affect performance, but better not use it, right?", "username": "Jean-Paul_Saade" }, { "code": "limit_id$in", "text": "In this case, you don’t need to add a limit because the _id field is guaranteed to be unique. This means you will never have more results than you have in the $in array of elements.", "username": "Doug_Duncan" }, { "code": "", "text": "Very much appreciated, thanks everyone!", "username": "Jean-Paul_Saade" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Best practice on using limit() with $in over indexed field
2022-08-19T10:15:31.895Z
Best practice on using limit() with $in over indexed field
1,766
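A mongosh sketch for settling the debate above empirically rather than by inspection of the plan shape alone; the collection name follows the thread, the sample size is illustrative:

```js
// Grab a small batch of _ids to use as the $in filter
const ids = db.collection.find({}, { _id: 1 }).limit(20).toArray().map(d => d._id);

// Compare plans and timings with and without the redundant limit
db.collection.find({ _id: { $in: ids } }).explain("executionStats");
db.collection.find({ _id: { $in: ids } }).limit(ids.length).explain("executionStats");

// In executionStats, compare executionTimeMillis and totalKeysExamined:
// with _id unique and the limit equal to the array size, the extra LIMIT
// stage should be an effective no-op.
```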
null
[ "indexes", "atlas-search" ]
[ { "code": "", "text": "Hello,\nDoes Mongo create additional index for $search(autocomplete)?\nIf i have already indexed field lets call it FirstName, and if i would like to create a search index for the field, would mongo use the alerady created index or will it create one more index for the same field for searching purpose?\nIf so are both of the indexes kept in the ram memory on the same machine?", "username": "Bojan_Despotoski" }, { "code": "", "text": "Hi @Bojan_Despotoski, the only index necessary to run $search is an Atlas Search index. You can use one Search index to index as many fields as you’d like using static mappings.When using this, any other MongoDB index is likely unnecessary, as they are used for a different goal.", "username": "Elle_Shwer" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Mongodb Indexes
2022-08-22T10:00:32.717Z
Mongodb Indexes
1,202
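A sketch of the static-mappings idea from the answer above: one Atlas Search index definition covering several fields, with FirstName mapped for autocomplete (the second field is illustrative). This index lives alongside, and independently of, any regular B-tree index on FirstName, which $search does not use:

```json
{
  "mappings": {
    "dynamic": false,
    "fields": {
      "FirstName": { "type": "autocomplete" },
      "LastName":  { "type": "string" }
    }
  }
}
```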
null
[ "crud" ]
[ { "code": "", "text": "Hello, working on the chapter 3 lab in the M001 course where the ask is to add a boolean capital to Albany and NY City New York using the sample_training, zips db.\nMy thinking is, identify the New York state documents with the city ALBANY and add the field using $set.\nI’ve tried this\ndb.zips.updateMany({“city”:“ALBANY”},{“state”:“NY”},{$set: {“capital?”:true}})\nbut get anupdate document requires atomic operators. I think it doesn’t like that I’m searching by 2 items but I can’t seem to figure out how to look for both city and state and then update the resulting set.\ncan anyone point me in the right direction?\nthank you,\npaul", "username": "paul_carson" }, { "code": "db.zips.updateMany(\n {\n \"city\": \"ALBANY\",\n \"state\": \"NY\"\n },\n {\n \"$set\": {\n \"capital?\": true\n }\n }\n)\n", "text": "db.zips.updateMany({“city”:“ALBANY”},{“state”:“NY”},{$set: {“capital?”:true}})This is incorrect syntax. You want to have both of your matches in a single set of curly brackets.Try", "username": "Doug_Duncan" }, { "code": "", "text": "ahhhh!!! got it, thank you so much for the rookie course correct!!!", "username": "paul_carson" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Update document requires atomic operators
2022-08-22T13:11:51.829Z
Update document requires atomic operators
32,383
null
[]
[ { "code": "", "text": "Hello everyone,First of all, I hope that everyone is staying safe and healthy!I have a couple of quick questions that relate to several (mock) applications that I have deployed to Heroku for use in my portfolio (I’m also asking on behalf of a potential client that I plan on approaching for a one off job soon…):I have successfully used mLab as an add on in the past and I have also just migrated one of my Heroku hosted applications over to Atlas (realizing that mLab will no longer be available as a service) with no problem via this migration guide: Guide to Migrating a Sandbox Heroku Add-on to Atlas. I plan to migrate my remaining test applications that need to connect to Atlas via this guide as well, so no problem there, especially being that these are just mock applications.My first question is this: what is the best way to connect to Atlas from Heroku going forward? There is no (as far as I can see from searching online) real clear or easy way to retrieve an IP address from Heroku so that it can be whitelisted for a direct connection to Atlas.Will there be another service available for sandbox applications to connect to Atlas in the future? I’m also wondering how to connect a production ready Heroku application to Atlas (or maybe via another paid service if need be…) , as again, getting an IP from Heroku to whitelist seams to be quick tricky. Maybe I’ve missed something…not sure…Thanks in advance to anyone who can help!!Cheers and be well Juliette", "username": "Juliette_Tworsey" }, { "code": "", "text": "Hi @Juliette_Tworsey,\nThanks for your questions.I believe that in order to facilitate your Heroku Ips you need to use a private space for your application:Heroku Private Spaces | Heroku Dev CenterThis will result in a set of defined outdoing ips or a range that you can define in Atlas whitelistPlease note that Atlas adds a set of security features along side ip whitelist:Let me know if you have any further questions.Best regards\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Hi @Pavel_Duchovny,Thank you for responding and thank you for the link.I see that Private Spaces are currently available only in Heroku Enterprise, which is likely out of reach for the client that I am thinking about approaching. I noticed that there is another add on Dyno available, but it is also kind of pricy.mLab is still available for the time being. I’m wondering if going this route (with a shared/paid cluster for a production level application), along with a migration to Atlas might work for now. Would this be a stable approach to take?Thanks again:-)Juliette", "username": "Juliette_Tworsey" }, { "code": "", "text": "Hi @Juliette_Tworsey,You can always whitelist 0.0.0.0/0 ip to allow all traffic although its not recommended.I am not familiar with the way mLab solves this problem, can you elaborate?Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "provider.regionheroku regions --json\n", "text": "Hi @Pavel_Duchovny,I am not familiar with the way mLab solves this problemI’m not all together familiar with how mLab solves this problem either. I’m still trying to accrue enough information to figure this out. I do know that in the migration guide (for migrating a sandbox add-on) it says to whitelist all IP addresses with 0.0.0.0 (not something that I would do for a non-sandbox project &/or cluster).I found this via another Guide to Migrating to Atlas:Note that mLab’s Sandbox and Shared plan deployments are always accessible by all IP addresses. 
To match the firewall settings of your mLab Sandbox or Shared plan deployment you can whitelist all IP addresses (0.0.0.0/0) on your Atlas cluster.…so it appears that even with a cheap(ish) shared plan there is no way around the whitelisting all IPs issue.…but, then there is this:However, we recommend whitelisting only the addresses that require access. To match the firewall settings of your mLab Dedicated plan deployment on Atlas you can review your current mLab firewall settings on the “Networking” tab in mLab’s UI.…and:If you’re connecting to MongoDB Atlas from a Heroku app, it’s most likely that you need to whitelist 0.0.0.0/0 (the range of all IP addresses) unless your app is in Heroku Private Spaces. Heroku IP addresses are, in general, highly dynamic. As such most mLab and Atlas-hosted deployments used by Heroku apps allow all IP addresses.…and from this link:Heroku Common Runtime Dynos use a subset of the IP range of the underlying AWS EC2 instances.The underlying AWS region for your app can be found as the provider.region field in the output of this CLI command:…and also from the same link directly above:The IPs in use by Heroku at any given time are highly dynamic, meaning that the published ranges may cover other IP addresses not currently in use by Heroku. This means that it is often not desirable to open up your firewall to the whole of the AWS region for security reasons.…and lastly:For apps in the Common Runtime a better approach would be to use an add-on to provide a static outbound IP address Add-ons - Heroku Elements or to rely on secure communication via TLS.I’m going to have to read up a bit more and look into the network service add-ons listed that Heroku provides to see if I can find a solution. I did try using Fixie Socks for one of my own sandbox applications, but that didn’t seem to resolve the issue:-)Cheers:-)Juliette", "username": "Juliette_Tworsey" }, { "code": "", "text": "Hi @Juliette_Tworsey,Thanks for the detailed response. Please remember that Atlas provide by default a user and password + role base autherzation with SSL.Moreover, you can add x509 user Authentication to strengthen your authentication.Therefore, without whitelist you can still secure your cluster on an appropriate level.Best\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Hi @Pavel_Duchovny,Thank you for taking the time to respond again and thank you for the tips!This might be a good time for me to re-take (or at least practice in my local VirtualBox VMs) M310: MongoDB Security &/or M103: Basic Cluster Administration . Actually, I need to refresh my memory with all that I learned via both courses.Fast forward 5 minutes… I have just spotted a new course that begins today (lucky me!):A300: Atlas SecurityI have just signed up.I also plan on adding x509 user Authentication via the instructions in the link that you have provided.Thanks again!!", "username": "Juliette_Tworsey" }, { "code": "", "text": "Any findings worth sharing? I’m running into the same issue. Feel a little uneasy about whitelisting ALL IP addresses. I’ve tried to use the Fixie addon, which routes outgoing Heroku traffic through a Proxy with a static IP. Unfortunately, this didn’t work and I’m stuck.", "username": "George_N" }, { "code": "", "text": "@George_N,Please see if any of the options https://www.mongodb.com/how-to/use-atlas-on-heroku/#configuring-heroku-ip-addresses-in-atlas", "username": "Pavel_Duchovny" }, { "code": "", "text": "any of the oI used that article a reference. 
The fixie add-on did not work.", "username": "George_N" }, { "code": "", "text": "Hey @Pavel_Duchovny Thanks for giving a part of you time here! im a new developer, from colombia sorry for my english X: okk.according to this recommendation, so do you have any docs to implements only my own ip for access to my ip?, now im using whitelist 0.0.0.0/0 in my personal project but i dont want this.", "username": "Gamers_here" }, { "code": "", "text": "Hi @Gamers_here ,You need to find your public Ip and add them to atlas access list:Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Please see if any of the options https://www.mongodb.com/how-to/use-atlas-on-heroku/#configuring-heroku-ip-addresses-in-atlas That link 404s. Updated link here: How to Deploy MongoDB on Heroku | MongoDB", "username": "yo_adrienne" }, { "code": "", "text": "Thanks, sorry for replying to late! Salute from colombia.", "username": "Gamers_here" }, { "code": "", "text": "I have another question,I Want only to access to my Backend fron my frontend, this is posibble? … my frontend app is hosted in netlify. its like i only need that my frontend have access to my data. do you have any url blogpost to read about this ??", "username": "Gamers_here" }, { "code": "", "text": "@Gamers_here ,Maybe its best for you to use a realm sdk and realm backend to power your netlify app\nhttps://www.mongodb.com/developer/quickstart/realm-web-sdk/Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Hi everyone,I am trying to connect my Heroku app to MongoDB Atlas via pymongo and I have so far adopted the “whitelist all” solution. Now I want to move my app to production and I feel uncomfortable with this solution. I have tried with proxy servers solutions as provided by Fixie and Fixie Socks (which are available as Heroku add-ons), but I haven’t really made progress. Has anybody made progress?", "username": "Davide_Ferri1" } ]
Connect Atlas to Heroku hosted app
2020-07-27T00:57:49.455Z
Connect Atlas to Heroku hosted app
29,147
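When 0.0.0.0/0 ends up in the access list, as discussed in the thread above, the remaining protections are Atlas authentication and TLS, which the mongodb+srv scheme enables by default. A minimal Node.js sketch of such a connection from a Heroku dyno follows; the config var name, database, and collection names are assumptions, not from the thread:

```javascript
// Sketch only: MONGODB_URI is an assumed Heroku config var holding an SRV
// connection string such as mongodb+srv://appUser:<password>@cluster0.xxxx.mongodb.net/mydb
const { MongoClient } = require("mongodb");

async function main() {
  // mongodb+srv URIs use TLS by default, so even with 0.0.0.0/0 allowed,
  // traffic stays encrypted and gated by username/password auth.
  const client = new MongoClient(process.env.MONGODB_URI);
  try {
    await client.connect();
    const docs = await client.db("mydb").collection("items").find({}).limit(5).toArray();
    console.log(docs);
  } finally {
    await client.close();
  }
}

main().catch(console.error);
```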
null
[ "sharding" ]
[ { "code": "mongodmongosmongodmongosmongomongos", "text": "What are mongod and mongos instances? I know that mongod and mongos can be used in sharding and replication. mongo and mongos creates instances on nodes.", "username": "Master_Selcuk" }, { "code": "", "text": "Does this thread here on the forums help answer your question?", "username": "Doug_Duncan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
What are mongod and mongos Instances
2022-08-22T11:22:16.777Z
What are mongod and mongos Instances
1,306
null
[ "replication", "java", "atlas-cluster", "serverless", "spring-data-odm" ]
[ { "code": "", "text": "Hello,\nI’m using spring boot but I’m connected with mongo driver.\nI’m having the following error:\nnested exception is com.mongodb.MongoConfigurationException: A TXT record is only permitted to contain the keys [authsource, replicaset], but the TXT record for ‘mongo-serverless-dev.wyw1i.mongodb.net’ contains the keys [loadbalanced, authsource ]I’m using java 11.String connection:\nmongodb+srv://accurta-dev:@mongo-serverless-dev.wyw1i.mongodb.net/?retryWrites=true&w=majorityDependency version:\n\norg.mongodb\nmongodb-driver-sync\n4.7.1\nCode:\nString connectionString = “mongodb+srv://acerta-dev:@mongo-serverless-dev.wyw1i.mongodb.net/test?retryWrites=true&w=majority”;I need urgent help, I don’t know what else to do!!!", "username": "Wagner_Wagner" }, { "code": "", "text": "I suspect the issue is that you’re using an older driver version than you think you are. The driver added support for loadbalanced in the TXT record back in the 4.3.0 release, as you can see from this commit.Please double check your mongodb-driver-core version to make sure it’s actually >= 4.3.0.Regards,\nJeff", "username": "Jeffrey_Yemin" }, { "code": "", "text": "\n@Jeffrey_Yemin what else do i need besides this dependency?? there is nothing in the documentation, only referring to it", "username": "Wagner_Wagner" }, { "code": "mvn dependency:treemongodb-driver-coremongodb-driver-corebsonmongodb-driver-sync", "text": "Generally that’s enough but I suspect you have a conflict somewhere in your configuration. Can you run something like mvn dependency:tree to see what version of mongodb-driver-core is being pulled in? If you can’t figure it out, try taking an explicit dependency on both mongodb-driver-core and bson at version 4.7.1 (these are both transitive dependencies of mongodb-driver-sync, so ordinarily they are pulled in automatically at the same version).", "username": "Jeffrey_Yemin" }, { "code": "", "text": "OK. I configured and connection is OK!!\nBut, I’m getting this error when I send something to the cluster:\nConnectionString connectionString = new ConnectionString(“mongodb+srv://acerta-dev:@mongo-serverless-dev.wyw1i.mongodb.net/?retryWrites=true&w=majority”);\nMongoClientSettings settings = MongoClientSettings.builder()\n.applyConnectionString(connectionString)\n.serverApi(ServerApi.builder()\n.version(ServerApiVersion.V1)\n.build())\n.build();\nMongoClient mongoClient = MongoClients.create(settings);\nMongoDatabase database = mongoClient.getDatabase(“test”);\ndatabase.createCollection(“example”);Error:\norg.mongodb.driver.cluster | SRV resolution completed with hosts: [mongo-serverless-dev-lb.wyw1i.mongodb.net:27017]\ncom.mongodb.MongoSocketWriteException: Exception sending message\nat com.mongodb.internal.connection.InternalStreamConnection.translateWriteException(InternalStreamConnection.java:684)Caused by: javax.net.ssl.SSLHandshakeException: extension (5) should not be presented in certificate_requestI`m not work with certificated", "username": "Wagner_Wagner" }, { "code": "", "text": "Try googling that error. You probably need to update your JVM. See this SO post, for one explanation.", "username": "Jeffrey_Yemin" }, { "code": "", "text": "I`im using GCP App Engine, not existe update JVM!!\nWhy resolve this problem??", "username": "Wagner_Wagner" }, { "code": "", "text": "I’m not sure I understand. Are you saying that it’s not possible for you to update the JVM?There are some other idea here.", "username": "Jeffrey_Yemin" }, { "code": "", "text": "Exactly. 
With Google App Engine, it is not possible to change the JVM.\nI already tried this, but it doesn’t work either:\nmvn spring-boot:run -Dspring-boot.run.profiles=local -Djdk.tls.client.protocols=TLSv1.2", "username": "Wagner_Wagner" }, { "code": "java -version", "text": "What does java -version print? I’m surprised that Google would not be keeping up with the latest JDK patches. The JDK bug report lists the back ports: [JDK-8236039] JSSE Client does not accept status_request extension in CertificateRequest messages for TLS 1.3 - Java Bug System", "username": "Jeffrey_Yemin" }, { "code": "", "text": "The Java version is:\nopenjdk version “11.0.2” 2019-01-15OpenJDK Runtime Environment 18.9 (build 11.0.2+9)OpenJDK 64-Bit Server VM 18.9 (build 11.0.2+9, mixed mode)Is there any alternative to solve this? I really need to connect to Atlas and it is being very problematic for Java", "username": "Wagner_Wagner" }, { "code": "", "text": "It’s very concerning in general if Google App Engine isn’t keeping applications up to date with JDK patch releases, as this presents large security risks. According to their docs, they should be. I suggest checking with Google App Engine support to see what’s going on. I will look for workarounds if you’re not able to get anywhere on that front.Regards,\nJeff", "username": "Jeffrey_Yemin" }, { "code": "", "text": "What JVM version would fix this problem?Please, if you have any alternative for Java 11 let me know, otherwise I will have to leave the Atlas product", "username": "Wagner_Wagner" }, { "code": "", "text": "Please try Java 17. That’s also an LTS release and the JDK bug was fixed before it went GA, so you shouldn’t need the latest patch release (though it’s still recommended).", "username": "Jeffrey_Yemin" } ]
Connection Fail on Mongo Atlas Serverless
2022-08-19T19:30:45.679Z
Connection Fail on Mongo Atlas Serverless
4,460
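Jeffrey's suggestion of pinning the transitive driver artifacts explicitly would look roughly like this in a Maven pom.xml; this is a sketch of the idea rather than a confirmed fix for the reporter's project:

```xml
<!-- Sketch: pin mongodb-driver-core and bson to the same version as
     mongodb-driver-sync so an older transitive copy cannot win
     dependency mediation and reintroduce the TXT-record error. -->
<dependencies>
  <dependency>
    <groupId>org.mongodb</groupId>
    <artifactId>mongodb-driver-sync</artifactId>
    <version>4.7.1</version>
  </dependency>
  <dependency>
    <groupId>org.mongodb</groupId>
    <artifactId>mongodb-driver-core</artifactId>
    <version>4.7.1</version>
  </dependency>
  <dependency>
    <groupId>org.mongodb</groupId>
    <artifactId>bson</artifactId>
    <version>4.7.1</version>
  </dependency>
</dependencies>
```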
null
[ "installation" ]
[ { "code": "", "text": "", "username": "Platon_workaccount" }, { "code": "", "text": "It’s strange that in 2 years there are no updates on this area.", "username": "Platon_workaccount" } ]
Request to update the installation documentation for Fedora
2020-03-23T11:43:56.192Z
Request to update the installation documentation for Fedora
2,990
null
[ "node-js", "crud" ]
[ { "code": "{\nCompany : \"Alpha\",\nProducts : \n[\n{ Name : \"Car\",\nSalesRecord : [ <Obj>]},\n{Name: \"Plane\",\nSalesRecord : [<Obj>]}\n]\n}\nexistsnot existsdb.collection.findOneAndUpdate(\n{Company:'Alpha'},\n [\n {$project:{\n \"Company\": 1,\n \"Products\":{\n $switch :{\n branches:[\n {\n case:{$eq:['$Name','Car']},\n then: {$push:{'$Products.$.SalesRecord ': [salesRecordObj]}}\n },\n {\n case:{$ne:['$Name','Car']},\n then: {$push:{ Products: [NewProductdocument]}}\n }\n ]\n }}}})\n", "text": "Hello Mongo Community,I have the following documents format (simplifed to easy to read)How I want to do isI have read that $set:{Products.Name} will satisfiy “not exist” condition but it will overwrite the existing document at “exist” condition.The sample query I have tried is as follows but the problem with this code is $push is not allowed in the $project State.", "username": "Kyaw_Zayar_Tun" }, { "code": "db.collection(name).bulkWrite(\n[\n { /* first scenario is Both Company and the Product exists but update newSales Object with same date`*/\n updateOne:{\n filter :{Company:chart[0].Company,'Products.Name' :chart[0].Products[0].Name},\n update : { $set:{\n \"Products.$.SalesRecord.$[elem]\": chart[0].Products[0].SalesRecord[0]\n }\n },\n arrayFilters: [{\"elem.Date\": chart[0].Products[0].SalesRecord[0].Date }]\n }\n },\n { /* second scenario is Both Company and the Products exists but no Sales object with same date exists */\n updateOne:{\n filter: {Company:chart[i].Company,'Products.Name': chart[i].Products[0].Name,'Products.SalesRecord.Date':{$ne:chart[0].Products[0].SalesRecord[0].Date}},\n update: {$push:{\n \"Products.$.SalesRecord\": chart[i].Products[0].SalesRecord[0]\n } } }\n },\n {\n /* Third scenario is Company exists but Product not exists */\n updateOne:{\n filter: {Company:chart[i].Company,'Products.Name':{$ne:chart[i].Products[0].Name}},\n update: {$push:{'Products': chart[i].Products[0]}}\n }\n },\n {\n /* fourth is if Company not exists */\n updateOne:{\n filter: {Company:chart[i].Company},\n update: {$setOnInsert:{'Products': chart[i].Products}},\n upsert: true\n }\n },\n],\n/* it need to be ordered for not to be conflicted*/\n{ordered:true})", "text": "I have managed to do the above by filtering each specific scenario and perform an update", "username": "Kyaw_Zayar_Tun" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Different update Operation to nested documents of a document based on subdocument
2022-08-21T08:47:01.953Z
Different update Operation to nested documents of a document based on subdocument
2,153
null
[ "queries", "data-modeling", "mongoose-odm" ]
[ { "code": "findOne", "text": "When you use findOne to return a single document, but the specified field in the filter does not exist (for example because of a typo), MongoDB returns an arbitrary document. This can cause some really bad bugs and security issues.How do I mitigate this?", "username": "Florian_Walther" }, { "code": "db.collection('collection').findOne({'abcd':'abcd'})", "text": "it returns null if there is no field or value in the collection,db.collection('collection').findOne({'abcd':'abcd'})what is your query syntax?", "username": "Kyaw_Zayar_Tun" }, { "code": "findOneconst googleSignupToken = await GoogleSignupToken.findOne({ token: token }).exec();\n{ tokenId: token }tokenId", "text": "I use Mongoose’s findOne. I thought it behaves the same as the native one, is that not the case?When I accidentally wrote { tokenId: token } as the filter (tokenId doesn’t exist as a key), it returned the first document in the collection. Which in this case is a devastating bug. Does the native findOne behave differently?", "username": "Florian_Walther" }, { "code": "", "text": "It seems like yes, it return null for findOne() and empty array with find().toArray()", "username": "Kyaw_Zayar_Tun" }, { "code": "", "text": "Thank you. Then I have to find out why Mongoose behaves this way!", "username": "Florian_Walther" }, { "code": "strictQuery", "text": "I found the answer on Stackoverflow. To disable this default behavior, we have to disable an option called strictQuery:", "username": "Florian_Walther" }, { "code": "strictQuerystrictQuery", "text": "Hi @Florian_Walther,Thank you for finding and sharing the solution for this default Mongoose behaviour, which is definitely a very unexpected deviation from the normal MongoDB driver behaviour.It looks like the strictQuery behaviour changed in Mongoose 6 and this has caused some confusion.The Mongoose maintainer has created an issue to have strictQuery off by default in Mongoose 7 (as it was prior to Mongoose 6), so you may want to watch & upvote Make `strictQuery` `false` by default again · Issue #11861 · Automattic/mongoose · GitHub.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Thank you. I upvoted it \nYea I noticed that many people were complaining about this change and even the maintainer admitted that it was a mistake. I find it quite dangerous but I’m happy that there is a way to disable it.", "username": "Florian_Walther" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to avoid accidentally returning an arbitrary document when using findOne with a non-existing field in Mongoose?
2022-08-21T10:18:41.860Z
How to avoid accidentally returning an arbitrary document when using findOne with a non-existing field in Mongoose?
7,443
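The Stack Overflow fix referenced in the thread comes down to one global Mongoose setting. A minimal sketch, assuming the model and field names from the discussion:

```javascript
const mongoose = require("mongoose");

// Restore the pre-Mongoose-6 behaviour: with strictQuery disabled, filter
// fields that are not in the schema are passed through to MongoDB as-is,
// so a typo like { tokenId: ... } matches nothing instead of everything.
mongoose.set("strictQuery", false);

// GoogleSignupToken is the Mongoose model from the thread above.
async function findToken(token) {
  // Now resolves to null rather than an arbitrary document when
  // "tokenId" is not a schema path.
  return GoogleSignupToken.findOne({ tokenId: token }).exec();
}
```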
null
[ "aggregation", "atlas-search" ]
[ { "code": "db.example.aggregate([\n {\n \"$search\": {\n \"index\": \"default\",\n \"compound\": {\n \"must\": [\n ...some conditions\n ],\n \"filter\": [\n ...some clauses\n ]\n }\n }\n },\n {\n \"$project\": {\n ...some fields\n }\n },\n {\n \"$skip\": 0\n },\n {\n \"$limit\": 10\n },\n {\n \"$sort\": {\n \"score\": 1\n }\n }\n])\ndb.example.aggregate([\n {\n \"$search\": {\n \"index\": \"default\",\n \"compound\": {\n \"must\": [\n ...some conditions\n ],\n \"filter\": [\n ...some clauses\n ]\n }\n }\n },\n {\n \"$match\": [...some other conditions]\n },\n {\n \"$project\": {\n ...some fields\n }\n },\n {\n \"$skip\": 0\n },\n {\n \"$limit\": 10\n },\n {\n \"$sort\": {\n \"score\": 1\n }\n }\n])\n", "text": "I want to know the complete request life cycle of request when we query mongodb atlas for full text search. I know that when we run some aggregation in atlas it goes to mongot and then to lucene. we can consider below query for reference :I also read in docs that for full text search we have to put all our search criteria inside either : $search or $serachMeta.\nWhat if i use $match after $search in atlas like so. :what changes would come in request liefcycle of query compared to without $match clause and what would be the performance impact if any ?", "username": "pawan_saxena1" }, { "code": "$search$search$match", "text": "Hi @pawan_saxena1Regarding performance, you might want to check out these pages:About your question:What if i use $match after $search in atlas like so.it’s discussed in the above link about Query Performance:Using a $match aggregation pipeline stage after a $search stage can drastically slow down query results. If possible, design your $search query so that all necessary filtering occurs in the $search stage to remove the need for a $match stage.Note that as the product is constantly being improved, this may or may not be relevant in the future. Please check out the links above for more details.Best regards\nKevin", "username": "kevinadi" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Performance impact of using $match after $search in mongodb atlas
2022-08-19T20:51:46.505Z
Performance impact of using $match after $search in mongodb atlas
2,477
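To make Kevin's advice concrete: instead of a trailing $match, the same predicate can usually move into the compound.filter clause of $search itself. A hedged sketch with illustrative index, field names, and values:

```javascript
// Sketch: fold a would-be `$match: { isActive: true }` into the
// compound.filter clause so mongot applies it during the Lucene query
// instead of filtering afterwards on the server.
db.example.aggregate([
  {
    $search: {
      index: "default",
      compound: {
        must: [{ text: { query: "coffee", path: "description" } }],
        filter: [{ equals: { path: "isActive", value: true } }]
      }
    }
  },
  { $limit: 10 }
]);
```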
null
[ "performance", "storage" ]
[ { "code": "{\n \"application threads page read from disk to cache count\" : 327399071,\n \"application threads page read from disk to cache time (usecs)\" : 30961681878,\n \"application threads page write from cache to disk count\" : 3957777,\n \"application threads page write from cache to disk time (usecs)\" : 879859200,\n \"bytes belonging to page images in the cache\" : 5962097784,\n \"bytes belonging to the cache overflow table in the cache\" : 182,\n \"bytes currently in the cache\" : 6297119974,\n \"bytes not belonging to page images in the cache\" : 335022189,\n \"bytes read into cache\" : NumberLong(\"10697633891413\"),\n \"bytes written from cache\" : 96058542714,\n \"cache overflow cursor application thread wait time (usecs)\" : 0,\n \"cache overflow cursor internal thread wait time (usecs)\" : 0,\n \"cache overflow score\" : 0,\n \"cache overflow table entries\" : 0,\n \"cache overflow table insert calls\" : 0,\n \"cache overflow table max on-disk size\" : 0,\n \"cache overflow table on-disk size\" : 0,\n \"cache overflow table remove calls\" : 0,\n \"checkpoint blocked page eviction\" : 55,\n \"eviction calls to get a page\" : 336695779,\n \"eviction calls to get a page found queue empty\" : 9160503,\n \"eviction calls to get a page found queue empty after locking\" : 4009923,\n \"eviction currently operating in aggressive mode\" : 0,\n \"eviction empty score\" : 0,\n \"eviction passes of a file\" : 1666156000,\n \"eviction server candidate queue empty when topping up\" : 1468129,\n \"eviction server candidate queue not empty when topping up\" : 2532493,\n \"eviction server evicting pages\" : 0,\n \"eviction server slept, because we did not make progress with eviction\" : 14927017,\n \"eviction server unable to reach eviction goal\" : 0,\n \"eviction state\" : 64,\n \"eviction walk target pages histogram - 0-9\" : 1660912885,\n \"eviction walk target pages histogram - 10-31\" : 1167311,\n \"eviction walk target pages histogram - 128 and higher\" : 0,\n \"eviction walk target pages histogram - 32-63\" : 606182,\n \"eviction walk target pages histogram - 64-128\" : 3469622,\n \"eviction walks abandoned\" : 4168376,\n \"eviction walks gave up because they restarted their walk twice\" : 1645756428,\n \"eviction walks gave up because they saw too many pages and found no candidates\" : 8456934,\n \"eviction walks gave up because they saw too many pages and found too few candidates\" : 17760,\n \"eviction walks reached end of tree\" : 3300161895,\n \"eviction walks started from root of tree\" : 1657176952,\n \"eviction walks started from saved location in tree\" : 8979048,\n \"eviction worker thread active\" : 4,\n \"eviction worker thread created\" : 0,\n \"eviction worker thread evicting pages\" : 323658140,\n \"eviction worker thread removed\" : 0,\n \"eviction worker thread stable number\" : 0,\n \"failed eviction of pages that exceeded the in-memory maximum count\" : 6607,\n \"failed eviction of pages that exceeded the in-memory maximum time (usecs)\" : 5902,\n \"files with active eviction walks\" : 0,\n \"files with new eviction walks started\" : 1654405467,\n \"force re-tuning of eviction workers once in a while\" : 0,\n \"hazard pointer blocked page eviction\" : 659884,\n \"hazard pointer check calls\" : 327250926,\n \"hazard pointer check entries walked\" : 9220828506,\n \"hazard pointer maximum array length\" : 42,\n \"in-memory page passed criteria to be split\" : 19000,\n \"in-memory page splits\" : 9509,\n \"internal pages evicted\" : 1055038,\n \"internal pages split 
during eviction\" : 114,\n \"leaf pages split during eviction\" : 43773,\n \"maximum bytes configured\" : 7874805760,\n \"maximum page size at eviction\" : 26877,\n \"modified pages evicted\" : 1021176,\n \"modified pages evicted by application threads\" : 0,\n \"operations timed out waiting for space in cache\" : 0,\n \"overflow pages read into cache\" : 0,\n \"page split during eviction deepened the tree\" : 0,\n \"page written requiring cache overflow records\" : 0,\n \"pages currently held in the cache\" : 216173,\n \"pages evicted because they exceeded the in-memory maximum count\" : 22800,\n \"pages evicted because they exceeded the in-memory maximum time (usecs)\" : 6083073,\n \"pages evicted because they had chains of deleted items count\" : 3886695,\n \"pages evicted because they had chains of deleted items time (usecs)\" : 3303170,\n \"pages evicted by application threads\" : 60,\n \"pages queued for eviction\" : 398428510,\n \"pages queued for urgent eviction\" : 11843046,\n \"pages queued for urgent eviction during walk\" : 19765,\n \"pages read into cache\" : 327416121,\n \"pages read into cache after truncate\" : 5061,\n \"pages read into cache after truncate in prepare state\" : 0,\n \"pages read into cache requiring cache overflow entries\" : 0,\n \"pages read into cache requiring cache overflow for checkpoint\" : 0,\n \"pages read into cache skipping older cache overflow entries\" : 0,\n \"pages read into cache with skipped cache overflow entries needed later\" : 0,\n \"pages read into cache with skipped cache overflow entries needed later by checkpoint\" : 0,\n \"pages requested from the cache\" : 20182969265,\n \"pages seen by eviction walk\" : 2277363640,\n \"pages selected for eviction unable to be evicted\" : 686461,\n \"pages walked for eviction\" : 80676236195,\n \"pages written from cache\" : 3998879,\n \"pages written requiring in-memory restoration\" : 20088,\n \"percentage overhead\" : 8,\n \"tracked bytes belonging to internal pages in the cache\" : 163573705,\n \"tracked bytes belonging to leaf pages in the cache\" : 6133546269,\n \"tracked dirty bytes in the cache\" : 17379297,\n \"tracked dirty pages in the cache\" : 52,\n \"unmodified pages evicted\" : 324927301\n}\nRegards\nSS", "text": "Hi Community,Can someone help and analyze below WiredTIger.cache stats and suggest if there wiredTiger cache size need to increase ?I have checked below parameter , does they indicate wiredTiger cache need to increase ? Is there any other parameter need to review to make a decision for wiredTiger cache increase ?\n``|\n“maximum bytes configured” : 7874805760,\n“bytes currently in the cache” : 6297119974,\n“tracked dirty bytes in the cache” : 17379297,\n“pages written from cache” : 3998879,\n“pages read into cache” : 327416121,", "username": "satvant_sandhu" }, { "code": "", "text": "Hi @satvant_sandhu welcome to the community!Generally the WiredTiger cache is set to ~50% of RAM by default, and this default was chosen since it works well in most cases, and also leave about half of the RAM for OS use (other processes, filesystem cache, etc.). In short, there’s typically no need to change this value unless your needs is very, very specific, and also after a deep troubleshooting by a MongoDB engineer.It’s generally more beneficial to increase the amount of RAM in the machine, instead of changing the WiredTiger cache size.Having said that, what issue are you seeing? 
Are you seeing slow queries, extremely busy disk, or similar performance issues?Best regards\nKevin", "username": "kevinadi" }, { "code": "", "text": "Hi Kevinadi,Thanks for the update. We have customer complaints about slow queries and we want to ensure our current working set fits in the WiredTiger cache. We can increase the RAM size but need help understanding the WiredTiger cache stats. Which stats in the WiredTiger cache should we look at to conclude whether the current WiredTiger cache size is sufficient to hold the working set?Would you please analyze the WiredTiger cache stats given in the question and recommend whether we need to increase the RAM or not? Also, what are the main parameters in the cache stats that we should look at, and what are the recommended threshold values?Thanks in advance.Regards\nSatvant Singh", "username": "satvant_sandhu" }, { "code": "dirtyuseddirtyusediostatdirtymongostatused", "text": "Hi @satvant_sandhu\n\nI’m afraid analysing WiredTiger performance and other deep performance-related issues is not as simple as checking some statistics. The reason why MongoDB records thousands of statistics each second in full-time diagnostic data capture is because all of them have to be examined holistically in relation to one another. There is no single metric that can tell you what’s happening in the system. Note that these metrics are just one of the important tools to troubleshoot a deployment, but not the only tool. In a typical investigation, FTDC data, disk data, OS data, and all relevant information are collected and examined thoroughly, so doing that on a public forum is extra challenging. Having said that, we can sort of see if a deployment is overworked by checking some things:Ideally you want to see low dirty in mongostat (high used is OK – and healthy, and you want this to be about 80% – but this also depends on the use case), low delays in IO operations, and no slow queries in the logs.Best regards\nKevin", "username": "kevinadi" } ]
WiredTiger cache status (Working Set)
2022-08-18T04:32:21.395Z
WiredTiger cache status (Working Set)
3,262
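To put rough numbers on the "used should be about 80%" guidance from the reply, the same counters quoted in the question can be turned into ratios in mongosh; the thresholds here are the rule-of-thumb from the thread, not hard limits:

```javascript
// Sketch: compute WiredTiger cache fill and dirty ratios from serverStatus.
const c = db.serverStatus().wiredTiger.cache;
const max = c["maximum bytes configured"];
const used = c["bytes currently in the cache"];
const dirty = c["tracked dirty bytes in the cache"];
// ~80% used is expected and healthy; persistently high dirty suggests
// eviction is struggling to keep up with the write load.
print(`cache used : ${((100 * used) / max).toFixed(1)}%`);
print(`cache dirty: ${((100 * dirty) / max).toFixed(1)}%`);
```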
null
[ "node-js", "mongoose-odm" ]
[ { "code": "mongoose.connect(mongodb://username:password@ip:port/dbname,{\n\n useNewUrlParser:true,\n\n useUnifiedTopology:true,\n\n});\napp.use(express.json()); \napp.post(`${api}/products`,(req,res)=>{\n\n const product = new Product({\n\n name: req.body.name,\n\n image: req.body.image,\n\n countInStock: req.body.countInStock\n\n });\n\n product.save()\n\n .then((createdProduct)=>{\n\n res.status(201).json(createdProduct);\n\n })\n\n .catch((err)=>{\n\n res.status(500).json({\n\n error: err,\n\n success:false\n\n });\n\n });\n\n});\n", "text": "Hi,I am new to mongoDB and i have installed a 6.0 community server on my ubuntu server.\nI have followed the tutorials on the site, i have enabled the access from other server , i have create a super user , a collection and an admin user for that collection with roles of adminDB and readWrite.my problem is that when i am trying to sent a POST request i am getting an error 13 Unuthorised.i am using mongoose and nodejs to connect (following code with changed loggin data)and the following code to POST from nodeIf i use MongoDBCompass I can add dataany idea how on where is the error ?", "username": "kordou_N_A" }, { "code": "const productSchema = mongoose.Schema({\n\n name: String,\n\n image: String,\n\n countInStock: Number\n\n});\n\nconst Product = mongoose.model('Product',productSchema);\n", "text": "and my model is", "username": "kordou_N_A" } ]
Error 13 OK 0 Unauthorised
2022-08-17T15:14:54.045Z
Error 13 OK 0 Unauthorised
1,538
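The thread never reaches a resolution; one common cause of error 13 with otherwise-valid credentials is authenticating against the wrong database. If the user was created in admin rather than in dbname, the connection string needs an explicit authSource. A hedged sketch, reusing the poster's own placeholders:

```javascript
// Hedged sketch: if the user was created in the `admin` database, say so
// explicitly; otherwise MongoDB authenticates against `dbname` and the
// login lacks privileges there even though the password is correct.
mongoose.connect("mongodb://username:password@ip:port/dbname?authSource=admin", {
  useNewUrlParser: true,
  useUnifiedTopology: true,
});
```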
null
[]
[ { "code": "", "text": "I have a problem with installing mongodb on ubuntu 22.04 on hetzner I have such an error E: Unable to locate package mongodb-org", "username": "OnlyBis_Design" }, { "code": "", "text": "Hello @OnlyBis_Design and welcome to the MongoDB Community forums. According to the installation docs Ubuntu 22.04 is not officially supported by MongoDB at this time.There are JIRA tickets in place to get 22.04 support added for x86 and ARM architectures. You can vote on those tickets to hopefully get higher prioritization and quicker resolution.There have been numerous threads on the forums here. This thread has comments that some users were able to get MongoDB installed on 22.04, but note that there are no guarantees.", "username": "Doug_Duncan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to install mongodb on hetzner
2022-08-21T19:29:10.218Z
How to install mongodb on hetzner
2,385
null
[ "node-js", "serverless", "realm-web", "flexible-sync" ]
[ { "code": "", "text": "Hi,I currently have a React app using the realm-web SDK to access data on Atlas with the serverless Functions.\nThis realm app does NOT have SYNC enabled. (I call it realm-web)\nEverything works fine.I made an Electron version of this React app that is using the realm-node SDK to access/sync the data from Atlas.\nThis Electron app uses a different realm app that has SYNC enabled. (I call it realm-sync).\nEverything works fine.\nIt is accessing the same data as the web app on Atlas.My question is: Can’t I just copy all the serverless Functions that are on the realm-web app to the realm-sync app and use ONLY the realm-sync app for the web and electron apps?The web app would use the realm-web SDK and the Electron app would use the realm-node SDK.\nI’ve tried that but, I always get “InvalidParameter Error/db: string required” when I call a serverless Function with the web SDK on the realm-sync app that has SYNC enabled.\nIs that not suppose to be working?\nThanks for your help.", "username": "benoitw" }, { "code": "", "text": "For those wondering, it works, you can call server less functions on a realm app that has SYNC enabled.\nIn my case the problem was a misconfiguration of constants.", "username": "benoitw" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Can I call serverless Functions with the realm-web SDK on a realm app that has SYNC enabled?
2022-08-01T09:39:38.955Z
Can I call serverless Functions with the realm-web SDK on a realm app that has SYNC enabled?
2,169
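For reference, a call to an App Services function from the Web SDK looks roughly like this; the app ID and function name below are placeholders, and, as the thread concludes, the call works the same whether or not Sync is enabled on the app:

```javascript
import * as Realm from "realm-web";

const app = new Realm.App({ id: "realm-sync-app-id" }); // placeholder app ID

async function run() {
  const user = await app.logIn(Realm.Credentials.anonymous());
  // Server-side functions are exposed on user.functions.
  const result = await user.functions.myServerlessFunction("some-arg");
  console.log(result);
}

run().catch(console.error);
```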
null
[ "aggregation" ]
[ { "code": "Transaction{\n _id: ObjectId(\"5f1284078a7dd8a6b9140c97\")\n company: ObjectId(\"5f127ce1e354f37df698f55e\")\n person: ObjectId(\"5f1284078a7dd8a6b9140c95\")\n actionTransaction: \"Purchase\"\n}\nCompanies{\n _id: ObjectId(\"5f127ce1e354f37df698f55e\")\n name: \"MongoDB Inc\"\n phoneNumber: \"+1111111111\"\n members: [\n {\n _id: ObjectId(\"5f1284078a7dd8a6b9140c95\")\n title: \"CEO\"\n role: \"ADMIN\"\n },\n {\n _id: ObjectId(\"5f1284078a7ff8a6b9300d93\")\n title: \"CHAIRMAN\"\n role: \"Others\"\n }\n ]\n}\n{\n _id: ObjectId(\"5f1284078a7dd8a6b9140c95\")\n name: \"Foo Bar\"\n gender: \"Male\"\n}\ndb.Transaction.aggregate(\n [\n { $lookup:\n {\n from: \"companies\",\n localField: \"company\",\n foreignField: \"_id\",\n as: \"company\"\n }\n },\n { $lookup:\n {\n from: \"people\",\n localField: \"person\",\n foreignField: \"_id\",\n as: \"person\"\n }\n },\n ]\n)\nArray[\n _id: ObjectId(\"5f1284078a7dd8a6b9140c97\")\n company: [\n {\n _id: ObjectId(\"5f127ce1e354f37df698f55e\")\n name: \"MongoDB Inc\"\n phoneNumber: \"+1111111111\"\n members: [\n {\n _id: ObjectId(\"5f1284078a7dd8a6b9140c95\")\n title: \"CEO\"\n role: \"ADMIN\"\n },\n {\n _id: ObjectId(\"5f1284078a7ff8a6b9300d93\")\n title: \"CHAIRMAN\"\n role: \"Others\"\n }\n ]\n }\n ],\n person: [\n {\n _id: ObjectId(\"5f1284078a7dd8a6b9140c95\")\n name: \"Foo Bar\"\n gender: \"Male\"\n }\n ]\n]\nArrayPersonTransaction CollectionroletitleCompanyPerson fieldObject// Transaction\n{\n company : { COMPANY_DATA },\n transactions: [\n member: {\n _id: ObjectId(\"5f1284078a7dd8a6b9140c95\")\n name: \"Foo Bar\"\n gender: \"Male\"\n title: \"CEO\"\n role: \"ADMIN\"\n },\n actionTransaction: \"Purchase\"\n\n ]\n\n}\n", "text": "I have Transaction collection like this:I have Companies like this :and I have ‘People’ collection like this:when I do query like this:I got result an Array like this :you can see each relations there the result on that fields are Array,\nand I really one also on that Person field on Transaction Collection gonna get the role and title which from Company but I have no idea to filter it into that Person fieldthis is the illustration the result I want just an Object:is it possible doing this ? 
Any idea how to make it possible??", "username": "Virtual_Database" }, { "code": "db.transactions.aggregate([\n {\n $lookup: {\n from: 'companies',\n localField: 'company',\n foreignField: '_id',\n as: 'company'\n }\n },\n {\n $lookup: {\n from: 'people',\n localField: 'person',\n foreignField: '_id',\n as: 'person'\n }\n },\n // the following two $unwind stages\n // are used to transform an array into an object\n {\n $unwind: '$person',\n },\n {\n $unwind: '$company',\n },\n {\n // find the company member that matches\n // the given person\n $addFields: {\n matchedMember: {\n $arrayElemAt: [{\n $filter: {\n input: '$company.members',\n cond: {\n $eq: ['$$this._id', '$person._id'],\n }\n },\n }, 0]\n }\n }\n },\n {\n $addFields: {\n person: {\n $mergeObjects: ['$person', '$matchedMember'],\n }\n }\n },\n {\n // group all transactions of members\n // that belong to a certain company\n $group: {\n _id: '$company._id',\n company: {\n $first: {\n // select the fields that you want to have\n // in the output\n _id: '$company._id',\n name: '$company.name',\n phoneNumber: '$company.phoneNumber'\n },\n },\n transactions: {\n $push: {\n member: '$person',\n actionTransaction: '$actionTransaction',\n }\n }\n }\n },\n {\n // cleanup\n $project: {\n _id: false,\n }\n }\n]).pretty();\ndb.transactions.insertOne({\n _id: ObjectId(\"5f1284078a7dd8a6b9140c97\"),\n company: {\n _id: ObjectId(\"5f127ce1e354f37df698f55e\"),\n name: \"MongoDB Inc\",\n phoneNumber: \"+1111111111\",\n },\n member: {\n // this member._id may be used to join data \n // from 'people' collection\n _id: ObjectId(\"5f1284078a7dd8a6b9140c95\"),\n title: \"CEO\",\n role: \"ADMIN\"\n },\n actionTransaction: \"Purchase\"\n});\ndb.transactions.insertOne({\n _id: ObjectId(\"5f1284078a7dd8a6b9140c97\"),\n company: {\n _id: ObjectId(\"5f127ce1e354f37df698f55e\"),\n name: \"MongoDB Inc\",\n phoneNumber: \"+1111111111\",\n },\n person: {\n _id: ObjectId(\"5f1284078a7dd8a6b9140c95\"),\n name: \"Foo Bar\",\n gender: \"Male\",\n title: \"CEO\",\n role: \"ADMIN\"\n },\n actionTransaction: \"Purchase\"\n});\n", "text": "Considering the output, it looks like you want to have an array of transactions that were made by some company members, grouped by company.I have extended your aggregation and now it gives the exact output as you’ve described:Have a look at the $mergeObjects, $filter pipeline operators and the $group, $unwind pipeline stages for more details.Keep in mind, if you do not limit your query, the aggregation will tend to get slower and slower with each document inserted into the ‘transactions’ collection. Alternatively, you may want to denormalize your transaction documents like this:Or even like this:This way you can use simpler queries and take advantage of indexes. It will make your queries much faster, but you will have to take care of the data consistency across collections: if, say, person.name changes in one collection, you will need to update it in another one (immediately or with a background process).", "username": "slava" }, { "code": "", "text": "Yes; if I update data on an organization or person, I would have to update both :(. That is why I made the transaction collection like that. It also takes more logic to update both collections. So if my collections stay like that, will it be very slow? 
Any options besides the ones above??", "username": "Virtual_Database" }, { "code": "", "text": "Also, the People collection is used everywhere in my MongoDB. If a user wants to update, should I update many collections?", "username": "Virtual_Database" }, { "code": "", "text": "By the way, the query you answered with does not include the role and status of the person. I want transaction.person to include each of their roles and statuses ", "username": "Virtual_Database" }, { "code": "", "text": "Is there any option for this query? Because the title and role are not recorded there?", "username": "Virtual_Database" }, { "code": "db.your_collection.stats().wiredTiger.cursor\n{\n ...\n \"insert calls\" : 2,\n \"modify calls\" : 0,\n \"remove calls\" : 0,\n \"update calls\" : 0,\n \"search calls\" : 6,\n ...\n}\ndb.players.insertMany([\n { _id: 'P1', name: 'Bob', age: 22, country: 'Australia' },\n { _id: 'P2', name: 'Bill', age: 26, country: 'New Zealand' },\n]);\n\ndb.teams.insertMany([\n {\n _id: 'T1',\n name: 'team A',\n players: [\n { _id: 'P1', name: 'Bob' },\n ]\n }\n]);\n\ndb.coaches.insertMany([\n { _id: 'C1', coachesPlayers: ['P1', 'P2']}\n]);\n", "text": "Hello, @Virtual_Database!Yes; if I update data on an organization or person, I would have to update both :(. That is why I made the transaction collection like that. It also takes more logic to update both collections. So if my collections stay like that, will it be very slow? In most cases, a collection is much more often read than written. That means that having a denormalized data model will provide good benefits, even though you will have to write more code to take care of the data consistency across collections. So, usually, it is worth it.To get the number of reads/writes, execute this in the shell:It will output this object (I removed and reordered some props for simplicity):Roughly:\nNumber of reads = search calls.\nNumber of writes = insert+modify+update+remove calls.Also, the People collection is used everywhere in my MongoDB. If a user wants to update, should I update many collections?Let’s assume you have 3 collections:Notice that documents from the ‘players’ collection are used in the ‘teams’ and ‘coaches’ collections.\nThe ‘coaches’ collection uses a normalized model (only the _id is used as a reference to a document from the other collection);\nThe ‘teams’ collection uses a denormalized model (besides the immutable _id, the player’s name is also included);The benefit of having the ‘name’ prop beside ‘_id’ in the ‘teams’ collection is that you may not need to join players to teams to get player names, which will improve performance. But you will need to make sure that all collections have the same value for a player’s name.Now, a bit about consistency maintenance:But it is worth saying that with MongoDB, you will have to manually take care of the data consistency, even if you have 100% normalized data. For example, if you delete any player from the ‘players’ collection, you will have to update 3 collections, because they all use a bit or more of the player’s data!Any options besides the ones above??Yes, you can take the free course on MongoDB’s aggregation framework and try to extend and optimize your pipeline.", "username": "slava" }, { "code": "", "text": "", "username": "Stennie_X" } ]
How to query aggregate to get result only object instead array?
2020-07-18T07:54:23.140Z
How to query aggregate to get result only object instead array?
36,335
https://www.mongodb.com/…2_2_1024x576.png
[]
[ { "code": "", "text": "\nScreenshot (17)1920×1080 163 KB\n\ni get this error when i try to connect to the cluster in mongoshell.\ni downloaded database tools for windows by changing path in system environment as mentioned in mongoDB installation and still having this error", "username": "asmaa_ali" }, { "code": "", "text": "You are at mongo prompt\nPlease exit and run the command from os prompt (#,$,C:\\ etc) as per your os", "username": "Ramachandra_Tummala" }, { "code": "", "text": "\nScreenshot (19)1920×1080 132 KB\n\nit shows this message", "username": "asmaa_ali" }, { "code": "", "text": "although i did the instructions and changed the system variables by coping the bin path", "username": "asmaa_ali" }, { "code": "mongomongoshmongomongodumpmongoexportmongoshmongodumpmongoexport", "text": "Hi @asmaa_ali and welcome to the MongoDB community forums. The mongo tool is not distributed with MongoDB version 6.0. If you recently installed/upgraded to MongoDB 6.0 this is why you get the message.In your original screen shot you had connected to a database using the mongosh tool (this replaces the older mongo tool). However you were then trying to run commands such as mongodump and mongoexport inside of the shell and that won’t work. These commands are meant to be run from your terminal/command prompt. The mongosh is an interactive shell to run queries against the data in your database. mongodump, mongoexport, etc are other tools that allow you copy your data from one server to another.If you haven’t done so already, you might want to take the M001: MongoDB Basics course.", "username": "Doug_Duncan" }, { "code": "", "text": "should i download earlier version of mongoDB then try to connect to atlas through cmd right?", "username": "asmaa_ali" }, { "code": "", "text": "You already have mongosh\nPlease use it to connect to your cluster\nmongosh “your connect string”\nFor utilities like mongoexport,mongodump run the command from your operating system prompt", "username": "Ramachandra_Tummala" }, { "code": "", "text": "\nScreenshot (20)1920×166 43 KB\n\ni have this error in cmd", "username": "asmaa_ali" }, { "code": "", "text": "\nScreenshot (21)1920×177 6.42 KB\n\ni still have the same error", "username": "asmaa_ali" }, { "code": "mongosh mongodb+srv://sandbox.b4nsufi.mongodb.net/myFristDatabase --username m001-studentmongoshmongosh mongodb+srv://....mongoshmongoimportmongoexportmongosh", "text": "You are trying to run the mongosh mongodb+srv://sandbox.b4nsufi.mongodb.net/myFristDatabase --username m001-student while you are already in a mongosh session. You can’t do that.After you open your terminal window (it looks like you’re on Windows using a command prompt), you want to type in your mongosh mongodb+srv://.... command from there. mongosh is used to interact with your database server and run queries. You will also run mongoimport or mongoexport from the operating system command prompt, not from inside of mongosh.", "username": "Doug_Duncan" }, { "code": "", "text": "thank you so much,it worked", "username": "asmaa_ali" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Connecting atlas with mongoShell or cmd
2022-08-19T10:42:16.294Z
Connecting atlas with mongoShell or cmd
1,572
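The distinction drawn in the replies, shell versus operating-system prompt, in two hedged examples using the connection details quoted in the thread (both run from the Windows command prompt, never inside mongosh; the password placeholder is an assumption):

```
:: Open an interactive shell session against the cluster:
mongosh "mongodb+srv://sandbox.b4nsufi.mongodb.net/myFristDatabase" --username m001-student

:: Utilities such as mongoexport are separate programs, also run from cmd:
mongoexport --uri "mongodb+srv://m001-student:<password>@sandbox.b4nsufi.mongodb.net/myFristDatabase" --collection example --out example.json
```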
null
[ "queries", "node-js", "python", "atlas-triggers" ]
[ { "code": "", "text": "I have asked this question on StackOverFlow if anyone knows why the $nin is not working.I have provided a working sample in python, and the equivalent in NodeJS not working.Thanks a lot in advance for any help or check if it’s a real bug in Atlas triggers.", "username": "Tommy_Deshairs" }, { "code": "toArray()", "text": "Someone solved it too on SO.\nI should simply use the toArray() method after the cursor to be sure to not return a promise but the actual object.", "username": "Tommy_Deshairs" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
$nin seems to not be working in Atlas trigger
2022-08-20T17:09:52.322Z
$nin seems to not be working in Atlas trigger
1,963
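The fix described, materializing the cursor with toArray(), looks roughly like this inside an Atlas Function; the service name is the Atlas default, while the database, collection, and field names are illustrative:

```javascript
exports = async function () {
  const coll = context.services
    .get("mongodb-atlas")
    .db("mydb")
    .collection("items");

  // find() returns a cursor-like object; without awaiting .toArray() you
  // are holding a pending promise, not the matching documents, which is
  // why the $nin filter appeared to do nothing.
  const docs = await coll.find({ status: { $nin: ["archived", "deleted"] } }).toArray();
  return docs;
};
```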
null
[ "aggregation", "node-js" ]
[ { "code": "", "text": "Hello !I have asked this question and i can’t see what the problem is because there is no error in the logs. However, the code is not executed because nothing changes and in my database.Thanks a lot in advance !", "username": "Tommy_Deshairs" }, { "code": "", "text": "Someone answered my question ", "username": "Tommy_Deshairs" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Aggregation pipeline in Atlas Trigger not working
2022-08-20T08:56:06.936Z
Aggregation pipeline in Atlas Trigger not working
1,196
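The actual resolution is not quoted in the thread; a frequent culprit when a trigger's pipeline "does nothing" without errors is an un-awaited async call, so a hedged sketch of that fix follows, with all names invented:

```javascript
exports = async function (changeEvent) {
  const coll = context.services
    .get("mongodb-atlas")
    .db("mydb")
    .collection("stats");

  // Both the aggregation and the write must be awaited; otherwise the
  // function can return before either has run, with nothing in the logs.
  const results = await coll
    .aggregate([{ $group: { _id: "$category", total: { $sum: "$amount" } } }])
    .toArray();

  await coll.updateOne({ _id: "summary" }, { $set: { results } }, { upsert: true });
};
```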
null
[]
[ { "code": "", "text": "Currently, I have a requirement requiring me to develop a to-do list structure that includes projects, tasks, and subtasks (in hierarchical order) and each of these tasks can have a different set of users who may engage in separate conversations, similar to the comment structure we have in Google Sheets. What would be a good database schema - should I have projects, tasks, and subtasks as separate collections, or should I have them separated by type as a single collection?", "username": "Vivek_Prajapati" }, { "code": "", "text": "I think there is no single answer to this,", "username": "Kyaw_Zayar_Tun" }, { "code": "", "text": "Thanks for your response. Here is a bit more detail about my use case. Projects contain tasks, and those tasks contain subtasks, and each of them has user activity so if one user checks one subtask, then the other user should be able to see that subtask as completed in real-time. This can occur at every level, including the project, the task, and the subtask. It is also possible at each level to have a full notes section that would function similarly to comments, allowing each user to leave a comment and receive a response.", "username": "Vivek_Prajapati" } ]
Database design
2022-08-21T06:58:36.114Z
Database design
934
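One concrete shape for the requirements described, offered under the same "no single answer" caveat from the reply: a single collection discriminated by type, so one change stream can feed the real-time updates. Every name below is invented:

```javascript
// Hedged sketch: projects, tasks and subtasks in one "items" collection,
// linked by parentId, with per-item members and embedded comments.
db.items.insertOne({
  type: "subtask",                                  // "project" | "task" | "subtask"
  parentId: ObjectId("64f0c0ffee0000000000a001"),   // owning task
  projectId: ObjectId("64f0c0ffee0000000000a000"),  // denormalized for project-wide queries
  title: "Order packing boxes",
  completed: true,
  completedBy: "user_123",
  members: ["user_123", "user_456"],                // who can engage with this item
  comments: [
    { author: "user_456", text: "Done already?", at: new Date() }
  ]
});
```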
null
[ "queries", "dot-net" ]
[ { "code": "", "text": "Hello,How do I write C# code to retrieve only items where SID=100 for the input below?\nAlso, the C# output should preserve the input format.\nThis is for MongoDB 4.[ { “_id”: “123456”, “Continent”: { “Country”: [ [ “US”, { “State”: [ [ 100, { “Product”: “Corn”, “SID”: 100 } ], [ 200, { “Product”: “Maze”, “SID”: 200 } ], [ 100, { “Product”: “Corn-HB”, “SID”: 100 } ] ], } ] ] } } ]So the retrieved output should look like this:\n[ { “_id”: “123456”, “Continent”: { “Country”: [ [ “US”, { “State”: [ [ 100, { “Product”: “Corn”, “SID”: 100 } ], [ 100, { “Product”: “Corn-HB”, “SID”: 100 } ] ], } ] ] } } ]", "username": "Sym_Don" }, { "code": "", "text": "This topic was automatically closed 182 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
C# for Mongo Nested inner query
2022-08-20T13:17:39.140Z
C# for Mongo Nested inner query
1,313
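The question went unanswered in the thread. Shape-wise, keeping only the SID 100 entries means rebuilding each [key, object] pair with its State list filtered; a hedged pipeline sketch follows in mongosh syntax, which a C# application could pass as BsonDocument stages. $let is used instead of $getField because the thread targets MongoDB 4.x:

```javascript
// Sketch: each Country/State entry is a [key, object] pair, so $map
// rebuilds the pair while $filter keeps State entries whose SID is 100.
db.collection.aggregate([
  {
    $addFields: {
      "Continent.Country": {
        $map: {
          input: "$Continent.Country",
          as: "pair", // e.g. [ "US", { State: [...] } ]
          in: {
            $let: {
              vars: { country: { $arrayElemAt: ["$$pair", 1] } },
              in: [
                { $arrayElemAt: ["$$pair", 0] },
                {
                  State: {
                    $filter: {
                      input: "$$country.State",
                      as: "st", // e.g. [ 100, { Product: ..., SID: 100 } ]
                      cond: {
                        $let: {
                          vars: { s: { $arrayElemAt: ["$$st", 1] } },
                          in: { $eq: ["$$s.SID", 100] }
                        }
                      }
                    }
                  }
                }
              ]
            }
          }
        }
      }
    }
  }
]);
```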
null
[ "dot-net", "java", "atlas-device-sync", "transactions", "android" ]
[ { "code": "08-20 23:07:22.724 20309 20309 E AndroidRuntime: FATAL EXCEPTION: main\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: Process: uk.co.lucyscarter.housemovingassistant, PID: 20309\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: android.runtime.JavaProxyThrowable: System.Reflection.TargetInvocationException: Arg_TargetInvocationException\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: ---> Realms.Exceptions.RealmInvalidTransactionException: Cannot modify managed objects outside of a write transaction.\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Realms.NativeException.ThrowIfNecessary(Func`2 overrider)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Realms.ObjectHandle.SetValue(String propertyName, Metadata metadata, RealmValue& value, Realm realm)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Realms.RealmObjectBase.SetValue(String propertyName, RealmValue val)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at HouseMovingAssistant.Models.MovingTask.set_Name(String value)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at HouseMovingAssistant.Views.EditTaskPage.<InitializeComponent>typedBindingsM__1(EditMovingTaskPageViewModel , String )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.Internals.TypedBinding`2[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].ApplyCore(Object sourceObject, BindableObject target, BindableProperty property, Boolean fromTarget)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.Internals.TypedBinding`2[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].Apply(Boolean fromTarget)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.BindableObject.SetValueActual(BindableProperty property, BindablePropertyContext context, Object value, Boolean currentlyApplying, SetValueFlags attributes, Boolean silent)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.BindableObject.SetValueCore(BindableProperty property, Object value, SetValueFlags attributes, SetValuePrivateFlags privateAttributes)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.Internals.TypedBinding`2[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].ApplyCore(Object sourceObject, BindableObject target, BindableProperty property, Boolean fromTarget)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.Internals.TypedBinding`2[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].Apply(Boolean fromTarget)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.Internals.TypedBinding`2.PropertyChangedProxy[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, 
PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].<OnPropertyChanged>b__16_0()\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.DispatcherExtensions.DispatchIfRequired(IDispatcher dispatcher, Action action)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.Internals.TypedBinding`2.PropertyChangedProxy[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].OnPropertyChanged(Object sender, PropertyChangedEventArgs e)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.BindingExpression.WeakPropertyChangedProxy.OnPropertyChanged(Object sender, PropertyChangedEventArgs e)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at CommunityToolkit.Mvvm.ComponentModel.ObservableObject.OnPropertyChanged(PropertyChangedEventArgs )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel.set_MovingTask(MovingTask value)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at System.Reflection.RuntimeMethodInfo.Invoke(Object , BindingFlags , Binder , Object[] , CultureInfo )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: Exception_EndOfInnerExceptionStack\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at System.Reflection.RuntimeMethodInfo.Invoke(Object , BindingFlags , Binder , Object[] , CultureInfo )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at System.Reflection.RuntimePropertyInfo.SetValue(Object , Object , BindingFlags , Binder , Object[] , CultureInfo )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at System.Reflection.PropertyInfo.SetValue(Object , Object , Object[] )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at System.Reflection.PropertyInfo.SetValue(Object , Object )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.ShellContent.ApplyQueryAttributes(Object content, ShellRouteParameters query, ShellRouteParameters oldQuery)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.ShellContent.ApplyQueryAttributes(Object content, ShellRouteParameters query, ShellRouteParameters oldQuery)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.ShellContent.OnQueryAttributesPropertyChanged(BindableObject bindable, Object oldValue, Object newValue)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.BindableObject.SetValueActual(BindableProperty property, BindablePropertyContext context, Object value, Boolean currentlyApplying, SetValueFlags attributes, Boolean silent)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.BindableObject.SetValueCore(BindableProperty property, Object value, SetValueFlags attributes, SetValuePrivateFlags privateAttributes)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.BindableObject.SetValue(BindableProperty property, Object value, Boolean fromStyle, Boolean checkAccess)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.BindableObject.SetValue(BindableProperty property, Object value)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.ShellNavigationManager.ApplyQueryAttributes(Element element, ShellRouteParameters query, Boolean isLastItem, Boolean 
isPopping)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.ShellSection.GetOrCreateFromRoute(String route, ShellRouteParameters queryData, IServiceProvider services, Boolean isLast, Boolean isPopping)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.ShellSection.GoToAsync(ShellNavigationRequest request, ShellRouteParameters queryData, IServiceProvider services, Nullable`1 animate, Boolean isRelativePopping)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Microsoft.Maui.Controls.ShellNavigationManager.GoToAsync(ShellNavigationParameters shellNavigationParameters, ShellNavigationRequest navigationRequest)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at HouseMovingAssistant.ViewModels.MovingTasksPageViewModel.EditTask(MovingTask task)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at CommunityToolkit.Mvvm.Input.AsyncRelayCommand.AwaitAndThrowIfFailed(Task )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at System.Threading.Tasks.Task.<>c.<ThrowAsync>b__128_0(Object )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Android.App.SyncContext.<>c__DisplayClass2_0.<Post>b__0()\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Java.Lang.Thread.RunnableImplementor.Run()\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Java.Lang.IRunnableInvoker.n_Run(IntPtr , IntPtr )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at Android.Runtime.JNINativeWrapper.Wrap_JniMarshal_PP_V(_JniMarshal_PP_V , IntPtr , IntPtr )\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at mono.java.lang.RunnableImplementor.n_run(Native Method)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at mono.java.lang.RunnableImplementor.run(RunnableImplementor.java:30)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at android.os.Handler.handleCallback(Handler.java:938)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at android.os.Handler.dispatchMessage(Handler.java:99)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at android.os.Looper.loop(Looper.java:247)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at android.app.ActivityThread.main(ActivityThread.java:8676)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at java.lang.reflect.Method.invoke(Native Method)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at com.android.internal.os.RuntimeInit$MethodAndArgsCaller.run(RuntimeInit.java:602)\n08-20 23:07:22.724 20309 20309 E AndroidRuntime: at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:1130)\n08-20 23:07:22.728 20309 20309 W monodroid-assembly: Shared library 'liblog' not loaded, p/invoke '__android_log_print' may fail\n08-20 23:07:22.728 20309 20309 I MonoDroid: UNHANDLED EXCEPTION:\n08-20 23:07:22.733 20309 20309 I MonoDroid: Android.Runtime.JavaProxyThrowable: Exception_WasThrown, Android.Runtime.JavaProxyThrowable\n08-20 23:07:22.733 20309 20309 I MonoDroid:\n08-20 23:07:22.733 20309 20309 I MonoDroid: --- End of managed Android.Runtime.JavaProxyThrowable stack trace ---\n08-20 23:07:22.733 20309 20309 I MonoDroid: android.runtime.JavaProxyThrowable: System.Reflection.TargetInvocationException: Arg_TargetInvocationException\n08-20 23:07:22.733 20309 20309 I MonoDroid: ---> Realms.Exceptions.RealmInvalidTransactionException: Cannot modify managed objects outside of a write transaction.\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Realms.NativeException.ThrowIfNecessary(Func`2 overrider)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Realms.ObjectHandle.SetValue(String propertyName, Metadata metadata, RealmValue& 
value, Realm realm)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Realms.RealmObjectBase.SetValue(String propertyName, RealmValue val)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at HouseMovingAssistant.Models.MovingTask.set_Name(String value)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at HouseMovingAssistant.Views.EditTaskPage.<InitializeComponent>typedBindingsM__1(EditMovingTaskPageViewModel , String )\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.Internals.TypedBinding`2[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].ApplyCore(Object sourceObject, BindableObject target, BindableProperty property, Boolean fromTarget)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.Internals.TypedBinding`2[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].Apply(Boolean fromTarget)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.BindableObject.SetValueActual(BindableProperty property, BindablePropertyContext context, Object value, Boolean currentlyApplying, SetValueFlags attributes, Boolean silent)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.BindableObject.SetValueCore(BindableProperty property, Object value, SetValueFlags attributes, SetValuePrivateFlags privateAttributes)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.Internals.TypedBinding`2[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].ApplyCore(Object sourceObject, BindableObject target, BindableProperty property, Boolean fromTarget)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.Internals.TypedBinding`2[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].Apply(Boolean fromTarget)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.Internals.TypedBinding`2.PropertyChangedProxy[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].<OnPropertyChanged>b__16_0()\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.DispatcherExtensions.DispatchIfRequired(IDispatcher dispatcher, Action action)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.Internals.TypedBinding`2.PropertyChangedProxy[[HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel, HouseMovingAssistant, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null],[System.String, System.Private.CoreLib, Version=6.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e]].OnPropertyChanged(Object sender, PropertyChangedEventArgs e)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at 
Microsoft.Maui.Controls.BindingExpression.WeakPropertyChangedProxy.OnPropertyChanged(Object sender, PropertyChangedEventArgs e)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at CommunityToolkit.Mvvm.ComponentModel.ObservableObject.OnPropertyChanged(PropertyChangedEventArgs )\n08-20 23:07:22.733 20309 20309 I MonoDroid: at HouseMovingAssistant.ViewModels.EditMovingTaskPageViewModel.set_MovingTask(MovingTask value)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at System.Reflection.RuntimeMethodInfo.Invoke(Object , BindingFlags , Binder , Object[] , CultureInfo )\n08-20 23:07:22.733 20309 20309 I MonoDroid: Exception_EndOfInnerExceptionStack\n08-20 23:07:22.733 20309 20309 I MonoDroid: at System.Reflection.RuntimeMethodInfo.Invoke(Object , BindingFlags , Binder , Object[] , CultureInfo )\n08-20 23:07:22.733 20309 20309 I MonoDroid: at System.Reflection.RuntimePropertyInfo.SetValue(Object , Object , BindingFlags , Binder , Object[] , CultureInfo )\n08-20 23:07:22.733 20309 20309 I MonoDroid: at System.Reflection.PropertyInfo.SetValue(Object , Object , Object[] )\n08-20 23:07:22.733 20309 20309 I MonoDroid: at System.Reflection.PropertyInfo.SetValue(Object , Object )\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.ShellContent.ApplyQueryAttributes(Object content, ShellRouteParameters query, ShellRouteParameters oldQuery)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.ShellContent.ApplyQueryAttributes(Object content, ShellRouteParameters query, ShellRouteParameters oldQuery)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.ShellContent.OnQueryAttributesPropertyChanged(BindableObject bindable, Object oldValue, Object newValue)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.BindableObject.SetValueActual(BindableProperty property, BindablePropertyContext context, Object value, Boolean currentlyApplying, SetValueFlags attributes, Boolean silent)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.BindableObject.SetValueCore(BindableProperty property, Object value, SetValueFlags attributes, SetValuePrivateFlags privateAttributes)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.BindableObject.SetValue(BindableProperty property, Object value, Boolean fromStyle, Boolean checkAccess)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.BindableObject.SetValue(BindableProperty property, Object value)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.ShellNavigationManager.ApplyQueryAttributes(Element element, ShellRouteParameters query, Boolean isLastItem, Boolean isPopping)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.ShellSection.GetOrCreateFromRoute(String route, ShellRouteParameters queryData, IServiceProvider services, Boolean isLast, Boolean isPopping)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.ShellSection.GoToAsync(ShellNavigationRequest request, ShellRouteParameters queryData, IServiceProvider services, Nullable`1 animate, Boolean isRelativePopping)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Microsoft.Maui.Controls.ShellNavigationManager.GoToAsync(ShellNavigationParameters shellNavigationParameters, ShellNavigationRequest navigationRequest)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at HouseMovingAssistant.ViewModels.MovingTasksPageViewModel.EditTask(MovingTask task)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at 
CommunityToolkit.Mvvm.Input.AsyncRelayCommand.AwaitAndThrowIfFailed(Task )\n08-20 23:07:22.733 20309 20309 I MonoDroid: at System.Threading.Tasks.Task.<>c.<ThrowAsync>b__128_0(Object )\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Android.App.SyncContext.<>c__DisplayClass2_0.<Post>b__0()\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Java.Lang.Thread.RunnableImplementor.Run()\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Java.Lang.IRunnableInvoker.n_Run(IntPtr , IntPtr )\n08-20 23:07:22.733 20309 20309 I MonoDroid: at Android.Runtime.JNINativeWrapper.Wrap_JniMarshal_PP_V(_JniMarshal_PP_V , IntPtr , IntPtr )\n08-20 23:07:22.733 20309 20309 I MonoDroid: at mono.java.lang.RunnableImplementor.n_run(Native Method)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at mono.java.lang.RunnableImplementor.run(RunnableImplementor.java:30)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at android.os.Handler.handleCallback(Handler.java:938)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at android.os.Handler.dispatchMessage(Handler.java:99)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at android.os.Looper.loop(Looper.java:247)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at android.app.ActivityThread.main(ActivityThread.java:8676)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at java.lang.reflect.Method.invoke(Native Method)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at com.android.internal.os.RuntimeInit$MethodAndArgsCaller.run(RuntimeInit.java:602)\n08-20 23:07:22.733 20309 20309 I MonoDroid: at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:1130)\n08-20 23:07:22.733 20309 20309 I MonoDroid:\n08-20 23:07:22.733 20309 20309 I MonoDroid: --- End of managed Android.Runtime.JavaProxyThrowable stack trace ---\n", "text": "I have an MVVM MAUI app that is working fine when running in Visual Studio in debug mode. If I build for release and deploy and run, it crashes when a change in the UI causes MVVM to tell sync to send back a change. The error is “Cannot modify managed objects outside of a write transaction.”. I know what that message means but here it is when Realm code is being notified of a data change in the UI via MVVM so it is not my own code that is changing data without a transaction.To test this theory, I ran the “House Moving Assistant” sample app as demo’ed by @Luce_Carter at the .Net MAUI conference and it has the same problem when I build it for release. Again it works fine when running in Visual Studio in debug mode. In this sample app, I can add tasks but when I tap one to update its status the app crashes and log cat shows the above error. I’ll include part of the log below.I see the same on a Samsung phone (Android 12) and a Samsung tablet (Android 11).John.", "username": "John_Atkins" }, { "code": "", "text": "sorry about posting big message with logcat info. I can see now that was stupid because it’s makes replies difficult. I’m new here and just read about the option to hide parts of a post. If I could edit the original post I’d hide the logcat. If I could delete it and re-post, I’d do that but delete keeps failing.", "username": "John_Atkins" }, { "code": "", "text": "No worries, I’ll see if a moderator can edit the post for you, but in the meantime, the most likely reason for this is using compiled bindings in release mode. Realm doesn’t support seamless data binding with compiled bindings because there’s no way to detect that a change is coming from the binding engine. We’re exploring several approaches to support it but don’t have anything ready yet. 
The workaround is to disable compiled bindings either globally or at least those that are bound to realm objects.", "username": "nirinchev" }, { "code": "", "text": "Thank you for the quick reply. A quick test does appear to show compiled bindings were the problem. It is a shame to switch them off because they help performance. When I saw the docs show you can switch it off with:[XamlCompilation (XamlCompilationOptions.Skip)]\npublic partial class MyPage : ContentPage\n{\n…\n}I thought I could switch it off for the pages where I use Realm, but I get a build error if there is more than one of those.\nI can definitely live with this until there is a better solution.\nJohn.", "username": "John_Atkins" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MAUI App crashes when built in Release mode
2022-08-20T22:21:34.521Z
MAUI App crashes when built in Release mode
5,195
null
[ "compass" ]
[ { "code": "", "text": "Hello,I’m trying to setup SSH Tunnel in MongoDB Compass in order to connect to MongoDB located on my server. I successfully made a connection to it without SSH tunnel and by using SSH with password authentication. When trying to connect using SSH with Identity File, Compass shows error All configured authentication methods failed and the connection isn’t established. Connecting normally to the server using SSH works without issues.I’m using MongoDB Compass 1.32.6 on Pop!_OS (Ubuntu) 22.04. The server uses Ubuntu Server 20.04.What can I do to solve this issue?\nRalph", "username": "RCRalph" }, { "code": "PubkeyAcceptedKeyTypes ssh-ed25519,ssh-rsa,rsa-sha2-256,rsa-sha2-512", "text": "Fixed it, just had to add\nPubkeyAcceptedKeyTypes ssh-ed25519,ssh-rsa,rsa-sha2-256,rsa-sha2-512\nto /etc/ssh/sshd_configHope this helps somebody.", "username": "RCRalph" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Connecting to MongoDB via SSH Tunnel works when using password, but doesn't when using identity file on Ubuntu
2022-08-20T21:08:57.167Z
Connecting to MongoDB via SSH Tunnel works when using password, but doesn't when using identity file on Ubuntu
5,076
null
[ "aggregation" ]
[ { "code": "AppointmentsEventsprovider{\n _id: ObjectId('62fd3e631d732145ed86adff'),\n imported: false,\n provider: ObjectId('62b62342654ae12263478489'),\n status: \"SCHEDULED\",\n summary: \"Test appointment with client\",\n description: \"This is a test description for the new booking type\",\n location: \"4211 N Winfield Scott Plaza, Scottsdale, AZ 85251, USA\",\n startTime: 2022-08-17T04:30:00.000+00:00,\n endTime: 2022-08-17T04:45:00.000+00:00,\n createdAt: 2022-08-17T19:15:47.903+00:00,\n updatedAt: 2022-08-17T19:15:47.903+00:00,\n type:\"external\"\n}\n[\n {\n _id: ObjectId('62feaa0dca4ba048fa24dd45'),\n summary: \"Shampoo with Sandra Rodriguez\",\n description: \"Please make sure to arrive at least 20 minutes before the...\",\n location: \"4211 N Winfield Scott Plaza, Scottsdale, AZ 85251, USA\",\n creator: ObjectId('62b62342654ae12263478489'),\n startTime: 2022-08-18T17:00:00.000+00:00,\n endTime: 2022-08-18T17:15:00.000+00:00,\n type: \"external\"\n },\n {\n id: ObjectId('62fe983aca4ba048fa24dd43'),\n summary: \"Haircut with Derrick Jones\",\n description: \"Meet with client 15 minutes after last scheduled appointment\",\n location: \"4211 N Winfield Scott Plaza, Scottsdale, AZ 85251, USA\",\n creator: ObjectId('62b62342654ae12263478489'),\n startTime: 2022-08-18T15:00:00.000+00:00,\n endTime: 2022-08-18T15:30:00.000+00:00,\n type: \"external\"\n }\n]\nAppointment[{\n $match: {\n provider: ObjectId('62b62342654ae12263478489')\n }\n}, {\n $lookup: {\n from: 'event',\n localField: 'provider',\n foreignField: 'creator',\n as: 'Bookings'\n }\n}]\n", "text": "I am working through a small example of my own based on the M121 course.I have a simple Appointments and Events collection and would like join and return all documents that are associated with a specific provider.Appointment collection:Event collection:Below I have provided my aggregation but it’s only returning 1 document from Appointment collection and not 3 documents (1 appointment, and 2 event docs).Is this the correct way to approach this problem? Thank you for any suggestions.", "username": "Khari_T" }, { "code": "from: 'event',", "text": "Your aggregation is correct.You only shared 1 document from Appointment so I assume you only have one with the matched provider.The 2 documents from the Event will not be 2 top level documents such as the one from Appointment but 2 sub-documents with the array Bookings from the Appointment document.The one thing I notice that might be wrong is that you present your 2 collections with an uppercase first letter such asAppointment collection:andEvent collection:but in your $lookup you refer to Event in lowercase as infrom: 'event',Collection names are case sensitive. The names Event and event do not refer to the same collection.", "username": "steevej" }, { "code": "bookingsAppointment", "text": "@steevej Thanks for pointing out the case. sensitivity, I totally missed that. After fixing, I do see a bookings property on the Appointment collection however it is empty.Now I realize that maybe the $lookup is not the solution for the problem I’m trying to solve. I simply want to just have the combined results from a simple find() query in both collections.The reason I want to do this on the database layer instead of inside the application is because I want to properly apply .skip() and .limit() to the resulting list of documents.Any thoughts on this? 
Thanks a lot for your time.", "username": "Khari_T" }, { "code": "$unionWith", "text": "So as it turns out, what I really needed was to perform a UNION rather than a JOIN. That being said, MongoDB's $unionWith operation solves my issue. For any future readers with a situation similar to mine above, check out this link.", "username": "Khari_T" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to $lookup when foreign and local fields are ObjectId?
2022-08-18T23:49:02.047Z
How to $lookup when foreign and local fields are ObjectId?
3,332
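The $unionWith answer in the thread above links out without showing the query itself. A minimal mongosh sketch under the thread's own schema (the collection names Appointment and Event and the provider ObjectId come from the posts; the sort field and paging values are illustrative assumptions):

```js
// Union the two collections for one provider, then sort and page the merged
// result set on the server. $unionWith requires MongoDB 4.4+.
db.Appointment.aggregate([
  { $match: { provider: ObjectId("62b62342654ae12263478489") } },
  { $unionWith: {
      coll: "Event",
      pipeline: [ { $match: { creator: ObjectId("62b62342654ae12263478489") } } ]
  } },
  { $sort: { startTime: 1 } }, // both document shapes carry a startTime field
  { $skip: 0 },                // page offset
  { $limit: 20 }               // page size
])
```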
null
[ "aggregation" ]
[ { "code": "[{\n $match: {\n email: {\n $ne: null\n },\n current_landing: 'DASHBOARD'\n }\n}, {\n $lookup: {\n from: 'profile_recommendation_info',\n localField: '_id',\n foreignField: 'profile_id',\n as: 'result'\n }\n}, {\n $sort: {\n 'result.created_date': 1\n }\n}, {\n $addFields: {\n oldest_profile_recommendation_info_data: {\n $arrayElemAt: [\n '$result',\n 0\n ]\n }\n }\n}, {\n $set: {\n oldest_profile_recommendation_info_id: '$oldest_profile_recommendation_info_data._id'\n }\n}, {\n $unset: 'oldest_profile_recommendation_info_data'\n}]\n", "text": "below is my my mongo aggregation pipelineAs seen in aove pipeline i am setting oldest_profile_recommendation_info_id with some values.I want to persist them in my data.But unable to do so", "username": "Sanjay_Naik" }, { "code": "$out$merge", "text": "Have a look at the $out and $merge pipeline stages. Both of these will allow you to save the results from your aggregation command.", "username": "Doug_Duncan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to make mongo aggregation outcome persist
2022-08-20T11:54:45.148Z
How to make mongo aggregation outcome persist
1,335
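Doug's answer names the stages, but the thread never shows one wired in. A hedged sketch of a final $merge stage that could be appended to the pipeline above to persist the computed field (the collection name profiles is an assumption; point it at the real source collection):

```js
// Appended as the last stage of the aggregation shown above. Requires MongoDB 4.2+.
{
  $merge: {
    into: "profiles",          // assumed source/target collection name
    on: "_id",                 // match existing documents by _id
    whenMatched: "merge",      // keep existing fields, add the computed one
    whenNotMatched: "discard"  // do not insert brand-new documents
  }
}
```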
null
[ "data-modeling" ]
[ { "code": "[\n{id: 1, name: 'abc', icon: 'icon_1'},\n{id: 2, name: 'def', icon: 'icon_2'},\n{id: 3, name: 'ghi', icon: 'icon_3'},\n...\n{id: 126, name: 'xyz', icon: 'icon_364'},\n]\n{_id: ObjectId('12345'), key: 'checkboxes_list', values: [my_checkbox_options_list]}\n{_id: ObjectId('6789'), key: 'countries', values: [my_countries_list]}\n{_id: ObjectId('5985'), key: 'usertypes', values: [my_usertypes_list]}\ncollection.find({key: 'countries'})", "text": "I have an array of objects like below as an input to a list of checkboxes,Like this, I have a few more arrays too. For countries, cities, user types, … all these are going to be constant values.When it comes to storing & reading these static arrays in MongoDB, which of the following is the correct way with better performance.Create a collection for each array & insert each object in the array as a document. In this case, to get the list of options for checkboxes, I have to read all 126 documents.Create a single collection named “Global” and insert each document for each static array.In this case, to read the list of countries, I just need to read only one document using collection.find({key: 'countries'})?Which of the approach is correct? How you are handling these constant values lists with Mongodb? For the user types list, there is a maximum of 5 records. Do I really need to create a separate collection to store only 5 records/documents?In SQL, yes we do create a separate table for User type. But is it really good for MongoDB/NoSQL?Thanks.", "username": "Gopinath_Ashokan" }, { "code": "", "text": "I have attempted Option 2 & below is the feedback.To add new item to existing nested array in a document,\n\nimage1055×501 26.7 KB\nTo update existing item in a nested array of document,\n\nimage1050×477 30.3 KB\n", "username": "Gopinath_Ashokan" }, { "code": "", "text": "I just read this!", "username": "Gopinath_Ashokan" }, { "code": "", "text": "To fix the “Id” to “_id” serialization for embedded documents, I have created a new property & explicitly mapped the element name “Id” to it.", "username": "Gopinath_Ashokan" }, { "code": "", "text": "", "username": "Stennie_X" } ]
How to store static dropdown options in MongoDB? Multiple documents or Single Document with embedded array
2021-12-22T09:55:56.983Z
How to store static dropdown options in MongoDB? Multiple documents or Single Document with embedded array
6,444
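For option 2 in the thread above, both reads and targeted writes stay single-document operations. A mongosh sketch (the collection name global follows the thread's examples; the sample option values are made up):

```js
// Read one options list with a single document fetch.
db.global.findOne({ key: "countries" })

// Append a new option without rewriting the whole array.
db.global.updateOne(
  { key: "countries" },
  { $push: { values: { id: 127, name: "xyz2", icon: "icon_365" } } }
)

// Update one existing option in place, addressed by an array filter.
db.global.updateOne(
  { key: "countries" },
  { $set: { "values.$[opt].name": "renamed" } },
  { arrayFilters: [ { "opt.id": 127 } ] }
)
```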
null
[ "serverless" ]
[ { "code": "", "text": "I activated a MongoDB Atlas Serverless Instance, but it is not shown as available when trying to add it as a Data Source from the Realm app (via Manage > Linked Data Sources). - Well, maybe I’m looking in the wrong place.If these are not available yet due to the preview status, will they be available for Realm in the future?", "username": "Christian_Wagner" }, { "code": "", "text": "You’re correct that it isn’t possible in this preview release, but it will be supported in the future.", "username": "Andrew_Morgan" }, { "code": "", "text": "Hi there Andrew, is there an expected time for this feature to be available and would be easy to migrate from the current (shared or dedicated) cluster to serverless.", "username": "aimen_sasi" }, { "code": "", "text": "Hello,I reached this phase as well.", "username": "Andrei_Matei" }, { "code": "", "text": "Any updates ? I want to use Altas Serverless with MongoDB Realm as well!", "username": "Sahmed_Matsko" }, { "code": "", "text": "Sorry, I don’t know when Realm support for Serverless will be. Others on here might.", "username": "Andrew_Morgan" }, { "code": "", "text": "Thanks for your reply. Hope someone can tell me more about it…! Waiting patiently", "username": "Sahmed_Matsko" }, { "code": "", "text": "@Andrew_Morgan any update on this?", "username": "clueless_dev" }, { "code": "", "text": "I just tried it, and yes, you can connect a Realm app to a serverless Atlas cluster.", "username": "Andrew_Morgan" }, { "code": "", "text": "Hi Andrew,I just tried to connect a realm app to my brand new serverless cluster (only one db, one collection, one document) an it seams that I cant connect those do you have any idea why?\n\nScreenshot 2022-08-20 at 10.43.351314×1128 133 KB\n", "username": "Patrice_Muller" } ]
Can MongoDB Atlas Serverless Instances be used with Realm Apps?
2021-07-22T07:26:58.614Z
Can MongoDB Atlas Serverless Instances be used with Realm Apps?
5,386
null
[ "production", "server" ]
[ { "code": "", "text": "MongoDB 4.4.16 is out and is ready for production deployment. This release contains only fixes since 4.4.15, and is a recommended upgrade for all 4.4 users.\nFixed in this release:", "username": "Aaron_Morand" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB 4.4.16 is released
2022-08-20T01:45:43.426Z
MongoDB 4.4.16 is released
2,677
null
[ "production", "server" ]
[ { "code": "", "text": "MongoDB 5.0.11 is out and is ready for production deployment. This release contains only fixes since 5.0.10, and is a recommended upgrade for all 5.0 users.\nFixed in this release:", "username": "Aaron_Morand" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB 5.0.11 is released
2022-08-20T00:39:07.719Z
MongoDB 5.0.11 is released
2,180
null
[ "production", "server" ]
[ { "code": "", "text": "MongoDB 6.0.1 is out and is ready for production deployment. This release contains only fixes since 6.0.0, and is a recommended upgrade for all 6.0 users.\nFixed in this release:", "username": "Aaron_Morand" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB 6.0.1 is released
2022-08-20T00:19:22.616Z
MongoDB 6.0.1 is released
2,579
null
[ "swift" ]
[ { "code": "", "text": "Whenever I use @ObservedResults or @ObservedRealmObject in a view my SwiftUI preview stops working. Is there some obvious setup step that I am missing so this works reliably?", "username": "Thomas_Rademaker" }, { "code": "", "text": "@Thomas_Rademaker you can refer our sample apps. Find them in ‘realm-swift/examples/ios/swift/RealmExamples.xcworkspace’I just rechecked the latest version of RealmSwift (v10.24.2) - previews for SwiftUI are working fine for me.", "username": "Pavel_Yakimenko" }, { "code": "", "text": "Thank you @Pavel_Yakimenko . I’ll take a look through the sample apps.", "username": "Thomas_Rademaker" }, { "code": ".environment(\\.realmConfiguration, Storage.realmConfiguration)\n", "text": "I figured out my issue. I’ll leave the solution here in case anyone else runs into a similar problem.\nI set a custom realmConfiguration in the environment with this view modifier,In my SwiftUI PreviewProvider I must include the same environment view modifier.So, not a Realm bug but me not understanding how the SwiftUI PreviewProvider works.", "username": "Thomas_Rademaker" } ]
Broken SwiftUI previews
2022-03-25T20:06:23.867Z
Broken SwiftUI previews
2,600
null
[ "production", "server" ]
[ { "code": "", "text": "MongoDB 4.2.22 is out and is ready for production deployment. This release contains only fixes since 4.2.21, and is a recommended upgrade for all 4.2 users.\nFixed in this release:", "username": "Aaron_Morand" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB 4.2.22 is released
2022-08-19T22:02:42.937Z
MongoDB 4.2.22 is released
2,062
null
[ "replication", "mongodb-shell" ]
[ { "code": "2022-08-16T18:37:48.856-04:00: Document(s) exist in 'system.replset', but started without --replSet. Database contents may appear inconsistent with the writes that were visible when this node was running as part of a replica set. Restart with --replSet unless you are doing maintenance and no other clients are connected. The TTL collection monitor will not start because of this. For more info see http://dochub.mongodb.org/core/ttlcollections\n{\n \"t\": { \"$date\": \"2022-08-19T10:31:13.968-04:00\" },\n \"s\": \"F\",\n \"c\": \"REPL\",\n \"id\": 28545,\n \"ctx\": \"initandlisten\",\n \"msg\": \"Locally stored replica set configuration does not parse; See http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config for information on how to recover from this\",\n \"attr\": {\n \"error\": {\n \"code\": 40415,\n \"codeName\": \"Location40415\",\n \"errmsg\": \"member: { _id: 0, host: \\\"127.0.0.1:27017\\\", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } :: caused by :: BSON field 'MemberConfig.slaveDelay' is an unknown field.\"\n },\n \"config\": {\n \"_id\": \"rs0\",\n \"version\": 1,\n \"term\": 10,\n \"members\": [{\n \"_id\": 0,\n \"host\": \"127.0.0.1:27017\",\n \"arbiterOnly\": false,\n \"buildIndexes\": true,\n \"hidden\": false,\n \"priority\": 1,\n \"tags\": {},\n \"slaveDelay\": 0,\n \"votes\": 1\n }],\n \"protocolVersion\": 1,\n \"writeConcernMajorityJournalDefault\": true,\n \"settings\": {\n \"chainingAllowed\": true,\n \"heartbeatIntervalMillis\": 2000,\n \"heartbeatTimeoutSecs\": 10,\n \"electionTimeoutMillis\": 10000,\n \"catchUpTimeoutMillis\": -1,\n \"catchUpTakeoverDelayMillis\": 30000,\n \"getLastErrorModes\": {},\n \"getLastErrorDefaults\": { \"w\": 1, \"wtimeout\": 0 },\n \"replicaSetId\": { \"$oid\": \"603820afb35392a07c24e0a8\" }\n }\n }\n }\n}\nrs.reconfigrs.reconfig", "text": "When I use mongosh to connect to my instance of mongod I see the following warning:When I start mongod with the --replSet rs0 argument to address the error it fails to start up with the following fatal error:The linked documentation just redirects to rs.reconfig, which requires a configuration in order to run. I set up this replica set a long time ago so I’m not sure how to find its original configuration.How can I fix this error so that I can start up mongod with a replica set again? 
Should I run rs.reconfig with the config printed in the error message (minus the slaveDelay it's failing to parse)?", "username": "Altay" }, { "code": "", "text": "I think you may have got into the interesting situation where you previously had a <5.0 replica set configured at one stage, switched it back to a standalone, and then upgraded to 5.0+. The local db needs to be removed to get rid of the warning when running in standalone, as well as if you decide to convert this back to a replica set.Start mongod without authentication enabled and drop the local database.You can take additional steps to protect your database while authentication is disabled if it is available to a wider network, by binding only to localhost with an alternate port from the usual/default, or you can temporarily grant the __system role to your administrative user.Restart with your usual settings.", "username": "chris" }, { "code": "locallocalMongoServerError: node is not in primary or recovering staters.initiate()", "text": "Start mongod without authentication enabled and drop the local database.\nRestart with your usual settings.Thanks, this fixed my issue.\nWhen I restarted mongod after dropping local, any command I ran in mongosh gave me the error MongoServerError: node is not in primary or recovering state. Based on this information I found, I ran rs.initiate() to reinitialize the replica set, and everything seems to be working again.", "username": "Altay" }, { "code": "", "text": "I could have been more clear there as to what state you'd be in after that. If running without --replSet, you're back to a standalone instance. If running with --replSet, you're back to an uninitialized replica.", "username": "chris" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Replica set configuration fails to parse
2022-08-19T14:55:04.917Z
Replica set configuration fails to parse
4,923
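A mongosh sketch of the recovery steps chris describes above, assuming a maintenance window and that mongod has been restarted as a standalone (no --replSet) with authentication disabled:

```js
// 1. Drop the stale replication metadata that holds the unparsable config.
db.getSiblingDB("local").dropDatabase()

// 2. Restart mongod with your usual settings:
//    - without --replSet you are back to a clean standalone;
//    - with --replSet the node comes up uninitialized, so initialize it again:
rs.initiate()
```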
null
[]
[ { "code": "sudo pkill -f mongod\n 2020-04-19T15:46:07.509+0200 I NETWORK [listener] Listening on /tmp/mongodb-27017.sock\n2020-04-19T15:46:07.509+0200 I NETWORK [listener] Listening on 127.0.0.1\n2020-04-19T15:46:07.509+0200 I NETWORK [listener] waiting for connections on port 27017\n2020-04-19T15:46:07.509+0200 I SHARDING [LogicalSessionCacheReap] Marking collection config.transactions as collection version: <unsharded>\n2020-04-19T15:46:08.000+0200 I SHARDING [ftdc] Marking collection local.oplog.rs as collection version: <unsharded>\nFailed to unlink socket file /tmp/mongodb-27017.sock Operation not permitted\nsudo chown `whoami` /tmp/mongodb-27017.sock\n", "text": "Hi,I have to put in my command linebefore having my mongod running…and I receive in end…if not, i receiveThe solution of get right do not work", "username": "ADOU_Dick_Stephane" }, { "code": "", "text": "Hello!\nI have the same problem. Have you solved it? Could you help me?", "username": "gro" }, { "code": "", "text": "I desperately need help with this too", "username": "Koko_Dev" }, { "code": "ls -alh /tmp/mongodb-27017.sock", "text": "Hi @Koko_Dev and welcome to the MongoDB community forums.It’s best to open a new thread rather than use one that’s several years old with no answers, even if you’re having the same issue.Please supply answers to the following so we can better help you:", "username": "Doug_Duncan" } ]
Failed to unlink socket file
2020-04-19T20:38:46.859Z
Failed to unlink socket file
7,078
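The thread above never reaches a resolution, so for completeness: a commonly suggested fix, assuming the socket is a stale leftover owned by another user (for example after once launching mongod with sudo):

```sh
# Remove the stale socket, then start mongod as its normal service user
# rather than as root, so it can create and unlink its own socket file.
sudo rm /tmp/mongodb-27017.sock
sudo systemctl start mongod
```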
null
[ "installation" ]
[ { "code": "", "text": "Hello all,unfortunately I can’t find a way to check the integrity of the RPM packages.\nUnfortunately all instructions lead to checks of tar archives.I need this for (Mongo Server & shell. Version 4.2.21 Community Version for RHEL8 / Red Hat 8) rpm files.KR\nDenis", "username": "Denis_Ganiev" }, { "code": "#import key\nrpm --import https://www.mongodb.org/static/pgp/server-4.2.asc\n\n#download rpm\ncurl -OJ https://repo.mongodb.org/yum/redhat/8/mongodb-org/4.2/x86_64/RPMS/mongodb-org-mongos-4.2.21-1.el8.x86_64.rpm \n\n#make a 'bad' copy\ncp mongodb-org-mongos-4.2.21-1.el8.x86_64.rpm mongodb-org-mongos-4.2.21-1.el8.x86_64-notgood.rpm\ndd if=/dev/zero count=1 seek=9160 conv=notrunc of=mongodb-org-mongos-4.2.21-1.el8.x86_64-notgood.rpm\n\n#validate\nrpm -K mongodb-org-mongos-4.2.21-1.el8.x86_64*.rpm\nmongodb-org-mongos-4.2.21-1.el8.x86_64-notgood.rpm: DIGESTS SIGNATURES NOT OK\nmongodb-org-mongos-4.2.21-1.el8.x86_64.rpm: digests signatures OK\n\n\n\n\n\n", "text": "RPMs are signed packages.The usual way of using them is configuring repository to your system and using rpm/dnf/yum to install the package which will add the signing key to your system and validate the package when downloaded.If you REALLY want to do this semi manually.", "username": "chris" }, { "code": "rpm -K --nosignature mongodb-org-mongos-4.2.21-1.el8.x86_64*.rpm\nmongodb-org-mongos-4.2.21-1.el8.x86_64-notgood.rpm: DIGESTS NOT OK\nmongodb-org-mongos-4.2.21-1.el8.x86_64.rpm: digests OK\n", "text": "You can also do it without importing the key.", "username": "chris" }, { "code": "", "text": "RPMs are signed packages.The usual way of using them is configuring repository to your system and using rpm/dnf/yum to install the package which will add the signing key to your system and validate the package when downloaded.If you REALLY want to do this semi manually.Thank you Chris!!! ", "username": "Denis_Ganiev" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Integrity of rpm Packages
2022-08-19T12:32:40.580Z
Integrity of rpm Packages
2,985
null
[ "swift", "atlas-functions", "atlas-triggers" ]
[ { "code": "\n\nclass Photo: Object, ObjectKeyIdentifiable {\n\n@Persisted(primaryKey: true ) var _id: ObjectId\n\n@Persisted var picture: Data?\n\n@Persisted var pictureUrl: String\n\nconvenience init (pictureUrl: String = \"\", picture: Data? = Data()) {\n\nself .init()\n\nself .pictureUrl = pictureUrl\n\nself .picture = picture\n\n}\n\n.onChange(of: image) { _ in\n\t\t\t\tif(image != nil) {\n\t\t\t\t\tif let newData = image!.jpegData(compressionQuality: 0.5) {\n\t\t\t\t\t\ttry! realm.write {\n\t\t\t\t\t\t\trealm.add(Photo(picture: newData))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\nexports = async function(imageName, file) {\n \n const AWS = require('aws-sdk');\n\n AWS.config.update({\n accessKeyId : <key>\n secretAccessKey : <key>,\n region: <region>\n });\n \n const s3 = new AWS.S3({apiVersion: '2006-03-01'})\n const bucketName = <bucket name>\n\n return s3.upload({\n \"Bucket\": bucketName,\n \"Key\" : imageName,\n \"Body\": file.toBase64(),\n \"ACL\": \"public-read\",\n \"ContentType\": \"image/jpeg\",\n }).promise()\n}\nlet (data, _) = try await URLSession.shared.data(from: URL) // URL is the new S3 image link\n\nIf let image = UIImage(data: Data(base64Encoded: data.base64EncodedString())!) {\n...\n}\n", "text": "Hi,I am working on a SwiftUI app on iOS. I have been following the article on o-fish (Realm Data and Partitioning Strategy Behind the WildAid O-FISH Mobile Apps | MongoDB) to enable users to attach pictures while offline.My Photo model:For testing purpose, in my iOS app code, I am writing in the realm like the following:Which as explained in the article, will trigger functions on App Services to (1) upload to S3 and (2) replace the field ID by S3’s generated link.I created my AWS S3 bucket and managed to upload some blob on it through the upload function.Regarding my issue: from what I understand, the file saved on S3 is in base64 format. Thus it is impossible to download the file from S3 and open it in an image viewer, is that correct? Furthermore, I am now trying to display the base64 image on the iOS app but I only get decoding errors… :Doing a curl on the image link gives a long string looking like/9j/4AAQSkZJRgABAQAASABIAAD/4QBYRXhpZgAATU0AKgAAAAgAAgESAAMAAAABAAEAAIdpAAQAAAABAAAAJgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAGhKADAAQAAAABAAAJxAAAAAD/wAARCAnEBoQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/…Is there something I am missing either in the S3 upload or in the Swift/SwiftUI decoding?", "username": "Sonisan" }, { "code": "UIImage(data: Data(base64Encoded: String(data: data, encoding: .utf8)!)!)\n", "text": "Solution found! Answered on another forum:\nTo simplify with dirty force-unwrap:", "username": "Sonisan" } ]
Issue for encoding/decoding image on S3 (SwiftUI)
2022-08-18T09:30:25.033Z
Issue for encoding/decoding image on S3 (SwiftUI)
2,659
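The decoding errors in the thread above follow from the function storing base64 text as the S3 object body, so the stored object is not a valid JPEG on its own. A hedged variant of the upload call that stores raw bytes instead, assuming the App Services function runtime exposes Node's Buffer (worth verifying in your environment):

```js
// Decode the base64 payload before upload so S3 holds real JPEG bytes and
// the generated link can be opened directly in an image viewer.
return s3.upload({
  "Bucket": bucketName,
  "Key": imageName,
  "Body": Buffer.from(file.toBase64(), "base64"), // bytes, not base64 text
  "ACL": "public-read",
  "ContentType": "image/jpeg",
}).promise()
```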
null
[ "queries", "node-js", "data-modeling", "mongoose-odm" ]
[ { "code": "const digitalCardSchema = new mongoose.Schema(\n {\n userId: {\n type: mongoose.Schema.Types.ObjectId,\n ref: \"User\",\n required: true,\n },\n data: {\n type: String,\n },\n },\n {\n timestamps: true,\n }\n);\ndata", "text": "I am building a digital-card generator app in node.js. In this I want to store the card data as JSON in the MongoDB database. I am using mongoose ODM. but I think mongoose does not support the data-type of Json or object.The card schema:I want to strore the field data as JSON object from the above schema. How can I do that?", "username": "Mitul_Kheni" }, { "code": "type: Objectconst digitalCardSchema = new mongoose.Schema(\n {\n userId: {\n type: mongoose.Schema.Types.ObjectId,\n ref: \"User\",\n required: true,\n },\n data: {\n type: Object,\n },\n },\n {\n timestamps: true,\n }\n);\nconst digitalCardSchema = new mongoose.Schema(\n {\n userId: {\n type: mongoose.Schema.Types.ObjectId,\n ref: \"User\",\n required: true,\n },\n data: {\n card_number: String,\n holder_name: String,\n balance: Number,\n ...\n },\n },\n {\n timestamps: true,\n }\n);\n", "text": "Hi,Just add type: Object:You can also additionally specify nested properties if you want:", "username": "NeNaD" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Store data as JSON in MongoDB
2022-08-19T11:33:36.945Z
Store data as JSON in MongoDB
6,593
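Besides the type: Object shown above, mongoose also has a schema type intended for free-form JSON. A sketch using the thread's schema (the DigitalCard model name is an assumption); note that mongoose cannot detect in-place mutations of a Mixed path, so it must be flagged before saving:

```js
const digitalCardSchema = new mongoose.Schema({
  userId: { type: mongoose.Schema.Types.ObjectId, ref: "User", required: true },
  // Mixed accepts any JSON-like value; mongoose performs no validation on it.
  data: { type: mongoose.Schema.Types.Mixed },
}, { timestamps: true });

const DigitalCard = mongoose.model("DigitalCard", digitalCardSchema);

// In-place changes to a Mixed value must be flagged explicitly (async context):
const card = await DigitalCard.findById(id);
card.data.theme = "dark";
card.markModified("data");
await card.save();
```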
null
[ "spring-data-odm" ]
[ { "code": "", "text": "Hi Team,\nAm facing an issue MongoRepository save() method. Once I updated the document by calling save() method and am trying to fetch the same document by using findById(), it will not reflected the updated value in result. could you please help on this issue asap?", "username": "Rojan_John_V" }, { "code": "MongoRepository#saveSystem.out.println(returnedEntity)saveMongoRepository#.findById", "text": "Hello @Rojan_John_V, welcome to the MongoDB community forum.The MongoRepository#save method returns an instance of the saved entity. Can you verify that the returned object has the updated values (you can use a System.out.println(returnedEntity) immediately after the save )?I tried such code and found querying the same collection using the MongoRepository#.findById returned the document with updated values.I am using MongoDB v4.2.8 and Spring Boot v2.2.4.", "username": "Prasad_Saya" }, { "code": "", "text": "Thanks Prasad for your reply. This issue am facing intermittently and I could see the updated values when I am printing return object from save(). If am putting some time delay between save and findbyid, I could see the expected result.", "username": "Rojan_John_V" }, { "code": "", "text": "This issue am facing intermittentlyAre you able to reproduce the issue consistently?If am putting some time delay between save and findbyid, I could see the expected result.What is happening if you dont use the time delay?", "username": "Prasad_Saya" }, { "code": "", "text": "As per our architecture, the first request will fetch the document by using findbyid() method and ready to update content and save to db. After 200 or 300 milliseconds difference the second request will come and try to fetch the same document, that time I could see the document having old value.\nIt will happen intermittently not all time. if am setting delay between the requests it never happen, without delay it will happen sometimes.\nI tried to reproduce this issue in local mongo db by triggering bulk of requests, but I could not see this issue. In dev region I could see this with concurrent requests.\nIn dev we are using MongoAtlas with one primary node and two secondary node.", "username": "Rojan_John_V" }, { "code": "", "text": "What are your settings about the Read Preference?", "username": "Prasad_Saya" }, { "code": "", "text": "We are not setting any specific readPreference value, I believe the default should be Primary. How can we check this in MongoAtlas console?", "username": "Rojan_John_V" }, { "code": "", "text": "The connection url i like this\nmongodb+srv://{USER}:{PASSWORD}@{HOST}/{DB_NAME}?retryWrites=true&w=majority\ndo we need to set readPreference along with the connection url ?", "username": "Rojan_John_V" }, { "code": "", "text": "We are not setting any specific readPreference value, I believe the default should be PrimaryThe default Read Preference is Primary. Having a setting other than Primary, can result in reading stale data. Read Preference can be set within connection URI or the Java / Spring Boot application.", "username": "Prasad_Saya" }, { "code": "", "text": "If the default read preference is Primary, we don’t need to set the read preference specifically. 
Do you have any other suggestions?", "username": "Rojan_John_V" }, { "code": "", "text": "I suggest you check your application code to see whether you are setting it at the reads you are doing (for this specific read functionality). Further, I think you can tell us more about your application and the code you are using for this functionality.", "username": "Prasad_Saya" }, { "code": "", "text": "Do you have any suggestions for setting read and write concern in the connection url? As of now the application is using the below connection url:\nmongodb+srv://{USER}:{PASSWORD}@{HOST}/{DB_NAME}?retryWrites=true&w=majority", "username": "Rojan_John_V" }, { "code": "", "text": "Hi @Rojan_John_V, did you find a solution? I am facing the same issue.", "username": "Samuel_Parra" }, { "code": "", "text": "Any update on this issue? We are also facing the same issue. I need some guidance on a fix/solution for this issue.", "username": "Santhosh_Voodem" }, { "code": "", "text": "I'm having the same issue here, any news?", "username": "Nait_Belkacem_Youssef" } ]
MongoRepository Save() issue
2020-11-24T18:49:08.413Z
MongoRepository Save() issue
14,683
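The thread above asks how to put read and write concern on the connection URL but never shows an example. A hedged one built from the URI quoted in the thread (these are standard MongoDB URI options; note that with the default primary read preference, stale reads right after a save usually point to application-level timing rather than driver settings):

```
mongodb+srv://{USER}:{PASSWORD}@{HOST}/{DB_NAME}?retryWrites=true&w=majority&readPreference=primary&readConcernLevel=majority
```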
null
[ "aggregation", "queries", "crud" ]
[ { "code": "$dateToStringlast_updated[\n {\n \"id\": \"A\",\n \"name\": \"Meades Park\",\n \"last_updated\": ISODate(\"2022-01-01T00:00:00.000Z\"),\n \"evses\": [\n {\n \"id\": \"AB\",\n \"status\": \"AVAILABLE\",\n \"last_updated\": ISODate(\"2022-01-01T00:00:00.000Z\")\n \"connectors\": [\n {\n \"id\": \"AAB\",\n \"power_type\": \"DC\",\n \"last_updated\": ISODate(\"2022-01-01T00:00:00.000Z\")\n }\n ]\n }\n ]\n },\n {\n \"id\": \"A\",\n \"name\": \"Medes Park\",\n \"last_updated\": ISODate(\"2022-01-01T00:00:00.000Z\")\n }\n]\ndb.collection.aggregate([\n {\n $addFields: {\n 'last_updated': { \n $dateToString: { \n format: '%Y-%m-%dT%H:%M:%SZ', \n date: '$last_updated'\n } \n }\n 'evses.last_updated': { \n $dateToString: { \n format: '%Y-%m-%dT%H:%M:%SZ', \n date: '$last_updated'\n } \n }\n 'evses.connectors.last_updated': { \n $dateToString: { \n format: '%Y-%m-%dT%H:%M:%SZ', \n date: '$last_updated'\n } \n }\n }\n }\n])\nevses.last_updatedevses.connectors.last_updatedlast_updatedevsesevseslast_updatedevses.last_updatedevses.connectors.last_updatedevsesconnectorsonNull$dateToString", "text": "I’m currently using the $dateToString operator to drop milliseconds from datetime fields (the last_updated fields to be specific.) and I’ve run into an issue.I have a collection of documents formatted like so:I’m able to format the date fields with the following query:While this overwrites the evses.last_updated & evses.connectors.last_updated fields with the top level last_updated field, it works well for my use case for the first document.My issue is that the second document, where the evses array is absent, this query will create an evses & connectors object and populate it with the last_updated field. Is it possible to only convert evses.last_updated and evses.connectors.last_updated if the evses and connectors arrays exist? 
I’m aware of the onNull operator that can be used on $dateToString, but I’ve not found a way to conditionally stringify a field using it, it seems like it is more for default fields.", "username": "Greg_Fitzpatrick-Bel" }, { "code": "evses.last_updatedevses.connectors.last.updatedevsesconnectors$isArray$evses$addFieldsconst condBody1 = {\n $cond: {\n if: { $isArray: \"$evses\" },\n then: {\n $dateToString: {\n format: \"%Y-%m-%dT%H:%M:%SZ\",\n date: \"$last_updated\"\n }\n },\n else: \"$$REMOVE\"\n }\n };\n\nconst condBody2 = {\n $cond: {\n if: { $isArray: \"$evses.connectors\" },\n then: {\n $dateToString: {\n format: \"%Y-%m-%dT%H:%M:%SZ\",\n date: \"$last_updated\"\n }\n },\n else: \"$$REMOVE\"\n }\n };\n\ndb.collection.aggregate([{\n '$addFields': {\n last_updated: {\n '$dateToString': { format: '%Y-%m-%dT%H:%M:%SZ', date: '$last_updated' }\n },\n 'evses.last_updated': {\n '$cond': {\n if: { '$isArray': '$evses' },\n then: {\n '$dateToString': { format: '%Y-%m-%dT%H:%M:%SZ', date: '$last_updated' }\n },\n else: '$$REMOVE'\n }\n },\n 'evses.connectors.last_updated': {\n '$cond': {\n if: { '$isArray': '$evses.connectors' },\n then: {\n '$dateToString': { format: '%Y-%m-%dT%H:%M:%SZ', date: '$last_updated' }\n },\n else: '$$REMOVE'\n }\n }\n }\n}])\n$condevsesevses.connector$$REMOVE[\n {\n _id: ObjectId(\"62fdc365b5f8518fe18d213e\"),\n id: 'A',\n name: 'Meades Park',\n last_updated: '2022-01-01T00:00:00Z',\n evses: [\n {\n id: 'AB',\n status: 'AVAILABLE',\n last_updated: '2022-01-01T00:00:00Z',\n connectors: [\n {\n id: 'AAB',\n power_type: 'DC',\n last_updated: '2022-01-01T00:00:00Z'\n }\n ]\n }\n ]\n },\n {\n _id: ObjectId(\"62fdca59b5f8518fe18d213f\"),\n id: 'A',\n name: 'Medes Park',\n last_updated: '2022-01-01T00:00:00Z',\n evses: { connectors: {} }\n }\n]\nevses: { connectors: {} }", "text": "Hi @Greg_Fitzpatrick-Bel and welcome to the community!!Is it possible to only convert evses.last_updated and evses.connectors.last.updated if the evses and connectors arrays exist?This would be possible using the operator $isArray which would check if the $evses field is present as an array then update the required records.The following aggregation query using the $addFields stage would check first if the array(s) exists else would return the “$$REMOVE” variable:The $cond would check for the evses and evses.connector fields and update the required field.\nHowever, if the array does not exist, it would simple use the $$REMOVEThe output response would like the following:You could change the evses: { connectors: {} } in the output by adding an extra stage to the pipeline if needed.Also, please note that the above query is tested on the sample document provided. Please make sure you do a thorough testing and ensure it meets all the use cases and requirements.Let us know if you have any further questions.Thanks\nAasawari", "username": "Aasawari" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Format Date with $dateToString only if date exists
2022-08-08T19:25:56.549Z
Format Date with $dateToString only if date exists
2,113
null
[]
[ { "code": "", "text": "Hello Everyone !I’m happy to be here !\nI’m Olivier, living in Belgium and passionate about Development and new technologies !\nI’m starting to learn building app with the Atlas App Services.Have a good day !", "username": "Olivier" }, { "code": "", "text": " Welcome to the MongoDB community @Olivier and g’day from Sydney !In addition to the documentation for Atlas App Services, you may find some helpful articles, tutorials, or code examples on the MongoDB Developer Center.We also have some forum categories where you can find (or ask about) more details: MongoDB Atlas App Services & Realm - MongoDB Developer Community ForumsRegards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Hi @Olivier, welcome to the community , we are so excited to have you here with us.I’m starting to learn building app with the Atlas App Services.That’s really awesome. The following article might be of interest to you:Learn how to perform Authentication, Authorization, CRUD operations, Analytics & Web App Deployment by building an Expense Manager in…\nReading time: 6 min read\n\nThis is an in-depth guide exploring Atlas App Services including:App Services AuthenticationAtlas GraphQL API and Custom ResolversAtlas FunctionsApp Services Static Hosting (by deploying a Single Page Application(React.js)) etc.Thanks and Regards.\nSourabh Bagrecha,\nMongoDB", "username": "SourabhBagrecha" } ]
Hello From Belgium!
2022-08-17T05:19:54.848Z
Hello From Belgium!
2,333
null
[ "queries" ]
[ { "code": "footrackID_id", "text": "TL;DR : Updating element in array always results in first element updated and results differ based on property name of key used in find params\nPlayground : Mongo playgroundI want to update an object in a array and I am using 2 of the object properties to find the object then using $set operator with array.$.updateProperty to update the object\nHere is the working playground link of what I want to do:Mongo playground: a simple sandbox to test and share MongoDB queries online\nBut I cant reproduce the same when I change a single property name (both in database as well as find parameter) , from the above example I changed property foo to trackID but then only the first element in array is always updated\nPlayground link in tldr at top\nIt seems weird as I assumed the property name shouldn’t matter as long as it used the same in find params too and its not a keyword like _id", "username": "Aditya_Patil2" }, { "code": "db.collection.update()db.collection.update({\n \"_id\": ObjectId(\"62f11e22d99c79532de6ff7f\")\n},\n{ \"$set\": {\n \"jobs.$[elemX].status\": \"Done\"\n }\n},\n{\"arrayFilters\": [\n {\n \"elemX.name\": \"kaisen_track-0_h264_1080p\"\n }]\n})\n$arrayFilters", "text": "Hi @Aditya_Patil2 and welcome to the community!!The query parameter inside the db.collection.update() command, searches for the correct document and later applies the $set to the required field.The following query would resolve the above issue:Please refer to the arrayFilters documentation for more information.Also, the $arrayFilters is available since MongoDB version 3.6. If you are looking to use the above query, would recommend you to update to the required version.Let us know if you have any more questions.Thanks\nAasawari", "username": "Aasawari" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Update document in array, always updating first element
2022-08-11T17:39:54.612Z
Update document in array, always updating first element
2,121
null
[]
[ { "code": "\"nbfc_report\": [\n {\n \"buyer\": [\n {\n \"buyer_id\": {\n \"_id\": \"62a8548c21d5976aea2f1787\",\n \"company_name\": \"HERO MOTOCORP LIMITED\"\n },\n \"seller_id\": {\n \"_id\": \"62a8548c21d5976aea2f1787\",\n \"company_name\": \"HERO MOTOCORP LIMITED\"\n },\n \"_id\": \"62ba95ac8e30735c9a505ed3\",\n \"created_at\": \"2022-06-28T05:46:20.738Z\",\n \"updated_at\": \"2022-06-28T05:46:20.739Z\"\n }\n ]\n },\n", "text": "", "username": "Kashif_Iqbal" }, { "code": "", "text": "now i have to show only company_name in buyer_id and company_name in seller_id", "username": "Kashif_Iqbal" }, { "code": "", "text": "please help me at least show some example", "username": "Kashif_Iqbal" }, { "code": "db.test.aggregate([\n\n// expand the nbfc_report array\n{\n $unwind: '$nbfc_report'\n}, \n\n// expand the buyer array\n{\n $unwind: '$nbfc_report.buyer'\n},\n\n// project the company names\n{\n $project:\n {\n buyer_company:'$nbfc_report.buyer.buyer_id.company_name', \n seller_company:'$nbfc_report.buyer.seller_id.company_name'\n }\n}\n])\n {\n \"_id\": {\n \"$oid\": \"62fdbb8fe845928ded2de1f8\"\n },\n \"buyer_company\": \"HERO MOTOCORP LIMITED\",\n \"seller_company\": \"HERO MOTOCORP LIMITED\"\n }\n", "text": "Hello @Kashif_Iqbal ,I notice you haven’t had a response to this topic yet - were you able to find a solution?\nBased on the sample document provided, you can use this to get company names from your documents.By using above query, you output will look likeIs this the output you’re looking for? If not, could you provide more details:For more details on the stages I used in the example above, please see:Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to flatten data in MongoDB when it's an array of nested objects and populated
2022-07-25T09:13:15.911Z
How to flatten data in MongoDB when it's an array of nested objects and populated
6,918
null
[ "python", "serverless", "spark-connector" ]
[ { "code": "df = spark.read.format(\"mongodb\").options(database=\"database\", collection=\"collection\").load()\n.load()22/07/27 23:00:09 INFO SparkUI: Stopped Spark web UI at http://192.168.1.173:4040\n22/07/27 23:00:09 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!\n22/07/27 23:00:09 INFO MemoryStore: MemoryStore cleared\n22/07/27 23:00:09 INFO BlockManager: BlockManager stopped\n22/07/27 23:00:09 INFO BlockManagerMaster: BlockManagerMaster stopped\n22/07/27 23:00:09 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!\n22/07/27 23:00:09 INFO SparkContext: Successfully stopped SparkContext\n", "text": "strong text Spark 3.3.0, mongodb Atlas 5.0.9, Spark connector 10.xI run a small job using pyspark reading from MongoDB Atlas and writing to BigQuery.So far, with the MongoDB Spark connector v3.0.x, I did not encounter any errors and the job was ending normally after loading MongoDB documents and saving them into BigQuery.It was only a few days ago that, after upgrading to the connector newest version (10.0.x), I’ve experienced some strange behavior: my job is still running even after finishing all tasks successfully.Here is the problematic line (by that, I mean if I comment just this one, my whole job ends correctly) :Actually, it’s precisely the .load() part of this line which seems to be an issue, the rest of the line not causing any problem alone.Every time from now, my last logs look like that :But then I have to force quit (with Ctrl-C for instance when running locally) to actually finish the job. It’s very problematic when using cloud services like Google Dataproc Serverless for instance, as the job keep running and so, the instance is never stopped.I tried with every version 10.0.x (x=0, 1, 2 and 3), but I always encounter the same behavior.Is it something expected in this version 10 that I miss or not ?", "username": "Clovis_Masson" }, { "code": "", "text": "Thank you @Clovis_Masson for your post, we will look into this issue and reply back. How large is the collection you are loading?", "username": "Robert_Walters" }, { "code": "", "text": "I’ve tested it mainly with two collections: one very small with only 2 documents and another slightly larger with 20 000.", "username": "Clovis_Masson" }, { "code": "", "text": "Here are the different jar I’ve tested to reproduce it:Again, process stops successfully using version 3.0.x of the mongodb-spark-connector jar with the other mongodb-driver jars.I was suspecting at first a new behavior due to the support of the structured streaming but it seems not.", "username": "Clovis_Masson" }, { "code": "", "text": "Hi @Clovis_Masson I filed https://jira.mongodb.org/browse/SPARK-358 to have our engineers look into this issue.", "username": "Robert_Walters" }, { "code": "", "text": "@Clovis_Masson The fix will be in the next release 10.0.4.", "username": "Robert_Walters" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Pyspark job keeps running using MongoDB Spark connector v10.0.x
2022-07-27T21:33:56.017Z
Pyspark job keeps running using MongoDB Spark connector v10.0.x
3,653
null
[ "node-js", "production" ]
[ { "code": "createIndexcreateIndex(['myKey', 1]){ 'myKey': 1, '1': 1 }oplogReplayoplogReplay", "text": "The MongoDB Node.js team is pleased to announce version 4.9.0 of the mongodb package!We have corrected an inconsistency with our writeConcern options in the type definitions where the MongoClient alleged to not support “writeConcern” as an option. In fact, it did support it at run time and now the types correctly reflect that, along with the corresponding deprecations we made to the nested writeConcern config settings.Our index specification handling had a few peculiar edge cases that we have detailed below, we believe these are unlikely to affect a vast majority of users as the type definitions would have likely reported an error with the impacted usage. As a feature, the typescript definitions now support a javascript Map as a valid input for an index specification.As per usual this release brings in the latest BSON release (v4.7.0) which added automatic UUID support. You can read more about that in the BSON release notes here!Special thanks to the folks who contributed to this release!We invite you to try the mongodb library immediately, and report any issues to the NODE project.", "username": "neal" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB Node.js Driver 4.9.0 Released
2022-08-18T21:12:43.244Z
MongoDB Node.js Driver 4.9.0 Released
2,882
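A small sketch of the index-specification change called out in the release notes above, assuming the mongodb 4.9.0 package and an existing collection handle:

```js
// Pre-4.9.0, an array-shaped spec was parsed key-by-key, so
// collection.createIndex(['myKey', 1]) produced { myKey: 1, '1': 1 }.
// A Map now expresses the intent unambiguously:
await collection.createIndex(new Map([["myKey", 1]]));

// Equivalent plain-object form:
await collection.createIndex({ myKey: 1 });
```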
null
[]
[ { "code": "onUpdateeditor.getJSON()throttle", "text": "It may seem not to be a pure MongoDB problem, sorry for that.I know that I can listen to onUpdate event and use editor.getJSON() to get changed content, then I can set this content through an API to the server. Then the server updates a field of MongoDB with the new value(I may store it as a JSON string).I have a few concerns about the above simple design choice:If the user only updates a small piece of document, I have to send full document JSON to the server and update the full content of the MongoDB field; all of these seem too costly.In the frontend, I may use a throttle to reduce trigger frequency, but I still have to send full JSON. It may be a bottleneck as content increases with more text and images.Can I only send the content change to the server like collaborative applications? But I have no requirement for collaborative editing between users. I only want to support fast client/server synchronization. Is Y.js a proper solution for me? If there are some better tools for me, please let me know.And if we could send the change of content, can we update the partial JSON string on MongoDB? The content of tiptap may be a complex nested JSON. Is it possible?", "username": "1224084650" }, { "code": "", "text": "I’m personally interested in using y.js via hocuspocus.dev with Realm on top in the clients but I need to learn more about this possibility", "username": "Alex_Ehlke" } ]
Request: elegant way of updating content of rich text editor (tiptap) to MongoDB
2022-07-21T12:09:17.393Z
Request: elegant way of updating content of rich text editor (tiptap) to MongoDB
1,976
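On the partial-update question in the thread above: if the tiptap document is stored as a real nested document rather than a JSON string, MongoDB can patch individual paths, so the client only needs to send the changed node. A sketch with hypothetical names (the documents collection, content field, and the docId and updatedNode variables are all assumptions):

```js
// Assumes a shape like { _id, content: { type: "doc", content: [ ...nodes ] } }.
// Replace a single top-level node instead of rewriting the whole document:
db.documents.updateOne(
  { _id: docId },
  { $set: { "content.content.3": updatedNode } } // patch the 4th node only
)
```

Storing the editor output as a JSON string rules this out, since the server would still have to rewrite the whole string on every change.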
null
[]
[ { "code": "", "text": "TL;DR - Try MongoDB Atlas Products | MongoDB is getting deprecated. Please use mongodb.com/try/download instead.Try MongoDB Atlas Products | MongoDB endpoint for downloading MongoDB packages and binaries is getting deprecated on September 1st, 2022 after a planned brownout on August 24th, 2022.Packages and Binaries accessible via Try MongoDB Atlas Products | MongoDB will still be available. You can use our website (Try MongoDB Atlas Products | MongoDB) or our json feeds (https://downloads.mongodb.org/current.json, https://downloads.mongodb.org/full.json) to find and download what you need.Feel free to reach out to us if you have any questions or comments.", "username": "Zakhar_Kleyman" }, { "code": "", "text": "", "username": "Stennie_X" } ]
Mongodb.org/dl EOL announcement
2022-08-18T18:27:16.994Z
Mongodb.org/dl EOL announcement
2,188
null
[ "server", "release-candidate" ]
[ { "code": "", "text": "MongoDB 4.4.16-rc0 is out and is ready for testing. This is a release candidate containing only fixes since 4.4.15. The next stable release 4.4.16 will be a recommended upgrade for all 4.4 users.\nFixed in this release:", "username": "Aaron_Morand" }, { "code": "", "text": "When will the stable version be released?", "username": "galenspikes" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
MongoDB 4.4.16-rc0 is released
2022-08-10T22:01:10.634Z
MongoDB 4.4.16-rc0 is released
2,812
null
[ "python", "production" ]
[ { "code": "", "text": "We are pleased to announce the 0.5.0 release of PyMongoArrow - a PyMongo extension containing tools for loading MongoDB query result sets as Apache Arrow tables, Pandas and NumPy arrays. This release adds support for PyArrow 9.0.See the changelog for a high level summary of what’s new and improved or see the 0.5.0 release notes in JIRA for the complete list of resolved issues.Thank you to everyone who contributed to this release!", "username": "Steve_Silvester" }, { "code": "", "text": "This topic was automatically closed after 90 days. New replies are no longer allowed.", "username": "system" } ]
PyMongoArrow 0.5 Released
2022-08-18T15:17:22.855Z
PyMongoArrow 0.5 Released
1,754