image_url | tags | discussion | title | created_at | fancy_title | views |
---|---|---|---|---|---|---|
null | [
"aggregation",
"mongoose-odm"
] | [
{
"code": "const User = require('../../models/user.model'); \nconst Friends = require('../friendsModel');\nconst Util = require('../../../SharedTools/Util');\n\n/**\n * Returns suggesstions for newly registered users.\n * \n * @param {object} req \n * @param {object} req.query\n * @param {string} req.query.id\n * @param {number} req.query.page\n * @param {Express.Response} res \n */\nmodule.exports = function getSuggestionsForMe(req, res) {\n\n let USER_ID = req.headers['x-user-id'] || req.query.id;\n let PAGE_NO = req.query.page;\n\n if (Util.IsNullOrUndefined(USER_ID) || Util.IsNullOrUndefined(PAGE_NO)) {\n\n res.status(400).json(\n {\n success: false,\n message: \"Not enough data.\"\n }\n );\n\n } else {\n\n USER_ID = String(USER_ID);\n PAGE_NO = Number.parseInt(PAGE_NO);\n\n Friends.aggregate(\n [\n {\n $match: {\n \"from\": USER_ID\n }\n },\n {\n $group: {\n _id: \"$from\",\n follows: {\n $push: \"$to\"\n }\n }\n }\n ]\n )\n .then(result => {\n\n if (result.length !== 0) {\n\n User.paginate(\n { _id: { $nin: result[0].follows } }, \n {\n select: '_id name profilePic category gender place',\n lean: true,\n page: PAGE_NO,\n limit: 30\n },\n (err, users) => {\n\n if (!err) {\n\n let response = {\n success: true,\n data: users.docs,\n numPages: users.pages,\n message: \"Everything OK\"\n };\n res.json(response);\n\n } else {\n res.status(500).json({\n success: false,\n message: \"Internal Server Error.\"\n });\n }\n\n }\n );\n\n } else {\n\n User.paginate(\n { _id: { $ne: USER_ID } },\n {\n select: '_id name profilePic place category gender',\n lean: true,\n page: PAGE_NO,\n limit: 30\n },\n (err, users) => {\n\n if (!err) {\n\n let response = {\n success: true,\n data: users.docs,\n numPages: users.pages,\n message: \"Everything OK\"\n };\n res.json(response);\n\n } else {\n res.status(500).json({\n success: false,\n message: \"Internal Server Error.\"\n });\n }\n\n }\n );\n\n }\n\n })\n .catch(err => {\n res.status(500).json({\n success: false,\n message: \"Internal Server Error.\"\n });\n });\n\n }\n\n}\nconst mongoose = require('mongoose');\nconst Schema = mongoose.Schema;\n\nconst friendsSchema = new Schema({ \n\n from:{\n type: Schema.Types.ObjectId,\n required: true,\n ref:'User'\n },\n to:{\n type: Schema.Types.ObjectId,\n required: true,\n ref:'User'\n }\n\n},{\n timestamps: true\n});\n\nfriendsSchema.index({from:1, to:1}, {unique : true});\n\nmodule.exports = mongoose.model('Friends', friendsSchema, 'friends');\nconst Schema = mongoose.Schema;\n\nconst userSchema = new Schema({\n\n name: { type: Schema.Types.String, required: true },\n email: { type: Schema.Types.String, required: true, unique: true },\n password: { type: Schema.Types.String, required: true },\n category: { type: Schema.Types.Number, required: true },\n\n gender: { type: Schema.Types.String, default: \"\" },\n dob: { type: Schema.Types.String, default: \"\" },\n username:{type:Schema.Types.String, unique:true},\n about: { type: Schema.Types.String, default: \"\" },\n sport: { type: Schema.Types.String, default: \"\" },\n bankAccountNo: { type: Schema.Types.String, default: \"\" },\n ifsc: { type: Schema.Types.String, default: \"\" },\n\n referralCode : { type: Schema.Types.String, unique: true },\n points: { type: Schema.Types.Number, default: 0 }, \n\n phone: { type: Schema.Types.String, default: \"\" },\n profilePic: { type: Schema.Types.String, default: \"\" },\n coverPic: { type: Schema.Types.String, default: \"\" },\n place: { type: Schema.Types.String, default: \"\" },\n active: { type: Schema.Types.Boolean, 
default: false },\n token: { type: Schema.Types.String, default: \"\" }\n\n}, {\n timestamps: true\n});\nuserSchema.plugin(mongoosePaginate);\n\nmodule.exports = mongoose.model('User', userSchema);\nvar mongoose = require('mongoose');\nconst mongoosePaginate = require('mongoose-paginate'); \nvar Schema = mongoose.Schema;\n\nvar postSchema = new Schema({\n\n postedBy:{\n type:Schema.Types.ObjectId,\n ref:'User',\n required:true\n },\n text:{\n type:String,\n default:\"\"\n },\n imageUrl:{\n type:String,\n default:\"\"\n },\n mediaType:{\n type:String,\n default:\"\"\n },\n likeCount:{\n type:Number,\n default:0\n },\n likes:{\n type:[Schema.Types.ObjectId],\n ref:'User',\n default:[]\n },\n commentCount:{\n type:Number,\n default:0\n },\n tags:{\n type:[Schema.Types.String],\n default:[]\n }\n\n},{\n timestamps:true\n});\npostSchema.plugin(mongoosePaginate);\n\n\nmodule.exports = mongoose.model('Post', postSchema);\n",
"text": "I want to send number of posts for each user in this controller response , sorted in descending order\nmaybe by applying aggregate pipeline on “Post” model and Populating “User” model with that no. posts is the most optimized way but i don’t know how to do it so, please helpThis is my Friends model (friendSchema):This is my User model (Userschema):This is my Post model (PostScema):",
"username": "Anil_Yadav"
},
{
"code": "",
"text": "@Anil_Yadav could you please re-format your question, it is difficult to trace your code. and also please state your question clearly.",
"username": "coderkid"
},
{
"code": "",
"text": "\nScreenshot_2020-03-16_14-38-55995×482 58.9 KB\nThis what my post collections looks like in compass, i want to send number of posts for each user in my response",
"username": "Anil_Yadav"
},
{
"code": "",
"text": "i want to send number of posts for each user in my getsuggestionforme function in a optimized way",
"username": "Anil_Yadav"
}
] | Help with Aggregate pipeline in Mongoose | 2020-03-15T19:35:12.611Z | Help with Aggregate pipeline in Mongoose | 2,594 |
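> Editor's note: the post-count question in the thread above was never answered. A minimal sketch of one way to do it with `$lookup`, assuming the default Mongoose collection names `users` and `posts` and the `postedBy` field from the post schema shown in the thread:
>
> ```js
> // Hedged sketch: join each user to their posts, count them, and sort descending.
> // Collection names ("users", "posts") are the Mongoose defaults and may differ.
> db.users.aggregate([
>   { $lookup: { from: "posts", localField: "_id", foreignField: "postedBy", as: "posts" } },
>   { $addFields: { postCount: { $size: "$posts" } } },
>   { $project: { name: 1, profilePic: 1, postCount: 1 } },
>   { $sort: { postCount: -1 } }
> ])
> ```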
null | [] | [
{
"code": "",
"text": "Hi,I am not able to get the schema view in Atlas.\nThe document view opens perfectly for me.\nI am using the stable edition not the community one.Please refer the ss below ",
"username": "Saptarshi_Ghosh_95875"
},
{
"code": "",
"text": "Are you using the latest version?\nTry to click on analyze\nIf it is not giving results restart compass and see",
"username": "Ramachandra_Tummala"
},
{
"code": "analyzeanalyze",
"text": "Hi @Saptarshi_Ghosh_95875,Adding to @Ramachandra_37567’s suggestion above. If after clicking on analyze in the main/center view of the Compass UI you are unable to get to the Schema View, try clicking on analyze (a 2nd time) in the upper right hand corner of the Compass UI. That should work. I’ve had to do this a couple of times myself in order to get to the Schema View.Hope this helps:-)",
"username": "Juliette_Tworsey"
},
{
"code": "",
"text": "Thanks @Ramachandra_37567 and @juliettet for you suggestions.I tried I after restarting the compass and I got the schema view.\nThanks ",
"username": "Saptarshi_Ghosh_95875"
},
{
"code": "",
"text": "This issue is recurring \nsd31269×610 42.3 KB\n",
"username": "Saptarshi_Ghosh_95875"
},
{
"code": "",
"text": "It probably lost connection during the process. I’m guessing you’re connected via Wifi, is it a fast connection?Try again by refreshing the connection:\nOr close and re-open Compass and try again.Use the Analyze button next to Reset, not the one in the middle.",
"username": "007_jb"
},
{
"code": "",
"text": "Hi @Saptarshi_Ghosh_95875,Are you connected to any corporate network / VPN network ?If so, then are you able to make outgoing requests to all these three nodes ?cluster0-shard-00-00-jxeqq.mongodb.net\ncluster0-shard-00-01-jxeqq.mongodb.net\ncluster0-shard-00-02-jxeqq.mongodb.net~ Shubham",
"username": "Shubham_Ranjan"
},
{
"code": "",
"text": "",
"username": "system"
}
] | Unable to get Schema View | 2020-03-14T12:23:01.992Z | Unable to get Schema View | 1,252 |
[
"replication"
] | [
{
"code": "",
"text": "Hi all,\nwhat is the DR architecture for mongodb replica set? Only add new secondary member on DR site Datacenter? or should I consider another solutions?\nThank you.\nBaki.",
"username": "baki_sahin"
},
{
"code": "",
"text": "Welcome @baki_sahin,It looks like you’ve already found the relevant documentation for distributing a replica set across two or more data centres. There are a few approaches you can take with caveats as described on that page.You may also be interested in the white paper on Multi-Data Centre Deployments.Regards,\nStennie",
"username": "Stennie_X"
}
] | Disaster Recover architecture | 2020-03-11T18:49:35.764Z | Disaster Recover architecture | 1,546 |

null | [
"kafka-connector"
] | [
{
"code": "",
"text": "How can I support specified list of collections sync using mongodb to kafka source connector",
"username": "Urvish_Saraiya"
},
{
"code": " {\n \"config\": {\n \"collection\": \"\",\n \"connection.uri\": \"mongodb://mongo1:27017,mongo2:27017,mongo3:27017\",\n \"connector.class\": \"com.mongodb.kafka.connect.MongoSourceConnector\",\n \"database\": \"test\",\n \"name\": \"mongo-source\",\n \"pipeline\": \"[ { $match: { \\\"ns.coll\\\": { \\\"$in\\\": [\\\"test1\\\" ,\\\"test2\\\" ] } } } ]\",\n \"tasks.max\": \"1\",\n \"topic.prefix\": \"mongo\"\n },\n \"name\": \"mongo-source\",\n \"tasks\": [\n {\n \"connector\": \"mongo-source\",\n \"task\": 0\n }\n ],\n \"type\": \"source\"\n}\n",
"text": "You can filter specific collections using pipeline command:",
"username": "Urvish_Saraiya"
}
] | MongoDB kafka source connector How can I support specific set of collections? | 2020-03-06T04:54:28.353Z | MongoDB kafka source connector How can I support specific set of collections? | 1,959 |
null | [] | [
{
"code": "",
"text": "I have the below querydb.orders.aggregate([\n{\n$lookup:\n{\nfrom: “warehouses”,\nlet: { order_item: “$item”, order_qty: “$ordered” },\npipeline: [\n{ $match:\n{ $expr:\n{ $and:\n[\n{ $eq: [ “$stock_item”, “$$order_item” ] },\n{“instock”: { “$exists”: true }}, ---- Tried exists in both ways but nothing is working\nand giving syntax error.\n{ $exists: [ “$instock”, true ] }\n]\n}\n}\n},\n{ $project: { stock_item: 0, _id: 0 } }\n],\nas: “stockdata”\n}\n}\n])\nCould you please let me know how to use exists in pipeline query of lookup\nBelow is the sample collections datadb.orders.insert([\n{ “_id” : 1, “item” : “almonds”, “price” : 12, “ordered” : 2 },\n{ “_id” : 2, “item” : “pecans”, “price” : 20, “ordered” : 1 },\n{ “_id” : 3, “item” : “cookies”, “price” : 10, “ordered” : 60 }\n])db.warehouses.insert([\n{ “_id” : 1, “stock_item” : “almonds”, warehouse: “A” },\n{ “_id” : 2, “stock_item” : “pecans”, warehouse: “A”, “instock” : 80 },\n{ “_id” : 3, “stock_item” : “almonds”, warehouse: “B”},\n{ “_id” : 4, “stock_item” : “cookies”, warehouse: “B”, “instock” : 40 },\n{ “_id” : 5, “stock_item” : “cookies”, warehouse: “A” }\n])Thanks",
"username": "eswar_sunny"
},
{
"code": "$exists$expr{ $match: { instock: { $exists: true } }$match$and{ $match: { $expr:{ $eq: [ \"$stock_item\", \"$$order_item\" ] } } },\n{ $match: { instock: { $exists: true } } },\n",
"text": "You cannot use $exists within a $expr; only aggregation expressions are allowed. But, you can use something like this:\n{ $match: { instock: { $exists: true } }So, your present $match may look like this; a combination of two stages instead of the earlier one stage with $and:Now, the aggregation will run without any errors!",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "Thank you for your response. It works for me.",
"username": "eswar_sunny"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Not able to use $exists in $lookup pipeline | 2020-03-10T08:56:08.990Z | Not able to use $exists in $lookup pipeline | 18,156 |
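> Editor's note: putting the accepted answer above together, the full pipeline for the sample orders/warehouses data would look roughly like this (a sketch, not verified against every server version):
>
> ```js
> db.orders.aggregate([
>   {
>     $lookup: {
>       from: "warehouses",
>       let: { order_item: "$item" },
>       pipeline: [
>         // $expr only accepts aggregation expressions, so $exists gets its own $match stage.
>         { $match: { $expr: { $eq: ["$stock_item", "$$order_item"] } } },
>         { $match: { instock: { $exists: true } } },
>         { $project: { stock_item: 0, _id: 0 } }
>       ],
>       as: "stockdata"
>     }
>   }
> ])
> ```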
null | [
"aggregation"
] | [
{
"code": "{ $eq: [ \"$_id\", sha256( { $toString: \"$creatorId\" } ) ] },\n",
"text": "As part of an aggregate I need to run this transformation:creatorId comes from a lookup, and in order to compare it to _id I first need to do a sha256.How can I do it?Thanks.",
"username": "Eduardo_Cobian"
},
{
"code": "",
"text": "Ah I looked into same thing, few days ago;There is a request ticket open, and waiting for a long time… it is at https://jira.mongodb.org/browse/SERVER-30359@Eduardo_Cobian let me know if you find a workaround.",
"username": "coderkid"
}
] | Mongodb aggregate. How to apply a function to a field | 2020-03-14T16:31:12.669Z | Mongodb aggregate. How to apply a function to a field | 1,956 |
null | [
"node-js",
"mongoose-odm"
] | [
{
"code": "",
"text": "Hi mongodbians,\nI am new to MongodDB , while I am connecting to db I see there are two ways of connecting one is through mongoClient.connect and the other is Mongoose.connect I don’t know which one is better\ncan any one explain detail difference and usage",
"username": "santhosh_Giridhar"
},
{
"code": "",
"text": "AFAIK, Under the hood, mongoose uses mongoClient to connect to the server;However, I can tell you few differences between these two connects;mongoose returns promise, mongoclient returns nullmongoose connect is backed by an internal configurable connection poolThese are the 2 differences I can think top of my head right now, I am sure there are more other…",
"username": "coderkid"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | mongoClient vs mongoose | 2020-03-14T16:31:22.024Z | mongoClient vs mongoose | 9,503 |
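> Editor's note: for readers comparing the two, a minimal side-by-side sketch (the URI is a placeholder; both `connect()` calls return Promises when no callback is passed):
>
> ```js
> const { MongoClient } = require('mongodb');
> const mongoose = require('mongoose');
>
> const uri = 'mongodb://localhost:27017/test'; // placeholder
>
> // Native driver: work directly with databases and collections.
> MongoClient.connect(uri, { useUnifiedTopology: true })
>   .then(client => client.db().collection('users').findOne({}))
>   .then(doc => console.log('driver result:', doc));
>
> // Mongoose: connect once, then query through schema-backed models.
> mongoose.connect(uri, { useNewUrlParser: true, useUnifiedTopology: true })
>   .then(() => {
>     const User = mongoose.model('User', new mongoose.Schema({ name: String }));
>     return User.findOne({});
>   })
>   .then(doc => console.log('mongoose result:', doc));
> ```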
null | [
"python"
] | [
{
"code": " function getStudentSequence(sequenceName) {\n var result = db.short_counter.findAndModify(\n {\n query: { _id: sequenceName },\n update: { $inc: { seq: 1 } },\n new: true\n }\n );\n return result.seq;\n }\n db.short.insert(\n {\n _id: getStudentSequence(\"rollNo\"),\n long_url: \"www.google.com\"\n \n }\n",
"text": "Hi Folks ,I have a functionI am able to execute this function with mongo shell like thisI would like to execute the same using pymongo from jupyter notebook.please advice",
"username": "Christopher_Daniel_D"
},
{
"code": "def get_student_sequence(sequence_name):\n\n updated_record = db.short_counter.find_one_and_update(filter={\"_id\": sequence_name},\n upsert=True, update={\"$inc\": {\"seq\": 1}},\n return_document=True)\n return f\"{updated_record['seq']}\"\ndb.short.insert_one({\"_id\": get_student_sequence(\"rollNo\"), \"long_url\": \"www.google.com\")})\n",
"text": "Why not? I just converted JavaScript to Python function, and it works for me………",
"username": "coderkid"
}
] | Calling Javascript function from pymongo | 2020-03-14T16:31:40.361Z | Calling Javascript function from pymongo | 2,777 |
null | [
"security"
] | [
{
"code": " users > db.createUser({user:'arun', pwd:'hidden', roles:['readWriteAnyDatabase']})",
"text": "Hi All,I created a user with the below command by switching to admin db. But when I switch back to the user db and try to list the users, it shows an empty array. Any idea on why the get.Users() shows an empty array. users > db.createUser({user:'arun', pwd:'hidden', roles:['readWriteAnyDatabase']})",
"username": "Arun_Kumar_kadari"
},
{
"code": "",
"text": "createrUser creates a new user for the database on which the method is run.So, switch back to admin database and type db.getUsers(), you should see the user you created.",
"username": "coderkid"
}
] | db.getUsers() show an empty array | 2020-03-14T08:25:15.891Z | db.getUsers() show an empty array | 4,706 |
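> Editor's note: in other words, a user lives in the database where `createUser` was run. A short shell sketch of what the thread describes:
>
> ```js
> use admin
> db.createUser({ user: "arun", pwd: "hidden", roles: ["readWriteAnyDatabase"] })
> db.getUsers()   // shows the new user: it belongs to the admin database
>
> use users
> db.getUsers()   // empty array: no users were created on this database
> ```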
null | [
"golang"
] | [
{
"code": "\tif p.Scheme == SchemeMongoDBSRV {\n\t\tparsedHosts, err = p.dnsResolver.ParseHosts(hosts, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconnectionArgsFromTXT, err = p.dnsResolver.GetConnectionArgsFromTXT(hosts)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// SSL is enabled by default for SRV, but can be manually disabled with \"ssl=false\".\n\t\tp.SSL = true\n\t\tp.SSLSet = true\n\t}\n",
"text": "Hello,There are two prefix can be used when connect to a mongodb, mongodb or mongodb+srv, when I check the source code at mongo-go-driver/connstring.go at master · mongodb/mongo-go-driver · GitHub, I found the code below:here SchemeMongoDBSRV is “mongodb+srv”.based on the code, my understanding is if I deploy the mongodb on other servers than the local machine, i.e. on another docker or physical machine, I should use “mongodb+srv” as prefix when connecting to the db.\nam I right?James",
"username": "Zhihong_GUO"
},
{
"code": "",
"text": "Hello, I am using mongo go driver to replacing the globalsign mgo. I use the same connection string in mgo as in mongo-go-driver. During running the test on local machine and gitlab CI, the connection are ok, all the test passed. But when I deploy my service on pre-production, I got error “unable to authenticate using mechanism “SCRAM-SHA-1”: (AuthenticationFailed) Authentication failed”. what I should check in my code and on the pre-production db?\nthanks,James",
"username": "Zhihong_GUO"
},
{
"code": "",
"text": "SRV is a way to specify a single hostname that resolves to multiple host names. When using SRV, the driver conducts an SRV lookup to get the actual names of all of the hosts. Also, when using SRV, the driver does lookups for TXT records, which can contain specific URI options to configure the driver. Using the “mongodb+srv” prefix tells the driver that you’ve set up all of the SRV records correctly and the URI you’re giving needs to be resolved before using. If you haven’t set all of this up, you shouldn’t be using that prefix.For the authentication error, I don’t remember anything about mgo’s URI parsing off the top of my head. Can you answer the following:– Divjot",
"username": "Divjot_Arora"
},
{
"code": "",
"text": "Hello Divjot,Thanks for the answer. It looks not only an issue in code, I have to rely on the configuration manager help to check the settings.Best,James",
"username": "Zhihong_GUO"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Mongodb or mongodb+srv | 2020-03-13T10:13:22.950Z | Mongodb or mongodb+srv | 31,302 |
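> Editor's note: since this thread is about the Go driver, here is a small sketch of how either URI form is passed to it (hostnames are placeholders; the `+srv` form only works if the SRV/TXT DNS records actually exist):
>
> ```go
> package main
>
> import (
> 	"context"
> 	"time"
>
> 	"go.mongodb.org/mongo-driver/mongo"
> 	"go.mongodb.org/mongo-driver/mongo/options"
> )
>
> func main() {
> 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
> 	defer cancel()
>
> 	// Plain form: every host is listed explicitly.
> 	uri := "mongodb://host1:27017,host2:27017,host3:27017/?replicaSet=rs0"
> 	// SRV form: one hostname whose DNS SRV/TXT records supply hosts and options.
> 	// uri := "mongodb+srv://cluster0.example.mongodb.net/"
>
> 	client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri))
> 	if err != nil {
> 		panic(err)
> 	}
> 	defer client.Disconnect(ctx)
>
> 	// Ping confirms the deployment is reachable and credentials (if any) work.
> 	if err := client.Ping(ctx, nil); err != nil {
> 		panic(err)
> 	}
> }
> ```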
null | [
"atlas-search"
] | [
{
"code": "",
"text": "Hello,\nI couldn’t find details on the consistency guarantees of Atlas FTS indexes. As far I can see, indexes are based on change streams. That means indexes are eventually consistent with actual documents and ‘$searchBeta’ operator does not provide read-after-write guarantees even in a single document transaction use cases.Is this a reasonably accurate description on how Lucene indexes on atlas work?",
"username": "neeleshs"
},
{
"code": "",
"text": "Hi Neelesh -Yes, Atlas Search indexes are based on change streams, and only provide eventual consistency.We currently have no plans to support a stronger level of consistency.",
"username": "Doug_Tarr"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Atlas FTS index consistency | 2020-03-11T18:48:41.842Z | Atlas FTS index consistency | 1,998 |
null | [
"data-modeling"
] | [
{
"code": "",
"text": "Hi! I’m currently working with big data that increases around 2 million documents each day. To increase the performance, we split the data to different collections, one collection for data in one day. The problem is we need to calculate sum in 90 days. Is there any convenient way to sum up values in 90 collections? Or is there any other way on how we design our database for this purpose?",
"username": "1119"
},
{
"code": "",
"text": "I would have used sharding in a case like that.See https://docs.mongodb.com/manual/sharding/",
"username": "steevej"
},
{
"code": "mongod",
"text": "How are you measuring the performance? And what benefit did you see by moving from a single huge collection to 90 smaller ones? I would expect to see worse performance overall due to the added overhead of merging results from these 90 collections.As Steeve says - you should consider sharding. Some people have had success in the past with “micro-sharding” i.e. creating multiple nodes per host to create large sharded clusters of say 16 shards, built on only 4 hosts by ensuring that each host shares primary nodes and that each shard RS is still distributed over 3 physical hosts. You need to be very careful to set cacheSizeGB correctly when co-locating multiple mongod processes per host.",
"username": "Nic"
},
{
"code": "sum{ _id: \"20200313_00\", values: [ 1, 2, 3, 4, 5 ... ] }{ _id: \"20200313_01\", values: [ 6, 7, 8, 9, 10 ... ] }valuessumdb.sum.updateOne( { _id: \"20200313_01\" }, { $push: { values: 11 } }, { upsert: true } )2020-03-13_id2020031301:0001sum$match$groupsum{ _id: \"20200313\", value: 1000 }{ _id: \"20200314\", value: 2000 }",
"text": "Hi @1119,Have you considered creating another collection to help with this? Check out Building with Patterns: A Summary, specifically the Computed Pattern. Instead of aggregating across 90 days worth of documents (potentially millions and millions of documents), it may be worth storing the values that need to be summed in another place.For example, consider creating a collection called sum that contains the following documents:\n{ _id: \"20200313_00\", values: [ 1, 2, 3, 4, 5 ... ] }\n{ _id: \"20200313_01\", values: [ 6, 7, 8, 9, 10 ... ] }When you insert a new value into your primary collection (the one you’re currently aggregating), perform another update statement using upsert. Simply append the most recent value to be summed to the values array in a single document within the sum collection.\ndb.sum.updateOne( { _id: \"20200313_01\" }, { $push: { values: 11 } }, { upsert: true } )Your application knows the current date (e.g. 2020-03-13 translates to _id of 20200313) and you know the current hour (e.g. 01:00 translates to 01), and you know the value that you’ll later sum. Keeping a document per hour allows storing lots of data compactly for easy summing.Later, when you need to retrieve the sum, aggregate on the sum collection, (1) $match a range based on the last 90 days, and (2) $group with sum to reduce array of values across all 90 days to a single value.You can make this even more scalable by simply storing the sum per day instead of the original values:\n{ _id: \"20200313\", value: 1000 }\n{ _id: \"20200314\", value: 2000 }You have many options using the computed pattern.Schema tricks like this are very scalable. Not having to aggregate across millions of documents at a time will likely even remove the need to shard.",
"username": "Justin"
},
{
"code": "",
"text": "Nice idea.Thanks for sharing. The Computed Pattern was a interesting read.I would be worry about the consistency. But may be doing both update in a transaction could fix that. I have not worked with transactions.An alternative might be to run a process at the end of the day that update the sum collection with the previous day’s data.That springs a couple of related ideas.",
"username": "steevej"
}
] | Is there any convenient way to sum up values in 90 collections? | 2020-03-12T04:14:11.675Z | Is there any convenient way to sum up values in 90 collections? | 2,385 |
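> Editor's note: to complete the picture from the reply above, the read side of the computed pattern could look roughly like this (field names follow the per-day example documents; the cutoff string is computed by the application):
>
> ```js
> // Hypothetical sketch: sum the pre-computed daily values for the last 90 days.
> // _id values are assumed to be "YYYYMMDD" strings as in the example documents.
> var cutoff = "20191214"; // application-computed date 90 days ago
>
> db.sum.aggregate([
>   { $match: { _id: { $gte: cutoff } } },
>   { $group: { _id: null, total: { $sum: "$value" } } }
> ])
> ```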
null | [] | [
{
"code": "db.setLogLevel(2)db.setLogLevel(2, 'query')db.getLogComponents",
"text": "I wanted to change logging level to investigate some issue but I can’t find a way to actually do it :\nI tried a\ndb.setLogLevel(2)\nor\ndb.setLogLevel(2, 'query')\nin mongos but I did not see any change in the logs. No “D” messages.db.getLogComponents is consistant with my changes\nWhat did I miss ?",
"username": "RemiJ"
},
{
"code": "db.setLogLevel(5,\"query\")db.setLogLevel(0)",
"text": "Hi Remi,Welcome to MongoDB Community!On the mongos, try running db.setLogLevel(5,\"query\").There may not be much to see at the mongos, most of the query work occurs on the shards.As soon as you’re confident that the log contains the information you are looking for, you can turn off debug logging by running the command db.setLogLevel(0) . There’s no need to have debugging enabled for the entire load test. Verify that the debug info is no longer being written to the log.Let us know how it goes.Regards,\nMartin",
"username": "Martin"
},
{
"code": "",
"text": "Hi Martin,Do you mean that the logLevel set at the mongos> prompt only affects the mongos I am connected to and that it won’t change the logging level of the shards ? That would explain why I don’t see any difference…Best regards.",
"username": "RemiJ"
}
] | Cannot change logging level | 2020-03-12T10:22:08.829Z | Cannot change logging level | 1,782 |
null | [] | [
{
"code": "",
"text": "Hello,\nI’m Neelesh, CTO, Syncari. We are a distributed SaaS data platform for managing customer and revenue data and use MongoDB Atlas very heavily.Excited to be part of the community!",
"username": "neeleshs"
},
{
"code": "",
"text": "Hello @neeleshs! Welcome to the community ",
"username": "Jamie"
},
{
"code": "",
"text": " Hi @neeleshs\nWelcome to the community! It looks like an interesting product you guys are building!\nMichael",
"username": "michael_hoeller"
}
] | Hello from Syncari | 2020-03-11T18:48:34.380Z | Hello from Syncari | 2,139 |
null | [
"server",
"release-candidate"
] | [
{
"code": "",
"text": "MongoDB 4.0.17-rc0 is out and is ready for testing. This is a release candidate containing only fixes since 4.0.16. The next stable release 4.0.17 will be a recommended upgrade for all 4.0 users.Fixed in this release:4.0 Release Notes | All Issues | DownloadsAs always, please let us know of any issues.– The MongoDB Team",
"username": "Luke_Chen"
},
{
"code": "",
"text": "",
"username": "Stennie_X"
}
] | MongoDB 4.0.17-rc0 is released | 2020-03-13T02:25:06.380Z | MongoDB 4.0.17-rc0 is released | 2,174 |
null | [
"mongoid-odm"
] | [
{
"code": "reconnectTries",
"text": "Happened now a few times that the development docker running Rails /w Mongoid lost connection to the Cloud MongoDB and doesn’t ever re-connect.\nHave to restart the Phusion-Passenger app to get it back.\nWhile Googling i found menation of reconnectTries being at a default of 30. I would rather bump that up by some order of magnitudes. But passing that as an URL option or setting in the mongoid.yml options Hash does not work, only yelling about unknown settings.So how can i properly configure mongoid so it will always reconnect no matter what?",
"username": "Ralf_Vitasek"
},
{
"code": "",
"text": "Hi @Ralf_Vitasek, welcome!Happened now a few times that the development docker running Rails /w Mongoid lost connection to the Cloud MongoDB and doesn’t ever re-connect.Do you know what causes the disconnection to begin with ? Please provide log entries from the application and also from the MongoDB server.Regards,\nWan.",
"username": "wan"
}
] | How to configure `reconnectTries` with Mongoid? | 2020-03-04T11:24:45.642Z | How to configure `reconnectTries` with Mongoid? | 3,974 |
null | [
"monitoring"
] | [
{
"code": "",
"text": "we have 2 requirements:How to create report with Mongotop and mongostat generating report either excel sheet or csv file and also those reports export into emails.Hour wise Read and write ,connections create one report this one also export into emails like below table sample format.| Date/Time | Reads| writes| Connections|\n|03/05/2020:1:28:00| 1 | 0 | 0|\n|03/05/2020:1:40:00| 2 | 5 | 9|can you please anybody know how to do above requirement script or available commands in mongoDB please let me know.",
"username": "hari_dba"
},
{
"code": "mongotopmongostatmongotopmongostat--jsonserverStatusopcountersmongod",
"text": "@hari_dba A more typical approach would be to use a monitoring platform to collect and chart ongoing metrics. For example, MongoDB Cloud Manager for detailed metrics or Free Monitoring for basic metrics. Trends in metrics are generally better visualised in charts rather than tabular text format.If you want to write something custom for metric collection, refer to Database Commands Used by Monitoring in Cloud Manager for relevant diagnostic & administrative database commands. These are the same commands used by monitoring agents and tools like mongotop/mongostat.Alternatively, you could write a cron saving the output of mongotop & mongostat (perhaps using the --json option).A downside of using command line tools is that you will be sampling at a certain rate (for example, an interval in seconds). If you use database commands like serverStatus you can get counters that will allow you to derive a better picture of activity over time. For example, opcounters are a cumulative count of database operations by type since the mongod instance last started.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "Hi Stennie,Thanks for reply…Can you please provide script for output of Mongotop and Mongostat to .cs file or excel format and also that output will be export into direct emails, if it possible please tell me that solutionHow to take output mongotop and mongostat by using --json option this option display output horizontal wise and how to generate into .CS file excel sheet export into email.",
"username": "hari_dba"
},
{
"code": "",
"text": "Can you please provide script for output of Mongotop and Mongostat to .cs file or excel format and also that output will be export into direct emails, if it possible please tell me that solutionIf this is your preferred approach, you’ll have to find or create a solution.If your goal is metrics & monitoring, I’d recommend using an existing monitoring solution such as MongoDB Cloud Manager or Free Monitoring.How to take output mongotop and mongostat by using --json option this option display output horizontal wise and how to generate into .CS file excel sheet export into email.JSON output will give you structured documents that you can save or manipulate programatically. Writing a program to work with JSON and Excel files or email is not specific to MongoDB. You need to look into available libraries and tooling available in your preferred programming language.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "node --version\nmongotop --version\ncurl -o mongotopx https://raw.githubusercontent.com/anars/mongodb-dba-tools/master/build/mongotopx\nchmod +x mongotopx\n./mongotopx --help\n--csv --delimiter=\\|./mongotopx --authenticationDatabase=admin -u admin -p admin --csv --delimiter=\\|\ntimestamp|admin.system.users (total time)|admin.system.users (total count)|admin.system.users (read time)|admin.system.users (read count)|admin.system.users (write time)|admin.system.users (write count)\n2020-03-12T16:49:35.262468992Z|0|0|0|0|0|0\n2020-03-12T16:49:36.267576976Z|0|0|0|0|0|0\n2020-03-12T16:49:37.269237744Z|0|0|0|0|0|0\n2020-03-12T16:49:38.271095919Z|0|0|0|0|0|0\n",
"text": "I believe @Stennie_X answered your question perfectly and showed you the best solution, but I also believe there are so many people out there like “you”, don’t know how to process JSON.So, here we go; I wrote a script extends mongotop, and filter/format output of it; check it out at GitHub - anars/mongodb-dba-tools: MongoDB Database Administrators' ToolsHow to install : (these instructions for linux and macos)First of all, make sure you have NodeJS is install and up-to-date on your system. to check type;Secondly, make sure you have MongoTop is install and up-to-date on your system. to check type;then, download the scriptChange the permissionand finally typefor parametersIn your case, same parameters you used for mongotop, and add these 2 parameters --csv --delimiter=\\|for example;and output will be something like thisp.s. I will document other feature later this weekend, and put to github. And (if have a time) I may implement a script for mongostat.",
"username": "coderkid"
}
] | MongoDB create report for hour wise | 2020-03-05T08:06:01.187Z | MongoDB create report for hour wise | 2,696 |
null | [
"aggregation"
] | [
{
"code": "[\n // Find all tester events in current month\n {\n $match: {\n 'user.role': 'tester',\n datetime: {\n $gte: moment().startOf('month').toDate(),\n $lte: moment().endOf('month').toDate(),\n },\n },\n },\n // Group by tester + event type and project the total\n {\n $group: {\n _id: { testerId: '$user._id', eventId: '$eventId' },\n total: { $sum: 1 },\n },\n },\n]\n[\n { \n _id: [user _id],\n user: [whole $user object],\n events: {\n 'login': 10,\n 'test-completed': 3,\n 'password-reset': 1\n }\n }\n]\n",
"text": "Hey all,I’m working on grouping and totalling log events (group by user and total of events of each type for each user). I’ve made some progress but I could use some pointers to help me get the results I’m looking for.My code so farThis is giving me the user Id and event type with total for each event type, but all at the “top level” and not as I intend, which would be something like this:I hope that this makes sense, if I’m not making sense please let me know and I’ll try to clarify.Thanks for any help you can offer ",
"username": "Perry_Trinier"
},
{
"code": "db.test.drop();\ndb.test.insertMany([\n{user: {_id: '123', role: 'tester'}, datetime: '2020-01-01T00:00:00', eventId: 'login'},\n{user: {_id: '123', role: 'tester'}, datetime: '2020-01-01T01:00:00', eventId: 'test-completed'},\n{user: {_id: '456', role: 'tester'}, datetime: '2020-01-01T02:00:00', eventId: 'login'},\n{user: {_id: '123', role: 'tester'}, datetime: '2020-01-01T03:00:00', eventId: 'password-reset'},\n{user: {_id: '123', role: 'tester'}, datetime: '2020-01-01T04:00:00', eventId: 'login'}\n])\ndb.test.aggregate([\n {\n $match: {\n 'user.role': 'tester',\n datetime: {\n $gte: '2020-01-01T00:00:00',\n $lte: '2020-02-01T00:00:00',\n },\n },\n },\n {\n $group: {\n _id: { testerId: '$user._id', eventId: '$eventId' },\n total: { $sum: 1 },\n },\n },\n]);\ndb.test.aggregate([\n {\n $match: {\n 'user.role': 'tester',\n datetime: {\n $gte: '2020-01-01T00:00:00',\n $lte: '2020-02-01T00:00:00',\n },\n },\n },\n {\n $group: {\n _id: { testerId: '$user._id', eventId: '$eventId' },\n total: { $sum: 1 },\n },\n },\n {\n $group: {\n _id: '$_id.testerId',\n events: {$push: {eventId: '$_id.eventId', total: {$sum: '$total'}}}\n },\n },\n]);\n",
"text": "It would be easier if you include actual raw data, as well as syntactically correct data that isn’t dependent on third party libraries (e.g. moment). This way, it can just be easily pasted into any environment and it’s a ton easier to help. I assume it’s something likeAnd you are doingIf you want these results to be grouped, just do another group:",
"username": "nefiga"
},
{
"code": "$push",
"text": "Hi,Yes your assumptions were correct, but I’ll be clearer about the data I’m dealing with in the future I have a lot to learn to really be comfortable with getting the data out of Mongo that I want - the $push and double-grouping stuff just didn’t even occur to me to be something I would want to reach for, so thanks for the example!",
"username": "Perry_Trinier"
}
] | Need some assistance grouping and totalling in an aggregation | 2020-03-11T17:35:42.674Z | Need some assistance grouping and totalling in an aggregation | 1,512 |
null | [
"java",
"production"
] | [
{
"code": "scala-driver",
"text": "Version 1.13.1 of the MongoDB Reactive Streams Driver has been released. Please review the changelog notes at https://mongodb.github.io/mongo-java-driver-reactivestreams/1.13/. This is a patch release and a recommended upgrade.Please feel free to post any questions on the MongoDB Community forum in the Drivers, ODMs, and Connectors category tagged with scala-driver. Bug reports should be filed against the JAVA project in the MongoDB JIRA.The JVM Drivers team",
"username": "Ross_Lawley"
},
{
"code": "",
"text": "",
"username": "system"
}
] | MongoDB Reactive Streams Driver 1.13.1 released | 2020-03-12T17:31:51.505Z | MongoDB Reactive Streams Driver 1.13.1 released | 1,813 |
null | [
"production",
"scala"
] | [
{
"code": "scala-driver",
"text": "Version 2.9.0 of the MongoDB Scala Driver has been released. Please review the changelog notes at https://mongodb.github.io/mongo-scala-driver/2.9/changelog/#2-9-0.Please feel free to post any questions on the MongoDB Community forum in the Drivers, ODMs, and Connectors category tagged with scala-driver. Bug reports should be filed against the JAVA project in the MongoDB JIRA.The JVM Drivers team",
"username": "Ross_Lawley"
},
{
"code": "",
"text": "",
"username": "system"
}
] | MongoDB Scala driver 2.9.0 released | 2020-03-12T17:28:39.381Z | MongoDB Scala driver 2.9.0 released | 3,155 |
[
"indexes",
"performance"
] | [
{
"code": "",
"text": "I did some tests to create index on MongoDB. My goal is to understand how to shorten the build index time. I was thinking to change “maxIndexBuildMemoryUsageMegabytes” value to allow mongod to use more RAM to build index.I did the following tests and got some interesting results:\nTest #1\nVM memory: 2GB\nDocument size: 100 million docs (7.7 GB data size)\nmaxIndexBuildMemoryUsageMegabytes: 500 MB\nTook 2269 sec to build index.Test #2\nChanged maxIndexBuildMemoryUsageMegabytes to 800 MB\nTook 1865 sec to build index.\nThis is what I expected.Test #3\nIncrease the document size to 150 million (11.642 GB db size)\nmaxIndexBuildMemoryUsageMegabytes: 500 MB\nTook 6085 (1.69 hrs) to build indexTest #4\nSame as test #3 but changed maxIndexBuildMemoryUsageMegabytes to 800 MB\nTook 26315 (7.3 hrs) to build index.\nThis is NOT what I’m expected. After the index built, used swap is 477 MB.Then I tried on another VM will more memory and larger document size.Test #5\nVM memory: 4GB\nDocument size: 200 million docs (15 GB data size)\nmaxIndexBuildMemoryUsageMegabytes: 500 MB\nTook 15032 sec to build index.Test #6\nSame as test #5 but changed maxIndexBuildMemoryUsageMegabytes to 1 GB.\nTook 15053 sec to build index.\nIt didn’t shorten the build index time.Then I increase the document size to 400 million docs (37 GB data size).\nI have tried both 500 MB and 1 GB on maxIndexBuildMemoryUsageMegabytes value.\nThe build index time are exactly the same.My question is:What should I set for maxIndexBuildMemoryUsageMegabytes? I know it is depended on memory size vs data size and other factors.\nWhat is the good ratio that I can maximize the memory usage on building index but not fall into using swap. (swappiness is set to 1 already).Any other ideas on how to increase build index performance?Thanks!",
"username": "Frank_Hui"
},
{
"code": "",
"text": "With so little RAM compared to the data size I suspect disk I/O is the bottleneck.Test #4 seems to corroborate that. Giving more RAM for the index build means less RAM for the working set which means more disk I/O.The above in my untested opinion.",
"username": "steevej"
}
] | How to increase create index build performance? | 2020-03-12T11:47:09.341Z | How to increase create index build performance? | 5,153 |
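> Editor's note: for reference, the parameter tuned in these tests can be changed without restarting the deployment; a sketch of both ways to set it:
>
> ```js
> // At runtime, from the mongo shell (applies to index builds started afterwards):
> db.adminCommand({ setParameter: 1, maxIndexBuildMemoryUsageMegabytes: 800 })
>
> // At startup, as a process option:
> //   mongod --setParameter maxIndexBuildMemoryUsageMegabytes=800
> ```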
null | [
"aggregation"
] | [
{
"code": "",
"text": "hi,i have this different object that i query\n{id: 1}\n{id: 2}\n{id: 3, stickyIndex:2}currently i am querying it to get all the objects and then sort them by stickyIndex first, then by id:\ni get in return {3, 1, 2}however i want the stickyIndex to change the order and kind of splice the array of result and push it to the second position.i want to get this result {1, 3, 2}\nis there a way to get this order with one aggregate query (and not with manipulating the data after i get it back in the server side)?",
"username": "Adi_Mor"
},
{
"code": "db.test.drop();\ndb.test.insert({a: 1});\ndb.test.insert({a: 2});\ndb.test.insert({a: 3, stickyIndex: 2});\ndb.test.aggregate([{$addFields: {stickyIndex: {$ifNull: ['$stickyIndex', '$a']}}}]);\n db.test.aggregate([{$addFields: {stickyIndex: {$ifNull: ['$stickyIndex', '$a']}}}, {$sort: {'stickyIndex': 1}}]);\na",
"text": "i want the stickyIndex to change the order and kind of splice the array of result and push it to the second positTo manipulate data interpretation like this, you will need to use the aggregation framework.\nAs a first step, examine the output of:You can see that using $ifNull is a way to get what you want. Then, to sort:If you want to ensure the ordering of a stickyIndex that also has a duplicate a field, then you could use $add to differentiate the null field, or utilize an alternate name for the addFields operation, and sort by both fields.",
"username": "nefiga"
},
{
"code": "",
"text": "thanks for the quick answer. however it is a bit more complicated from that.actually i have data set like this:\n{id: 1, date: ISODate(“2018-01-03T13:45:38.909Z”)}\n{id: 2, date: ISODate(“2018-01-02T13:45:38.909Z”)}\n{id: 3, stickyIndex:2, date: ISODate(“2018-01-06T13:45:38.909Z”)}i want to get it all and then sort it by this order:so the result should be:\n{id: 1, id: 3, id: 2}is there a way to get this? (it isn’t the same measurement the stickyIndex and the date variables)",
"username": "Adi_Mor"
},
{
"code": "stickyIndex_id",
"text": "In the sample collection you are showing three documents. What if there are four or more documents; and what would be the stickyIndex's document _id's position?",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "let’s take a more complicated data set and i will try to explain again the logic.{id: 1, date: ISODate(“2018-01-03T13:45:38.909Z”)}\n{id: 2, date: ISODate(“2018-01-02T13:45:38.909Z”)}\n{id: 3, stickyIndex:4, date: ISODate(“2018-01-06T13:45:38.909Z”)}\n{id: 4, stickyIndex:2, date: ISODate(“2018-01-01T13:45:38.909Z”)}\n{id: 5, date: ISODate(“2018-01-01T13:45:38.909Z”)}what i want it to do is:after this query i am supposed to get this result set:\n{1, 4, 2, 3, 5} - each one represnts its id.",
"username": "Adi_Mor"
}
] | One aggregate query that changes the position of the results based on an attribute | 2020-03-11T18:49:07.536Z | One aggregate query that changes the position of the results based on an attribute | 3,027 |
null | [
"sharding"
] | [
{
"code": "",
"text": "I am having trouble finding documentation on where exactly cursors are stored in a sharded cluster, and would like to better understand how cursors work internally.Main reason for this being that there is a decision in the company I work at to couple 1 mongos instance with each application instance, the reason stated being that in a sharded cluster, cursors are stored on the mongos processes in memory. I have, however, come across sharded cluster deployments that do not have the same architecture - only having a few mongos processes running on larger machines to allow more client applications to connect to them.Is there any definitive way to find out this information? I’ve searched online for documentation, but have come up short.",
"username": "George_Kontridze"
},
{
"code": "mongosmongosmongosmongosmongosmongos",
"text": "HI @George_Kontridze, welcome!Main reason for this being that there is a decision in the company I work at to couple 1 mongos instance with each application instance, the reason stated being that in a sharded cluster, cursors are stored on the mongos processes in memoryIt is a common pattern to place a mongos on each application server. However, the reason for deploying one mongos router on each application server is to reduce network latency between the application and the router.If you have many application clients however, the architecture above may not be suitable depending on the case. This is because mongos routers communicate frequently with the config servers, and too many mongos may affect the performance of the config servers.Another reason to host mongos on a dedicated instance is to avoid memory contention between the application client and the mongos, especially if the application client is memory hungry. As you can see it really depends on the use case, but it’s not related to cursors memory.I would recommend reviewing the following resources:Regards,\nWan.",
"username": "wan"
}
] | Where are cursors stored? | 2020-03-05T00:30:24.801Z | Where are cursors stored? | 1,907 |
null | [
"node-js",
"production"
] | [
{
"code": "hasNexthasNexttopologyDescriptionChangedServerDescriptionlastUpdateTimelastWriteDateServerDescriptionTimeout_calledundefinedpromiseLibrarymaybePromisePromise",
"text": "The MongoDB Node.js team is pleased to announce version 3.5.5 of the driver@peterbroadhurst helped point out a regression introduced in v3.5.4 where using hasNext\non a cusor with a limit would not return the full set of results.A change introduced across all MongoDB drivers, and in particular v3.5.0 of the Node.js\ndriver, attempted to prevent needless duplicate topologyDescriptionChanged topology events\nby introducing a ServerDescription equality operator. Since equality does not take the\nlastUpdateTime and lastWriteDate fields of an ismaster into account, the driver could\neventually consider servers non-suitable for server selection, since they would fall out\nof the latency window.\nAll updates are considered viable for topology updates now, and only event emission is\ngated by ServerDescription equality.The legacy topology types (in particular if you were connected to a replic set) used a\ncustom Timeout class to wrap a timer. Unfortunately, the class depended on an undocumented,\nprivate variable _called, which was removed in Node.js v12. This would lead to the driver\nthinking the timeout never occurred, and therefore never releasing the object for garbage\ncollection. We recommend users of the legacy topology types immediately update to this\nversion of the driver, or use the Unified Topology which is unaffected by this bug.@erfanium and @Paic helped us identify an issue in rare failover events where multiple\nrequests to process the server selection queue would result in an attempted property\naccess of an undefined variable.@tobyealden pointed out that an internal refactor to use a helper to optionally\nreturn a Promise for top level API methods was not, in fact, using a custom\npromise library if one was provided!Reference: MongoDB Node.js Driver\nAPI: Index\nChangelog: node-mongodb-native/HISTORY.md at 3.5 · mongodb/node-mongodb-native · GitHubWe invite you to try the driver immediately, and report any issues to the NODE project.Thanks to all who contributed to this release!The MongoDB Node.js team",
"username": "mbroadst"
},
{
"code": "",
"text": "",
"username": "system"
}
] | MongoDB Node.js Driver 3.5.5 Released | 2020-03-11T20:40:30.661Z | MongoDB Node.js Driver 3.5.5 Released | 2,148 |
null | [
"stitch"
] | [
{
"code": "exports = async function(){\n\n const mongodb = context.services.get(\"mongodb-atlas\");\n const collection = mongodb.db(\"mydatabase\").collection(\"mycollection\");\n \n var result = {};\n var resultJson = {};\n \n try { result = await collection.find({}, {_id:1})\n \n resultJson = JSON.stringify(result);\n\n }\n catch (e){\n console.log(e);\n }\n\n console.log('res', resultJson)\n\n return result;\n};\n",
"text": "In my function below I am trying to get all the ObjectIDs in ‘mycollection’. Since the find function returns an EJSON object when I log the output I just see ‘{ }’. I cannot manage to output the ObjectIDs, can anyone point me in the right direction please?\n(I know the result is not empty by the way, as it outputs the correct JSON-looking object in the console)",
"username": "Daniel_Gold"
},
{
"code": "toArray()result = await collection.find({}, {_id:1}).toArray();\nresult = await collection.findOne({}, {_id:1});\n",
"text": "Hi @Daniel_Gold,Since the find function returns an EJSON object when I log the output I just see ‘{ }’The collection.find() returns RemoteMongoReadOperation. You need to call its methods to return Promise object. For example toArray():This is slightly different than collection.findOne(), which returns Promise for the resulting document. i.e.Regards,\nWan.",
"username": "wan"
},
{
"code": "",
"text": "Thanks very much that’s really helpful - saved me a lot of time!",
"username": "Daniel_Gold"
}
] | Stitch parse EJSON result from find() | 2020-03-10T22:25:26.764Z | Stitch parse EJSON result from find() | 2,636 |
null | [
"java"
] | [
{
"code": "",
"text": "Hello,\nThere is not enough information available on documentation about “GROUP By” in JAVA language.You should provide some better example and brief explanation about all Aggregation operation so that everyone can understand easily.\nWe can use GROUP by with DBObject also but in GROUPOPERATION method, It is fatigue task to do as it is not easily understandable.\nI hope, You may see this request and take immediate action.Thank you…",
"username": "Prem_Parmar"
},
{
"code": "",
"text": "One thing that helped me a lot with aggregation is the course M121 - The MongoDB Aggregation Framework from https://university.mongodb.com/I also use MongoDB Compass | MongoDB to built my pipelines.Finally, since I have projects in Java, some in nodejs and use the shell on daily basis, I keep my aggregation pipelines in json, even in Java where I do not use the aggregation builder classes.",
"username": "steevej"
},
{
"code": "",
"text": "I second @steevej on both M121 and compass.",
"username": "chris"
},
{
"code": "groupGroupOperation",
"text": "You can try the newer (and easy to use) com.mongodb.client.model.Aggregates aggregation pipeline builder class’s group methods. Also see Aggregation Tutorials and Example usage of Aggregates.As of Java driver v3.9 GroupOperation is deprecated.Compass’s Aggregation Pipeline Builder has Export Pipeline to Specific Language feature, and this allows the built pipeline to be converted to corresponding Java code.",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "In Java, for same DTO above bold query works fine. Can you please give me reason behind this and also how this fields works?collection’s document is :\n\nScreenshot from 2020-03-11 12-16-201600×900 278 KB\n",
"username": "Prem_Parmar"
}
] | Aggregation with JAVA | 2020-03-11T10:43:04.060Z | Aggregation with JAVA | 2,791 |
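> Editor's note: as a concrete starting point for the Aggregates builder mentioned above, here is a small group-by sketch in Java (collection and field names are made up for illustration; requires the synchronous Java driver on the classpath):
>
> ```java
> import static com.mongodb.client.model.Accumulators.sum;
> import static com.mongodb.client.model.Aggregates.group;
> import static com.mongodb.client.model.Aggregates.match;
> import static com.mongodb.client.model.Filters.eq;
>
> import java.util.Arrays;
> import org.bson.Document;
> import com.mongodb.client.MongoClients;
> import com.mongodb.client.MongoCollection;
>
> public class GroupByExample {
>     public static void main(String[] args) {
>         MongoCollection<Document> orders = MongoClients.create("mongodb://localhost:27017")
>                 .getDatabase("test").getCollection("orders");
>
>         // Shell equivalent:
>         // db.orders.aggregate([{ $match: { status: "A" } },
>         //                      { $group: { _id: "$custId", total: { $sum: "$amount" } } }])
>         orders.aggregate(Arrays.asList(
>                 match(eq("status", "A")),
>                 group("$custId", sum("total", "$amount"))
>         )).forEach(doc -> System.out.println(doc.toJson()));
>     }
> }
> ```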
null | [
"data-modeling"
] | [
{
"code": "{\n \"workspace_id\" : 1\n \"attributes\" : {\n \"first_name\" : \"John\",\n \"last_name\" : \"Doe\",\n \"email\" : \"[email protected]\",\n \"phone_number\" : \"+1234567890\",\n \"gender\" : \"Male\",\n \"location\" : \"London\"\n }\n}\ndb.clients.find({ \"workspace_id\": 1, \"attributes.first_name\": \"John\", \"attributes.gender\": \"Male\" })db.clients.find({ \"attributes.first_name\": \"John\", \"attributes.gender\": \"Male\" }){\n \"workspace_id\" : 1,\n \"attributes\" : [\n { k: \"first_name\", v: \"John\" }\n { k: \"last_name\", v: \"Doe\" }\n { k: \"email\", v: \"[email protected]\" }\n { k: \"phone\", v: \"+1234567890\" }\n { k: \"gender\", v: \"Male\" }\n { k: \"location\", v: \"London\" }\n ]\n}\n",
"text": "Hi everyone,My users are able to insert clients which have different attributes/properties. Basically, it’s totally unpredictable.Additionally, users are able to create multiple workspaces so they have some clients in one workspace, some in another (in case they have multiple projects, for example).My document looks like this:The first question how to structure indexes.If I create:then it seems to be slow when I query something like this:db.clients.find({ \"workspace_id\": 1, \"attributes.first_name\": \"John\", \"attributes.gender\": \"Male\" })However, it’s fast to query something like this:db.clients.find({ \"attributes.first_name\": \"John\", \"attributes.gender\": \"Male\" })As you can see, it’s getting slow when we mix workspace_id and attributes. Note that wildcard index and compound index don’t go together.Here is what could be an alternative:My tests shows that this is much faster when I mix workspace_id and attributes (which is always) because I added compound index to workspace_id, k and v.The FIRST question is: Did we choose the right index?Next, I would like to know what should we take for sharding?I mean, we’re not ready yet to do sharding (nor we need it), but since we’re designing a scheme, then it could be useful to consider it for the future.The SECOND question is which sharding key should we choose for our data?Next, we want to push different events to clients. We can create a seperate collection (events) or we can embed (which is risky for reaching out 16 MB).We would like to go with events collection and then use lookup in clients. How about performance if we are talking about millions of events and millions of clients.The THIRD question is what is your experience with lookup?Thank you and sorry if there are so many questions asked.",
"username": "jellyx"
},
{
"code": "db.clients.find({ \"workspace_id\": 1, \"attributes.first_name\": \"John\", \"attributes.gender\": \"Male\" })workspace_idattributesworkspace_idattributesattributeskvworkspace_idkv",
"text": "Some thoughts.The FIRST question is: Did we choose the right index?db.clients.find({ \"workspace_id\": 1, \"attributes.first_name\": \"John\", \"attributes.gender\": \"Male\" })For the above query to effectively use the index on workspace_id and attributes (its fields) you have to have a compound index with workspace_id and the attributes’s fields.The attributes’s fields can be individual fields or as specified by the attributes’s k and v fields (as per the Attribute Design Pattern).It looks like the compound index on workspace_id, k and v is the right choice.NOTE: Wild card indexes have these following restrictions: (1) You cannot shard a collection using a wildcard index, and (2) You cannot create a compound index.Next, I would like to know what should we take for sharding?Sharding is about distributing data evenly across multiple servers (or shards); it is horizontal scaling.This is based upon your application requirements, mainly, the large amount of collection data, its distribution and performance accessing it. The queries that access the data often and important (fast access) are among the considerations. Shard key plays an important factor in these.We would like to go with events collection and then use lookup in clients. How about performance if we are talking about millions of events and millions of clients.It looks like your data has clients with multiple events - a one-to-many relationship. How many events per client? What kind of queries happen with this data? These are the factors to make a decision about embedding vs referencing.The post 6 Rules of Thumb for MongoDB Schema Design: Part 1 has useful discussion on “How do I model a one-to-N relationship?”.Thank you and sorry if there are so many questions asked.Indeed, quite a few questions and covering aspects of design and development; interesting ",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "Sharding is about distributing data evenly across multiple servers (or shards); it is horizontal scaling.This is based upon your application requirements, mainly, the large amount of collection data, its distribution and performance accessing it. The queries that access the data often and important (fast access) are among the considerations. Shard key plays an important factor in these.First of all, thanks for the answer!Well yes, I understand what is sharding, nodes, cluster, etc. But I do wonder what should I set as a sharding key because of my data structure.I understand that data needs to be evenly distributed. It makes no sense to set “gender” as a sharding key, for example.Since my data has dynamic attributes, I’m not sure which sharding key I should set. I mean, that’s for later…, but still just curious.We would like to go with events collection and then use lookup in clients. How about performance if we are talking about millions of events and millions of clients.It looks like your data has clients with multiple events - a one-to-many relationship. How many events per client? What kind of queries happen with this data? These are the factors to make a decision about embedding vs referencing.The post 6 Rules of Thumb for MongoDB Schema Design: Part 1 has useful discussion on “How do I model a one-to-N relationship?”.Yes, I read that post. It’s useful. However, I figured out:Option 1:Embed events into clients and then I have a limit of 16 MB which might be exceeded for some customers.Option 2:If I separate collections then I can store unlimited events, but then I have problems with lookup because it doesn’t support sharding (from collection).That means I need to do two queries (clients, events) and then I should intersect two results.Note that I also have problem with grouping events by clients because $group doesn’t support indexes and it’s slower.Since there is a lot of computations, all of this could lead to Apache Spark. There is an Apache connector: Connector For Apache Spark | MongoDBWhat do you think?Cheers!",
"username": "jellyx"
}
] | Dynamic attributes, sharding and embedding vs separate collection | 2020-03-09T22:18:02.564Z | Dynamic attributes, sharding and embedding vs separate collection | 2,041 |
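> Editor's note: for the attribute-pattern layout discussed above, the index and query shape would look roughly like this (a sketch; `$elemMatch` keeps each k/v pair bound to the same array element):
>
> ```js
> db.clients.createIndex({ workspace_id: 1, "attributes.k": 1, "attributes.v": 1 })
>
> db.clients.find({
>   workspace_id: 1,
>   attributes: {
>     $all: [
>       { $elemMatch: { k: "first_name", v: "John" } },
>       { $elemMatch: { k: "gender", v: "Male" } }
>     ]
>   }
> })
> ```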
null | [
"python"
] | [
{
"code": "",
"text": "HI! I have a problem with Pymongo.I installed trough pip but when I try run the Python script , launch this error.\nTraceback (most recent call last):\nFile “C:\\Users\\Dell\\Desktop\\Python\\app2.py”, line 1, in \nimport pymongo\nModuleNotFoundError: No module named ‘pymongo’Thanks!!!",
"username": "Sebastian_Prolo"
},
{
"code": "",
"text": "Not too familiar with python on windows.When this happens to me on linux it is usually that I have installed the module into the wrong environment.i.e. Installed into python2 but app is invoking python3. Or I have installed globally instead of the virtualenv.Its a python thing, not mongo.",
"username": "chris"
}
] | Pymongo and Mongo db | 2020-03-11T10:42:31.245Z | Pymongo and Mongo db | 2,470 |
null | [
"production",
"php"
] | [
{
"code": "pecl install mongodb\npecl upgrade mongodb\n",
"text": "The PHP team is happy to announce that version 1.7.4 of the mongodb PHP extension is now available on PECL.Release HighlightsThis release fixes a compilation issues when using an SSL library installed in a non-standard directory.A complete list of resolved issues in this release may be found at: Release Notes - MongoDB JiraDocumentationDocumentation is available on PHP.net:\nPHP: MongoDB - ManualFeedbackWe would appreciate any feedback you might have on the project:\nhttps://jira.mongodb.org/secure/CreateIssue.jspa?pid=12484&issuetype=6InstallationYou can either download and install the source manually, or you can install the extension with:or update with:Windows binaries are available on PECL:\nhttp://pecl.php.net/package/mongodb",
"username": "Andreas_Braun"
},
{
"code": "",
"text": "",
"username": "system"
}
] | MongoDB PHP Extension 1.7.4 released | 2020-03-11T12:44:01.215Z | MongoDB PHP Extension 1.7.4 released | 3,090 |
null | [
"backup"
] | [
{
"code": "",
"text": "please advice how can i restore database into a new database.",
"username": "Lital_ez"
},
{
"code": "mongodumpmongorestore",
"text": "@Lital_ez Welcome to the forums! Can you please clarify the specific versions of MongoDB server you are trying to restore from and to? There is no MongoDB 2.9 server release, and the oldest version of the server that isn’t currently end-of-life is the 3.6 release series.The general approach for backing up and restoring a single database is using mongodump and mongorestore as per Back Up and Restore with MongoDB Tools.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "ops, version 3.2.9 restore database from gz archive into different database on the same server",
"username": "Lital_ez"
},
{
"code": "-d / --db--gzip--archive",
"text": "The options you are looking for are:\n-d / --db for the database to restore into.\n--gzip For the gz\n--archive For the archive formatExample on the mongorestore documentation.",
"username": "chris"
}
] | Restore database in version 2.9 | 2020-03-08T10:18:06.365Z | Restore database in version 2.9 | 1,839 |
null | [
"connector-for-bi"
] | [
{
"code": "mongodb:\n versionCompatibility:xxx\n net:\n uri: xxx\n auth:\n username: xxx\n password: xxx\n source: xxxx\n mechanism: xxxx\n",
"text": "I downloaded the BI connector and in mongosqld.conf file, need to allow access to multiple ldap users instead of single user.Is there any other way to provide access to multiple users to DBVisualizer client tool",
"username": "Vallikannu_Arumugam"
},
{
"code": "mongosqldnet.authsecurity",
"text": "Welcome @Vallikannu_Arumugam,The details in mongosqld’s net.auth configuration are for Admin Authentication to a MongoDB deployment. These credentials must provide read-only access to the the superset of data you wish to query via the connector.User Authentication is configured in the security section of the configuration file. See Security Options for more details on available configuration settings.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "gracePLAIN",
"text": "Thanks for the help! In this document, https://docs.mongodb.com/bi-connector/master/authentication/#bi-short-authentication under Client Authentication for LDAP, I see an example like this:\nTo authenticate as user grace with the LDAP ( PLAIN ) authentication mechanism, write the username in this format:\ngrace?mechanism=PLAIN&source=$external\nSo, where this should be added, in configuration file under security or in other place? Is it possible to authenticate multiple LDAP users by using this same format or only single user?",
"username": "Vallikannu_Arumugam"
}
] | Need to allow multiple ldap users to access DBvisualizer client tool | 2020-03-10T19:51:16.503Z | Need to allow multiple ldap users to access DBvisualizer client tool | 2,137 |
null | [] | [
{
"code": "",
"text": "Hi Team,Can Collation can be used while creating Database in MongoDB?Just verified in MongoDB Websites, I am not able to find. If Collation allowed with different language, Kindly assist with sample/Syntax for the same.Regards,\nJay",
"username": "Jayaprakash_Nagappan"
},
{
"code": "",
"text": "You may find the following helpful.",
"username": "steevej"
},
{
"code": "db.createCollection()mongo",
"text": "Can Collation can be used while creating Database in MongoDB?Collation can currently only be specified for a collection, view, index, or operation that supports collation.Databases are created implicitly when you first store data for that database. As at MongoDB 4.2, there is no API to set database-level defaults such as collation.Collections are typically also created implicitly, but an empty collection can be created with specific options such as collation and compression. See the db.createCollection() documentation for a reference on collection options using the mongo shell. You can also pass equivalent options using your MongoDB driver.There is also a relevant feature request you can watch/upvote in the MongoDB issue tracker: SERVER-28362: Default collation for database.Regards,\nStennie",
"username": "Stennie_X"
}
] | Can collation can be used while creating a database in MongoDB? | 2020-03-10T05:23:11.121Z | Can collation can be used while creating a database in MongoDB? | 1,345 |
null | [
"atlas"
] | [
{
"code": "***aborting after invariant() failure\n\n\n2020-03-09T15:02:13.689+0000 F - [conn316] Got signal: 6 (Aborted).\n 0x55ca8cd9c541 0x55ca8cd9b759 0x55ca8cd9bc3d 0x7f24d60735f0 0x7f24d5ccc337 0x7f24d5ccda28 0x55ca8b29e516 0x55ca8b6c095a 0x55ca8bad6698 0x55ca8b93d4bb 0x55ca8c7d88e6 0x55ca8c7df2f9 0x55ca8b3dfd6e 0x55ca8b3e1c69 0x55ca8b3e2bb1 0x55ca8b3ce85a 0x55ca8b3daf8a 0x55ca8b3d61e7 0x55ca8b3d9a01 0x55ca8c5c86f2 0x55ca8b3d43d0 0x55ca8b3d7515 0x55ca8b3d5927 0x55ca8b3d626d 0x55ca8b3d9a01 0x55ca8c5c8c55 0x55ca8ccf39d4 0x7f24d606be65 0x7f24d5d9488d\n----- BEGIN BACKTRACE -----\n{\"backtrace\":[{\"b\":\"55CA8A84D000\",\"o\":\"254F541\",\"s\":\"_ZN5mongo15printStackTraceERSo\"},{\"b\":\"55CA8A84D000\",\"o\":\"254E759\"},{\"b\":\"55CA8A84D000\",\"o\":\"254EC3D\"},{\"b\":\"7F24D6064000\",\"o\":\"F5F0\"},{\"b\":\"7F24D5C96000\",\"o\":\"36337\",\"s\":\"gsignal\"},{\"b\":\"7F24D5C96000\",\"o\":\"37A28\",\"s\":\"abort\"},{\"b\":\"55CA8A84D000\",\"o\":\"A51516\",\"s\":\"_ZN5mongo24invariantOKFailedWithMsgEPKcRKNS_6StatusERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES1_j\"},{\"b\":\"55CA8A84D000\",\"o\":\"E7395A\",\"s\":\"_ZN5mongo21WiredTigerRecordStore7compactEPNS_16OperationContextEPNS_25RecordStoreCompactAdaptorEPKNS_14CompactOptionsEPNS_12CompactStatsE\"},{\"b\":\"55CA8A84D000\",\"o\":\"1289698\",\"s\":\"_ZN5mongo14CollectionImpl7compactEPNS_16OperationContextEPKNS_14CompactOptionsE\"},{\"b\":\"55CA8A84D000\",\"o\":\"10F04BB\",\"s\":\"_ZN5mongo10CompactCmd9errmsgRunEPNS_16OperationContextERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKNS_7BSONObjERS8_RNS_14BSONObjBuilderE\"},{\"b\":\"55CA8A84D000\",\"o\":\"1F8B8E6\",\"s\":\"_ZN5mongo23ErrmsgCommandDeprecated3runEPNS_16OperationContextERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKNS_7BSONObjERNS_14BSONObjBuilderE\"},{\"b\":\"55CA8A84D000\",\"o\":\"1F922F9\",\"s\":\"_ZN5mongo12BasicCommand10Invocation3runEPNS_16OperationContextEPNS_19CommandReplyBuilderE\"},{\"b\":\"55CA8A84D000\",\"o\":\"B92D6E\"},{\"b\":\"55CA8A84D000\",\"o\":\"B94C69\"},{\"b\":\"55CA8A84D000\",\"o\":\"B95BB1\",\"s\":\"_ZN5mongo23ServiceEntryPointCommon13handleRequestEPNS_16OperationContextERKNS_7MessageERKNS0_5HooksE\"},{\"b\":\"55CA8A84D000\",\"o\":\"B8185A\",\"s\":\"_ZN5mongo23ServiceEntryPointMongod13handleRequestEPNS_16OperationContextERKNS_7MessageE\"},{\"b\":\"55CA8A84D000\",\"o\":\"B8DF8A\",\"s\":\"_ZN5mongo19ServiceStateMachine15_processMessageENS0_11ThreadGuardE\"},{\"b\":\"55CA8A84D000\",\"o\":\"B891E7\",\"s\":\"_ZN5mongo19ServiceStateMachine15_runNextInGuardENS0_11ThreadGuardE\"},{\"b\":\"55CA8A84D000\",\"o\":\"B8CA01\"},{\"b\":\"55CA8A84D000\",\"o\":\"1D7B6F2\",\"s\":\"_ZN5mongo9transport26ServiceExecutorSynchronous8scheduleESt8functionIFvvEENS0_15ServiceExecutor13ScheduleFlagsENS0_23ServiceExecutorTaskNameE\"},{\"b\":\"55CA8A84D000\",\"o\":\"B873D0\",\"s\":\"_ZN5mongo19ServiceStateMachine22_scheduleNextWithGuardENS0_11ThreadGuardENS_9transport15ServiceExecutor13ScheduleFlagsENS2_23ServiceExecutorTaskNameENS0_9OwnershipE\"},{\"b\":\"55CA8A84D000\",\"o\":\"B8A515\",\"s\":\"_ZN5mongo19ServiceStateMachine15_sourceCallbackENS_6StatusE\"},{\"b\":\"55CA8A84D000\",\"o\":\"B88927\",\"s\":\"_ZN5mongo19ServiceStateMachine14_sourceMessageENS0_11ThreadGuardE\"},{\"b\":\"55CA8A84D000\",\"o\":\"B8926D\",\"s\":\"_ZN5mongo19ServiceStateMachine15_runNextInGuardENS0_11ThreadGuardE\"},{\"b\":\"55CA8A84D000\",\"o\":\"B8CA01\"},{\"b\":\"55CA8A84D000\",\"o\":\"1D7BC55\"},{\"b\":\"55CA8A84D000\",\"o\":\"24A69D4\"},{\"b\":\"7F24D6064000\",\"o\":\"7E65\"},{\"b
\":\"7F24D5C96000\",\"o\":\"FE88D\",\"s\":\"clone\"}],\"processInfo\":{ \"mongodbVersion\" : \"4.0.16\", \"gitVersion\" : \"2a5433168a53044cb6b4fa8083e4cfd7ba142221\", \"compiledModules\" : [ \"enterprise\" ], \"uname\" : { \"sysname\" : \"Linux\", \"release\" : \"3.10.0-1062.12.1.el7.x86_64\", \"version\" : \"#1 SMP Tue Feb 4 23:02:59 UTC 2020\", \"machine\" : \"x86_64\" }, \"somap\" : [ { \"b\" : \"55CA8A84D000\", \"elfType\" : 3, \"buildId\" : \"E771748E4A839BBBD202C6EE993FAAA39DB36DAD\" }, { \"b\" : \"7FFE334E1000\", \"elfType\" : 3, \"buildId\" : \"4AF65CC22641CA1EF6020AAC0B8769BA121B370E\" }, { \"b\" : \"7F24D904F000\", \"path\" : \"/usr/lib64/libldap_r/libldap-2.4.so.2\", \"elfType\" : 3, \"buildId\" : \"E17DAD36A8A8D068135B66CFF68E2E55C0B7ECB9\" }, { \"b\" : \"7F24D8E40000\", \"path\" : \"/lib64/liblber-2.4.so.2\", \"elfType\" : 3, \"buildId\" : \"3192C56CD451E18EB9F29CB045432BA9C738DD29\" }, { \"b\" : \"7F24D8987000\", \"path\" : \"/lib64/libnetsnmpmibs.so.31\", \"elfType\" : 3, \"buildId\" : \"F81FF95F7D949F4600F793CD931E9D1AAA574A9D\" }, { \"b\" : \"7F24D8778000\", \"path\" : \"/lib64/libsensors.so.4\", \"elfType\" : 3, \"buildId\" : \"A2ACE3E193F25778AA87D2E221945FDCCFCF220F\" }, { \"b\" : \"7F24D8574000\", \"path\" : \"/lib64/libdl.so.2\", \"elfType\" : 3, \"buildId\" : \"18113E6E83D8E981B8E8D808F7F3DBB23F950A1D\" }, { \"b\" : \"7F24D830C000\", \"path\" : \"/lib64/librpm.so.3\", \"elfType\" : 3, \"buildId\" : \"54CE5D0D50631EC1887BC8C7BBD0B91C1A9484E9\" }, { \"b\" : \"7F24D80DF000\", \"path\" : \"/lib64/librpmio.so.3\", \"elfType\" : 3, \"buildId\" : \"E1EBFDA8DAE64D8A88790EDF43107FBA7E5247BA\" }, { \"b\" : \"7F24D7E70000\", \"path\" : \"/lib64/libnetsnmpagent.so.31\", \"elfType\" : 3, \"buildId\" : \"364D0B1B785E4EDDC1D6DC8D93560DDCB0ADB069\" }, { \"b\" : \"7F24D7C65000\", \"path\" : \"/lib64/libwrap.so.0\", \"elfType\" : 3, \"buildId\" : \"8C4AA46577D3AA7EBF8338BDFAECC6586EF29906\" }, { \"b\" : \"7F24D7962000\", \"path\" : \"/lib64/libnetsnmp.so.31\", \"elfType\" : 3, \"buildId\" : \"1B2EFF0A2F1F6B442E4CF9762FDEA5607BE3149C\" }, { \"b\" : \"7F24D76F0000\", \"path\" : \"/lib64/libssl.so.10\", \"elfType\" : 3, \"buildId\" : \"3B305C3BA17FE394862E749763F2956C9C890C2E\" }, { \"b\" : \"7F24D728D000\", \"path\" : \"/lib64/libcrypto.so.10\", \"elfType\" : 3, \"buildId\" : \"4CF1939F660008CFA869D8364651F31AACD2C1C4\" }, { \"b\" : \"7F24D7070000\", \"path\" : \"/lib64/libsasl2.so.3\", \"elfType\" : 3, \"buildId\" : \"E2F2017F821DD1B9D307DA1A9B8014F2941AEB7B\" }, { \"b\" : \"7F24D6E23000\", \"path\" : \"/lib64/libgssapi_krb5.so.2\", \"elfType\" : 3, \"buildId\" : \"E2AA8CA3D3164E7DBEC293BFA0B55D2B10DAC05D\" }, { \"b\" : \"7F24D6BB9000\", \"path\" : \"/lib64/libcurl.so.4\", \"elfType\" : 3, \"buildId\" : \"7C71A471444AD18F73AFAEA3EB42431A6DA96534\" }, { \"b\" : \"7F24D68B7000\", \"path\" : \"/lib64/libm.so.6\", \"elfType\" : 3, \"buildId\" : \"5681C054FDABCF789F4DDA66E94F1F6ED1747327\" }, { \"b\" : \"7F24D669E000\", \"path\" : \"/lib64/libresolv.so.2\", \"elfType\" : 3, \"buildId\" : \"3009B26B33156EAAF99787AA3DA0C6AE99649755\" }, { \"b\" : \"7F24D6496000\", \"path\" : \"/lib64/librt.so.1\", \"elfType\" : 3, \"buildId\" : \"4749697BF078337576C4629F0D30B296A0939779\" }, { \"b\" : \"7F24D6280000\", \"path\" : \"/lib64/libgcc_s.so.1\", \"elfType\" : 3, \"buildId\" : \"DAC0179F4555AEFEC9E97476201802FD20C03EC5\" }, { \"b\" : \"7F24D6064000\", \"path\" : \"/lib64/libpthread.so.0\", \"elfType\" : 3, \"buildId\" : \"8B33F7F8C86F8D544C63C5541A8E42B3DDFEF8B1\" }, { \"b\" : \"7F24D5C96000\", 
\"path\" : \"/lib64/libc.so.6\", \"elfType\" : 3, \"buildId\" : \"398944D32CF16A67AF51067A326E6C0CC14F90ED\" }, { \"b\" : \"7F24D92AE000\", \"path\" : \"/lib64/ld-linux-x86-64.so.2\", \"elfType\" : 3, \"buildId\" : \"5CC1A53B747A7E4D21198723C2B633E54F3C06D9\" }, { \"b\" : \"7F24D5A3D000\", \"path\" : \"/lib64/libssl3.so\", \"elfType\" : 3, \"buildId\" : \"B6321C434B5C7386B144B925CEE2798D269FDDF5\" }, { \"b\" : \"7F24D5815000\", \"path\" : \"/lib64/libsmime3.so\", \"elfType\" : 3, \"buildId\" : \"BDA454441F59F41D2DA36E13CEA1FC4CE95B2BBB\" }, { \"b\" : \"7F24D54E6000\", \"path\" : \"/lib64/libnss3.so\", \"elfType\" : 3, \"buildId\" : \"DC3B36B530F506DE4FC1A6612D7DF44D4A3DDCDB\" }, { \"b\" : \"7F24D52B6000\", \"path\" : \"/lib64/libnssutil3.so\", \"elfType\" : 3, \"buildId\" : \"32C8FB6C2768FFE41E0A15CBF2089A4202CA2290\" }, { \"b\" : \"7F24D50B2000\", \"path\" : \"/lib64/libplds4.so\", \"elfType\" : 3, \"buildId\" : \"325B8CE57A776DE0B24B362A7E0C90E903B1A4B8\" }, { \"b\" : \"7F24D4EAD000\", \"path\" : \"/lib64/libplc4.so\", \"elfType\" : 3, \"buildId\" : \"0460FF10A3C63749113D380C40E10DFCF066C76E\" }, { \"b\" : \"7F24D4C6F000\", \"path\" : \"/lib64/libnspr4.so\", \"elfType\" : 3, \"buildId\" : \"8840B019EDB66B0CFBD2F77EF196440F7928106E\" }, { \"b\" : \"7F24D48E1000\", \"path\" : \"/usr/lib64/perl5/CORE/libperl.so\", \"elfType\" : 3, \"buildId\" : \"E2C3C10A756404CC8888CD6CA8DFAD26064EF3CB\" }, { \"b\" : \"7F24D46C7000\", \"path\" : \"/lib64/libnsl.so.1\", \"elfType\" : 3, \"buildId\" : \"DD24971BA9AB317654ED2C1DCEB76BBDCDA5A6D1\" }, { \"b\" : \"7F24D4490000\", \"path\" : \"/lib64/libcrypt.so.1\", \"elfType\" : 3, \"buildId\" : \"84467C988F41D853C58353BEB247670E15DA8BAD\" }, { \"b\" : \"7F24D428D000\", \"path\" : \"/lib64/libutil.so.1\", \"elfType\" : 3, \"buildId\" : \"E0D39E293DC99997E7B4C9B6203301E6CD904B50\" }, { \"b\" : \"7F24D407D000\", \"path\" : \"/lib64/libbz2.so.1\", \"elfType\" : 3, \"buildId\" : \"0C85C0386F0CF41EA39969CF7F58A558D1AD3235\" }, { \"b\" : \"7F24D3E67000\", \"path\" : \"/lib64/libz.so.1\", \"elfType\" : 3, \"buildId\" : \"B9D5F73428BD6AD68C96986B57BEA3B7CEDB9745\" }, { \"b\" : \"7F24D3C4F000\", \"path\" : \"/lib64/libelf.so.1\", \"elfType\" : 3, \"buildId\" : \"F580CBEA123378EEDE9427F54758697A458411F5\" }, { \"b\" : \"7F24D3A29000\", \"path\" : \"/lib64/liblzma.so.5\", \"elfType\" : 3, \"buildId\" : \"3B2C97C1937B73A69C412A96D0810C43DF0C6F54\" }, { \"b\" : \"7F24D381F000\", \"path\" : \"/lib64/libpopt.so.0\", \"elfType\" : 3, \"buildId\" : \"7AE00165FBAF6920DD5AED6905820DDBAB589E84\" }, { \"b\" : \"7F24D35F8000\", \"path\" : \"/lib64/libselinux.so.1\", \"elfType\" : 3, \"buildId\" : \"D2DD4DA3FDE1477D25BFFF80F3A25FDB541A8179\" }, { \"b\" : \"7F24D33F3000\", \"path\" : \"/lib64/libcap.so.2\", \"elfType\" : 3, \"buildId\" : \"3BC565E0565C33B1BD37AE0070F7D8E2CE4313E4\" }, { \"b\" : \"7F24D31EA000\", \"path\" : \"/lib64/libacl.so.1\", \"elfType\" : 3, \"buildId\" : \"7F39882FC0B80BE53790C2EAC307D39F7DE1AD6E\" }, { \"b\" : \"7F24D2FBC000\", \"path\" : \"/lib64/liblua-5.1.so\", \"elfType\" : 3, \"buildId\" : \"BDD4B9CFC1D3F31D3A5A430D2F9080E020C5B0BA\" }, { \"b\" : \"7F24D2BFD000\", \"path\" : \"/lib64/libdb-5.3.so\", \"elfType\" : 3, \"buildId\" : \"CA8916E2C5EB6FF8582E059700E3347178823728\" }, { \"b\" : \"7F24D29D4000\", \"path\" : \"/lib64/libaudit.so.1\", \"elfType\" : 3, \"buildId\" : \"2E36E1B9A2D92C969E38CDDCC729F55D8BACBB2B\" }, { \"b\" : \"7F24D26EB000\", \"path\" : \"/lib64/libkrb5.so.3\", \"elfType\" : 3, \"buildId\" : \"3EE7267AF7BFD3B132E6A222D997DA09C96C90DD\" 
}, { \"b\" : \"7F24D24E7000\", \"path\" : \"/lib64/libcom_err.so.2\", \"elfType\" : 3, \"buildId\" : \"67E935BFABA2C914C01156B88947DD515EA51170\" }, { \"b\" : \"7F24D22B4000\", \"path\" : \"/lib64/libk5crypto.so.3\", \"elfType\" : 3, \"buildId\" : \"82E28CACB60C27CD6F14A6D2268F0CFF621664D0\" }, { \"b\" : \"7F24D20A4000\", \"path\" : \"/lib64/libkrb5support.so.0\", \"elfType\" : 3, \"buildId\" : \"4F5FBB2087BE132892467C4E7A46A3D07E5DA40B\" }, { \"b\" : \"7F24D1EA0000\", \"path\" : \"/lib64/libkeyutils.so.1\", \"elfType\" : 3, \"buildId\" : \"2E01D5AC08C1280D013AAB96B292AC58BC30A263\" }, { \"b\" : \"7F24D1C6D000\", \"path\" : \"/lib64/libidn.so.11\", \"elfType\" : 3, \"buildId\" : \"2B77BBEFFF65E94F3E0B71A4E89BEB68C4B476C5\" }, { \"b\" : \"7F24D1A40000\", \"path\" : \"/lib64/libssh2.so.1\", \"elfType\" : 3, \"buildId\" : \"1AF123CADB2F2910E89CBD540A06D3B33692F95E\" }, { \"b\" : \"7F24D183D000\", \"path\" : \"/lib64/libfreebl3.so\", \"elfType\" : 3, \"buildId\" : \"197680DAE6538245CB99723E57447C4EF2E98362\" }, { \"b\" : \"7F24D15DB000\", \"path\" : \"/lib64/libpcre.so.1\", \"elfType\" : 3, \"buildId\" : \"9CA3D11F018BEEB719CDB34BE800BF1641350D0A\" }, { \"b\" : \"7F24D13D6000\", \"path\" : \"/lib64/libattr.so.1\", \"elfType\" : 3, \"buildId\" : \"2617ECC6738047E207AE3ADD990BD6A34D11B265\" }, { \"b\" : \"7F24D11D0000\", \"path\" : \"/lib64/libcap-ng.so.0\", \"elfType\" : 3, \"buildId\" : \"43578677DF613E9D58128ED4AE0C344FBC1E44C0\" } ] }}\n mongod(_ZN5mongo15printStackTraceERSo+0x41) [0x55ca8cd9c541]\n mongod(+0x254E759) [0x55ca8cd9b759]\n mongod(+0x254EC3D) [0x55ca8cd9bc3d]\n libpthread.so.0(+0xF5F0) [0x7f24d60735f0]\n libc.so.6(gsignal+0x37) [0x7f24d5ccc337]\n libc.so.6(abort+0x148) [0x7f24d5ccda28]\n mongod(_ZN5mongo24invariantOKFailedWithMsgEPKcRKNS_6StatusERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES1_j+0x0) [0x55ca8b29e516]\n mongod(_ZN5mongo21WiredTigerRecordStore7compactEPNS_16OperationContextEPNS_25RecordStoreCompactAdaptorEPKNS_14CompactOptionsEPNS_12CompactStatsE+0xBA) [0x55ca8b6c095a]\n mongod(_ZN5mongo14CollectionImpl7compactEPNS_16OperationContextEPKNS_14CompactOptionsE+0x1A8) [0x55ca8bad6698]\n mongod(_ZN5mongo10CompactCmd9errmsgRunEPNS_16OperationContextERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKNS_7BSONObjERS8_RNS_14BSONObjBuilderE+0x96B) [0x55ca8b93d4bb]\n mongod(_ZN5mongo23ErrmsgCommandDeprecated3runEPNS_16OperationContextERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKNS_7BSONObjERNS_14BSONObjBuilderE+0x46) [0x55ca8c7d88e6]\n mongod(_ZN5mongo12BasicCommand10Invocation3runEPNS_16OperationContextEPNS_19CommandReplyBuilderE+0xD9) [0x55ca8c7df2f9]\n mongod(+0xB92D6E) [0x55ca8b3dfd6e]\n mongod(+0xB94C69) [0x55ca8b3e1c69]\n mongod(_ZN5mongo23ServiceEntryPointCommon13handleRequestEPNS_16OperationContextERKNS_7MessageERKNS0_5HooksE+0x3D1) [0x55ca8b3e2bb1]\n mongod(_ZN5mongo23ServiceEntryPointMongod13handleRequestEPNS_16OperationContextERKNS_7MessageE+0x3A) [0x55ca8b3ce85a]\n mongod(_ZN5mongo19ServiceStateMachine15_processMessageENS0_11ThreadGuardE+0xBA) [0x55ca8b3daf8a]\n mongod(_ZN5mongo19ServiceStateMachine15_runNextInGuardENS0_11ThreadGuardE+0x97) [0x55ca8b3d61e7]\n mongod(+0xB8CA01) [0x55ca8b3d9a01]\n mongod(_ZN5mongo9transport26ServiceExecutorSynchronous8scheduleESt8functionIFvvEENS0_15ServiceExecutor13ScheduleFlagsENS0_23ServiceExecutorTaskNameE+0x1A2) [0x55ca8c5c86f2]\n 
mongod(_ZN5mongo19ServiceStateMachine22_scheduleNextWithGuardENS0_11ThreadGuardENS_9transport15ServiceExecutor13ScheduleFlagsENS2_23ServiceExecutorTaskNameENS0_9OwnershipE+0x150) [0x55ca8b3d43d0]\n mongod(_ZN5mongo19ServiceStateMachine15_sourceCallbackENS_6StatusE+0xB55) [0x55ca8b3d7515]\n mongod(_ZN5mongo19ServiceStateMachine14_sourceMessageENS0_11ThreadGuardE+0x357) [0x55ca8b3d5927]\n mongod(_ZN5mongo19ServiceStateMachine15_runNextInGuardENS0_11ThreadGuardE+0x11D) [0x55ca8b3d626d]\n mongod(+0xB8CA01) [0x55ca8b3d9a01]\n mongod(+0x1D7BC55) [0x55ca8c5c8c55]\n mongod(+0x24A69D4) [0x55ca8ccf39d4]\n libpthread.so.0(+0x7E65) [0x7f24d606be65]\n libc.so.6(clone+0x6D) [0x7f24d5d9488d]\n----- END BACKTRACE -----\n2020-03-09T15:02:26.364+0000 I CONTROL [main] ***** SERVER RESTARTED *****\n",
"text": "Hello,we currently have instance in Atlas and we would like to compact our storage space as we did some data cleanup after filling up the whole data storage.We are not able to run compact command on SECONDARY nodes anymore, even when we tried to rotate nodes to PRIMARY.After running compact command MongoDB Server restarts and prints backtrace, we are able to run compact command on other collections, which seems to be even more strange.MongoDB Version 4.0.16Logs:\n2020-03-09T15:02:10.647+0000 I REPL [conn316] going into maintenance mode with 0 other maintenance mode tasks in progress\n2020-03-09T15:02:10.647+0000 I REPL [conn316] transition to RECOVERING from SECONDARY\n2020-03-09T15:02:10.647+0000 I REPL [conn316] Resetting sync source to empty, which was\n2020-03-09T15:02:10.647+0000 I COMMAND [conn316] compact saleschamp.webhookRequests begin, options: paddingMode: NONE validateDocuments: 1\n2020-03-09T15:02:11.334+0000 W REPL [rsBackgroundSync] Fetcher stopped querying remote oplog with error: InvalidSyncSource: sync source (config version: 1; last applied optime: { ts: Timestamp(1583766131, 1), t: 87 }; sync source index: -1; primary index: 0) is no longer valid\n2020-03-09T15:02:12.948+0000 I REPL [SyncSourceFeedback] SyncSourceFeedback error sending update to app-prod-01-shard-00-00-ea5oi.mongodb.net:27017: InvalidSyncSource: Sync source was cleared. Was app-prod-01-shard-00-00-ea5oi.mongodb.net:27017\n2020-03-09T15:02:13.663+0000 E STORAGE [conn316] WiredTiger error (16) [1583766133:663912][13639:0x7f24aa4dc700], WT_SESSION.compact: __compact_worker, 302: compaction halted by eviction pressure: Device or resource busy Raw: [1583766133:663912][13639:0x7f24aa4dc700], WT_SESSION.compact: __compact_worker, 302: compaction halted by eviction pressure: Device or resource busy\n2020-03-09T15:02:13.663+0000 F - [conn316] Invariant failure: ret resulted in status UnknownError: 16: Device or resource busy at src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp 1516\n2020-03-09T15:02:13.664+0000 F - [conn316]",
"username": "jan"
},
{
"code": "",
"text": "we currently have instance in Atlas and we would like to compact our storage space as we did some data cleanup after filling up the whole data storage.Welcome @jan,Since this is an Atlas operational question, I suggest contacting the support team when logged into your account.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "Hello @Stennie_X,I got forwarded to the forum as regular basic support is not able to solve this issue.",
"username": "jan"
},
{
"code": "mongodfoo.barmongod",
"text": "Hi @jan,The compaction error suggests your deployment may not have sufficient resources to complete (since the command is halted by cache eviction pressure), however the “device or resource busy” message and invariant failure seem unexpected. This is perhaps a generic message from the system I/O library, but I couldn’t find any similar problem reports of errors running compact.Compaction may not be the best solution if you are trying to free up a significant amount of unused disk space: it is per collection and requires the secondary to go into maintenance mode (which will lead to cache pressure for a busy deployment). Compact attempts to reduce the space used by data and indexes, but effectiveness depends on the contents of the data files.In normal usage compaction should be unnecessary as unused space within data files is marked as available for reuse by new write activity.I got forwarded to the forum as regular basic support is not able to solve this issue.Given you are encountering an invariant failure which causes the mongod process to abort, this is something that the support team should help you with. I’ve checked with the support team and if you can raise the issue again they’ll investigate.I would open with a more concise description of the help needed, along the lines of:When we try to run compact on the foo.bar collection on a secondary, the mongod process aborts with an invariant failure preceded by “compaction halted by eviction pressure: Device or resource busy”.However, investigating the compaction error may not be the most productive approach if the underlying issue is insufficient resources.If you want to maximise space recovery, I would recommend requesting a re-sync for secondaries with excessive preallocated storage. This is also something the support team can assist with for an Atlas cluster, using rolling maintenance to re-sync one replica set member at a time.Regards,\nStennie",
"username": "Stennie_X"
}
] | "compact" fails with 302: compaction halted by eviction pressure: Device or resource busy | 2020-03-10T19:51:28.540Z | “compact” fails with 302: compaction halted by eviction pressure: Device or resource busy | 4,424 |
null | [
"legacy-realm-cloud"
] | [
{
"code": "",
"text": "Dear anyone,\nI sent ticket but got no replies yet (#5870 #5875)\nI have fill in my personal credit card and would not be refund from my company\nThey told me to use company credit card. I would like to get a refund and continue with my enterprise accountSincerely yours,Thank you for doing great platform",
"username": "Ph_m_Quan"
},
{
"code": "",
"text": "I sent ticket but got no replies yet (#5870 #5875)@Ph_m_Quan Both tickets were filed within the last 24 hours; someone should reply shortly.I merged the second ticket into #5870; it is best to keep related discussion on the same ticket to avoid confusion.Thanks,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "Thank for quick response.\nBtw I would like to know if I should use Realm Cloud now to build new project or just wait for MongoDb Realm. I used to use Realm long time ago. But now it has changed a lot.",
"username": "Ph_m_Quan"
},
{
"code": "",
"text": "I would like to know if I should use Realm Cloud now to build new project or just wait for MongoDb Realm. I used to use Realm long time ago. But now it has changed a lot.If you need a sync solution in the short term, use Realm Cloud.MongoDB Realm is still in development, but you can check out the MongoDB Realm Roadmap for updates and subscribe to be notified when a public beta is available.Per the public roadmap, we ultimately expect to offer an automated or guided migration from Realm Cloud to MongoDB Realm.Regards,\nStennie",
"username": "Stennie_X"
}
] | ASAP Please help me cancel/refund my wrong subscription | 2020-03-10T19:50:54.369Z | ASAP Please help me cancel/refund my wrong subscription | 2,366 |
null | [] | [
{
"code": "",
"text": "Hi everyone,I’m Danilo Joksimovic and I’m the CEO of Srvice (www.srvice.ca) - an online service marketplace.I joined the MongoDB Startup Accelerator in Summer 2019 to meet other founders and to gain community support with our MongoDB infrastructure.",
"username": "Danilo_Joksimovic"
},
{
"code": "",
"text": "Welcome @Danilo_Joksimovic Fellow KW community member here.",
"username": "chris"
},
{
"code": "",
"text": " Hi @Danilo_Joksimovic and welcome to the community!",
"username": "Doug_Duncan"
},
{
"code": "",
"text": "Hi @Danilo_Joksimovic! Thanks for joining us & welcome ",
"username": "Jamie"
}
] | Greetings from Waterloo, Canada! | 2020-03-10T08:55:57.419Z | Greetings from Waterloo, Canada! | 2,172 |
null | [] | [
{
"code": "",
"text": "Hi all,I’m Ruben a.k.a. Aartsie from the Netherlands. Currently I work as software developer and using Mongodb since 2012/2013.I would like to share and keep up to date with the community. What i like about these forums instead of slack is that you can always search back to an older post with valuable information.Best regards,\nRuben",
"username": "Aartsie"
},
{
"code": "",
"text": " Welcome to the community @Aartsie! The forums are definitely a better way to find past information. And it’s nice to have a single place to go with the consolidation of Slack and Google Groups.",
"username": "Doug_Duncan"
},
{
"code": "",
"text": "Hello, welcome @Aartsie@Doug_Duncan I was not aware of google groups, will they shut down the slack channel completely soon?",
"username": "coderkid"
},
{
"code": "",
"text": "Welcome, @Aartsie!@coderkid - yes, as of this past Monday, the Slack group is retired.",
"username": "Jamie"
}
] | Hello from the Netherlands | 2020-03-10T08:55:50.708Z | Hello from the Netherlands | 1,777 |
null | [] | [
{
"code": "",
"text": "Hello everyone, my name is Alexandre from Brazil.Is there any information about when the list of sessions from MongoDB World 2020 will be available ?Alexandre",
"username": "Alexandre_Araujo"
},
{
"code": "",
"text": "Hi @Alexandre_AraujoWelcome! We’re in the process of finalising the programme at the moment and hope to have it finished within the next fortnight, possibly this week. We’ll then need to work with our web team and other stakeholders to get it published. I’d expect to have all of that done late this month, everything proceeding smoothly.I can tell you we have a great line up already and it covers the full spectrum of our data platform.Is there anything you are particularly interested in or that you think would make the event better if it was in the sessions ?Thanks!\nEoin",
"username": "Eoin_Brazil"
},
{
"code": "",
"text": "Wanted to follow up here and close the loop. Just announced today: In response to the coronavirus/COVID-19, MongoDB World 2020 will now be MongoDB.live - a free virtual event, taking place on May 4-5. Visit MongoDB World 2022 | June 7-9, 2022 | MongoDB to learn more.Folks who have already registered and purchased a ticket can either transfer their ticket to MongoDB World 2021 or receive a refund. Here’s an FAQ for more details: https://www.mongodb.com/world/faq",
"username": "Jamie"
},
{
"code": "",
"text": "",
"username": "Stennie_X"
}
] | MongoDB World 2020 session catalog? | 2020-02-10T21:38:22.696Z | MongoDB World 2020 session catalog? | 2,200 |
null | [] | [
{
"code": "",
"text": "Are any considerations being given to postpone or change the date for the MongoDB World Conference? I ask because a majority of major events are starting to be canceled (latest being Google I/O) due to concerns of the Coronavirus spreading to the US.Stealing the headline of an article:\n’ Coronavirus Update: Google I/O, Facebook F8 and more 2020 tech conference cancellations and travel bansAs the COVID-19 outbreak spreads, tech conferences are being canceled, postponed, or turned into virtual events. Tech companies are also restricting employee travel.’",
"username": "Jason_Jackson"
},
{
"code": "",
"text": "Due to this insecurity I delay my fight booking from Germany to NYC.\nI assume that no one knows the facts right now.\nJust looking at the numbers, I almost made my decision that I have to skip MDBW20Michael",
"username": "michael_hoeller"
},
{
"code": "",
"text": "Add SxSW to the list of cancelled events.SXSW, the annual Austin music and arts festival, has been canceled over concerns about coronavirus.",
"username": "Jason_Jackson"
},
{
"code": "",
"text": "In light of global precautions around the spread of coronavirus (COVID-19), and in alignment with the recommendations laid out by the CDC, WHO, and other relevant entities, we are reinventing our global annual user conference. MongoDB World 2020 is now MongoDB.live, and will take place online on May 4th and 5th.Please see MongoDB World 2022 | June 7-9, 2022 | MongoDB for more information including answers to Frequently Asked Questions (such as what happens if you have already purchased a pass).Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "Thank you for the update!",
"username": "Jason_Jackson"
},
{
"code": "",
"text": "",
"username": "Stennie_X"
}
] | MongoDB World Status? | 2020-03-05T15:18:25.962Z | MongoDB World Status? | 2,246 |
null | [
"aggregation",
"golang"
] | [
{
"code": "\t\t\t\"$lookup\": bson.M{\n\t\t\t\t\"from\": \"place\",\n\t\t\t\t\"let\": bson.M{\"resource_id\": \"$resource_id\"},\n\t\t\t\t\"pipeline\": []bson.M{\n\t\t\t\t\tbson.M{\n\t\t\t\t\t\t\"$match\": bson.M{\n\t\t\t\t\t\t\t\"$expr\": bson.M{\n\t\t\t\t\t\t\t\t\"$and\": []bson.M{\n\t\t\t\t\t\t\t\t\tbson.M{\"$eq\": []string{\"$_id\", \"$$resource_id\"}},\n\t\t\t\t\t\t\t\t\tbson.M{\"$eq\": []string{\"$status\", \"active\"}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"as\": \"place\",\n\t\t\t},\n\t\t},\n$match --> $expr --> $and --> $eq$match --> $and --> $expr --> $eq\t\t{\n\t\t\t\"$lookup\": {\n\t\t\t\t\"from\": \"place\",\n\t\t\t\t\"let\" : {\"resource_id\" : \"$resource_id\"},\n\t\t\t\t\"pipeline\" : [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"$match\" : {\n\t\t\t\t\t\t\t\"$and\" :[\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"$expr\" : {\n\t\t\t\t\t\t\t\t\t\t\"$eq\": [\"$_id\" , \"$$resource_id\"]\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"$expr\" : {\n\t\t\t\t\t\t\t\t\t\t\"$eq\": [\"$status\", \"active\"]\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t]\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"as\" : \"place\"\n\t\t\t}\n\t\t}\n",
"text": "Hello,I take over a code in below:I am quite confusing about the sequence of $match --> $expr --> $and --> $eq\nI think the correct one should be: $match --> $and --> $expr --> $eq. If both of the two sequence are correct, what’s the difference on the result?in the mean while, I want to use json to represent the stage above, so here is my code:is it correct?thanks,James",
"username": "Zhihong_GUO"
},
{
"code": "",
"text": "Hi,I’m not sure if your aggregation pipeline is correct, but you should be able to verify the correctness using the shell and some test data. If the ordering of the pipeline matters, I’d recommend using an ordered type like bson.D. bson.M is a map[string]interface{} and the Go language does not guarantee that maps will have the same order each time they’re iterated.– Divjot",
"username": "Divjot_Arora"
},
{
"code": "func BuildFilterFromJson(jsonString string) interface{} {\n var filter bson.M //here if I can user bson.D\n if err := bson.UnmarshalExtJSON([]byte(jsonString), false, &filter); \n err != nil {\n fmt.Println(err.Error())\n return nil\n }\n return filter\n}\n{\"$and\" : [{\"$text\":{\"$search\" : %q}}, {\"status\" : \"active\"}]}map[$and:[map[$text:map[$search:bureau]] map[status:active]]]\n[{$and [[{$text [{$search bureau}]}] [{status active}]]}]\n",
"text": "Hello Divjot,Thanks to trigger another question and I hope it will not bother you. My understanding about bson.D and bson.M are limited, I know one is slice and the other is map. Then any other difference?For example I have a function to create filter from json string:For a json string like: {\"$and\" : [{\"$text\":{\"$search\" : %q}}, {\"status\" : \"active\"}]}The BuildFilterFromJson can build the filter successfully to be a bson.M or bson.D. The output are:andFor a input json string, is there any cases that a only 1 can work, i.e bson.D can be created but bson.M can’t be created, or vice versa. If I use bson.D in func BuildFilterFromJson, should I define some rule or guildline for people when they writing the json string?thanks,James",
"username": "Zhihong_GUO"
},
{
"code": "Database.RunCommandBuildFilterFromJsonbson.Dbson.Mbson.D",
"text": "bson.D and bson.M both represent a BSON document. Like you said, bson.D is a slice and bson.M is a map. The difference is that bson.D will maintain insertion order but bson.M will not. This is because maps in the Go language are not ordered when you iterate over them. In cases where you’re building commands (e.g. if you’re using Database.RunCommand) or doing something else where ordering matters, we recommend using bson.D or a struct that gets translated to BSON.You should be able to switch your BuildFilterFromJson function to use bson.D without any issues. Anything that can be represented in a bson.M can be represented in a bson.D and vice versa.– Divjot",
"username": "Divjot_Arora"
},
{
"code": "",
"text": "Hello Divjot,As usual, your answer is clear and helpful. Many thanks for the support!James",
"username": "Zhihong_GUO"
}
] | The sequence of operators in pipeline | 2020-03-10T10:28:36.606Z | The sequence of operators in pipeline | 3,506 |
null | [] | [
{
"code": "",
"text": "If the worst comes true … I’d be interested joining MDBW20 as an online event. I have no clue if the MDBW Team could handle this. Surely not everything could be changed to be online. But better than no event or?\nSure, we will miss the chats and the one or the other beer, but that can be covered in 2021 …?What do you think about that ?\n0\nvoters\n",
"username": "michael_hoeller"
},
{
"code": "",
"text": "Those aren’t the only two options - some events are rescheduling for later in the year rather than canceling outright. (I have no knowledge of what will be the case with MDBW, just pointing out another possibility).",
"username": "Asya_Kamsky"
},
{
"code": "",
"text": "Hi Asya,\nglad to hear from you and see that plans seem to be around. I am looking forward to any kind of event.\nHope to meet you soon again\nMichael",
"username": "michael_hoeller"
},
{
"code": "",
"text": "Those aren’t the only two options - some events are rescheduling for later in the year rather than canceling outright. (I have no knowledge of what will be the case with MDBW, just pointing out another possibility).i really want news about this, i need to urgently plan the trip or not!",
"username": "Leandro_Domingues"
},
{
"code": "",
"text": "In light of global precautions around the spread of coronavirus (COVID-19), and in alignment with the recommendations laid out by the CDC, WHO, and other relevant entities, we are reinventing our global annual user conference. MongoDB World 2020 is now MongoDB.live, and will take place online on May 4th and 5th.Please see MongoDB World 2022 | June 7-9, 2022 | MongoDB for more information including answers to Frequently Asked Questions (such as what happens if you have already purchased a pass).Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "",
"username": "Stennie_X"
}
] | What to do if MDBW20 gets canceled? | 2020-03-07T17:00:00.985Z | What to do if MDBW20 gets canceled? | 1,910 |
null | [
"replication",
"security",
"ops-manager"
] | [
{
"code": "",
"text": "Hi,We have a large replicaset that is currently encrypted with a keyfile. We have introduced a KMIP solution and have successfully encrypted a number of internal replicasets. However, I am unable to push this change through Ops Manager. I receive this error.[initandlisten] Unable to retrieve key .system, error: KMIP get key ‘local’ failed, code: 1 error: Object with unique identifier ‘local’ not found.Is it possible to remove the encryptionKeyFile option and add in the KMIP encryption options in one change? Note, I am making MongoDB generate the keys rather that use a key identifier at this point so the 4 new options added are the kmipServerName, kmipPort. kmipServerCAFile, kmipClientCertificateFile.My assumption here is that ‘local’ in the error log is referring to the original keyfile I had in place.Maybe it’s a case of having to do this in two steps? 1) Remove the encryptionKeyFile & set enableEncryption to False and allow this to perform an initial sync (PSA) and step 2) Add in kmip encryption as I have done per other replicasets?Thoughts,\nClive",
"username": "clivestrong"
},
{
"code": "",
"text": "As an Ops Manager customer this definitely something our dedicated support team can help you with. I would raise a ticket with them directly.",
"username": "Joe_Drumgoole"
},
{
"code": "",
"text": "Hi Joe,I have a ticket open with Support. Brian has been assisting me with this. As this happened on Saturday, I updated the ticket and thought I’d drop a question in here in case I could get this moving during the weekend!Regards,\nClive",
"username": "clivestrong"
},
{
"code": "",
"text": "An update on this issue should anyone else have problems. The issue seems to be related to the fact that Ops Manager cannot effectively clear down the dbpath and get’s somewhat confused by the keyfile encryption that is in place.Initially, the MongoDB support team suggested that once I updated the replicaset to add in the kmip server, I would have to suspend the arbiter and clear down the dbpath manually, but the data nodes should be fine. This was not the case. To encrypt the data, I had to perform the stop, cleardown, start on all 3 members.Hope that helps!\nClive",
"username": "clivestrong"
}
] | Changing the encryption method of a replica set | 2020-02-22T10:34:15.828Z | Changing the encryption method of a replica set | 2,544 |
null | [
"aggregation",
"golang"
] | [
{
"code": "{\n\t\"_id\" : \"AAX\",\n\t\"user_id\" : ObjectId(\"5cc6ebf58a158c00010b3d74\"),\n\t\"name\" : \"mon super Bureau\",\n\t\"status\" : \"active\",\n\t\"location\" : {\n\t\t\"type\" : \"Point\",\n\t\t\"coordinates\" : [\n\t\t\t48.1259880065918,\n\t\t\t-1.6275769472122192\n\t\t],\n\t\t\"accuracy\" : 14\n\t},\n\t\"creation_date\" : ISODate(\"2019-05-28T14:26:03.579Z\")\n} \n{\n \"id\": \"AAX\",\n \"user_id\": \"5cc6ebf58a158c00010b3d74\",\n \"name\" : \"mon super Bureau\", \n \"status\": \"active\",\n \"location\": {\n \"type\": \"Point\",\n \"coordinates\": [\n 48.125988,\n -1.627576\n ],\n \"accuracy\": 14\n },\n \"creation_date\": {\n \"seconds\": 1559053563,\n \"nanos\": 579000000\n },\n}\n",
"text": "Hello, when I search the database by pipeline and aggregation, I will get this error when I execute cursor.Decode to my struct. But if I search the db by collection.Find (ctx, filter), then no such error when executing the cursor Decode.The data stored in db is something like:The result returned looks like:It looks the lng and lat in location has been truncated and cause such error. But why if I use Find, such error will not happen? any parameters should I set for the pipeline and aggregation?thanks,James",
"username": "Zhihong_GUO"
},
{
"code": "FindCursorFindAggregateDecodeFindFindtrunactetype Foo struct {\n Coordinates []int `bson:\"coordinates,truncate\"`\n}\n",
"text": "Hi,The error you’re seeing in the aggregation is expected because decoding would lose precision and therefore could result in data loss. I’m not sure why you’re not seeing the error when using Find because the Cursor returned by Find and Aggregate is the same, so the Decode code path should be the same. Is it possible that the filter you’re providing to Find is filtering out the document so it’s never returned? If not, can you share the code you’re using for Find as well as the definition of the struct you’re decoding into?If you want the driver to ignore the precision loss and truncate each float into an integer, you can use the trunacte struct tag:This will tell the driver that you’re opting into truncating any floats into integers, even if that will cause loss of precision.– Divjot",
"username": "Divjot_Arora"
},
{
"code": "ID string `json:\"id,omitempty\" bson:\"_id,omitempty\"`\n\nUserID primitive.ObjectID `json:\"user_id,omitempty\" bson:\"user_id,omitempty\"`\n\nName string `json:\"name,omitempty\" bson:\"name,omitempty\"`\n\nStatus Status `json:\"status,omitempty\" bson:\"status,omitempty\"`\n\nLocation geo.GeoJSON `json:\"location,omitempty\" bson:\"location,omitempty\"`\n\nCreationDate time.Time `json:\"creation_date,omitempty\" bson:\"creation_date,omitempty\"`\njson:\"type,omitempty\" bson:\"type,omitempty\"json:\"coordinates,omitempty\" bson:\"coordinates,omitempty\"json:\"accuracy,omitempty\" bson:\"accuracy,omitempty\"{\"$and\" : [{\"$text\":{\"$search\" : %q}}, {\"status\" : \"active\"}]}if err != nil {\n\treturn errors.InternalServerError(\"place_internal_error\", err.Error())\n}\n",
"text": "Hello Divjot,Thank you again for your help the struct for decoding istype Place struct {}\nthe GeoJSON is:\ntype GeoJSON struct {\nType string json:\"type,omitempty\" bson:\"type,omitempty\"\nCoordinates float32 json:\"coordinates,omitempty\" bson:\"coordinates,omitempty\"\nAccuracy int json:\"accuracy,omitempty\" bson:\"accuracy,omitempty\"\n}I check the filter used by Find, which is search place by text:\njsonFilter = {\"$and\" : [{\"$text\":{\"$search\" : %q}}, {\"status\" : \"active\"}]}The other difference is there is no skip and limit on Find. The code of Find looks like:\ncur, err := collection.Find(context.TODO(), filter)\ndefer cur.Close(context.TODO())\nplaces := place.Place{}\nerr = cur.All(context.TODO(), &places)after the cur.All, the places get all of the result and no error happens, so it is not returned from “place_internal_error”",
"username": "Zhihong_GUO"
},
{
"code": "Collection.Aggregate",
"text": "Can you share the code for your call to Collection.Aggregate as well as the struct the aggregation is decoding into? Apologies for asking you to type out so much code, but it is super helpful for us when we have code we can copy/paste and run to reproduce errors.– Divjot",
"username": "Divjot_Arora"
},
{
"code": "geoStage := createGeoStage(in.Lng, in.Lat, in.Radius)\nmatchStage := createMatchStage(in.UserId)\n\ncountStage := `{ \"$count\":\"number\"}`\n\ncounting, err := BuildPipelineFromJsons( geoStage, matchStage, countStage)\nsearching, err := BuildPipelineFromJsons( geoStage, matchStage)\nsearching = appendToPipeline(searching, \"$skip\", 0)\nsearching = appendToPipeline(searching, \"$limit\", 20)\ncur, err := collection.Aggregate(context.TODO(), searching, options.Aggregate())\ndefer cur.Close(context.TODO())\nfor cur.Next(context.TODO()) {\n\tplace := place.Place{}\n\terr = cur.Decode(&place)\n",
"text": "omit the code to count the places as there are no error}the other different of Find and Aggregation is in Find I call CountDocuments to get the total number of places",
"username": "Zhihong_GUO"
},
{
"code": "{%q: %v}geoStage := `\n{\n\t\"$geoNear\":{\n\t\t\"includeLocs\":\"location\",\n\t\t\"distanceField\":\"distance\",\n\t\t\"near\":{\n\t\t\t\"type\":\"Point\",\n\t\t\t\"coordinates\":[ %g, %g]\n\t\t},\n\t\t\"maxDistance\": %v,\n\t\t\"spherical\":true\n\t}\n}`\ngeoStage = fmt.Sprintf(geoStage, lng, lat, radius)\nreturn geoStage\nif id, err := primitive.ObjectIDFromHex(userID); err == nil {\n\tmatchStage := `\n\t{\n\t\t\"$match\":{\n\t\t\t\"$and\":[\n\t\t\t\t{\n\t\t\t\t\t\"$or\":[\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"proprietary\":false\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"user_id\": {\"$oid\": %q}\n\t\t\t\t\t\t}\n\t\t\t\t\t]\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"status\":\"active\"\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t}`\n\tmatchStage = fmt.Sprintf(matchStage, id)\n\treturn matchStage\n} else {\n\treturn `\n\t{\n\t\t\"$match\":{\n\t\t\t\"$and\":[\n\t\t\t\t{\n\t\t\t\t\t\"proprietary\":false\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"status\":\"active\"\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t}`\n}\n",
"text": "func appendToPipeline(pipeline mongo.Pipeline, operator string, parameter int32) mongo.Pipeline {\nstageRaw := fmt.Sprintf({%q: %v}, operator, parameter)\nvar stageCooked bson.D\nerr := bson.UnmarshalExtJSON(byte(stageRaw), false, &stageCooked)\nif err == nil {\npipeline = append(pipeline, stageCooked)\n}\nreturn pipeline\n}func createGeoStage(lng float32, lat float32, radius int32) (jsonStage string) {}func createMatchStage(userID string) (jsonStage string) {}",
"username": "Zhihong_GUO"
},
{
"code": "",
"text": "that’s all of the related code",
"username": "Zhihong_GUO"
},
{
"code": "IntDecodeValueGeoJSON.Accuracyaccuracycur.DecodeAggregatecur.Currentfmt.Println(cur.Current)",
"text": "Hmm I’m not sure what’s going on here. The error is from IntDecodeValue and the only integer field in your struct is GeoJSON.Accuracy so my guess is that the server is returning the accuracy field as a float instead of an integer.One thing to try: instead of calling cur.Decode on the cursor from the Aggregate call, can you print out cur.Current (i.e fmt.Println(cur.Current))? This will print out the exact BSON document the driver received from the server and could help understand what field is malformed.– Divjot",
"username": "Divjot_Arora"
},
{
"code": "type Place struct {\n\t\n\tID string `json:\"id,omitempty\" bson:\"_id,omitempty\"`\n\t\n\tUserID primitive.ObjectID `json:\"user_id,omitempty\" bson:\"user_id,omitempty\"`\n\t\n\tName string `json:\"name,omitempty\" bson:\"name,omitempty\"`\n\t\n\tStatus Status `json:\"status,omitempty\" bson:\"status,omitempty\"`\n\n\tLocation geo.GeoJSON `json:\"location,omitempty\" bson:\"location,omitempty\"`\n\n\tCreationDate time.Time `json:\"creation_date,omitempty\" bson:\"creation_date,omitempty\"`\n\t\n\tDistance int32 `json:\"distance,omitempty\" bson:\"distance,omitempty\"`\n}\n",
"text": "Hello Divjot,Thanks for your guidance. It really helps to find the root of the issue; and forgive my careless yesterday: when I copy the Place struct I missed the last one, there is a “distance” at the end of the struct but I failed to copy it. I believe this field is the root of the error.When I Find the place in fact it is a kind of “search by text”, so no additional “distance” value generated by db. When I use $geoNear I think the db will sort the result by “distance” by default, and the db generated a double value for “distance”. When I send the result to the front end I remove the “distance”, making it more difficult to compare the difference of output from “Find” vs. “Aggregate”. . Everything becomes clear when Print the cur.Current.Thank you again for your time and the great help!Last question, is there a way to set the “distance” in float32 when db generating this value? or I have to change the “Distance” to be in64 in my Place struct?Regards,James",
"username": "Zhihong_GUO"
},
{
"code": "Distance",
"text": "A BSON double value is always 64 bits, so I don’t think there’s a way to change the aggregation to output a float32. Also, doing so wouldn’t help because you’d be trying to decode a float32 into an int32, which could still cause precision loss. My advice would be to update the Distance field to be a float64 to guarantee that precision is never lost.",
"username": "Divjot_Arora"
},
{
"code": "",
"text": "Hello Divjot,Got it, many thanks.James",
"username": "Zhihong_GUO"
}
] | IntDecodeValue can only truncate float64 to an integer type when truncation is enabled | 2020-03-07T12:05:40.868Z | IntDecodeValue can only truncate float64 to an integer type when truncation is enabled | 8,882 |
null | [
"data-modeling"
] | [
{
"code": "\"Parentnode\": {}\n\"Parentnode\": {\"n1\":\"abc\",\"n2\":\"xyz\"}\n",
"text": "{}\n{}i have one key “parentnode” in mongo db. which can have data and could be nothing also. so should i keep {} or null.yours sincerely.",
"username": "Rajesh_Yadav"
},
{
"code": "",
"text": "Mongo DB have a flexible schema. Even if you haven’t or forgot to define a key in your data model it will accept when you will insert the data it will accept it. The format should only be followed by the key you have specified in yours schema.",
"username": "Atul_tiwari"
},
{
"code": "",
"text": "question was which one is better , null or {}",
"username": "Rajesh_Yadav"
},
{
"code": "",
"text": "My preference would be in order",
"username": "steevej"
}
] | Should I keep null in object or empty {} | 2020-03-09T08:51:54.041Z | Should I keep null in object or empty {} | 3,664 |
null | [] | [
{
"code": "",
"text": "Suppose I have a collection of companies that contains fields such as phone, name, companyName, email. I want the admin to search on the basis of either phone, name or email.",
"username": "Shubham_Goel"
},
{
"code": "",
"text": "You will find the document for find() at https://docs.mongodb.com/manual/reference/method/db.collection.find/I would also recommend that you take the course M001 from https://university.mongodb.com/",
"username": "steevej"
},
{
"code": "",
"text": "Adding to the question, I know the basic find query, I should have mentioned in question that I want to make search on the basis of one of the fields at a time with regular expressions kind of thing. Like if Ama is typed then results starting with Ama would appear in email field, or like if 9465 is typed then companies whose phoneNo starts from 9465 would be returned.",
"username": "Shubham_Goel"
},
{
"code": "",
"text": "In this case what you want ispattern matching on strings in MongoDB 6.0\nor",
"username": "steevej"
}
] | Searching multiple fields | 2020-03-09T11:23:45.841Z | Searching multiple fields | 6,782 |
null | [] | [
{
"code": "",
"text": "!!Need a help.I have added 1000000 numbers and make ordered:false if it have duplicate value in every 1000 items less than 200 it work fine.But if more than 500 duplicates in 1000 it gave a error and stop the insert other values.",
"username": "Buddika_Bandara"
},
{
"code": "db.collection.initializeUnorderedBulkOp()",
"text": "You can use Bulk.insert() with db.collection.initializeUnorderedBulkOp().",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "Yes i have done with this but its insert as 1000 bulk and if in 1000 ,800 duplicates then all process failed",
"username": "Buddika_Bandara"
},
{
"code": "",
"text": "@Buddika_Bandara Can you provide some more details to help investigate this:A snippet of code showing how you are performing the bulk insert operation would also be useful.Thanks,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "@Stennie this what i found on the errorerror occur:- when addding 1000000 number to database with bulk insert\nerror- process stop with giving too man duplication value\nfinding:-as i think bulk insert happen with 1000 chunk.so if in 1000 chunk 800 duplicate all process stop without trying other 900000.But if duplicate value less than 100 the insert process happen smoothlymongo ---- MongoDB shell version: 3.2.10\ndeployment — standalone i m using molecular js",
"username": "Buddika_Bandara"
},
{
"code": "{ MongoError: E11000 duplicate key error collection: campaignDB.campaign_number index: _id_ dup key: { : \"5e673dc3a53cf3730568a19b_94715987119\" }\n at Function.create (/Users/waruna/Anuja/GitBuddika/Message_hub/MHNG-Backend/campaign-services/node_modules/mongodb/lib/core/error.js:44:12)\n at toError (/Users/waruna/Anuja/GitBuddika/Message_hub/MHNG-Backend/campaign-services/node_modules/mongodb/lib/utils.js:150:22)\n at UnorderedBulkOperation.handleWriteError (/Users/waruna/Anuja/GitBuddika/Message_hub/MHNG-Backend/campaign-services/node_modules/mongodb/lib/bulk/common.js:1125:11)\n at resultHandler (/Users/waruna/Anuja/GitBuddika/Message_hub/MHNG-Backend/campaign-services/node_modules/mongodb/lib/bulk/common.js:501:23)\n at handler (/Users/waruna/Anuja/GitBuddika/Message_hub/MHNG-Backend/campaign-services/node_modules/mongodb/lib/core/sdam/topology.js:973:24)\n at wireProtocol.(anonymous function) (/Users/waruna/Anuja/GitBuddika/Message_hub/MHNG-Backend/campaign-services/node_modules/mongodb/lib/core/sdam/server.js:437:5)\n at /Users/waruna/Anuja/GitBuddika/Message_hub/MHNG-Backend/campaign-services/node_modules/mongodb/lib/core/connection/pool.js:420:18\n at process._tickCallback (internal/process/next_tick.js:61:11)\n name: 'BulkWriteError',\n driver: true,\n code: 11000,\n writeErrors:\n [ WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: 
[Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n WriteError { err: [Object] },\n ... 899 more items ],\n result:\n BulkWriteResult {\n result:\n { ok: 1,\n writeErrors: [Array],\n writeConcernErrors: [],\n insertedIds: [Array],\n nInserted: 0,\n nUpserted: 0,\n nMatched: 0,\n nModified: 0,\n nRemoved: 0,\n upserted: [] } },\n [Symbol(mongoErrorContextSymbol)]: {} }\n",
"text": "this was the error** **",
"username": "Buddika_Bandara"
}
] | Bulk insert failing with large number of duplicate values | 2020-03-06T19:39:34.258Z | Bulk insert failing with large number of duplicate values | 3,586 |
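A hedged sketch of one common way to let a bulk insert continue past duplicate-key documents with the Node.js driver (the driver shown in the stack trace above): insert unordered and treat a duplicate-key bulk error (code 11000, as in the log) as non-fatal. The URI, namespace, and variable names are placeholders, not the poster's actual code.

```js
const { MongoClient } = require("mongodb");

async function insertIgnoringDuplicates(uri, docs) {
  const client = await MongoClient.connect(uri);
  try {
    await client
      .db("campaignDB")
      .collection("campaign_number")
      .insertMany(docs, { ordered: false }); // keep inserting after duplicate _id values
  } catch (err) {
    // rethrow anything that is not a duplicate-key bulk error (code 11000 in the log above)
    if (err.code !== 11000) throw err;
    console.log(`skipped ${err.writeErrors.length} duplicate documents`);
  } finally {
    await client.close();
  }
}
```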
null | [
"dot-net"
] | [
{
"code": "",
"text": "in c# driver builder is not working and i want to keep date in string only in json. pls suggest.\n{\n“ProgressDate” : “2019-01-01”\n}\nvar filter1 = Builders.Filter.Where(w => w[“ProgressDate”]==Convert.ToDateTime(“2019-01-01”));yours sincerley",
"username": "Rajesh_Yadav"
},
{
"code": "ProgressDateDateTimeyyyy-mm-dd2019-01-01var stringDate = \"2019-01-01\";\nDateTime dateFilter = DateTime.Parse(stringDate, CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal );\nvar filterDefinition = Builders<Test>.Filter.Where(t => t.ProgressDate==dateFilter);\nDateTimeStyles.AssumeUniversalDateTimeStyles.AdjustToUniversal",
"text": "Hi @Rajesh_Yadav,Just to clarify, you have documents in a collection with field ProgressDate that stores value in DateTime, and you would like to query using a string format of date given only yyyy-mm-dd i.e. 2019-01-01 ?If so, you can convert using DateTime.Parse() method. For example:Please note that MongoDB stores time in UTC by default. This is the DateTimeStyles.AssumeUniversal param option is for. If you intend to convert the string date from local time to UTC just replaced with DateTimeStyles.AdjustToUniversal. See more info System.Globalization.If this doesn’t answer your question, please provide the following information so that others can help you better:Regards,\nWan.",
"username": "wan"
},
{
"code": "",
"text": "it is not getting the record , that means it is not filltering.\nversion i have used is 1.10.2\nand json i have given u which i have kept in mongodb.\nand finlly the quey is also given by me.\nresult is i must get the json i have entered in mondogdb. in c# var filter1",
"username": "Rajesh_Yadav"
},
{
"code": "Convert.ToDateTime()var stringDate = \"2019-01-01\";\nvar filterDefinition = Builders<Test>.Filter.Where(t => t.ProgressDate==stringDate);\n",
"text": "and json i have given u which i have kept in mongodb.If that’s the case, the field value is string you don’t need to call Convert.ToDateTime() and just use string to query it. i.e.Regards,\nWan.",
"username": "wan"
},
{
"code": "",
"text": "this one is also not working\nvar filter = Builders.Filter.Where(w=> w[“ProgressDate”] <= “2019-01-01”);",
"username": "Rajesh_Yadav"
},
{
"code": "",
"text": "i wanted to use <= insted of ===\nlike following.var stringDate = “2019-01-01”;\nvar filterDefinition = Builders.Filter.Where(t => t.ProgressDate <= stringDate)–other thing is u can use bsondocument also in place of Test , if that works i will take that one also.",
"username": "Rajesh_Yadav"
}
] | How can I use date string in c# driver builder? | 2020-03-06T06:21:08.056Z | How can I use date string in c# driver builder? | 13,598 |
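Since the thread above keeps ProgressDate as a plain "yyyy-MM-dd" string, lexicographic and chronological order coincide, so a range filter can stay string-based. A small hedged C# sketch; the collection name and variables are placeholders.

```csharp
var collection = database.GetCollection<BsonDocument>("progress");

// Zero-padded "yyyy-MM-dd" strings sort the same way as the dates they represent,
// so a plain string comparison gives the right range.
var filter = Builders<BsonDocument>.Filter.Lte("ProgressDate", "2019-01-01");
var docs = collection.Find(filter).ToList();
```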
null | [
"scala"
] | [
{
"code": "4.0.0build.sbtorg.mongodb.scala » bson-scala_2.13",
"text": "This is mostly a bug report, but I am not sure if there is any workaround.I am trying to use the new Scala driver version 4.0.0 but after bumping the dependency on my build.sbt file and reloading I got the following error.sbt.librarymanagement.ResolveException: Error downloading org.mongodb.scala:bson-scala_2.13:4.0.0Even on the Maven Repository you can see that the it depends on an unexisting dependency org.mongodb.scala » bson-scala_2.13 which you can also see on the published pom.Do you have an estimated time of a followup release fixing this problem?",
"username": "BalmungSan"
},
{
"code": "",
"text": "Hi @BalmungSan, welcome!Thanks for reporting the issue. I’m able to reproduce the dependency issue that you’re seeing and opened JAVA-3655. Feel free to watch/up-vote the ticket to receive notification on it.In the meantime, version 2.8.0 should work for now.Regards,\nWan.",
"username": "wan"
},
{
"code": "org.mongodb.scala:bson-scala_2.13:4.0.0org.mongodb.scala:mongo-scala-bson:4.0.0",
"text": "Hi @wan thanks for the quick answer.\nI will watch it closely.BTW, for what I understand, it is not a new dependency, but rather a wrong one.\nThe failed one is org.mongodb.scala:bson-scala_2.13:4.0.0 however the artifact also depends on org.mongodb.scala:mongo-scala-bson:4.0.0 which feels very similar and that one does exists.",
"username": "BalmungSan"
},
{
"code": "<dependency>\n <groupId>org.mongodb.scala</groupId>\n <artifactId>bson-scala_2.13</artifactId>\n <version>4.0.0</version>\n <scope>compile</scope>\n</dependency>\n<dependency>\n <groupId>org.mongodb.scala</groupId>\n <artifactId>mongo-scala-bson_2.13</artifactId>\n <version>4.0.0</version>\n <scope>compile</scope>\n</dependency> \nbson-scala libraryDependencies += \"org.mongodb.scala\" %% \"mongo-scala-driver\" % \"4.0.0\" exclude (\"org.mongodb.scala\", \"bson-scala_2.13\")\n",
"text": "Hi @BalmungSan,Thanks for reporting, I’ll look to fix and push a new release with a fixed pom in the near future. Looking at the pom file it looks like the build file has produced a duplication for bson:So for now please update your sbt and ignore the the bson-scala dependency:Ross",
"username": "Ross_Lawley"
}
] | Can't use the Scala driver 4.0.0 due a broken dependency | 2020-03-09T20:35:24.446Z | Can’t use the Scala driver 4.0.0 due a broken dependency | 4,700 |
null | [
"compass"
] | [
{
"code": "",
"text": "Hello!I’m using MongoDB Compass connected to Atlas (cluster of 3 shards).As one of my collections has grown bigger (total size 10mb, average document size 116kb) the initial load (collection.find({})) has started to time out (~30 seconds).Since I can’t change MongoDB Compass’es “initial query” when clicking the collection name it takes very long for me to hop onto the collection to start querying.I have indexes set up for specific searches, but collection.find({}) I guess dumps all that it can.",
"username": "Kim_Korte"
},
{
"code": "db.collection.find({})",
"text": "Hi @Kim_Korte,It has been a while since you posted this question … were you able to find a solution?A query using db.collection.find({}) uses natural order rather than an index, but this should be the fastest way to retrieve documents. Compass only retrieves 20 documents by default, so the size of the collection should not be an issue. Your documents are also relatively small, so any timeouts are likely more related to networking issues.If you are still experiencing this problem, can you provide more information including your specific version of MongoDB Compass and steps to reproduce? Also, what tier of Atlas cluster are you using (M?) and what are the regions for your cluster and the computer you are connecting from?Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "Hello!It works better now. I was previously storing whole blobs to the MongoDB rather that changing subsets of the documents using $set. This cause my oplog to grow out of control, which in turn seemed to affect the querying in MongoDB compass. After my changes to how I store data, the initial load of the 20 posts takes only 3-5 down from ~30 seconds.However, I still feel like this is a bit annoying. Is there anyway I can “choose what the default query is”, clicking a collection in the menu to the left in Compass? The way Compass deals with the load of the collection is locking any user iteraction out until loaded. This is very frustrating and becomes a bottleneck when I try to interact with the database.Compass: Version 1.19.12\nMongoDB Atlas: M10To reproduce:\nStore a few hundred blobs of data un-indexed and without using $set (so that oplog takes a hit). Open compass, click collection, wait until it loads.One big difference having 5 second load time and 30, is that I’m running on a paid tier. That could be the main difference now from then. However 30 seconds is ALOT and also being locked out from user interaction in Compass as it loads is another problem.Thanks ",
"username": "Kim_Korte"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Initial load times out MongoDB Compass | 2020-02-21T11:29:51.259Z | Initial load times out MongoDB Compass | 3,484 |
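A sketch of the change Kim describes above (updating only the fields that changed instead of rewriting whole blobs), in mongo shell syntax; the collection and field names are hypothetical.

```js
// Whole-document replace: rewrites the full blob and produces a large oplog entry
db.profiles.replaceOne({ _id: userId }, fullDocument)

// Targeted update: only the changed fields are written, keeping oplog entries small
db.profiles.updateOne(
  { _id: userId },
  { $set: { "stats.lastSeen": new Date(), "stats.visits": 42 } }
)
```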
[] | [
{
"code": "",
"text": "My project has four submodules. each independent Code Igniter 3.1.9 with the same Mongodb 4.2, in ubuntu 18.04 using Nginx web server. getting this error while trying to connect to one of submodule. The remaining three are working fine .one module is getting this error.\n\nimage1205×289 3.63 KB\n",
"username": "dev_mcb"
},
{
"code": "",
"text": "@dev_mcb The error message you’ve provided does not appear to be directly related to MongoDB.Can you provide more details on the submodules you are referring to and how MongoDB fits in?This may be a better question for the CodeIgniter community.Regards,\nStennie",
"username": "Stennie_X"
}
] | mongodb config group : get does not exist. | 2020-03-10T05:45:08.200Z | mongodb config group : get does not exist. | 1,548 |
|
null | [
"sharding",
"monitoring"
] | [
{
"code": "xxxConfig:PRIMARY> db.serverStatus().asserts\n{ \"regular\" : 0, \"warning\" : 0, \"msg\" : 0, \"user\" : 335, \"rollovers\" : 0 }\nuse config \ndb.system.session.find() # is blank .\n",
"text": "My mongodb is community server. It’s a sharding cluster, In config server:Increase to 335 in 3 hours.In config server log:2020-03-05T17:01:14.724+0800 D COMMAND [replSetDistLockPinger] assertion while executing command ‘findAndModify’ on database ‘config’ with arguments ‘{ findAndModify: “lockpings”, query: { _id: “ConfigServer” }, update: { $set: { ping: new Date(1583398874724) } }, upsert: true, writeConcern: { w: “majority”, wtimeout: 15000 }, $db: “config” }’: NotMaster: Not primary while running findAndModify command on collection config.lockpingsand2020-03-06T08:45:11.555+0800 D COMMAND [conn321719] assertion while executing command ‘createIndexes’ on database ‘config’ with arguments ‘{ createIndexes: “system.sessions”, indexes: [ { key: { lastUse: 1 }, name: “lsidTTLIndex”, expireAfterSeconds: 1800 } ], allowImplicitCollectionCreation: false, $clusterTime: { clusterTime: Timestamp(1583455507, 1), signature: { hash: BinData(0, 53B67F92D69C8BFBFE808B0127033E527960EC14), keyId: 6766524995490283521 } }, $configServerState: { opTime: { ts: Timestamp(1583455502, 2), t: 6 } }, $db: “config” }’: CannotImplicitlyCreateCollection{ ns: “config.system.sessions” }: request doesn’t allow collection to be created implicitly.Run cmd:I think so memory’s sessions info cannot write collection “config.system.sessions” , so asserts.user are growing.How should I solve this problem?Thanks!",
"username": "Ruoxue_Feng"
},
{
"code": "db.system.sessions.find()",
"text": "How should I solve this problem?Since these assertions are generated by system activity (updating sharding metadata), I’d suspect a bug in your server version or deployment configuration. Shards should detect changes in the primary config server and should not be sending write commands directly to secondaries (as appears to be the case in the few log messages you’ve included).Can you provide some further details:use config\ndb.system.session.find()The query should be db.system.sessions.find(), however there does appear to be an issue with this collection not existing since one of your config secondaries is trying to create it implicitly.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "mongos> sh.status()\n--- Sharding Status --- \n sharding version: {\n \"_id\" : 1,\n \"minCompatibleVersion\" : 5,\n \"currentVersion\" : 6,\n \"clusterId\" : ObjectId(\"5d51088a47213054f0685153\")\n }\n shards:\n { \"_id\" : \"cldRS0\", \"host\" : \"cldRS0/node01:27018,node02:27018,node03:27018\", \"state\" : 1 }\n { \"_id\" : \"cldRS1\", \"host\" : \"cldRS1/node04:27018,node05:27018,node06:27018\", \"state\" : 1 }\n { \"_id\" : \"cldRS2\", \"host\" : \"cldRS2/node07:27018,node08:27018,node09:27018\", \"state\" : 1 }\n active mongoses:\n \"4.0.10\" : 2\n autosplit:\n Currently enabled: yes\n balancer:\n Currently enabled: yes\n Currently running: no\n Failed balancer rounds in last 5 attempts: 1\n Last reported error: Couldn't get a connection within the time limit\n Time of Reported error: Thu Nov 07 2019 13:04:37 GMT+0800 (CST)\n Migration Results for the last 24 hours: \n No recent migrations\n databases: \n... ... \n\n { \"_id\" : \"config\", \"primary\" : \"config\", \"partitioned\" : true }\n config.system.sessions\n shard key: { \"_id\" : 1 }\n unique: false\n balancing: true\n chunks:\n cldRS0 1\n { \"_id\" : { \"$minKey\" : 1 } } -->> { \"_id\" : { \"$maxKey\" : 1 } } on : cldRS0 Timestamp(1, 0) \ncldRS1:PRIMARY> db.serverStatus().asserts\n{\n \"regular\" : 0,\n \"warning\" : 0,\n \"msg\" : 0,\n \"user\" : 107478,\n \"rollovers\" : 0\n}\n\n\nmongos> db.serverStatus().asserts\n{\n \"regular\" : 0,\n \"warning\" : 0,\n \"msg\" : 0,\n \"user\" : 3103,\n \"rollovers\" : 0\n}\n\n\nxxxConfig:SECONDARY> db.serverStatus().asserts\n{\n \"regular\" : 0,\n \"warning\" : 0,\n \"msg\" : 0,\n \"user\" : 11149,\n \"rollovers\" : 0\n}\n",
"text": "Thank you very much!\nMy cluster status:More details:Now, all nodes asserts is growthing.Best Regards,\nRuoxue Feng",
"username": "Ruoxue_Feng"
},
{
"code": "",
"text": "Could you help me analyze this problem? (my second post) . Thank you very much.",
"username": "Ruoxue_Feng"
}
] | Why "db.serverStatus().asserts.user" high number? | 2020-03-06T08:16:06.609Z | Why “db.serverStatus().asserts.user” high number? | 4,935 |
[
"replication"
] | [
{
"code": "",
"text": "I have a MongoDB replica set with 6 members. 3 members (including the primary) are using MongoDB 3.4 and remaining 3 are MongoDB 3.6 which i have added as part of upgrading.For 2 collections documents have been replicated to new secondary (version 3.6) but when I query document count is zero . Remaining collections are replicated to secondary without any issues.I’m not sure why document count displays zero for these two collections.\nimage943×532 46.7 KB\n https://i.stack.imgur.com/ZymaT.png",
"username": "Manikanta_KS"
},
{
"code": "validate",
"text": "Hi,This issue sounds unusual, but I wouldn’t recommend running mixed major releases for an extended period of time beyond validating your upgrade.It has been some time since you posted this question - were you able to find a solution? If you are still running mixed versions, what specific version(s) of MongoDB 3.4 are you using?One possibility is that the collection metadata (count & size stats) is somehow incorrect. Usually this happens after an unexpected shutdown, but you could try to run validate on affected collections to see if this corrects your issue.Regards,\nStennie",
"username": "Stennie_X"
}
] | Document count showing zero despite documents existing in secondary | 2020-02-18T12:17:09.581Z | Document count showing zero despite documents existing in secondary | 3,777 |
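For reference, the validate step suggested above looks like this in the mongo shell; the database and collection names are placeholders, and full validation can be slow on large collections.

```js
// Recompute/verify collection metadata on the affected member, per suspect collection
db.getSiblingDB("mydb").mycoll.validate(true)
```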
|
null | [] | [
{
"code": "",
"text": "I am an agronomy engineer and a self-taught web developer I love both fields they are very challenging and fun ",
"username": "Ahlam_bey"
},
{
"code": "",
"text": " Welcome to the community @Ahlam_bey!",
"username": "Doug_Duncan"
},
{
"code": "",
"text": "Hi @Ahlam_bey, Welcome!",
"username": "Jamie"
},
{
"code": "",
"text": "Hello, and welcome @Ahlam_bey ",
"username": "coderkid"
}
] | Hi there I am Ahlam from Algeria | 2020-03-09T16:43:22.866Z | Hi there I am Ahlam from Algeria | 2,117 |
null | [] | [
{
"code": "",
"text": "Hi all! It’s great to be part of this community since the very beginning of Voxes.\nMy name is Patrick and I’m engineer from Poland. I’m working on software that could help you manage users feedback and share product development (https://voxes.io). Of course it has MongoDB under the hood! We’ve just launched - if you are developing any project, feel invited to try Voxes ",
"username": "Azard"
},
{
"code": "",
"text": "Welcome Patrick,I just check out Voxes, looks great!Looking forward to participating in this community with you!",
"username": "coderkid"
},
{
"code": "",
"text": "Welcome to the community, @Azard!",
"username": "Jamie"
},
{
"code": "",
"text": " Welcome to the community Patryk! It looks like an interesting product you guys are building!",
"username": "Doug_Duncan"
}
] | Hey friends, I'm Patrick from Voxes | 2020-03-06T22:58:34.215Z | Hey friends, I’m Patrick from Voxes | 1,778 |
null | [
"production",
"mongoid-odm"
] | [
{
"code": "Criteria",
"text": "The MongoDB Ruby Driver Team is pleased to announce the release of Mongoid 7.1.0.The principal improvement in Mongoid 7.1 is the unified treatment of logical operations when building queries (i.e., operating on Criteria objects). This makes Mongoid’s behavior match that of ActiveRecord, and results in consistent queries being constructed regardless of the mechanism used to construct them.Please refer to 7.1.0.rc0 release notes and 7.1.0 release notes for the detailed list of changes.Please report any issues using the Mongoid Jira project.The Ruby Driver Team",
"username": "Oleg_Pudeyev"
},
{
"code": "",
"text": "",
"username": "system"
}
] | Mongoid 7.1.0 Released | 2020-03-09T16:45:27.807Z | Mongoid 7.1.0 Released | 2,960 |
null | [] | [
{
"code": "",
"text": "Not able to identify the connection string from the tutorial.",
"username": "Utkarsh_14115"
},
{
"code": "",
"text": "@Utkarsh_14115, have a look at this thread:\nhttps://www.mongodb.com/community/forums/t/error-while-connecting-to-mongodb-compass/48281/2",
"username": "007_jb"
},
{
"code": "",
"text": "Hi @Utkarsh_14115,I hope you found @007_jb’s response helpful. Please let us know if you are still having any issue.Thanks,\nShubham Ranjan\nCurriculum Services Engineer",
"username": "Shubham_Ranjan"
},
{
"code": "",
"text": "I don’t see how giving us the answer will help. It is like cheating on a test.\nI can use the url to bring up the databases, but I won’t know how to do it again, or for a different instance. Why is there a difference between the course showing a full page of fields to enter and my Compass that only has the one URL field? How would I get a different URL for my own if I wanted to start using it?",
"username": "Mark_03136"
},
{
"code": "",
"text": "@Mark_03136, does the question in this thread relate to a lab that counts towards the final score, or is it just a quiz (which doesn’t count towards the final score)?Q: Why is there a difference …?\nA: The lectures are referencing a much older version of Compass. @Shubham_Ranjan (the Curriculum Services Engineer for this course) has mentioned that they’ll soon get an upgrade.Q: How would I get a different URL…?\nA: You get the connection strings on Atlas (which I believe the lectures cover). On there, you’ll find a Version dropdown for different connection string formats. The version that matches the lecture is 3.4 or earlier.\n",
"username": "007_jb"
},
{
"code": "",
"text": "Neither, I just wanted to understand what options to consider when connecting.",
"username": "Mark_03136"
},
{
"code": "",
"text": "Hi @Mark_03136,As @007_jb mentioned, the user interface of Compass has changed a lot since the time of recording of the videos. As I can see, you are enrolled in the February, 2020 offering. In the March offerings, we have published the upgraded version of course to reflect some of the recent changes.Regarding the connection string, I hope you found @007_jb response helpful. Acquiring the connection string is pretty much same for mongo shell and Compass. For instance, this is how you can acquire the connection string for Compass for your sandbox cluster.Hope it helps!Please let us know if you still have any questions.Thanks,\nShubham Ranjan\nCurriculum Services Engineer",
"username": "Shubham_Ranjan"
},
{
"code": "",
"text": "",
"username": "system"
}
] | Mongo DB Compass Connection String | 2020-03-02T17:34:23.193Z | Mongo DB Compass Connection String | 11,679 |
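For reference, the two Atlas connection string formats discussed in this thread look like the following; the hostnames and credentials are placeholders. The standard format matches the 3.4-era Compass dialog shown in the lectures, while the SRV format matches current Compass and 3.6+ drivers.

```
# Standard format: every replica set host listed explicitly
mongodb://user:[email protected]:27017,cluster0-shard-00-01-abcde.mongodb.net:27017,cluster0-shard-00-02-abcde.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin

# SRV format: hosts resolved through a single DNS SRV record
mongodb+srv://user:[email protected]/test?retryWrites=true&w=majority
```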
null | [
"dot-net"
] | [
{
"code": "",
"text": "var credentials = Credentials.UsernamePassword(RealmConnectionInfo.Username, RealmConnectionInfo.Password, false);\nvar admin = await Realms.Sync.User.LoginAsync(credentials, new Uri(RealmConnectionInfo.LoginUri));After the recent update to realm 4.3.0 I get the Metadata Realm encryption specified error on attempting LoginAsync. Reverted to 4.2.0 and the error persisted. Restored project from previous commit and error persisted. No changes have been made to the login code since the beginning of the project. This seems very similar to this bug on github: Keychain exception on iOS after upgrading to 4.3.0 · Issue #1956 · realm/realm-dotnet · GitHub.As some background info, this is a windows service project I am setting up to import data from a realm cloud. .Net Framework 4.6.1(also tested on 4.7.2). Realm version 4.3.0 latest stable and tested in reversion to 4.2.0 as well.",
"username": "Ryan_Ziegler"
},
{
"code": "realm-dotnet:#1967Realms.Sync.SyncConfigurationBase.Initialize (Realm.Sync.UserPersistenceMode.NotEncrypted)",
"text": "After the recent update to realm 4.3.0 I get the Metadata Realm encryption specified error on attempting LoginAsync. Reverted to 4.2.0 and the error persisted. Restored project from previous commit and error persisted. No changes have been made to the login code since the beginning of the project.@Ryan_Ziegler Per the follow-up issue you created on GitHub realm-dotnet:#1967, this was confirmed as a bug.Quoting the relevant comment from one of the Realm .NET engineers:This is a 4.3.0 bug, unfortunately. You need to do a complete clean of your project (delete bin and obj folders) for NuGet to correctly update the native libraries included in your project when you’re downgrading.You can either downgrade to 4.2.0 (if you clean your project), or work around this by calling Realms.Sync.SyncConfigurationBase.Initialize (Realm.Sync.UserPersistenceMode.NotEncrypted) at the start of your application. This should be the default behavior on non-Apple platforms, but due to a bug user metadata encryption is forced even on platforms where it’s not automatically applicable.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | 'Metadata Realm encryption was specified, but no encryption key was provided.' | 2020-03-03T18:54:20.573Z | ‘Metadata Realm encryption was specified, but no encryption key was provided.’ | 2,035 |
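A sketch of how the quoted workaround can be wired in before the first sync call; it simply combines the GitHub comment quoted above with the poster's own login code, so treat the exact namespace and method as the Realm 4.x .NET API of that era rather than something verified here.

```csharp
// Apply the non-encrypted metadata workaround before any sync/login call (per the quoted comment):
Realms.Sync.SyncConfigurationBase.Initialize(Realms.Sync.UserPersistenceMode.NotEncrypted);

var credentials = Credentials.UsernamePassword(username, password, false);
var user = await Realms.Sync.User.LoginAsync(credentials, new Uri(loginUri));
```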
null | [
"compass"
] | [
{
"code": "",
"text": "Hi everyoneI just would like to know if it is possible to have access to all aggregations saved on MongoDB Compass?Best regards\nEzequias.",
"username": "Ezequias_Rocha"
},
{
"code": "",
"text": "Aggregations on the Compass are built for each individual collection. When you access a collection you can access all the saved pipelines for that collection.",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "I just would like to know if it is possible to have access to all aggregations saved on MongoDB Compass?Are you referring to retrieving all of the saved aggregation pipelines?In the current versions of Compass (1.20 and older), saved aggregation pipelines are stored in an IndexedDB format that is embedded in the application preferences. This is only intended for use in the application, so accessing the full source of saved aggregations isn’t a current end user feature: you would have to load and export pipelines individually via the user interface.However, a few users have requested easier access/sharing of pipelines, so future versions of Compass should load saved pipelines from disk instead. The improvement for this is currently in code review: COMPASS-3884: Migrate saved aggregations from IndexedDB to the filesystem.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "Thank you so much @Stennie_XI didn’t know the MongoDB backlog would be seen for anyone.I am completely fascinated by this. Congratulations for this commitment with users.Sincerely\nEzequias.",
"username": "Ezequias_Rocha"
},
{
"code": "",
"text": "I didn’t know the MongoDB backlog would be seen for anyone.Backlog development issues and improvement requests for MongoDB drivers, server, and other projects are visible via https://jira.mongodb.org. You can create, comment, watch, and upvote issues there.We also have a MongoDB Feedback site which provides a more use-case focused view of feature requests and direct connection with the product management team.I am completely fascinated by this. Congratulations for this commitment with users.MongoDB Compass started off as closed source development, but as of Compass 1.20 the source code is now available under the SSPL: MongoDB Compass is becoming more open than ever.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Where are saved aggregations in Compass? | 2020-03-04T18:33:42.842Z | Where are saved aggregations in Compass? | 3,955 |
null | [
"sharding"
] | [
{
"code": "",
"text": "I am building a sharding infrastructure but I am stuck at a point to select an appropriate shard key. Can anyone help me out in selecting an appropriate shard key for my data? Further, an issue I am facing is that I am having a total of 147 collections so is there any way I can shard all of my collections in one one go? or I have to manually shard each of the collection one by one?",
"username": "Qamber_Ali"
},
{
"code": "",
"text": "@Qamber_Ali Welcome to the forum!Can anyone help me out in selecting an appropriate shard key for my data?I would start by reviewing the documentation on Choosing a shard key and then follow-up with specific questions for your use case. The most appropriate shard key will depend on your use case and data distribution, so you’ll need to consider factors like: shard key field(s) should appear in every document, have high cardinality and even distribution (without being monotonically increasing), and support your most common queries.Once you have candidate shard key(s), you could post a follow-up comment here if more specific advice is needed. It would be helpful to describe your concerns around how the candidate shard keys may (or may not) suit your use case.Further, an issue I am facing is that I am having a total of 147 collections so is there any way I can shard all of my collections in one one go?Collections have to be individually sharded, but you could certainly automate this if you know what the desired shard key is. Before sharding all collections, I would consider whether it actually makes sense to do so. A sharded cluster can contain both sharded and unsharded collections, and some smaller or lower traffic collections may not benefit from sharding.Regards,\nStennie",
"username": "Stennie_X"
}
] | Choosing an appropriate Shard key | 2020-03-06T19:38:41.291Z | Choosing an appropriate Shard key | 1,748 |
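To illustrate Stennie's point that sharding many collections can be automated once a key is chosen, here is a hedged mongo shell sketch. The database name and the hashed key field are hypothetical, and a non-empty collection also needs the supporting index to exist before shardCollection succeeds (it is created automatically only for empty collections).

```js
sh.enableSharding("mydb")

// Shard every collection in "mydb" with the same (hypothetical) hashed key.
// Small or low-traffic collections can be left unsharded instead.
db.getSiblingDB("mydb").getCollectionNames().forEach(function (name) {
  sh.shardCollection("mydb." + name, { customerId: "hashed" })
})
```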
null | [
"atlas-search"
] | [
{
"code": "",
"text": "Hi, I am fairly new to mongo and iam using a full text search (beta) which is recently launched by mongodb.I am creating an aggregation and running a search for list of paths.\nMy question is, is there any way to loop through all the paths automatically without defining each path (consisting of arrays and array of objects) ?\nIf so how can I achieve that ?Thanks in advance",
"username": "David_Barrington"
},
{
"code": "path",
"text": "I am creating an aggregation and running a search for list of paths.\nMy question is, is there any way to loop through all the paths automatically without defining each path (consisting of arrays and array of objects) ?@David_Barrington Welcome to the forum!Can you provide an example document and the query/outcome you are trying to match?You can include multiple fields in an Atlas search index definition and use either static mapping (if all fields are known in advance) or dynamic mapping (index all fields in a collection as needed). You can also specify fields to search using the path parameter: Atlas Search Path Construction.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Full text search iterating through path | 2020-03-06T22:02:28.602Z | Full text search iterating through path | 2,414 |
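A hedged illustration of the dynamic-mapping and path options mentioned above, written in current Atlas Search syntax (during the 2020 beta the stage was still named $searchBeta, and wildcard paths may not have been available yet); the collection, query text, and index definition are placeholders.

```js
// Index definition with dynamic field mapping, so fields need not be listed one by one:
//   { "mappings": { "dynamic": true } }

// Query every indexed field via a wildcard path instead of enumerating each path:
db.listings.aggregate([
  {
    $search: {
      text: {
        query: "beach house",
        path: { wildcard: "*" }
      }
    }
  },
  { $limit: 10 }
])
```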
null | [
"java"
] | [
{
"code": "1.8.0_192Caused by: com.mongodb.MongoSocketReadException: Prematurely reached end of stream\n\tat com.mongodb.internal.connection.SocketStream.read(SocketStream.java:112)\n\tat com.mongodb.internal.connection.InternalStreamConnection.receiveResponseBuffers(InternalStreamConnection.java:579)\n\tat com.mongodb.internal.connection.InternalStreamConnection.receiveMessage(InternalStreamConnection.java:444)\n\tat com.mongodb.internal.connection.InternalStreamConnection.receiveCommandMessageResponse(InternalStreamConnection.java:298)\n\tat com.mongodb.internal.connection.InternalStreamConnection.sendAndReceive(InternalStreamConnection.java:258)\n\tat com.mongodb.internal.connection.CommandHelper.sendAndReceive(CommandHelper.java:83)\n\tat com.mongodb.internal.connection.CommandHelper.executeCommand(CommandHelper.java:33)\n\tat com.mongodb.internal.connection.InternalStreamConnectionInitializer.initializeConnectionDescription(InternalStreamConnectionInitializer.java:103)\n\tat com.mongodb.internal.connection.InternalStreamConnectionInitializer.initialize(InternalStreamConnectionInitializer.java:60)\n\tat com.mongodb.internal.connection.InternalStreamConnection.open(InternalStreamConnection.java:128)\n\tat com.mongodb.internal.connection.UsageTrackingInternalConnection.open(UsageTrackingInternalConnection.java:50)\n\tat com.mongodb.internal.connection.DefaultConnectionPool$PooledConnection.open(DefaultConnectionPool.java:435)\n\tat com.mongodb.internal.connection.DefaultConnectionPool.get(DefaultConnectionPool.java:117)\n\tat com.mongodb.internal.connection.DefaultConnectionPool.get(DefaultConnectionPool.java:102)\n\tat com.mongodb.internal.connection.DefaultServer.getConnection(DefaultServer.java:90)\n\tat com.mongodb.internal.binding.ClusterBinding$ClusterBindingConnectionSource.getConnection(ClusterBinding.java:112)\n\tat com.mongodb.client.internal.ClientSessionBinding$SessionBindingConnectionSource.getConnection(ClientSessionBinding.java:136)\n\tat com.mongodb.internal.operation.FindOperation$1.call(FindOperation.java:628)\n\tat com.mongodb.internal.operation.FindOperation$1.call(FindOperation.java:625)\n\tat com.mongodb.internal.operation.OperationHelper.withReadConnectionSource(OperationHelper.java:462)\n\tat com.mongodb.internal.operation.FindOperation.execute(FindOperation.java:625)\n\tat com.mongodb.internal.operation.FindOperation.execute(FindOperation.java:77)\n\tat com.mongodb.client.internal.MongoClientDelegate$DelegateOperationExecutor.execute(MongoClientDelegate.java:190)\n\tat com.mongodb.client.internal.FindIterableImpl.first(FindIterableImpl.java:189)\n\tat gui.Controller.cloudData(Controller.java:120)\n\tat gui.Controller.lambda$pushAction$1(Controller.java:111)\n\tat java.util.ArrayList.forEach(ArrayList.java:1257)\n\tat gui.Controller.pushAction(Controller.java:111)\n\t... 58 more\nmongodb://admin:[email protected]:27017,cluster0-shard-00-01-ox90k.mongodb.net:27017,cluster0-shard-00-02-ox90k.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true&maxIdleTimeMS=40000maxIdleTimeMSmongodb://admin:[email protected]:27017,cluster0-shard-00-01-ox90k.mongodb.net:27017,cluster0-shard-00-02-ox90k.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=truemongodb+srv://admin:[email protected]/test?retryWrites=true&w=majoritymongodb+srv://admin:[email protected]/test?retryWrites=true&w=majority&readPreference=secondary&replicaSet=Cluster0-shard-0&ssl=true",
"text": "I’m using JDK/JRE version like: 1.8.0_192\nWhen I’m inserting documents to the Mongodb cloud cluster, I’m getting always the issue:I’ve tried different connection strings for older drivers like:mongodb://admin:[email protected]:27017,cluster0-shard-00-01-ox90k.mongodb.net:27017,cluster0-shard-00-02-ox90k.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true&maxIdleTimeMS=40000and without maxIdleTimeMS parameter:\nmongodb://admin:[email protected]:27017,cluster0-shard-00-01-ox90k.mongodb.net:27017,cluster0-shard-00-02-ox90k.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=trueFor newer drivers like:\nmongodb+srv://admin:[email protected]/test?retryWrites=true&w=majorityand like:mongodb+srv://admin:[email protected]/test?retryWrites=true&w=majority&readPreference=secondary&replicaSet=Cluster0-shard-0&ssl=trueIt is worth noting that I can insert half of the documents, but then this error occurs.And nothing helps here.",
"username": "invzbl3"
},
{
"code": "",
"text": "Caused by: com.mongodb.MongoSocketReadException: Prematurely reached end of streamThis looks like SSL connection issue… check whether or not JRE’s SSL keystore is set properly.",
"username": "coderkid"
},
{
"code": "trustore",
"text": "Thanks for the answer, coderkid. can you a bit elaborate how to check it properly?\nI’m not sure, but maybe these limits messages would be helpful here.UPD: I suppose I’ve found, at least, place where is located trustore:And I have inside something like:\n\nтест44441048×352 17.4 KB\n",
"username": "invzbl3"
},
{
"code": "",
"text": "To clarify a bit:I’ve tried everything what I’ve already mentioned here: How to avoid an exception Prematurely reached end of stream using mongoDB Java driver 3.4+ or 3.6+? (during insertion) - Stack Overflowas you can see my post above.My Network Access looks like:\nNetwork Access988×297 13.4 KB\nAlso I can’t provide any logs, because:“M0 Free Tier and M2/M5 shared clusters do not provide downloadable logs.”",
"username": "invzbl3"
},
{
"code": "Caused by: com.mongodb.MongoSocketReadException: Prematurely reached end of stream\n ....\nat com.mongodb.client.internal.FindIterableImpl.first(FindIterableImpl.java:189)\nFind()",
"text": "Hi @invzbl3,The fact that you can connect and insert for a period of time, could be an indication that it’s not related to the keystore.Looking at both of these log lines:It seems that you are looping through an iterable from Find(), and within the loop you’re inserting documents. If so, it is possible that each of the loop process is taking too long, that either the find cursor has timeout or the server that the cursor is connected to was disconnected.Could you provide more information about how you’re inserting the documents with a snippet code example ?Regards,\nWan.",
"username": "wan"
},
{
"code": " public void data(MongoCollection<Document> collection, String title, String country) {\n\n Document found = collection.find(new Document(\"title\", title) \n .append(\"country\", country)).first();\n\n if (found == null) {\n collection.insertOne(new Document(\"title\", title)\n .append(\"country\", country));\n } \n} \n .append(\"country\", country)).first();",
"text": "Hi, @wan, thanks for the answer. My code looks like:The cause of error stacktrace shows me on line: .append(\"country\", country)).first();By the same logic, I had this exactly error using code snippet like here:I have so far inserted about 50 documents instead of the 176 that I planned using this snippet.",
"username": "invzbl3"
},
{
"code": "",
"text": "Thanks for the snippet code.\nThis function is called within a loop right ? Also, are you running this in a concurrent/parallel operations ?at java.util.ArrayList.forEach(ArrayList.java:1257)Could you check in Atlas cluster monitoring views the number of connections and operations displayed around the time you’re getting the error ? See also Monitor a Cluster for information how to view the metrics.I’m suspecting that you could be hitting the shared clusters limitations.Regards,\nWan.",
"username": "wan"
},
{
"code": "cloudDatadataJournalprivate List<Journal> journalList;for eachjournalList.forEach(journal -> cloudData(journal.getTitle(), journal.getCountry()));100000100000040000",
"text": "at gui.Controller.cloudData(Controller.java:120) at gui.Controller.lambda$pushAction$1(Controller.java:111) at java.util.ArrayList.forEach(ArrayList.java:1257)@wan, yes, you’re right about the loop. To correct a bit, my method of program is named like cloudData not data (as I mentioned earlier in snippet), so stacktrace is correct here.Journal is my custom class of parameters that I’m adding inside list.So I’m declaring firstly instance variable:\nprivate List<Journal> journalList;and then I’m using for each to call this method using getters like:journalList.forEach(journal -> cloudData(journal.getTitle(), journal.getCountry()));I don’t think it’s related to concurrent/parallel operations, because I simply using the same function until all documents from list will be added to MongoDb Tier cluster one by one.My metrics looks like:\n\n Also I’m getting alert like:\nCan you tell me, please, can I somehow control/regulate limit of % connections from connection string or something? I suppose it can be solution here if I add something like connection limiter as parameter, but still can insert everything that I need to cluster.P.s. I’ve tried to increase number of milliseconds for parameter maxIdleTimeMS up to 100000 or 1000000 instead of 40000, but it doesn’t affect the overall situation, the same error appears.",
"username": "invzbl3"
},
{
"code": "maxPoolSizecloudData",
"text": "Can you tell me, please, can I somehow control/regulate limit of % connections from connection string or something?Depending on your use case, there are a number of ways that you could try.You could try to reduce the connection pool limit using maxPoolSize parameter (default is 100). Alternatively could also change the connections pool limit via ConnectionPoolSettings.Builder().maxSize(int).\nIf you reduce the number of connection pool, you need to be mindful about the time limit a thread may wait for a connection to become available. The default is 2 minutes see also ConnectionPoolSettings.Builder().maxWaitTime(long, TimeUnit).In addition to the above, try to restructure your code. Instead of inserting a document one by one you could utilise BulkWrites operations. Also, looking at your example method cloudData, where you try to find a document and insert if it doesn’t exist, try performing update upsert operation whenever possible.Depending on your use case, you could also consider upgrading the cluster to a dedicated cluster. See also Free, Shared and Dedicated Cluster Comparison.Regards,\nWan.",
"username": "wan"
},
{
"code": "public void cloudData(MongoCollection<Document> collection, String title, String country) {\n List<WriteModel<Document>> updates = Collections.singletonList(\n new ReplaceOneModel<>(\n new Document(\"title\", title) // find part\n .append(\"country\", country), \n new Document(\"title\", title) // update part\n .append(\"country\", country), \n new ReplaceOptions().upsert(true) \n )\n );\n}\nmaxPoolSize8050100mongodb+srv://admin:[email protected]/test?retryWrites=true&w=majority&maxPoolSize=80Caused by: com.mongodb.MongoSocketReadException: Prematurely reached end of streammaxPoolSizemaxWaitTime",
"text": "Thanks, for valuable answer, @wan. I’ve changed my code to:It works also fine, but the same issue appears after half of inserted documents.Also I’ve already tried to add maxPoolSize and reduce value to 80 and 50 instead of default 100 in format like:mongodb+srv://admin:[email protected]/test?retryWrites=true&w=majority&maxPoolSize=80unfortunately to no avail, still here:\nCaused by: com.mongodb.MongoSocketReadException: Prematurely reached end of streamProbably yes, the most likely option as I see it’s really to pay for upgrade.P.s. I’ll try to combine maxPoolSize and maxWaitTime, as you mentioned, but not sure if it possible solution for connection string.",
"username": "invzbl3"
},
{
"code": "mongodb.MongoClient.connect",
"text": "Finally solved. I should call mongodb.MongoClient.connect once, not each request. So I’ve restructured my code a bit, to call connection once instead of each time when I insert specific document.Thanks for the help, everyone.",
"username": "invzbl3"
},
{
"code": "",
"text": "This topic was automatically closed 24 hours after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Cannot insert all documents to the MongoDB cloud cluster: Prematurely reached end of stream | 2020-02-29T07:04:03.183Z | Cannot insert all documents to the MongoDB cloud cluster: Prematurely reached end of stream | 22,463 |
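Summarising the fix the poster arrived at plus wan's upsert suggestion as one hedged Java sketch: a single MongoClient for the whole application (the driver pools connections behind it) and replaceOne with upsert instead of find-then-insert. The URI, class, and collection names are placeholders.

```java
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Filters;
import com.mongodb.client.model.ReplaceOptions;
import org.bson.Document;

public final class JournalStore {

    // Created once per application; the driver maintains a connection pool behind it.
    private static final MongoClient CLIENT =
            MongoClients.create("mongodb+srv://user:[email protected]/test");

    public static void upsertJournal(String title, String country) {
        MongoCollection<Document> col =
                CLIENT.getDatabase("test").getCollection("journals");

        Document doc = new Document("title", title).append("country", country);

        // Inserts if absent, replaces if present -- no separate find() round trip needed.
        col.replaceOne(
                Filters.and(Filters.eq("title", title), Filters.eq("country", country)),
                doc,
                new ReplaceOptions().upsert(true));
    }
}
```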
null | [] | [
{
"code": "",
"text": "i’m a beginner of programming\nand i want to make a discord-bot which use mongo db in kotlinin my reference, the author always close the db connection\nbut i have no idea where to close connection.is it necessary to close the db connection?( i’m not good at english, so if i made a mistake, feel free to correct me)",
"username": "1118"
},
{
"code": "",
"text": "Hey @1118Just wanted to share some stackover flows related to your question.I am using Mongoose as an ORM for my communication with Mongodb Atlas. And I do not close the connection anywhere. I just open a connection in the node express server, and monitor the connection for disconnection/reconnects",
"username": "Natac13"
},
{
"code": "MongoClient",
"text": "The Kotlin application uses a MongoDB Java driver to access the database server. This is a link to a Kotlin program where the code shows the closing of the MongoClient object: How to Insert a MongoDB Document using Kotlin.In general, it is always a good practice to close the resources after they are used and before exiting the application. The MongoClient#close() method API documentation says:Close the client, which will close all underlying cached resources, including, for example, sockets and background monitoring threads.Here is another example: Connect to MongoDB from Kotlin Application – Example",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "@Natac13 @Prasad_Saya\nThank you for the reply.\nhmm…, so i should close the connection or not?",
"username": "1118"
},
{
"code": "MongoClientMongoClientclose()",
"text": "i should close the connection or not?When you create a MongoClient instance you create a connection; actually a pool of connections (aka connection pool). By default, the pool has 100 connections (and this depends upon the driver). This is configurable within the Java code. This means, ideally, you should have one Mongo Client instance per JVM.So, the application’s client uses a connection from the pool and returns it back to the connection pool after its usage. There is no need for closing the connection explicitly. This way the application avoids creating and closing connections (which is an expensive operation).Of course, you should close the MongoClient instance, with the close() method (I had mentioned in my previous reply) at the closing of the application, to clear all the resources.",
"username": "Prasad_Saya"
},
{
"code": "MongoClientclose()MongoClientclose()MongoClientjava.lang.AutoCloseabletry-with-resourcesclose()trytry-with-resourcestryclosetry (MongoClient mongoClient = MongoClients.create(\"mongodb://localhost/\")) {\t\t\n MongoDatabase database = mongoClient.getDatabase(\"test\");\n MongoCollection<Document> coll = database.getCollection(\"testColl\");\t\t\n Document doc = coll.find(new Document()).first();\t\n System.out.println(doc.toJson());\n}",
"text": "Of course, you should close the MongoClient instance, with the close() method (I had mentioned in my previous reply) at the closing of the application, to clear all the resources.This note is about using the code for creating the MongoClient object and its close() method.MongoClient extends an interface called as java.lang.AutoCloseable. When the object implementing this interface is used within a try-with-resources statement (as in code sample below) the object’s close() method is called automatically at the exit of the try block (releasing the associated resources).A feature of using this try-with-resources statement is that, even when the code within the try block aborts abruptly (due to an exception), the close method is called automatically, and safely releasing the associated resources.",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Where to close db connection? | 2020-03-05T17:05:14.494Z | Where to close db connection? | 63,475 |
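Since the original question was about Kotlin, the Java try-with-resources example above translates to Kotlin's use block, which also calls close() automatically because MongoClient implements Closeable. A minimal sketch with a placeholder connection string; a long-running bot would instead keep one client for the application's lifetime and close it on shutdown.

```kotlin
import com.mongodb.client.MongoClients

fun main() {
    // `use` closes the client when the block exits, even if an exception is thrown.
    MongoClients.create("mongodb://localhost").use { client ->
        val doc = client.getDatabase("test")
            .getCollection("testColl")
            .find()
            .first()
        println(doc?.toJson())
    }
}
```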
[
"c-driver"
] | [
{
"code": "",
"text": "I am trying to install the MongoDB C Driver. I followed the instructions from the following tutorial:\nhttp://mongoc.org/libmongoc/current/installing.htmlI have tried building from tarball, and from git, and both times, i get the same outcome.Header files are generated my /usr/local/include/ folder\nbut no library files are generated in my /usr/local/lib/ folderinsead i get the following .so files in my lib folder.\n\nimage1060×245 24 KB\nAt this stage im completely stuck as to what is wrong.\nIs somebody able to provide me with some advise on how to debug this, and what to look for,\nor what i might be missing.the only thing i can see is during the cmake command i get the following not found results:\nLooking for reallocf - not found\nLooking for strlcpy - not foundNo package ‘libzstd’ found\nLooking for res_ndestroy - not found\nSearching for compression library header snappy-c.h\n– Not found (specify -DCMAKE_INCLUDE_PATH=/path/to/snappy/include for Snappy compression)but the command finished with the output:\nbuild files have been written to :// so it looks like the command finished successfull.Any help would be greatly appreciated",
"username": "arif_saeed"
},
{
"code": "make installInstall the project...\n-- Install configuration: \"RelWithDebInfo\"\n-- Installing: /usr/local/share/mongo-c-driver/COPYING\n-- Installing: /usr/local/share/mongo-c-driver/NEWS\n-- Installing: /usr/local/share/mongo-c-driver/README.rst\n-- Installing: /usr/local/share/mongo-c-driver/THIRD_PARTY_NOTICES\n-- Installing: /usr/local/lib/libbson-1.0.so.0.0.0\n-- Installing: /usr/local/lib/libbson-1.0.so.0\n-- Installing: /usr/local/lib/libbson-1.0.so\n...\n",
"text": "Hi @arif_saeed!The “not found” messages are informational. After installation, the output should include the paths to the installed files. E.g. the output of make install should include at the end lines like:The C driver determines install paths using cmake’s GNU install directories module. So if you are on a Debian system, library files may be installed to /usr/local/lib64.",
"username": "Kevin_Albertson"
}
] | Lib\mongoc-1.0.lib files not generated when installing C driver | 2020-03-06T19:39:23.136Z | Lib\mongoc-1.0.lib files not generated when installing C driver | 2,617 |
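A hedged set of commands for checking where the install actually put the libraries, based on the GNUInstallDirs behaviour Kevin describes; the prefix shown is just the default.

```sh
# Configure and install with an explicit prefix, then see where the libraries landed.
cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr/local ..
cmake --build . --target install

# GNUInstallDirs may choose lib, lib64, or a multiarch subdirectory depending on the distro:
find /usr/local -name 'libmongoc-1.0*' -o -name 'libbson-1.0*'
pkg-config --cflags --libs libmongoc-1.0
```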
|
null | [
"golang"
] | [
{
"code": "func countPlaceNear(db *mongo.Database, lng float64, lat float64) error {\npipeline := []bson.M{\n\t\tbson.M{\n\t\t\t\"$geoNear\": []bson.M{\n\t\t\t\tbson.M{\"includeLocs\": \"location\"},\n\t\t\t\tbson.M{\"distanceField\": \"distance\"},\n\t\t\t\tbson.M{\"near\": []bson.M{bson.M{\"type\": \"Point\"}, bson.M{\"coordinates\": []float64{lat, lng}}}},\n\t\t\t\tbson.M{\"maxDistance\": 1000},\n\t\t\t\tbson.M{\"spherical\": true},\n\t\t\t},\n\t\t},\n\t\tbson.M{\n\t\t\t\"$count\": \"number\",\n\t\t},\n}\ncur, err := db.Collection(\"place\").Aggregate(context.TODO(), pipeline, options.Aggregate())\nbson.M{\"near\": []float64{lat, lng}},\n",
"text": "Hello, I try to use the geoNear in pipeline, the code as below:I get the error: (Location16605) $geoNear requires a ‘near’ option as an Array.\nthen I change the line to:the same error.any help?James",
"username": "Zhihong_GUO"
},
{
"code": "$geoNearpipeline := []bson.M{\n\tbson.M{\n\t\t\"$geoNear\": bson.M{\n\t\t\t\"includeLocs\": \"location\",\n\t\t\t\"distanceField\": \"distance\",\n\t\t\t\"maxDistance\": 1000,\n\t\t\t\"spherical\": true,\n\t\t\t\"near\": bson.M{\n\t\t\t\t\"type\": \"Point\",\n\t\t\t\t\"coordinates\": []float64{lat, long},\n\t\t\t},\n\t\t},\n\t},\n\tbson.M{\n\t\t\"$count\": \"number\",\n\t},\n}\n",
"text": "Hi James,The argument for $geoNear should be a single BSON document, not a slice of BSON documents. See https://docs.mongodb.com/manual/reference/operator/aggregation/geoNear/ for some examples. For your pipeline specifically, I think it can be written as the following:Can you see if this works for you?– Divjot",
"username": "Divjot_Arora"
},
{
"code": "",
"text": "Hello Divjot,Thank you for you answer. It is not so straightforward to write the pipeline in this way. I found at github.com/mongodb/mongo-go-driver/bson there is a function bson.ParseExtJSONArray, which can turn the json string directly to be a pipeline. It is not in the official library go.mongodb.org/mongo-driver/bson, can I use it in my project?Thanks,James",
"username": "Zhihong_GUO"
},
{
"code": "bson.ParseExtJSONArray",
"text": "Can you explain why it’s not straightforward to write this pipeline? It seems simpler than the original pipeline you had in your question. Also, there is no bson.ParseExtJSONArray function in the current driver. Perhaps you’re looking at a very old version of the driver? Can you send a link to the source code for this function on Github?",
"username": "Divjot_Arora"
},
{
"code": "",
"text": "Hello Divjot,Many thanks for the answer. For me it is not straightforward, because It is hard to find good examples to write the pipeline correctly, and if there are errors, the error message print out are not clear enough to let me fix the issue. Personally, I think here the its better to say: near should be a bson.M object. The second issue: I will try the pipeline from the mongo cli client, but the pipeline text can’t be used directly in the golang code, we have to make it as bson manually(please tell me there are other tool to help if I am wrong). So here the better way for me, and for other developers reviewing the code, is to use json string build the pipeline. Except the geoNear operators, I have other questions related to how to write pipeline to “search text”, like bson.M{\"$text\": bson.M{\"$search\": in.Term}}, and how to write pipeline to match (an objectID and a bool value), when there are nested operator and parameters, I will feel confused…Regards,James",
"username": "Zhihong_GUO"
},
{
"code": "bson.ParseExtJSONArray",
"text": "and as to the bson.ParseExtJSONArray, is there other equivalent functions can do the job? My purpose is simple: I will write and test the json in mongo cli, then use this function to translate it to bson objects and used in golang code. any suggestions?",
"username": "Zhihong_GUO"
},
{
"code": "bson.UnmarshalExtJSONbuildPipeline",
"text": "Yeah, that makes sense. I definitely sympathize with the difficulty of writing complex pipelines. You could try using bson.UnmarshalExtJSON for this and that should do what you want. I wrote up a small example for this at JSON pipeline stages · GitHub and it seems to work as expected.The only shortcoming is that the input must be a document, but aggregation pipelines are arrays. The workaround in the code I linked above is to define a separate JSON document for each stage and pass all of the stages into the buildPipeline function. Hope this helps!– Divjot",
"username": "Divjot_Arora"
},
{
"code": "",
"text": "Hello Divjot, Thank you much! Really appreciated by your in-time and effectively support!",
"username": "Zhihong_GUO"
}
] | How to use geoNear in pipeline | 2020-03-05T07:37:30.872Z | How to use geoNear in pipeline | 3,561 |
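A small Go sketch of the approach from the linked gist: unmarshal a shell-tested extended-JSON stage with bson.UnmarshalExtJSON and drop it into the pipeline. The stage contents, package name, and collection name are placeholders.

```go
package places

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

// countActivePlaces builds its first stage from an extended-JSON string that was
// first tested in the mongo shell, then runs the aggregation.
func countActivePlaces(db *mongo.Database) error {
	stageJSON := `{"$match": {"active": true}}` // hypothetical stage

	var stage bson.M
	if err := bson.UnmarshalExtJSON([]byte(stageJSON), true, &stage); err != nil {
		return err
	}

	pipeline := []bson.M{stage, {"$count": "number"}}
	cur, err := db.Collection("place").Aggregate(context.TODO(), pipeline)
	if err != nil {
		return err
	}
	defer cur.Close(context.TODO())

	for cur.Next(context.TODO()) {
		log.Println(cur.Current) // each result document as raw BSON
	}
	return cur.Err()
}
```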
[
"golang"
] | [
{
"code": "",
"text": "Hello, may be a silly question, but I found in GitHub there is a version of mongo-go-driver,The Official Golang driver for MongoDB. Contribute to mongodb/mongo-go-driver development by creating an account on GitHub.\nand I import the one go.mongodb.org/mongo-driver/what’s the difference of the two drivers? thanks.",
"username": "Zhihong_GUO"
},
{
"code": "go.mongodb.org/mongo-drivergithub.com/mongodb/mongo-go-driver",
"text": "Hi,These are the same thing. go.mongodb.org/mongo-driver is a redirect to github.com/mongodb/mongo-go-driver. There’s some information about custom import paths at go command - cmd/go - Go Packages.– Divjot",
"username": "Divjot_Arora"
},
{
"code": "",
"text": "Hello Divjot,We use gitlab build our revision control system which is used only in our company, in this case I should use go.mongodb.org or mongodb · GitHub, or both will work fine.thanks,James",
"username": "Zhihong_GUO"
},
{
"code": "go.mongodb.org/mongo-drivergithub.com/mongodb/mongo-go-driver",
"text": "You should use go.mongodb.org for all import paths. Your choice of version control system shouldn’t matter for this. Basically, Go will try to fetch the dependencies from go.mongodb.org/mongo-driver, which will reroute to github.com/mongodb/mongo-go-driver and it can download the dependencies from there as it needs.",
"username": "Divjot_Arora"
},
{
"code": "",
"text": "Wow, An answer in 5 minute! Thank you so much!",
"username": "Zhihong_GUO"
}
] | Mongo-go-driver and mongo-driver | 2020-03-06T04:54:19.218Z | Mongo-go-driver and mongo-driver | 1,770 |
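In practice the redirect means the go.mongodb.org path is what appears in both go.mod and the imports, regardless of where the code is hosted. A minimal hedged example; the driver version and URI are just examples.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// go.mod entry (version is only an example):
	//   require go.mongodb.org/mongo-driver v1.3.1
	client, err := mongo.Connect(context.TODO(),
		options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(context.TODO())

	fmt.Println(client.Database("test").Name())
}
```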
|
null | [
"backup"
] | [
{
"code": "",
"text": "HelloI am new to MongoDB administration. Can someone clarify if encrypting backups is possible when generating backups using Community Edition?Thanks",
"username": "SatyaKrishna"
},
{
"code": "",
"text": "@SatyaKrishna The “generation” of backups is via tooling outside of the MongoDB server, so this is broadly possible depending on your environment and requirements. For supported backup approaches, see: MongoDB Backup Methods.If you are backing up via file system snapshots or cloud provider snapshots, there are generally options to save snapshots to an encrypted volume. You’ll have to consult the relevant filesystem or cloud provider documentation for more information on available options. Typically these are using volume-level encryption rather than file-level encryption.If you are using a management service like MongoDB Ops Manager (which is part of an Enterprise Advanced subscription), this can include support for encrypted backups depending on your configuration. Similar to filesystem or cloud provider snapshots, available options generally depend on the destination for your backup rather than the source.If you are looking for an easier solution for managing your MongoDB deployments (including configuration, monitoring, and backup), I would also consider MongoDB Atlas. MongoDB Atlas is a managed service running MongoDB Enterprise Edition.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "Thank you for your response. I am looking specifically database backups (and not file system snapshots).\nI cannot go with Mongo Atlas as well because all our databases should be on prem and not on cloud.",
"username": "SatyaKrishna"
},
{
"code": "",
"text": "In that case seems to me that the best approach is to use mongodump and then encrypt the resulting files.\nHowever, depending on the size of your dataset and number of indexes, the mongodump/mongorestore procedure could be a bad solution, due to the time spent in the tasks.Filesystem snapshots could help in this case, even on prem, you could work with mount points and ISOs, maybe…",
"username": "Felipe_Esteves"
},
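A minimal sketch of the dump-then-encrypt approach Felipe describes, assuming GnuPG is available for the encryption step; host, port, and file names are illustrative:

```
# Dump the deployment into a single compressed archive file
mongodump --host localhost --port 27017 --gzip --archive=backup.archive

# Encrypt the archive (any encryption tool works; gpg is shown as an assumption)
gpg --symmetric --cipher-algo AES256 backup.archive   # produces backup.archive.gpg
rm backup.archive
```

Restoring reverses the steps: decrypt the archive and feed it to mongorestore with --gzip --archive.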
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Encrypted backups using Community Edition | 2020-02-28T23:07:01.789Z | Encrypted backups using Community Edition | 2,193 |
null | [
"dot-net"
] | [
{
"code": "var client = new MongoClient(\"mongodb+srv://user:[email protected]\");\nvar database = client.GetDatabase(\"PTITSCurve\");\n \nvar collection = database.GetCollection<BsonDocument>(\"PlanningData\");\n\nvar filter = Builders<BsonDocument>.Filter.Eq(\"WSID\", WSId);\nvar projection = Builders<BsonDocument>.Projection.Exclude(\"_id\");\n System.Collections.Generic.IEnumerable<MongoDB.Bson.BsonDocument> bsonResult = collection.Find<BsonDocument>(filter).Project(projection).ToList();\n",
"text": "Hi,I am using following code:Yours sincerely",
"username": "Rajesh_Yadav"
},
{
"code": "",
"text": "I think you might find the following useful.",
"username": "steevej"
},
{
"code": "",
"text": "can it be done with out making index, because i want to search fillter on other cols and with out indexs.\neven i am ready to change the collation.yours sincerley",
"username": "Rajesh_Yadav"
},
{
"code": "",
"text": "The concept of collation https://docs.mongodb.com/manual/reference/collation/ will help you.",
"username": "steevej"
},
{
"code": "",
"text": "sorry the db is sombodys , he is not allowing to change the collation , so if i can set some thing in connection string or with out collation , then it would be help full, pls give example i am new and do not have time as i have to delever.",
"username": "Rajesh_Yadav"
},
{
"code": "",
"text": "If you go back to the link I posted you will find that find() supports collation so it does not involve any index or collection changes.",
"username": "steevej"
}
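For reference, the query-time collation that steevej points to can be expressed in the mongo shell like this (collection and field names are taken from the original C# snippet; the filter value and locale are assumptions). Strength 2 makes the comparison case-insensitive without any index or collection changes:

```javascript
// Case-insensitive equality match using a per-query collation
db.PlanningData.find({ WSID: "some-value" })
  .collation({ locale: "en", strength: 2 })
```

The C# driver accepts the same collation document through its find options, so the shell form above is only meant to show the shape of the collation.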
] | How to search in case insensitive way through C# | 2020-02-27T15:16:15.360Z | How to search in case insensitive way through C# | 11,064 |
null | [
"kafka-connector"
] | [
{
"code": " {\n\"name\": \"mongodb_user_management\",\n \"config\": {\n \"connector.class\":\"com.mongodb.kafka.connect.MongoSinkConnector\",\n \"tasks.max\":\"1\",\n \"key.converter\":\"org.apache.kafka.connect.storage.StringConverter\",\n \"key.converter.schema.enable\":\"false\",\n \"value.converter\":\"io.confluent.connect.avro.AvroConverter\",\n \"value.converter.schema.registry.url\":\"http://127.0.0.1:8081\",\n \"connection.uri\":\"mongodb+srv://arkcase:[email protected]/test?retryWrites=true&w=majority\",\n \"database\":\"UserDB\",\n \"collection\":\"users\",\n \"topics\":\"newuser\",\n \"database.history.kafka.bootstrap.servers\":\"http://127.0.0.1:9092\"\n }\n }\n\n {\n \"name\": \"User\",\n \"namespace\": \"org.arkcase.avro\",\n \"type\": \"record\"\n \"fields\": [\n {\n \"name\": \"userId\",\n \"type\": \"string\"\n }\n {\n \"name\": \"name\",\n \"type\": \"string\"\n },\n {\n \"name\": \"lastName\",\n \"type\": \"string\"\n },\n \t{\n \"name\": \"audit\",\n \"type\": {\n \"fields\": [\n {\n \"name\": \"userId\",\n \"type\": \"string\"\n },\n {\n \"default\": \"127.0.0.1\",\n \"name\": \"ipAddress\",\n \"type\": \"string\"\n },\n {\n \"name\": \"requestId\",\n \"type\": \"string\"\n }\n ],\n \"name\": \"Audit\",\n \"type\": \"record\"\n }\n }\n ]\n }\nCaused by: org.apache.kafka.connect.errors.DataException: Converting byte[] to Kafka Connect data failed due to serialization error:\nCaused by: org.apache.kafka.common.errors.SerializationException: com.fasterxml.jackson.core.JsonParseException: Unrecognized token 'user': was expecting ('true', 'false' or 'null')\n at [Source: (byte[])\"user-001\"; line: 1, column: 6]\n",
"text": "Hi,I am trying to configure Kafka Sink connector with avro schema value.\nThis is my configuration :As you can see I am using StringConverter for key and AvroConverter for Value.\nI am sending avro serialized message whit this avro schema :I am loading the connector and everything is okey until the first message is sent.\nAfter that connector is down with the following exception :Looks like avro converter is not working.Any suggestions ?",
"username": "Vladimir_Cherepnalko"
},
{
"code": "",
"text": "Solved.\nLooks like mongo sink connector had problem with the message key.\nI was sending string key, but even if the key converter is String it requires at least valid json encoded string.\nSolution :\ntransforms=WrapKey\ntransforms.WrapKey.type=org.apache.kafka.connect.transforms.HoistField$Key\ntransforms.WrapKey.field=_id",
"username": "Vladimir_Cherepnalko"
}
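Putting the fix together with the original sink configuration, the transform settings would sit alongside the other connector properties. A hedged sketch in the JSON form used above; only the three transforms.* keys come from the solution, the rest is trimmed from the original config:

```json
{
  "name": "mongodb_user_management",
  "config": {
    "connector.class": "com.mongodb.kafka.connect.MongoSinkConnector",
    "topics": "newuser",
    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
    "value.converter": "io.confluent.connect.avro.AvroConverter",
    "value.converter.schema.registry.url": "http://127.0.0.1:8081",
    "transforms": "WrapKey",
    "transforms.WrapKey.type": "org.apache.kafka.connect.transforms.HoistField$Key",
    "transforms.WrapKey.field": "_id"
  }
}
```

The HoistField$Key transform wraps the plain string key into a document under _id, which is the structure the sink connector expects.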
] | MongoDB Kafka Sink connector - AvroConverter problem | 2020-03-05T12:28:56.048Z | MongoDB Kafka Sink connector - AvroConverter problem | 4,187 |
null | [
"legacy-realm-cloud"
] | [
{
"code": "",
"text": "From reading the roadmap, it’s still unclear to me what the future of realm is for devs that don’t plan to integrate with non-MongoDB services.I have two questions:Thanks",
"username": "curiousdev"
},
{
"code": "",
"text": "Hi – We can definitely look into making the roadmap clearer from this perspective.One thing that we want to stress is that for users with no intention of using cloud/MongoDB products, Realm Database and the Realm SDKs will continue to function as they do today. We are fully committed to continuing to build Database and SDK features that further local development.In regards to your specific questions –While we are planning on officially announcing the deprecation of Realm Cloud and ending sign-ups within the next few months, Realm Cloud is still not a bad solution for POC-ing or getting a sense of how Realm development works. For non-production apps built around Realm Cloud’s full sync we think migration should be fairly simple and we are working on strategies for migrating production applications from Realm Cloud to MongoDB Realm.While we don’t have a specific plan to support the Realm Adaptors, much of the functionality of Stitch (which will become the new backend for Realm Database and be renamed to “MongoDB Realm”) will allow coordinating with other backend systems (specifically Triggers, Functions, and Dependency Resolution).",
"username": "Drew_DiPalma"
},
{
"code": "",
"text": "Hi Drew,I am currently a few months away from releasing a production app using Realm Cloud / Query Based Sync. I’m very disappointed right now and feel that there is a serious lack of information out there that needs to be addressed, especially for paying customers like myself around what the issues are with Query Based Sync.There have been mentions throughout various forum posts back on forums.realm.io about performance/scaling issues with Query Based Sync, but no real documentation about what these performance/scaling issues actually are.Can these issues please please be outlined in greater detail?How long will query based sync be supported and when will Realm Cloud actually seize to exist? I know you plan to stop future sign ups on Realm Cloud, but that is not enough information. Do we have a year, two years, or what does the future look like in clear defined terms for Realm Cloud?My situation is similar to the one outlined below:Discussions about developing applications with MongoDB Atlas App Services and Realm, including Realm SDKs, Atlas Device Sync, Atlas Data API, Atlas GraphQL API, and Atlas Triggers.Please can we have more information on this ASAP.Thank you,Chase",
"username": "Chase_Klingel"
},
{
"code": "",
"text": "Hi Chase –There are some cases where query-based sync works fairly well, some where we can throw some extra resources your way to help scale, and some cases where we may want to help you adjust your data model. As this is something that varies so much by case and usage profile it is tough to document. I saw that you have also emailed me directly and have replied so we can discuss more of the intricacies of this project. For others on the forum with similar questions, please feel free to reach out at [email protected] terms of support for Realm Cloud – While we will be halting sign-ups to Realm Cloud once we release MongoDB Realm (in approximately 2 months) we anticipate keeping Realm Cloud live for at least another year. A specific shutdown plan will be based on usage as well as how quickly we can GA MongoDB Realm and help Realm users migrate.Thanks,\nDrew",
"username": "Drew_DiPalma"
},
{
"code": "",
"text": "This topic was automatically closed 24 hours after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Future of Realm Cloud and Custom Adapters | 2020-02-26T23:35:16.816Z | Future of Realm Cloud and Custom Adapters | 3,006 |
null | [] | [
{
"code": "",
"text": "I’ve heard that such institutes use SQL because they need relational DB!Q. What do colleges and schools generally use and why?\nQ. Do we need to use SQL for more security?",
"username": "Arshdeep_Singh"
},
{
"code": "",
"text": "My answer is definitely bias (I really like MongoDB) however,Check this link, which show companies using MongoDBSee the leading organizations creating applications never before possible with MongoDB.And i got it from this post",
"username": "Natac13"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Do colleges use NoSQL? | 2020-03-05T14:26:56.268Z | Do colleges use NoSQL? | 2,339 |
null | [
"compass",
"mongodb-shell"
] | [
{
"code": "{\n \"_id\": {\n \"$oid\": \"5e5ddf423874fe28e899d912\"\n },\n \"nodeMapping\": [],\n \"Parentnode\": [\n {\n \"LEVEL1\": \"Civil and Structural\"\n },\n {\n \"LEVEL2\": \"Cluster 2\"\n },\n {\n \"LEVEL3\": \"Parking Area\"\n }\n ],\n \"nodeHierarchy\": [\n {\n \"LEVEL1\": \"Civil and Structural\"\n },\n {\n \"LEVEL2\": \"Cluster 2\"\n },\n {\n \"LEVEL3\": \"Parking Area\"\n },\n {\n \"LEVEL4\": \"Compaction\"\n }\n ],\n \"leafNode\": \"Compaction\",\n \"weightage\": \"40\",\n \"dateFrom\": \"10 Apr 2020\",\n \"dateTo\": \"10 Jan 2021\",\n \"scope\": \"2600\",\n \"IsLeafNode\": true,\n \"uniqueId\": \"26ed3117-41d5-4252-a772-832fa8454587\",\n \"isInternalNode\": false,\n \"wsName\": \"PROJECT2\",\n \"wsId\": \"PROJECT2\"\n}\n",
"text": "I have JSON in following format:Now how can I sort or filter on date range using shell or Compass?Basically i want function which can convert it into real date.Pls send query. It will be helpful.",
"username": "Rajesh_Yadav"
},
{
"code": "date$dateFromString{ dt: \"10 Apr 2020\" }MONTHS_ARR = [ \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\" ]db.coll.aggregate( [\n { \n $addFields: { \n dt: { \n $dateFromString: {\n dateString: { \n $concat: [\n { $substrCP: [ \"$dt\", 0, 2 ] }, \" \",\n { $toString: { $add: [ { $indexOfArray: [ MONTHS_ARR, { $substrCP: [ \"$dt\", 3, 3 ] } ] }, 1 ] } }, \" \",\n { $substrCP: [ \"$dt\", 7, 4 ] }\n ] \n },\n format: \"%d %m %Y\"\n }\n }\n } \n },\n] )\nDate{ \"dt\" : ISODate(\"2020-04-10T00:00:00Z\") }",
"text": "An example document with input string date field can be converted to a date object with an aggregation operator $dateFromString. But, this operator takes a month value in numeric format, not as alphabet characters like “Mar”. So, get the alpha month converted to a number to convert as a date object.{ dt: \"10 Apr 2020\" }MONTHS_ARR = [ \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\" ]The converted string date to Date object. This can be used for your sort or comparison operations.{ \"dt\" : ISODate(\"2020-04-10T00:00:00Z\") }",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "agrregate is the only way to do it or some other functions are there which could be applied with out agrregates pipelne?",
"username": "Rajesh_Yadav"
},
{
"code": "find$expr$dateFromStringfind",
"text": "If you want to use the converted date in an aggregation query, that is the way; for comparison, sorting and date arithmetic this is the only way, I think.It is possible to use a date field in converted form in a find query filter, using the $expr operator. $expr allows using aggregation operators (e.g., $dateFromString) in a find query filter only.; this means using with comparison operators and not for sorts on the date field.",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "can i get one eample, i new and there is time limit, so i cannot read.",
"username": "Rajesh_Yadav"
},
{
"code": "MONTHS_ARRdateToComparedt$exprdateToCompare = ISODate(\"2021-03-11T00:00:00Z\");\n\nqueryCond = {\n $lt: [\n {\n $dateFromString: {\n dateString: { \n $concat: [\n { $substrCP: [ \"$dt\", 0, 2 ] }, \" \",\n { $toString: { $add: [ { $indexOfArray: [ MONTHS_ARR, { $substrCP: [ \"$dt\", 3, 3 ] } ] }, 1 ] } }, \" \",\n { $substrCP: [ \"$dt\", 7, 4 ] }\n ] \n },\n format: \"%d %m %Y\"\n }\n },\n dateToCompare\n ]\n};\n\ndb.coll.find( { $expr: { $eq: [ queryCond, true ] } } );",
"text": "The same document as in the previous exmple and the MONTHS_ARR variable are used to compare a supplied date dateToCompare with the document’s string date field dt. Note the usage of the $expr operator.",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "not this one, i wanted field query example",
"username": "Rajesh_Yadav"
}
] | How to convert string date to new Date() using shell or Compass? | 2020-03-04T13:15:30.979Z | How to convert string date to new Date() using shell or Compass? | 5,842 |
null | [
"golang"
] | [
{
"code": "",
"text": "Hello, when I set a filter has $geoNear, $near, and $nearSphere and use the filter in CountDocuments, I will get error (BadValue) $geoNear, $near, and $nearSphere are not allowed in this context. what’s the root reason that these operators not allowed.Is it because the searching or counting using $geoNear, $near and $nearSphere are time consuming and will impact the performance?thanks,James",
"username": "Zhihong_GUO"
},
{
"code": "filterCountDocuments$match$geoNear$matchCollection.AggregateCollection.CountDocuments$geoNear$count",
"text": "Hi James,The filter parameter for CountDocuments becomes a $match aggregate stage. Because $geoNear is also an aggregation stage, it cannot be used inside of the $match one.To do this, you can use Collection.Aggregate rather than Collection.CountDocuments. Your aggregation would have two stages: $geoNear and $count to output the number of documents found.",
"username": "Divjot_Arora"
},
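A minimal mongo shell sketch of the two-stage pipeline Divjot describes; the collection name, coordinates, and distance are illustrative. $geoNear must be the first stage and requires a geospatial (for example 2dsphere) index on the queried field:

```javascript
db.places.aggregate([
  {
    $geoNear: {
      near: { type: "Point", coordinates: [ -73.99279, 40.719296 ] },
      distanceField: "dist.calculated",   // required output field
      maxDistance: 1000,                  // metres, illustrative
      spherical: true
    }
  },
  { $count: "matchingDocuments" }         // emits { matchingDocuments: <n> }
])
```

The Go driver's Collection.Aggregate accepts the same pipeline expressed as bson.D/bson.A stages.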
{
"code": "",
"text": "Hello Divjot,Very clear. Thank you!James",
"username": "Zhihong_GUO"
}
] | Why the CountDocuments doesn't support $geoNear | 2020-03-05T00:42:07.108Z | Why the CountDocuments doesn’t support $geoNear | 3,680 |
null | [
"golang"
] | [
{
"code": "",
"text": "Hello,I am developing a feature based on CountDocuments of Collection. But I just found this function can only be used after 4.0. Now our production server runs version 3.6, is there any other equivalent functions for 3.6? And BTW, is there any tools to help me check if the API I am using is available for specific version of mongodb. I am using golang.Thanks,James",
"username": "Zhihong_GUO"
},
{
"code": "count()countDocumentscount()",
"text": "Hey @Zhihong_GUOHere are all the versions of the Go driver Releases · mongodb/mongo-go-driver · GitHubHere is the compatibility chart for the Go drivers and mongodb versionHere is the 3.6 version for count() in the mongo shellSorry I do not know Go at all (thinking I should learn) but the equivalent function of countDocuments before 4.0 was count()",
"username": "Natac13"
},
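For reference, the shell-level difference looks like this (collection and filter are illustrative). count() uses the older count command, while countDocuments() runs an aggregation under the hood:

```javascript
// Works on 3.6 and earlier drivers/servers
db.orders.count({ status: "shipped" })

// Preferred helper exposed by 4.0-era drivers (CountDocuments in the Go driver)
db.orders.countDocuments({ status: "shipped" })
```

As Isabella notes later in the thread, the Go driver's CountDocuments still works against a 3.6 server because the driver translates it into an aggregation the server already supports.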
{
"code": "",
"text": "Hello Natac, thank you so much for the detail answer!",
"username": "Zhihong_GUO"
},
{
"code": "",
"text": "Hi James,The CountDocuments function will still work with server version 3.6. You can check on the driver docs to see if something is restricted to certain versions. If nothing is specified, it should work with all supported server versions. Also, the driver will return an error if something is not compatible with your current server version.–Isabella",
"username": "Isabella_Siu"
},
{
"code": "",
"text": "Hello Isabella,I have tested this function on 3.6 and found it works well.Many thanks,James",
"username": "Zhihong_GUO"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | What's the equivalent function of CountDocuments before 4.0? | 2020-03-03T14:01:54.064Z | What’s the equivalent function of CountDocuments before 4.0? | 2,332 |
null | [] | [
{
"code": "MongoClient",
"text": "According to the mongodb documentation ( https://docs.mongodb.com/manual/release-notes/drivers-write-concern/ ) the default write concern for drivers is acknowledgement of write operations.The default write concern on the new MongoClient class will be to acknowledge all write operationsWill the newly written document be immediately queryable even though the document might not have been written to disk?In summary,Do any of the write concern configurations in mongodb produce eventual consistency issues that developers need to be aware of that could cause a document just inserted/updated to not be immediately noticeable?",
"username": "Michael_Fyffe"
},
{
"code": "",
"text": "Write Concerns are acknowledgements back to the author to define the level of durability/certainty that the write(s) have taken place. Your question is fundamentally about Reads - and so you will want to review the Read Concerns features/options. But of course in general - not specific to Mongo - where there is replication if there are Reads occurring directly off the Secondaries then there definitely are intervals where it does not have the most fresh data. One must plan the Read strategy accordingly. For instance some analytics are fine off a secondary whereas some transactions must only read off the primary.",
"username": "James_Bailie"
}
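A small mongo shell sketch of how these settings can be made explicit per operation (collection name and documents are illustrative). Writes acknowledged by the primary are immediately visible to reads routed to the primary; reads routed to secondaries can lag:

```javascript
// Acknowledged by the primary only (the driver default discussed above)
db.orders.insertOne({ item: "abc", qty: 1 }, { writeConcern: { w: 1 } })

// Acknowledged by a majority of replica set members
db.orders.insertOne(
  { item: "def", qty: 2 },
  { writeConcern: { w: "majority", wtimeout: 5000 } }
)

// A read allowed to hit a secondary may not yet see the newest writes
db.orders.find({ item: "abc" }).readPref("secondaryPreferred")
```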
] | Do any WriteConcern configurations imply eventual consistency? | 2020-03-04T21:46:38.644Z | Do any WriteConcern configurations imply eventual consistency? | 1,358 |
null | [] | [
{
"code": "",
"text": "Hi,\nThere is a question related to partial indexes in my application, and I see somebody is fixing it, see also https://jira.mongodb.org/browse/SERVER-25023.The status of this issue is IN CODE REVIEW, does anyone know what is going on now? And when will this improvement be released?Thanks",
"username": "sammy_Ma"
},
{
"code": "",
"text": "The sprint’s label in the ticket is Query 2020-03-09, Query 2020-03-23, so my guess is that they will be working on it starting next week, but maybe @Asya_Kamsky might have more insight into it since she’s the one that reported it.",
"username": "Doug_Duncan"
},
{
"code": "",
"text": "It’s exciting to hear this feature, this is very important for me. If anyone tell me the exact release date, I will be very grateful.Sorry, what’s mean of sprint? It is the date when the feature code will be merged or MongoDB team will start working on it?",
"username": "sammy_Ma"
},
{
"code": "fixVersion/smasterfixVersionfivVersion/smaster",
"text": "The status of this issue is IN CODE REVIEW, does anyone know what is going on now? And when will this improvement be released?You can “Watch” issues in Jira to follow updates. This issue is currently In Code Review status because the assignee has submitted a changeset for internal code review. Depending on feedback the changeset may require further revision or testing before it is approved for merging.When ready for merging, the fixVersion/s on the JIRA issue will change to a target release version (replacing the generic “Backlog” placeholder). Changes are usually first merged to the master branch so the first tagged fixVersion is typically a development/unstable release. The changeset will then be considered for backport to non-EOL production release branches depending on the impact and complexity of the backport, and additional releases will be added to the fivVersion/s field as the backports are completed.It is too early to confirm any release timing for this issue, but Watching in Jira will help you track progress. Using the latest production release series (currently MongoDB 4.2) will definitely increase the odds that an interesting fix can be backported. A backport may not always be possible or approved: server code may have changed substantially in the master branch or the changeset could introduce a risky or backward-breaking behaviour change.In the interim there is a hacky workaround mentioned on SERVER-25023 you could try.You may also want to start a discussion in the Working with Data forum category. If you can include your MongoDB server version, a sample document, and your use case for partial indexing perhaps there may be other data modelling or indexing approaches to suggest.Sorry, what’s mean of sprint? It is the date when the feature code will be merged or MongoDB team will start working on it?This refers to a software development sprint (aka iteration or work period). This doesn’t have a direct correlation to when development or review for an issue will start or finish. The interpretation should be that a team has planned to allocate some time to progress this issue in the associated fortnightly sprints. If there are unexpected planning or resource changes, an issue will be added to future sprints as needed.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | When will SERVER-25023 be released? | 2020-03-05T09:01:08.572Z | When will SERVER-25023 be released? | 2,306 |
null | [
"indexes",
"views"
] | [
{
"code": "db.b.find({ day: 'Monday' }).explain()$match",
"text": "My question is about MongoDB view optimization. For example, I have such code mongodb.js · GitHub and when I’m running a db.b.find({ day: 'Monday' }).explain() I see that query is appended at the end of the view pipeline as $match stage operator. Is there any way to prepend this query to utilize indexes properly?",
"username": "Ivan_Zakharchanka"
},
{
"code": "$match$match$match$match",
"text": "Is there any way to prepend this query to utilize indexes properly?The view’s query is not using the index as the $match is coming in the later stages of the pipeline. The index can be used only when the $match is in the beginning stages (mostly first) of the pipeline.To utilize the index, make the $match part of the view definition (then this query will use the underlying collection’s index). But, that limits the view’s capabilities (e.g., just query on one value, like “Monday” or a range of values).As such there is no way to pass a ‘parameter’ value to a view’s definition; in this case the $match stage, if it is used as the view pipeline’s first stage. But, a JavaScript function with a query can take a parameter, and one can run the script from mongo Shell.",
"username": "Prasad_Saya"
}
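A hedged shell sketch of the approach Prasad describes, using the day field from the question; the source collection name and any later stages are assumptions:

```javascript
// Index on the underlying collection
db.source.createIndex({ day: 1 })

// View whose pipeline starts with $match, so the index on "day" can be used
db.createView("mondays", "source", [
  { $match: { day: "Monday" } }
  // ...the rest of the original view pipeline would follow here...
])

db.mondays.find().explain()   // the $match is now at the front of the resolved pipeline
```

The trade-off is exactly the one noted above: the matched value is baked into the view definition, so a different day needs a different view (or a direct aggregation on the collection).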
] | How to efficiently create indexes and query them in MongoDB views? | 2020-03-05T19:51:10.718Z | How to efficiently create indexes and query them in MongoDB views? | 2,719 |
null | [
"indexes"
] | [
{
"code": "",
"text": "I have created multiple documents inside my collection that has the “name” field set to a unique index. The issue I am having is that when I remove “name” as a unique index, it still requires the field to be unique. How can I remove the unique index of a field without removing the field from my documents?",
"username": "Jon_Paricien"
},
{
"code": "",
"text": "Welcome @Jon_ParicienCan you post the error on inset and the indices on the collection?",
"username": "chris"
},
{
"code": "namesname{\n _id: 1,\n name: \"John Doe\",\n profession: \"Programmer\"\n}\nnamedb.names.createIndex( { name: 1 }, { unique: true } )db.names.getIndexes()namedb.names.dropIndex( { name: 1 } )",
"text": "How can I remove the unique index of a field without removing the field from my documents?A collection names with name as field:Create a unique index on name field:\ndb.names.createIndex( { name: 1 }, { unique: true } )List all indexes on the collection:\ndb.names.getIndexes()Remove the index on name field using the dropIndex method:\ndb.names.dropIndex( { name: 1 } )The above commands work from the mongo Shell. The same actions can be performed from the Compass GUI tool.",
"username": "Prasad_Saya"
}
] | Remove unique index from each document | 2020-03-05T18:33:05.461Z | Remove unique index from each document | 8,225 |
null | [
"spark-connector"
] | [
{
"code": " val spark = SparkSession.builder()\n .appName(\"User Network Graph\")\n .config(\"spark.mongodb.input.uri\", \"mongodb://mongo/socio.d3raw\")\n .config(\"spark.mongodb.output.uri\", \"mongodb://mongo/socio.d3raw\")\n .master(\"yarn\").getOrCreate()\n\n val rawD3str=seqGraph.toDF()\n\n MongoSpark.write(rawD3str).option(\"spark.mongodb.output.uri\", \"mongodb://mongo/socio\" \n ).option(\"collection\",\"d3raw\").mode(\"append\").save()\n",
"text": "I am trying to store Apache Spark Dataframe into MongoDB using Scala but getting Caused by: org.bson.BsonMaximumSizeExceededException: Payload document size is larger than maximum of 16777216. exception while storing dataframe into MongoDBCode Snippet:Error stack trace0 failed 4 times, most recent failure: Lost task 0.3 in stage 332.0 (TID 11617, hadoop-node022, executor 1): org.bson.BsonMaximumSizeExceededException: Payload document size is larger than maximum of 16777216. at com.mongodb.internal.connection.BsonWriterHelper.writePayload(BsonWriterHelper.java:68) at com.mongodb.internal.connection.CommandMessage.encodeMessageBodyWithMetadata(CommandMessage.java:147) at com.mongodb.internal.connection.RequestMessage.encode(RequestMessage.java:138) at com.mongodb.internal.connection.CommandMessage.encode(CommandMessage.java:61) at com.mongodb.internal.connection.InternalStreamConnection.sendAndReceive(InternalStreamConnection.java:248) at com.mongodb.internal.connection.UsageTrackingInternalConnection.sendAndReceive(UsageTrackingInternalConnection.java:99) at com.mongodb.internal.connection.DefaultConnectionPool$PooledConnection.sendAndReceive(DefaultConnectionPool.java:450) at com.mongodb.internal.connection.CommandProtocolImpl.execute(CommandProtocolImpl.java:72) at com.mongodb.internal.connection.DefaultServer$DefaultServerProtocolExecutor.execute(DefaultServer.java:226) at com.mongodb.internal.connection.DefaultServerConnection.executeProtocol(DefaultServerConnection.java:269) at com.mongodb.internal.connection.DefaultServerConnection.command(DefaultServerConnection.java:131) at com.mongodb.operation.MixedBulkWriteOperation.executeCommand(MixedBulkWriteOperation.java:435) at com.mongodb.operation.MixedBulkWriteOperation.executeBulkWriteBatch(MixedBulkWriteOperation.java:261) at com.mongodb.operation.MixedBulkWriteOperation.access$700(MixedBulkWriteOperation.java:72) at com.mongodb.operation.MixedBulkWriteOperation$1.call(MixedBulkWriteOperation.java:205) at com.mongodb.operation.MixedBulkWriteOperation$1.call(MixedBulkWriteOperation.java:196) at com.mongodb.operation.OperationHelper.wi",
"username": "Ameen_Nagiwale"
},
{
"code": "",
"text": "Hi @Ameen_Nagiwale a single MongoDB document can not exceed 16MB (unless you use the GridFS storage). From the sounds of things your dataframe is bigger than that.",
"username": "Doug_Duncan"
}
] | Getting Size Exceeded Exception while storing Dataframe into MongoDB | 2020-03-05T12:36:19.957Z | Getting Size Exceeded Exception while storing Dataframe into MongoDB | 3,693 |
null | [
"compass"
] | [
{
"code": "BinaryBinary{ \"user_id\": Binary('AjHO4fVjpb6GOvVhHeHOQ==', 0) }",
"text": "Is there a way to use Binary values in queries in MongoDB Compass? I tried different syntaxes, but none of them work. I use the Binary type quite a lot, e.g. for user IDs and it would be really useful if I could build and analyse queries in MongoDB Compass.One syntax that doesn’t give me an error, but produces no results is like this: { \"user_id\": Binary('AjHO4fVjpb6GOvVhHeHOQ==', 0) }",
"username": "Nick"
},
{
"code": "{\"field\": BinData(0, \"valid_base64\")}",
"text": "Hi @Nick, welcome!You should be able to query UUID and BinData from MongoDB Compass v1.20+ (COMPASS-1083). Try using {\"field\": BinData(0, \"valid_base64\")}For example, using the current stable version Compass v1.20.5:\nScreen Shot 2020-03-06 at 9.37.11 am904×195 23.3 KB\nYou can also store/retrieve using UUIDs (subtype 4) for IDs.\nRegards,\nWan.",
"username": "wan"
}
] | How to use Binary values in MongoDB Compass? | 2020-03-03T12:12:34.754Z | How to use Binary values in MongoDB Compass? | 13,275 |
null | [
"installation"
] | [
{
"code": "",
"text": "I have established the mongoDB logs and data directories on our CIFS, network file share, on a virtual machine on our netapp, which I access from my local machine. These directories are located on this server for security purposes.Since, I have setup another user on their local machine to access the DB using mongoDB Compass Community, but when we both attempt to connect, i.e. start the mongoDB service, we are not able to do so simultaneously.Is it possible for more than 1 user to have a MongoDB Server service in the Running status? I’d like 2-n users to be able to connect to the same data.",
"username": "Craig_Hill"
},
{
"code": "mongodmongod",
"text": "Is it possible for more than 1 user to have a MongoDB Server service in the Running status? I’d like 2-n users to be able to connect to the same data.Data files are exclusively opened by a single mongod process.You should have a single MongoDB deployment which multiple remote users can connect to. The mongod process is multi-threaded and supports tens of thousands of concurrent connections (subject to resource availability and workload).For more information on securing this service, please see the Security Checklist in the MongoDB manual.Regards,\nStennie",
"username": "Stennie_X"
}
] | Multi-user multi-system mongoDB Compass Community simultaneous connections | 2020-03-05T19:37:55.966Z | Multi-user multi-system mongoDB Compass Community simultaneous connections | 3,662 |
null | [] | [
{
"code": "",
"text": "** UPDATED 2-23-20Operating System: Windows 10\nInstall: Runs on localhost, data and log on a secure network virtual server due to PHIv. 4.0.16 - Original Install\nv. 4.2.3 - Updated to versionHas been an issue for both versions.The Operation Could Not Be CompletedError: 1053:\nThe service did not respond to the start or control request in a timely fashion.Log:\n2020-02-23T08:46:44.182-0500 I CONTROL [main] ***** SERVER RESTARTED *****\n2020-02-23T08:46:44.502-0500 I CONTROL [main] Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols ‘none’",
"username": "Craig_Hill"
},
{
"code": "",
"text": "Welcome to the forum. What version of MongoDB are you running? What OS? Is this on a physical machine or virtual?",
"username": "Wesley_Faulkner"
},
{
"code": "",
"text": "That looks like a Windows Service error message.What does the MongoDB logs show?",
"username": "Doug_Duncan"
},
{
"code": "",
"text": "I’ve updated the original post, it unfortunately was submitted prior to completion. It should now contain sufficient information to kick off the conversation. Thanks.",
"username": "Craig_Hill"
},
{
"code": "mongod.exe",
"text": "It would be helpful if we new what was before this section of the log. There should hopefully be more information in the log which could point to what’s going on.2020-02-23T08:46:44.182-0500 I CONTROL [main] ***** SERVER RESTARTED *****\n2020-02-23T08:46:44.502-0500 I CONTROL [main] Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols ‘none’You could also try starting the mongod.exe process manually to see if you get any other errors. Note, if the service is calling any configuration file or passing command line options, you would want to do that as well.",
"username": "Doug_Duncan"
},
{
"code": "",
"text": "I haven’t had this issue recently. I’m keeping an eye on it.",
"username": "Craig_Hill"
}
] | Localhost:27017 Service keeps stopping; service wont start | 2020-02-22T18:29:19.296Z | Localhost:27017 Service keeps stopping; service wont start | 6,346 |
null | [
"atlas"
] | [
{
"code": "",
"text": "Hey all,I’m looking into moving our mlab instances over to atlas. There is a migration button I expect will do most of the work. However, will that remove what is currently live over on MLab?I’d like to get Atlas set up, confirmed working, and then switch things over without losing my fallback.Thanks for any information.",
"username": "Josh_Rodarte"
},
{
"code": "",
"text": "Welcome to the forum @Josh_Rodarte!There’s a detailed mLab Guide to Migrating to Atlas which should cover any questions related to migration.Some of the migration outcomes depend on the type of clusters you are migrating from/to (for example, migrating between shared or dedicated clusters) but the general approach is to allow you to test and confirm your cutover to Atlas without removing any existing data from mLab.Once you have finalised your migration to Atlas, you need to explicitly Delete the source mLab deployment to remove the data and stop any recurring charges.If you are live migrating to a dedicated-tier Atlas cluster (M10 and above), see Can I perform a trial/test Live Migration? and related FAQs in the migration guide.Regards,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "Also if you are looking for help, please reach out via [email protected]",
"username": "Andrew_Davidson"
},
{
"code": "",
"text": "Thank you both. That cleared it up for me.",
"username": "Josh_Rodarte"
},
{
"code": "",
"text": "This topic was automatically closed 24 hours after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Simple question on Migrating from MLab to Atlas | 2020-03-04T21:25:54.779Z | Simple question on Migrating from MLab to Atlas | 1,519 |
[] | [
{
"code": "",
"text": "It might be good to have direct signup link on every page when you are not logged in.Right now, you need to click on login\n\nScreen Shot 2020-02-23 at 10.13.131558×146 14.1 KB\n\nAnd then on Mongo SSO page, there is a small text reads “sign up”\n\nScreen Shot 2020-02-23 at 10.12.35832×1014 59 KB\n",
"username": "coderkid"
},
{
"code": "",
"text": "Hey @coderkid, thanks for the suggestion. This isn’t in the cards right now, but maybe something we can pursue down the road. Keep the ideas coming!Jamie",
"username": "Jamie"
},
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Signup link on every page when you are not logged in | 2020-02-23T15:18:37.138Z | Signup link on every page when you are not logged in | 3,314 |
|
null | [
"golang"
] | [
{
"code": "\tdbs := map[string](interface{}){}\n\tconn.GetDBWithoutIndexing(\"admin\").RunCommand(context.Background(), bson.M{\n\t\t\"listDatabases\": 1,\n\t}).Decode(&dbs)\n\n\tfor _, database := range dbs[\"databases\"].(primitive.A) {\n\t\td := database.(map[string]interface{})\n\t\tdbName= d[\"name\"].(string)\n\t\tfmt.Println(\"Working on \", dbName)\n\t\t// Find out the number of collections\n\t\tcols, _ := conn.GetDBWithoutIndexing(dbName).ListCollectionNames(context.Background(), bson.M{})\n\t\tfor _, col := range cols {\n\t\t\tcur, _ := conn.GetDBWithoutIndexing(dbName).C(col).Indexes().List(context.Background())\n\t\t\tindices := []mongo.IndexModel{}\n\t\t\tcur.All(context.Background(), &indices)\n\t\t\tfor _, index := range indices {\n\t\t\t\tfmt.Printf(\"%#v\\n\", index.Keys)\n\t\t\t}\n\t\t}\n\t\tswitch {\n\t\tcase strings.HasPrefix(dbName, \"BLURG_\"):\n\t\t\tdevDBs++\n\t\tdefault:\n\t\t\tnonEnvDBs++\n\t\t}\n",
"text": "I’m attempting to aggregate some stats in our mongo database using the go driver. Ultimately, I’m trying to find out:Now a couple issues with this code:",
"username": "TopherGopher"
},
{
"code": "mongo.IndexModelIndexView.Listcursor.Current{\"v\": {\"$numberInt\":\"2\"},\"key\": {\"_id\": {\"$numberInt\":\"1\"}},\"name\": \"_id_\"}KeyNameDatabase.ListCollections",
"text": "Hi Christopher,The mongo.IndexModel type is meant to be used for creating indexes, not decoding into when fetching existing ones. I tried running IndexView.List and printing out cursor.Current on every iteration and it seems like the server returns documents in the form{\"v\": {\"$numberInt\":\"2\"},\"key\": {\"_id\": {\"$numberInt\":\"1\"}},\"name\": \"_id_\"}Given this, you could define a struct with Key and Name fields and use that for decoding. We have an open GODRIVER ticket to add a type for decoding the results from Database.ListCollections (https://jira.mongodb.org/browse/GODRIVER-903) and could potentially repurpose this ticket to include a type for indexes as well. I’m currently talking to the team about whether or not we think this is a good idea to have in the driver and can get back to you on this tomorrow.",
"username": "Divjot_Arora"
},
{
"code": "type KV struct {\n\tKey map[string]interface{} `bson:\"key\"`\n\tValue interface{} `bson:\"v\"`\n\tName string `bson:\"name\"`\n}\n\tdbs := map[string](interface{}){}\n\terr := conn.GetDBWithoutIndexing(\"admin\").RunCommand(context.Background(), bson.M{\n\t\t\"listDatabases\": 1,\n\t}).Decode(&dbs)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Could not run command: %#v\", err))\n\t}\n\n\tvar dbName string\n\tvar found bool\n\tfor _, database := range dbs[\"databases\"].(primitive.A) {\n\t\td := database.(map[string]interface{})\n\t\tif dbName, found = d[\"name\"].(string); !found {\n\t\t\tnoNameDBs++\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"Working on \", dbName)\n\t\t// Find out the number of indexes\n\t\tcols, err := conn.GetDBWithoutIndexing(dbName).ListCollectionNames(context.Background(), bson.M{})\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"Could not run command: %#v\", err))\n\t\t}\n\t\tfor _, col := range cols {\n\t\t\tcur, err := conn.GetDBWithoutIndexing(dbName).C(col).Indexes().List(context.Background())\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Errorf(\"could not get the indices for %s.%s: %v\", dbName, col, err))\n\t\t\t}\n\t\t\tindices := []KV{}\n\n\t\t\terr = cur.All(context.Background(), &indices)\n\t\t\tif len(indices) == 0 {\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Errorf(\"could not get the indices for %s.%s: %v\", dbName, col, err))\n\t\t\t}\n\t\t\tfor _, compoundIndex := range indices {\n\t\t\t\tfor index := range compoundIndex.Key {\n\t\t\t\t\tfmt.Printf(\"%s\\n\", index)\n\t\t\t\t}\n\t\t\t}\n\t\t}\nListDatabasesListCollectionNamesIndexes().List()for _, db := range conn.ListDatabases() {\n for _, coll := range db.ListCollections() {\n for _, index := range coll.ListIndexes() {\n fmt.Println(index.GetName(), index.GetKeys())\n index.Drop()\n }\n }\n}\n",
"text": "Alright - with the output you provided I was able to get a code snippet working, but man, it’s not pretty -So essentially, I call ListDatabases using an admin command, parse that raw output, call ListCollectionNames, then for each collection, call Indexes().List(). Whew!\nIt would be so nice if I could do this all like this:For listing database metadata, I don’t need access to the raw cursor - it’s not enough data slices to worry about in memory.",
"username": "TopherGopher"
},
{
"code": "Collection.ListIndexesIndexView.ListClient.ListDatabasesmongo.ListDatabasesResultRunCommandIndexView.ListObjectsIndexView.DropOne(index.Name)",
"text": "We’re not in a position to change the API quite that drastically and maintain separate APIs for Collection.ListIndexes and IndexView.List. Also, by the nature of them being commands that do network round trips, all of those function calls need to return errors, making them not directly usable in a for loop.For listing databases, there is already a Client.ListDatabases method which returns a mongo.ListDatabasesResult so you shouldn’t have to use RunCommand for this.I do think having access to the raw cursor is useful in some circumstances. For example, if the server adds new fields, a user upgrade server versions without necessarily upgrading driver versions and have those new fields through the raw cursor results. However, I do think we can talk about adding something like IndexView.ListObjects which could return a slice of index specifications. From there, you could easily get the names and keys of the index and call IndexView.DropOne(index.Name).",
"username": "Divjot_Arora"
},
{
"code": "Client.ListDatabaseObjects()ListDatabases()[]mongo.ListDatabaseResult{}IndexView.ListObjects()",
"text": "Ahhhh - that’s how you use ListDatabases - I couldn’t for the life of me figure out the type it should unpack - thank you \nHow would you feel about adding Client.ListDatabaseObjects()? Which would be an ease of use wrapper around ListDatabases() that would just return []mongo.ListDatabaseResult{}? For when we don’t need the fine-grained control that a cursor affords?\nI 100% agree having the ability to get at the raw cursor can be super useful, but it’s rare that I really need to use it. When I have to choose between short readable code and something super clever that only saves a couple fractions of a millisecond, I usually go the short route to promote maintainable code. Personal preference though.I could see us spawning go routines as the cursor returns or using REST-stream or something nifty like that for a really large result, but usually we’re unpacking the full result into a slice or map that has to be returned to another function using the default connection values.\nI would LOVE IndexView.ListObjects(), that would be amazing.",
"username": "TopherGopher"
},
{
"code": "ListDatabaseObjectsListDatabsaesResultresult.DatabaseSpecifications",
"text": "I’m a little confused about your request for ListDatabaseObjects. Currently, ListDatabsaesResult is defined as mongo package - go.mongodb.org/mongo-driver/mongo - Go Packages and contains an array of database specifications. You should be able to iterate over result.DatabaseSpecifications and get the relevant information you need for each database. Am I missing something?",
"username": "Divjot_Arora"
},
{
"code": "",
"text": "Oh. Darn it. I didn’t see it hanging off of the connection. I didn’t realize it was on the client, but that makes sense. I’ll change to using that for the top-level loop. I still think a ListCollectionObjects() though would be nice.",
"username": "TopherGopher"
}
] | Fetching index metadata | 2020-03-03T05:11:54.438Z | Fetching index metadata | 3,526 |
null | [
"java",
"production"
] | [
{
"code": "java-driver",
"text": "Version 4.0.0 of the MongoDB Java Driver has been released.Please review the full release notes at https://github.com/mongodb/mongo-java-driver/releases/tag/r4.0.0.Please feel free to post any questions on the MongoDB Community forum in the Drivers, ODMs, and Connectors category tagged with java-driver. Bug reports should be filed against the JAVA project in the MongoDB JIRA.The JVM Drivers team",
"username": "Jeffrey_Yemin"
},
{
"code": "",
"text": "",
"username": "system"
}
] | MongoDB Java Driver 4.0.0 Released | 2020-03-05T16:48:45.184Z | MongoDB Java Driver 4.0.0 Released | 2,438 |
null | [] | [
{
"code": "",
"text": "Is there a way to loop over all Realms within an instance if I am an Administrator for that Realm? From GlobalNotifications I already know that I can get notified for all realms in in Instance but I couldn’t find a way to manually loop over all Realms.Thanks in advance!Kind regardsMario",
"username": "Mario_Henkel"
},
{
"code": "",
"text": "When you say ‘loop over’ what do you mean specifically? What platform are you coding for?",
"username": "Jay"
}
] | Loop over all Realms of an instance | 2020-02-25T17:48:55.658Z | Loop over all Realms of an instance | 1,661 |
null | [
"atlas-search"
] | [
{
"code": "",
"text": "In reviewing the information at\nhttps://docs.atlas.mongodb.com/reference/full-text-search/highlighting/\nthe examples with the fruits all work fine. I was using FTS to search sample stock analyst\nreports. For the matching documents, the highlights texts element values seem to be only\nbe based on sentences. For example, if there are 4 sentences before a sentence containing the\nsearch term. I was expecting to see the text of those 4 sentences as part of the surrounding text\nof the match. Instead, I just see only the sentence 5 pieces. The fruit examples are all single sentence\nin nature. I could not find any mention of such a sentence-centric behavior. Maybe I missed something?Also, deeming a sentence to stop on a dot is incorrect when the text has something like “MongoDB, Inc. blah blah blah”. It stops after “Inc.”. Perhaps the same issue arises with salutations like Mr. and Mrs. ?",
"username": "Bill_Reynolds"
},
{
"code": "",
"text": "Hi Bill,I don’t totally follow the behavior you’re describing. If you could share a piece of a document you are querying and the results you are receiving hopefully I can help.",
"username": "timfrietas"
},
{
"code": "db.stock_news.remove( {} );\ndb.stock_news.insertMany([\n\t{ author: \"Nasdaq Technology Sector Update\", \n\t\ttext: \"Technology giants were gaining Thursday. Early movers include MongoDB, Inc. which gained more than 20%. Microsoft also gained 8%.\"},\n\t{ author: \"PRNewswire\",\n\t\ttext: \"Hello world. MongoDB, Inc. (NASDAQ: MDB) is the leading modern database platform. Where is this text?\"},\n] );\n{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"text\": {\n \"type\": \"string\",\n \"analyzer\": \"lucene.standard\",\n \"multi\": {\n \"keywordAnalyzer\": {\n \"type\": \"string\",\n \"analyzer\": \"lucene.keyword\"\n }\n }\n }\n }\n }\n}\ndb.stock_news.aggregate([\n {\n $searchBeta: {\n index: \"myFtsIndex\",\n \"search\": {\n \"query\": \"MongoDB\",\n \"path\": \"text\"\n },\n \"highlight\": {\n \"path\": \"text\"\n } }\n },\n {\n $project: {\n \"text\": 1,\n \"_id\": 0,\n \"highlights\": { \"$meta\": \"searchHighlights\" }\n }\n }]).pretty()\n{\n \"text\" : \"Hello world. MongoDB, Inc. (NASDAQ: MDB) is the leading modern database platform. Where is this text?\",\n \"highlights\" : [\n {\n \"path\" : \"text\",\n \"texts\" : [\n {\n \"value\" : \"MongoDB\",\n \"type\" : \"hit\"\n },\n {\n \"value\" : \", Inc. \",\n \"type\" : \"text\"\n }\n ],\n \"score\" : 1.8908861875534058\n }\n ]\n}\n{\n \"text\" : \"Technology giants were gaining Thursday. Early movers include MongoDB, Inc. which gained more than 20%. Microsoft also gained 8%.\",\n \"highlights\" : [\n {\n \"path\" : \"text\",\n \"texts\" : [\n {\n \"value\" : \"Early movers include \",\n \"type\" : \"text\"\n },\n {\n \"value\" : \"MongoDB\",\n \"type\" : \"hit\"\n },\n {\n \"value\" : \", Inc. which gained more than 20%. \",\n \"type\" : \"text\"\n }\n ],\n \"score\" : 1.4883723258972168\n }\n ]\n}\n",
"text": "Here is an example to show the unexpected results which seem very \"“sentence centric”.The FTS index was defined with the name “myFtsIndex” asA query to demonstrate the unexpected results is\n(Note I did not use the default index name for some reason so you see the index name “myFtsIndex” below)This showsThe first result document is seemingly missing:A similar observation occurs in the second match.\nThis is the basis for my confusion about the highlights data based on the doc at\nhttps://docs.atlas.mongodb.com/reference/full-text-search/highlighting/Thank you for your help,\nBill",
"username": "Bill_Reynolds"
},
{
"code": "",
"text": "Thanks for the detail, Bill. This is indeed the default behavior, which is based on a Lucene option called the Unified Highlighter, which defaults to sentence-level matches. This is our current sane default, but it also unfortunately our only current option as we haven’t made highlighting customizable yet.However I captured your feature request here:Right now Atlas Search highlighting only supports text in the sentence surrounding a match, ignoring any other sentences in a field that have also been indexed.\n\nI would like to be able to change the default behavior to meet my needs.Feel free to vote for it, follow along, and, if you like, comment describing the ideal behavior you would like to see.",
"username": "timfrietas"
},
{
"code": "",
"text": "Hi Tim,\nThank you very much for the sanity check and for confirming my observations. All the FTS examples I found were with one sentence, which did not help my research. While the possible enhancement goes through the process to perhaps be implemented, I suggest adding some text to the documentation page noted above so others are not confused or implement code that results in incorrect output.\nThanks again.",
"username": "Bill_Reynolds"
}
] | Atlas Text Search highlights results - sentence-centric? | 2020-02-28T20:25:22.552Z | Atlas Text Search highlights results - sentence-centric? | 3,473 |
null | [
"java",
"production"
] | [
{
"code": "java-driver",
"text": "Version 3.12.2 of the MongoDB Java Driver has been released.Please review the full release notes at Release 3.12.2 · mongodb/mongo-java-driver · GitHub.Please feel free to post any questions on the MongoDB Community forum in the Drivers, ODMs, and Connectors category tagged with java-driver . Bug reports should be filed against the JAVA project in the MongoDB JIRA.The JVM Drivers team",
"username": "Jeffrey_Yemin"
},
{
"code": "",
"text": "",
"username": "system"
}
] | MongoDB Java Driver 3.12.2 Released | 2020-03-04T22:51:10.857Z | MongoDB Java Driver 3.12.2 Released | 2,461 |
null | [
"atlas"
] | [
{
"code": "",
"text": "Hi, right now I´m testing Atlas with a Free Tier, this is M0 instance.\nI have two questions.\nFirst:\nI can upgrade this instance to another… for example M30, make some test and then return to the M0?\nIf this is yes. I only will pay the M30 cost the time I used it??.\nFor example, I upgrade the instance for one hour, and then return to M0 and I will pay one our of use??\nO maybe the best way is create a new cluster M30, make the test, and destroy the instance. In this case, I only will pay for that hour??.\nThanks",
"username": "Roberto_Gutierrez"
},
{
"code": "M10+M0M2/M5M0M2/M5M10",
"text": "Hey @Roberto_GutierrezFrom the docsFREE AND SHARED-TIER CLUSTER CONSIDERATIONSSo your option b would be the only choice. Create a new M30 then shut it down after the hour. And you should only be charged for what time and data you use.",
"username": "Natac13"
},
{
"code": "",
"text": "Hi Natac13, thanks! It´s clear now!\nBest regards!",
"username": "Roberto_Gutierrez"
},
{
"code": "",
"text": "Also for completeness, note that an Atlas cluster (replica set) that gets scaled up to an M30 can always be scaled back down to an M10, and/or a dedicated (M10+) cluster can be paused/resumed at any time: once paused, the cost drops drastically to just cover storage. Note that a paused cluster by default un-pauses automatically in 30 days.",
"username": "Andrew_Davidson"
}
] | For Free Tier To Dedicated Cluster | 2020-03-05T08:26:46.189Z | For Free Tier To Dedicated Cluster | 2,475 |
null | [
"database-tools",
"backup"
] | [
{
"code": "",
"text": "Anyone knows how to take backup of mongodb users and roles without data ?",
"username": "Ajithkumar_S"
},
{
"code": "system.rolessystem.users",
"text": "Hey @Ajithkumar_SThe roles are stored on system.roles in the admin db and users are the system.users collection. Therefore you can take backups of just those collections if you want.",
"username": "Natac13"
},
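A minimal sketch of dumping just those two collections with mongodump (the port and output directory are illustrative):

```
mongodump --port 27018 --db admin --collection system.users --out /dump
mongodump --port 27018 --db admin --collection system.roles --out /dump
```

Note that, as the error later in this thread shows, restoring these dumps is sensitive to the target server's auth schema version, so the restore side needs care.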
{
"code": "",
"text": "Hi Natac, I have already tried this.I can able to take backup.But while importing to admin database its shows some error.",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "Pasted the error below,[root@ajith\n/]# /mongo_ak/mongodb4_0/bin/mongorestore --port=27018 --db admin --dir=/dump/admin/system.users.bson\n2020-03-02T12:31:46.664+0000\tchecking for collection data in /dump/admin/system.users.bson\n2020-03-02T12:31:46.664+0000\tassuming users in the dump directory are from <= 2.4 (auth version 1)\n2020-03-02T12:31:46.665+0000\tFailed: the users and roles collections in the dump have an incompatible auth version with target server: cannot restore users of auth version 1 to a server of auth version 5\n2020-03-02T12:31:46.665+0000\t0 document(s) restored successfully. 0 document(s) failed to restore.\n[root@casdendra02 /]#",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "Are the versions of Mongod the same?\nThis error maybe outside the level of my knowledge… sorry.",
"username": "Natac13"
},
{
"code": "",
"text": "Yes @Natac13. Both the MongoDB versions are same.We don’t have any direct method to dump users and roles with out data.But I have find a alternative method. To do this.",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "Sorry for the typo,Yes @Natac13. Both the MongoDB versions are same.We don’t have any direct method to (dump and restore) users and roles with out data.But I have find a alternative method. To do this.",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "The admin is a special collection, I don’t think you can just dump/restore like that (more knowledgeable poster required)That said mongorestore has a flag for restoring db users and roles.Are you making a seed database? Once the users are created drop collections and then dump that whole mongodb. Then restore that with the previously mentioned flag.",
"username": "chris"
},
{
"code": "",
"text": "Pasted the error below,[root@ajith\n/]# /mongo_ak/mongodb4_0/bin/mongorestore --port=27018 --db admin --dir=/dump/admin/system.users.bson\n2020-03-02T12:31:46.664+0000 checking for collection data in /dump/admin/system.users.bson\n2020-03-02T12:31:46.664+0000 assuming users in the dump directory are from <= 2.4 (auth version 1)\n2020-03-02T12:31:46.665+0000 Failed: the users and roles collections in the dump have an incompatible auth version with target server: cannot restore users of auth version 1 to a server of auth version 5\n2020-03-02T12:31:46.665+0000 0 document(s) restored successfully. 0 document(s) failed to restore.\n[root@casdendra02 /]#Was the source database upgraded at some point from a MongoDB version lower then 2.6 on it’s way to MongoDB 4.0? Is it possible that the authSchemaUpgrade was never performed on the source during an upgrade?",
"username": "Michael_Grayson"
},
{
"code": "",
"text": "HI @Michael_Grayson,Here, I didn’t upgrade anything.I have just installed the 4.0 binary package and tried the test case.",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "Dear @chris,I know the collections inside the admin database are special collections.But my requirement is, I need to backup the users and roles for the particular database without data and the database size is 3.4 TB.I have tried all the methods available in mongodb but nothing is help for me.After that I made some changes in the dump file after that its works.My question is, any possible ways that anyone know to dump the users and roles directly from MongoDB ?",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "without the data. Is so important.",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "This might be really naive but can’t you achieve what you want with https://docs.mongodb.com/manual/reference/method/db.getUsers/?",
"username": "steevej"
},
{
"code": "",
"text": "HI @steevej,\nUsing db.getUsers(), we can get all the users list, yes we can take this user backup but how will restore.",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "You probably can write a simple .js loop over all documents that calls https://docs.mongodb.com/manual/reference/method/db.createUser/.Note that you do not get any password with db.getUsers() so you will have to generate new password.",
"username": "steevej"
},
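A hedged .js sketch of the loop steevej suggests; the database name is illustrative and the generated password is a placeholder that must be changed, since getUsers() never returns passwords:

```javascript
// Run with: mongo --port 27017 export_users.js  (against the source deployment)
var dbName = "mydb";                          // illustrative
var source = db.getSiblingDB(dbName);

source.getUsers().forEach(function (u) {
  // Print a createUser() call that can be replayed on the target deployment
  print(
    'db.getSiblingDB("' + dbName + '").createUser(' +
    JSON.stringify({ user: u.user, pwd: "CHANGE_ME", roles: u.roles }) +
    ');'
  );
});
```

The printed commands can be reviewed, given real passwords, and then run on the target, which avoids touching admin.system.users directly.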
{
"code": "",
"text": "Yes @steevej,You are correct, that won’t help for backup and restore. It will help to take the users list only.But I have some other methods to backup and restore users separately.",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "If you wanted I will explain the steps.",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "Otherwise we don’t have any option to take backup & restore, users and roles database wise(without data).",
"username": "Ajithkumar_S"
},
{
"code": "",
"text": "I do not need that. And I do not follow anymore.In one postI need to backup the users and roles for the particular database without dataand nowBut I have some other methods to backup and restore users separately",
"username": "steevej"
},
{
"code": "",
"text": "Looks like you just need to dump admin database in its entirety.system.versions contains the schema version which is the error when your are restoring.",
"username": "chris"
}
] | How to take backup of MongoDB users and roles without data? | 2020-03-02T11:56:04.150Z | How to take backup of MongoDB users and roles without data? | 12,671 |
null | [
"replication",
"performance"
] | [
{
"code": "",
"text": "We have two clusters 800-802 and 9000-9002with PSS architecture that receive a heavy influx of writes (~25k messages/sec) into dated schemas. Applications are facing serious latency issues with writes - 800(200mil records behind) and 9000(100mil records behind).· 800-802 - OS: Linux sles12sp3, SATA SSD, CPU: Ivy Bridge, RAM: 250GB, Disk: 4TB (2.5TB used), mongov4.0.3 , w:0 j:true· 9000-9002 - OS: Linux sles12sp3, SATA SSD, CPU: Ivy Bridge, RAM: 250GB, Disk: 9TB (5TB used), mongov4.0.3 , w:0 j:false (although this setting is not safe/recommended app users are willing to take the chances as opposed to having millions of records drop due to insert latency)My recommendation is to set inserts on both clusters to w:1 j:false . While 800 may experience better performance (since it no longer hits on disk journal) 9000 is going to take a further hit with this change.Questions:Will switching to RAID10 provide a significant performance improvement even for SSDs?Will transferring the journal file to a different volume help ?Given that this is a write heavy application are there any cache settings that can be adjusted?",
"username": "Kanishka_Ramamoorthy"
},
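Not from the thread, but as an illustration of question 3: the WiredTiger cache is sized per mongod, for example via the startup flag below. The 100 GB figure is only a placeholder (the default is the larger of 256 MB or 50% of RAM minus 1 GB), and moving the journal usually means symlinking the journal directory under dbPath to a separate volume.

```sh
# Placeholder values; replica set name, paths and cache size are assumptions
mongod --replSet rs800 --port 27018 --dbpath /data/db --wiredTigerCacheSizeGB 100
```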
{
"code": "",
"text": "Will switching to RAID10 provide a significant performance improvement even for SSDs?In my experience, yes, it can give significant performance boost. I’ve used RAID10 and RAID50 on SSD drives, and it has been really good. I suggest you try it out, there are benchmarking tools that you can use to fine tune your setup, and make then educated selection which one you start running in production.\nSorry that I don’t have experience in MongoDB specific situation, so can’t help with 2 & 3. Gut feeling is that moving journal to different drive would help, but haven’t done that.",
"username": "kerbe"
}
] | Insert latency in replica set | 2020-03-03T20:03:18.315Z | Insert latency in replica set | 1,974 |
null | [] | [
{
"code": "",
"text": "Hi ,everyone ,I have followed all the steps but unable to connect and it is showing this error.",
"username": "ROHIT_KUMAR_48314"
},
{
"code": "",
"text": "Hi @ROHIT_KUMAR_48314,Are you connected to any Corporate network / VPN connection ?Can you please make sure you are able to ping and telnet the cluster ?ping cluster0-shard-00-00-jxeqq.mongodb.nettelnet cluster0-shard-00-00-jxeqq.mongodb.net 27017In addition to this, Please confirm that port 27017 is not blocked by clicking http://portquiz.net:27017.If any of this is an issue then I would suggest you to contact your IT department to see if there is a workaround or try to make the request from another location such as your home network.Hope it helps!Thanks,\nShubham Ranjan\nCurriculum Support Engineer",
"username": "Shubham_Ranjan"
},
{
"code": "",
"text": "Hello. I having this problem too. But yesterday was working just fine. I checked the ping, telnet and the port availability and all is working great, but still receive “Server selection timed out after 30000 ms” in Compass.",
"username": "Yamil_67132"
},
{
"code": "",
"text": "Are you able to connect with shell?\nTry to restart compass and see\nIf you are using favorites try to create a new one and check if it works",
"username": "Ramachandra_Tummala"
},
{
"code": "",
"text": "Didn’t tried with shell, but looking in this answer Unable to connect to MongoDB compass (barryoneill answer) I found the solution was checking SRV option (I swear I connect just fine without checking it yesterday).",
"username": "Yamil_67132"
},
{
"code": "",
"text": "Hi @Yamil_67132,This must be a temporary network issue. Please try to connect after some time and hopefully it should be fine.Thanks,\nShubham Ranjan\nCurriculum Support Engineer",
"username": "Shubham_Ranjan"
},
{
"code": "",
"text": "Is this still an issue? I just registered and I am trying to connect and get the same timeout issue.",
"username": "cpwolf3"
},
{
"code": "",
"text": "I solved the problem by downloading the “Community” edition rather than the version linked in the course. All seems well now.",
"username": "cpwolf3"
},
{
"code": "",
"text": "Hi @cpwolf3,The Community version of Compass does not have Schema Tab, which you need to have in Compass in order to do the labs.It might be a network/firewall related issue. Please try to connect from some other location such as your home network.If the issue still persists then please feel free to get back to us.Thanks,\nShubham Ranjan\nCurriculum Support Engineer",
"username": "Shubham_Ranjan"
},
{
"code": "",
"text": "You should install mongodb compass from the link in the course. You might have installed community server.",
"username": "hemanth_98065"
},
{
"code": "",
"text": "C:\\Users\\Deevi>ping cluster0-shard-00-00-jxeqq.mongodb.netPinging ec2-34-195-121-130.compute-1.amazonaws.com [34.195.121.130] with 32 bytes of data:\nReply from 34.195.121.130: bytes=32 time=258ms TTL=48\nReply from 34.195.121.130: bytes=32 time=257ms TTL=48\nReply from 34.195.121.130: bytes=32 time=258ms TTL=48\nReply from 34.195.121.130: bytes=32 time=257ms TTL=48Ping statistics for 34.195.121.130:\nPackets: Sent = 4, Received = 4, Lost = 0 (0% loss),\nApproximate round trip times in milli-seconds:\nMinimum = 257ms, Maximum = 258ms, Average = 257msThis server listens on all TCP ports, allowing you to test any outbound TCP port.You have reached this page on port 27017 .Your network allows you to use this port. (Assuming that your network is not doing advanced traffic filtering.)Network service: unknown\nbut still im getting the same error",
"username": "vivek_25217"
},
{
"code": "",
"text": "Hi @vivek_25217,Even though these basic tests passes, network is still a problem. Have you tried connecting from some other network such as your home network/ mobile hotspot ?If nothing works then you can import the data locally on your machine and complete the labs. The instructions are mentioned in this post.Hope it helps!Thanks,\nShubham Ranjan\nCurriculum Services Engineer",
"username": "Shubham_Ranjan"
},
{
"code": "",
"text": "@Shubham_Ranjan\nI think atlas cluster which course is trying to give access, have not set NETWORK ACCESS settings in atlas dashboard, IP WHITELIST as “ALLOW ACCESS FROM ANYWHERE”",
"username": "Sahil_66399"
},
{
"code": "",
"text": "Hi @Sahil_66399,Are you talking about the class atlas cluster ?If yes, then I can confirm that this cluster is publicly available. If in case you are having any problem then please share a screenshot of the error message that you are getting.Thanks,\nShubham Ranjan\nCurriculum Services Engineer",
"username": "Shubham_Ranjan"
},
{
"code": "",
"text": "class atlas clusterYes I was mistaken while filing details appropriately.\nIts working now fine.\nI am really amazed seeing such nice course and active discussion forum which shows everyone is taking interest learning mongoDB and practicing hands on labs.I am really thankful to Team mongoDB for giving us such remarkable platform and access to live cluster, labs, everything to learn about mongoDB.",
"username": "Sahil_66399"
},
{
"code": "",
"text": "Hi @Sahil_66399,I’m so glad that you found the course helpful .And also let me take this opportunity to thank some of our Super Users who are constantly helping our learners in the discussion forum @007_jb, @Ramachandra_37567 and @steevej-1495 .Thanks,\nShubham Ranjan\nCurriculum Services Engineer",
"username": "Shubham_Ranjan"
},
{
"code": "",
"text": "I was getting this error when attempting to connect to my sandbox using Compass. I was able to connect successfully to my sandbox using the shell. I loaded the data for the movies and was in the part of the course where you are supposed to connect to your sandbox using Compass. I tried following the instructions in the video, but some things have changed, and who knows, maybe I made a typo.What fixed it for me was copying and pasting the connection string for Mongo Compass on Atlas for my sandbox cluster, and then changing ONLY the username and password, without clicking around on anything else.\n2020-02-202736×1824 533 KB\n",
"username": "William_Moore_97877"
},
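For reference, the Compass/SRV string copied from Atlas has roughly this shape; the cluster host and credentials below are placeholders.

```
mongodb+srv://<username>:<password>@<your-cluster>.mongodb.net/test
```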
{
"code": "",
"text": "Hi @William_Moore_97877,Thanks for sharing. I believe there must have been a typo in the connection string which was causing this issue.Thanks,\nShubham Ranjan\nCurriculum Services Engineer",
"username": "Shubham_Ranjan"
},
{
"code": "",
"text": "VPN connectionHi @Shubham_Ranjan,i cannot ping the cluster but able to confirm that port 27017 is not blocked. i’m trying to connect at work - what should i ask my administrator to do for me?thank you!",
"username": "jiajing_zachmy_78370"
},
{
"code": "",
"text": "Hi @jiajing_zachmy_78370,The class atlas cluster consists of three different nodes and you must be able to make outgoing requests to all these three nodes.cluster0-shard-00-00-jxeqq.mongodb.net\ncluster0-shard-00-01-jxeqq.mongodb.net\ncluster0-shard-00-02-jxeqq.mongodb.netHope it helps!Thanks,\nShubham Ranjan\nCurriculum Services Engineer",
"username": "Shubham_Ranjan"
}
] | "Server selection timed out after 30000 ms" error during mongodb compass connection | 2019-12-17T03:41:11.554Z | “Server selection timed out after 30000 ms” error during mongodb compass connection | 68,844 |
[
"dot-net"
] | [
{
"code": " redakMongo.RedakDocuments.InsertOneAsync(redakDocument);\n\n if (redakDocument.Id == ObjectId.Empty) continue;\n",
"text": "Hello,I am trying to transfer data from sql to c#. I am using the offical MongoDB Nu Get package driver … V2.7.0I insert the object async in a while loop and then check the object.id (see below)The first question is does the data transfer occur when 1 replica node is down??Because i don’t know if a node was down during the transfer, the only thing that i have down is to change the primary node.But the fact is i got an object.id back from the mongo driver and the data was not into the database?I changed the primary node and start the data transfer again and it was working?How can i be sure that this erro doesn’t occur again? How can i get a object.id back and the data is not in the database?\nHere is the repica setting\nreplicaSetting1152×907 15.4 KB\nBR Christian",
"username": "Christian_Reismullne"
},
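A hedged sketch of an acknowledged insert with the .NET driver, reusing the names from the snippet above; the majority write concern is an assumption, not the poster's actual setting. With w:0 the driver generates the ObjectId client-side, so a non-empty Id by itself does not prove the document reached the replica set.

```csharp
// Sketch: request acknowledgement (and await the call) before trusting the generated _id
var acknowledged = redakMongo.RedakDocuments.WithWriteConcern(WriteConcern.WMajority);

await acknowledged.InsertOneAsync(redakDocument);   // throws if the write cannot be acknowledged
```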
{
"code": "",
"text": "Hi Christian - I’m not sure what’s going on here; can you share your connection string? I wonder if it is pointing to a specific server?\n(as an aside, the latest driver version is 2.10.2)\nCheers,\nCaleb",
"username": "Caleb_Thompson"
},
{
"code": "",
"text": "Hello thanks for answering. …\nToday i am trying another test and change during the data Transfer the primary replica to a secondary\nwith command rs.stepDown().After ths change the c# transfer programm transfers the data and i get the object.id back but the data was not in the database.Then when i set the replica set back to standard and it was working …The connection string looks like the mongo docu:mongodb:// : @ 1. .local:27017, 2. .local:27017, 3. .local:27017/admin?replicaSet= ******&readPreference=secondaryPreferred.Is it not possible to change the replica set during the datatransfer?Thanks in advance",
"username": "Christian_Reismullne"
}
] | Data Lost during transfering data from c# program | 2020-03-03T12:21:34.613Z | Data Lost during transfering data from c# program | 1,670 |
null | [
"queries"
] | [
{
"code": "",
"text": "_ID index is using $in to query multiple documents for explain analysis. The number of scanned indexes is more than that of scanned documents",
"username": "1117"
},
{
"code": "explain(\"allPlansExecution\")",
"text": "Please provide more details to help investigate your issue:Thanks,\nStennie",
"username": "Stennie_X"
}
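For example, an execution-stats view of such a query can be captured in the shell like this (collection name and ids are placeholders); comparing totalKeysExamined with totalDocsExamined in the output is what the question is about.

```js
// Hypothetical collection and ids
db.mycoll.find({ _id: { $in: [1, 2, 3] } }).explain("allPlansExecution")
```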
] | _ID index is using $in | 2020-03-02T11:56:25.700Z | _ID index is using $in | 1,269 |
null | [
"performance"
] | [
{
"code": "",
"text": "Hi everyone,I have an environment with low use of cpu, zlib compression is more compacted than snappy but uses more cpu although my environment has a lot free cpu. Do you recommend me changing to zlib or there some pitfalls ?In 4.2 there is a new compression Zstd, anyone knows has experience with the new compression.Thanks.Alexandre Araujo",
"username": "Alexandre_Araujo"
},
{
"code": "",
"text": "@Alexandre_Araujo Zstandard is meant to compare very favourably to zlib (similar compression ratio with improved compression/decompression speed), however the ultimate outcome will depend on your data and workload. The best way to compare is by testing in a representative environment for your use case.For more information including some general benchmarks, see the Zstandard site.Regards,\nStennie",
"username": "Stennie_X"
},
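As a concrete illustration (not from the thread): on 4.2+ the compressor can be chosen per collection from the shell, or server-wide via storage.wiredTiger.collectionConfig.blockCompressor; the collection name below is a placeholder.

```js
// Sketch: create a collection that uses zstd instead of the server default (snappy)
db.createCollection("events_zstd", {
  storageEngine: { wiredTiger: { configString: "block_compressor=zstd" } }
})
```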
{
"code": "",
"text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.",
"username": "system"
}
] | Compression with zlib, snappy and Zstd | 2020-03-04T17:45:38.863Z | Compression with zlib, snappy and Zstd | 8,927 |
null | [] | [
{
"code": "",
"text": "Not sure how to describe these windows, so I’ll use window1 for the Terminal window where I’d normally start Mongo by typing “mongod”, and window2 to describe the Terminal window where I start the shell by typing “mongo”.\nWhen I paste the command from Lesson 2.3, part 1 into window1, my installation appears to connect to the database. However, when I enter “show collections” in window1, nothing happens:\n\nSwitching over to window2 and entering the same command gives the same result (namely, nothing):\n\nWhat am I missing here?",
"username": "Andrew_D_Taylor_65592"
},
{
"code": "",
"text": "In the top window you are connected to the shared cluster. There is no collection probably because the default database has none. You may use show dbs to show the list of databases and then use DatabaseName to select the database specified in the course.In the second window you are connected to a local instance.",
"username": "steevej"
},
{
"code": "mongomongod/testuse database_namemongodmongo",
"text": "To add…Running mongod without additional arguments will create a local server instance that will run on your local machine implicitly using the following default options:If you look at the fourth line, you’ll see that you’re connected to 127.0.0.1 at port 27017Running mongo without additional arguments will connect to an instance running on your local machine using the default host and port mentioned above, however, because you didn’t specify a db, it will connect to the test db.PS: In your first sentence, you’ve got the windows mixed up.",
"username": "007_jb"
},
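A rough illustration of the defaults described above; the Atlas host and user are placeholders.

```sh
mongod                          # local server on 127.0.0.1:27017, default dbPath /data/db
mongo                           # connects to mongodb://127.0.0.1:27017/test on that local instance
mongo "mongodb+srv://<your-cluster>.mongodb.net/test" --username <user>   # class/Atlas cluster instead
```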
{
"code": "",
"text": "Actually, I don’t think I had the windows mixed up; they were labeled exactly as I intended. However, based on careful reading of your post, it seems I entered the connection string into what I referred to as window1 (where I would normally type “mongod”), instead of into window2 (where I would start the mongo shell). Quitting Terminal and pasting the connection string into a new Terminal window did the trick.Thanks!",
"username": "Andrew_D_Taylor_65592"
},
{
"code": "",
"text": "",
"username": "system"
}
] | Can't connect to lab database via shell | 2020-03-04T21:11:36.871Z | Can’t connect to lab database via shell | 2,078 |
null | [
"java"
] | [
{
"code": "db.user.createIndex({ userName: 1 },{ collation: {locale: 'en', strength: 2}});\ndb.names.find( { userName: \"betsy\" } ).collation( { locale: 'en' } )\n",
"text": "How to search case insensitive with java driver?I created an index:I can search successfully using shell:This returns expected 1 recordI cannot find similar method collation on the java driver api.Thanks",
"username": "Eric_Loew"
},
{
"code": "com.mongodb.client.MongoCollectionfind()collation()finddb.collection.find()cursor",
"text": "com.mongodb.client.MongoCollection’s find() method returns a FindIterable; it has a collation() method where you can specify the collation for the find method returned cursor object.db.collection.find() returns a cursor, and the cursor object has the collation() method and corresponds to the above mentioned Java method .",
"username": "Prasad_Saya"
},
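A plain-Java version of the same idea, as a hedged sketch; the connection string, database and collection names are assumptions, and the field name follows the index from the question.

```java
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Collation;
import com.mongodb.client.model.CollationStrength;
import com.mongodb.client.model.Filters;
import org.bson.Document;

// Assumes the strength-2 "en" index from the question already exists on userName
MongoCollection<Document> users = MongoClients.create("mongodb://localhost:27017")
        .getDatabase("test").getCollection("user");

Collation caseInsensitive = Collation.builder()
        .locale("en")
        .collationStrength(CollationStrength.SECONDARY)
        .build();

for (Document doc : users.find(Filters.eq("userName", "betsy")).collation(caseInsensitive)) {
    System.out.println(doc.toJson());
}
```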
{
"code": "val collation = Collation.builder()\n\t.locale(\"en\")\n\t.collationStrength(CollationStrength.SECONDARY)\n\t.build()\n\nval list = collection.find(and(eq(\"familyId\", item.familyId), eq(\"userName\", item.userName)))\n\t.collation(collation)\n\t.toList()\n",
"text": "Thanks for the help. Getting expected results now using code below.",
"username": "Eric_Loew"
}
] | Case insensitive query with Java driver | 2020-03-04T12:38:30.867Z | Case insensitive query with Java driver | 4,671 |
null | [
"aggregation"
] | [
{
"code": "",
"text": "Hi everyone\nCan i get some help on the following?\nThis is my current code:\ndb.getCollection(“JS_products”).aggregate([{$match:{JS_asin:“Product ID”}},{$match:{JS_categories:“Shorts”}}])However the question asks the following:\nList the product ID and title of all products in the “Shorts” category. No other\nproduct details are required. I need to use the aggregation pipeline method.\nCan someone help out as to what to do? i should end up with 8 or more outputs.",
"username": "Jeffery_Sharjah"
},
{
"code": "",
"text": "It is unclear but you might want to look at https://docs.mongodb.com/manual/reference/operator/aggregation/project/",
"username": "steevej"
},
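As a generic shape for that kind of pipeline (hedged: JS_title is an assumed field name; only JS_asin and JS_categories appear in the question):

```js
db.getCollection("JS_products").aggregate([
  { $match: { JS_categories: "Shorts" } },            // keep only products in the Shorts category
  { $project: { _id: 0, JS_asin: 1, JS_title: 1 } }   // return just the product ID and title
])
```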
{
"code": "",
"text": "the question asks the followingHi Jeffery,It sounds like this might be related to an assignment or tutorial.To help with your learning, I suggest posting:If this is related to a MongoDB University course, please ask in the course forum. We generally want to avoid posting spoilers or solutions for homework.Regards,\nStennie",
"username": "Stennie_X"
}
] | Help with aggregation query | 2020-03-04T15:50:00.325Z | Help with aggregation query | 1,462 |
null | [] | [
{
"code": "",
"text": "Hey all,Been using Mongo for a bit over a year. Really enjoyed last year’s World and hoping this year will be on still!Cheers !",
"username": "jeremyfiel"
},
{
"code": "",
"text": "Welcome @jeremyfiel! Looking forward to participating in this community with you! Do you use MongoDB as a Developer or from a DevOps perspective?",
"username": "Michael_Grayson"
},
{
"code": "",
"text": "Hey Michael. Mostly as a developer but still in the learning phase. Honestly have spent as much time on it as I would’ve liked in the past year. Trying to get going again",
"username": "jeremyfiel"
}
] | Hello from New Jersey! | 2020-03-04T18:35:45.807Z | Hello from New Jersey! | 2,329 |
null | [
"aggregation"
] | [
{
"code": "[{$match: {\n\n dtNews:{$gt: \"2019-10-01\"}\n\n}}, {$project: {\n\n str_title:1\n\n}}, {$group: {\n\n _id: null,\n\n tex: {\n\n $addToSet: \"$str_title\"\n\n }\n\n}}]\n",
"text": "Hi everyoneI was trying to retrieve all occurrences of a headline field in my collection but I got only an array of my headlines. Could you tell me if it is possible to get an string of this array?Below my pipeline:Ezequias.",
"username": "Ezequias_Rocha"
},
{
"code": "",
"text": "Could you please give us an sample document?",
"username": "coderkid"
},
{
"code": "[{\n $match: {\n dtNews: {\n $gt: \"2019-10-01\"\n }\n }\n } {\n \"$group\": {\n \"_id\": {\n \"__alias_0\": \"$str_title\"\n },\n \"__alias_1\": {\n \"$sum\": 1\n }\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"__alias_0\": \"$_id.__alias_0\",\n \"__alias_1\": 1\n }\n },\n {\n \"$project\": {\n \"text\": \"$__alias_0\",\n \"size\": \"$__alias_1\",\n \"_id\": 0\n }\n }\n]",
"text": "",
"username": "coderkid"
},
{
"code": "news{ _id: 1, title: \"hello world\", content: \"some content 1\" },\n{ _id: 2, title: \"Hello World\", content: \"some content 2\" },\n{ _id: 3, title: \"Wonderful World\", content: \"some content 3\" },\n{ _id: 4, title: \"Lovely World\", content: \"some content 4\" }\ndb.news.aggregate( [\n { \n $group: { \n _id: null, \n titlesArr: { $push: \"$title\" } \n } \n },\n { \n $project: {\n _id: 0, \n tex: { \n $reduce: {\n input: \"$titlesArr\",\n initialValue: \"\",\n in: {\n $concat : [\"$$value\", \"$$this\", \" \"]\n }\n }\n }\n } \n }\n] )\n{ \"tex\" : \"hello world Hello World Wonderful World Lovely World \" }",
"text": "I have sample news documents like this:and, using this aggregation query:I get this result:{ \"tex\" : \"hello world Hello World Wonderful World Lovely World \" }",
"username": "Prasad_Saya"
},
{
"code": "db.getCollection(\"news\").distinct(\"title\");\n[\n \"Hello World\", \n \"Lovely World\", \n \"Wonderful World\", \n \"hello world\"\n]\n",
"text": "I was trying to retrieve all occurrences of a headline field in my collectionI think I misunderstood you before, looks like you are looking for distinct values;result:Is this the output you are looking for?",
"username": "coderkid"
},
{
"code": "",
"text": "Thank you so much. It worked perfectly.",
"username": "Ezequias_Rocha"
},
{
"code": "",
"text": "Dear @Prasad_SayaYour solution was perfect. I don’t know if it is appropriate to ask here but I would like to know why this concatenation does not appears in MongoDB Compass Agregation tab (not even in a view).Could you give me some advice?I meant with large dataset. With your data sample it appears ok but with more information the presentation at Compass does not show full data.I can only see it at console mode.Sincerely\nEzequias Rocha",
"username": "Ezequias_Rocha"
},
{
"code": "$group$project",
"text": "With a larger data set (about 10,000 documents) I found that in the Compass’s Aggregation, the $group stage output showed all data. But, the $project stage’s result view showed only first few words of the text, as in the attached picture.I am using MongoDB version 4.2 and Compass 1.19.\ncompass_aggr2956×537 88 KB\n",
"username": "Prasad_Saya"
},
{
"code": "",
"text": "Have you try disabling the “SAMPLE MODE” and “AUTO PREVIEW” options. May be it does that to make building the pipeline more responsive.",
"username": "steevej"
},
{
"code": "",
"text": "I am using 1.20.5 and the visualization isn’t also not so ok.I noticed that if I create a view of this agregation I could see it in the second type of visualization as you can see below:\nview1629×122 12.6 KB\nI think it is a solution but it would wrap text if it gets over 200 columns.Sincerely\nEzequias Rocha",
"username": "Ezequias_Rocha"
},
{
"code": "",
"text": "How to modify this options on Compass @steevej?Ezequias.",
"username": "Ezequias_Rocha"
},
{
"code": "",
"text": "Thank you @coderkid I was trying to get only one string with all words collected. I already get the correct agregation.Best regards\nEzequias",
"username": "Ezequias_Rocha"
},
{
"code": "",
"text": "If you look at the screenshot of Prasad_Saya you will see two options at the right almost at the top.",
"username": "steevej"
},
{
"code": "",
"text": "Thank you @steevej it does not change anything for me.",
"username": "Ezequias_Rocha"
}
] | Word cloud of a field using aggregation | 2020-02-28T20:25:01.106Z | Word cloud of a field using aggregation | 2,945 |
null | [] | [
{
"code": "",
"text": "I would like to know the case-sensitivity rules that can be applied to the JSON Document field key-value pair when searching",
"username": "Okposong_29357"
},
{
"code": "",
"text": "Hey @ongSeon_46628You could try this case insensitive index",
"username": "Natac13"
},
{
"code": "",
"text": "ok. thanks",
"username": "Okposong_29357"
},
{
"code": "",
"text": "Hi @Okposong_29357,Please read about the $regex operator. Let me know if you have any other questions.Thanks,\nShubham Ranjan\nCurriculum Services Engineer",
"username": "Shubham_Ranjan"
},
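Putting the two suggestions side by side as a sketch; collection and field names are placeholders.

```js
// Option 1: collation - the index and the query must use the same locale/strength
db.users.createIndex({ name: 1 }, { collation: { locale: "en", strength: 2 } })
db.users.find({ name: "alice" }).collation({ locale: "en", strength: 2 })

// Option 2: case-insensitive regex (simple, but arbitrary patterns can't use the index efficiently)
db.users.find({ name: { $regex: "^alice$", $options: "i" } })
```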
{
"code": "",
"text": "",
"username": "system"
}
] | Case sensitivity of JSON Document | 2020-03-04T10:31:03.882Z | Case sensitivity of JSON Document | 1,789 |