Dataset columns: image_url (string, 113–131 chars), tags (list), discussion (list), title (string, 8–254 chars), created_at (string, 24 chars), fancy_title (string, 8–396 chars), views (int64, 73–422k)
null
[ "dot-net", "crud", "field-encryption" ]
[ { "code": "public class Foo\n{\n public string Id { get; set; }\n public string Name { get; set; }\n public string MotherName { get; set; }\n public IReadOnlyList<Bar> Bars { get; init; }\n}\n\npublic class Bar\n{\n public string Name { get; set; }\n public DateTime CreatedAt { get; set; }\n}\nCSFLE Schema Mappublic class JsonSchemaHelper\n{\n\tprivate static readonly string RANDOM_ENCRYPTION_TYPE = \"AEAD_AES_256_CBC_HMAC_SHA_512-Random\";\n private static readonly string DETERMINISTIC_ENCRYPTION_TYPE = \"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic\";\n\t\t\n\tprivate static BsonDocument CreateEncryptedMetadata(string dataEncriptionKeyBase64)\n {\n var keyId = new BsonBinaryData(Convert.FromBase64String(dataEncriptionKeyBase64), BsonBinarySubType.UuidStandard);\n \n return new BsonDocument(nameof(keyId), new BsonArray(new[] { keyId }));\n }\n\t\n\tprivate static BsonDocument CreateEncryptedField(string bsonType, bool isDeterministic)\n {\n return new BsonDocument\n {\n {\n \"encrypt\",\n new BsonDocument\n {\n { \"bsonType\", bsonType },\n { \"algorithm\", isDeterministic ? DETERMINISTIC_ENCRYPTION_TYPE : RANDOM_ENCRYPTION_TYPE }\n }\n }\n };\n }\n\t\n\tpublic static BsonDocument CreateJsonSchemaFoo(string dataEncriptionKeyBase64)\n\t{\n\t\treturn new BsonDocument\n\t\t{\n\t\t\t{ \"bsonType\", \"object\" },\n\t\t\t{ \"encryptMetadata\", CreateEncryptedMetadata(dataEncriptionKeyBase64) },\n\t\t\t{\n\t\t\t\t\"properties\",\n\t\t\t\tnew BsonDocument\n\t\t\t\t{\n\t\t\t\t\t{ JsonNamingPolicy.CamelCase.ConvertName(nameof(Foo.Name)), CreateEncryptedField(bsonType: \"string\", isDeterministic: true) },\n\t\t\t\t\t{ JsonNamingPolicy.CamelCase.ConvertName(nameof(Foo.MotherName)), CreateEncryptedField(bsonType: \"string\", isDeterministic: true) },\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\t}\n}\n{\n \"_id\": {\n \"$oid\" : \"62f16d12b118b4057859d709\"\n },\n \"name\" : \"*******\", // <- Encrypted\n \"motherName\": \"*******\", // <- Encrypted\n \"bars\" : [\n {\n \"name\" : \"bar1\",\n \"createdAt\" : \"2022-08-08T20:07:49.368+00:00\"\n },\n {\n \"name\" : \"bar2\",\n \"createdAt\" : \"2022-08-08T20:07:53.368+00:00\"\n }\n ]\n}\nbars[]public async Task<bool> UpsertBar(string fooId, Bar bar, CancellationToken cancellationToken)\n{\n var filterBuilder = Builders<Foo>.Filter;\n\n var filter = filterBuilder.Eq(_ => _.Id, fooId) & filterBuilder.ElemMatch(foo => foo.Bars, b => b.Name == bar.Name);\n\n var update = Builders<Foo>.Update.Set(foo => foo.Bars[-1], bar);\n\n var updateResult = await _propostasCollection.UpdateOneAsync(filter, update, cancellationToken: cancellationToken);\n\t\n\tConsole.WriteLine(System.Text.Json.JsonSerializer.Serialize(updateResult));\n}\n{\n \"IsAcknowledged\" : true,\n \"IsModifiedCountAvailable\" : true,\n \"MatchedCount\" : 0,\n \"ModifiedCount\" : 0,\n \"UpsertedId\" : null\n}\nUpdateOneAsyncarrayNameBarpublic class Bar\n{\n public string FullName { get; set; }\n public DateTime CreatedAt { get; set; }\n}\nvar filter = filterBuilder.Eq(_ => _.Id, fooId) & filterBuilder.ElemMatch(foo => foo.Bars, b => *b.FullName == bar.FullName*);{\n \"IsAcknowledged\":true,\n \"IsModifiedCountAvailable\":true,\n \"MatchedCount\":1,\n \"ModifiedCount\":1,\n \"UpsertedId\":null\n}\nFoo.NameBar.NameSchema MapMongoDB DriverNameFooBarfilterSchema Map var schemaMap = $@\"{{\n properties: {{\n name: {{\n encrypt: {{\n keyId: [{{\n '$binary' : {{\n 'base64' : '{base64DataKeyId}',\n 'subType' : '04'\n }}\n }}],\n bsonType: 'string',\n algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'\n }}\n 
}},\n\t\t\t\t\tmotherName: {{\n encrypt: {{\n keyId: [{{\n '$binary' : {{\n 'base64' : '{base64DataKeyId}',\n 'subType' : '04'\n }}\n }}],\n bsonType: 'string',\n algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'\n }}\n }},\n }},\n 'bsonType': 'object'\n }}\";\nname: {{...}}Schema MapUpdateOneAsync", "text": "Hi,We have the following C# model:And we using the following methods to create the CSFLE Schema Map:Resulting in this “document format” stored in the database:The problem is when we try to update an item of bars[] array with following approach:Output:The UpdateOneAsync method does not affect any item on array.But when I rename the field Name in Bar class “ALL WORKS”, like:Filter:var filter = filterBuilder.Eq(_ => _.Id, fooId) & filterBuilder.ElemMatch(foo => foo.Bars, b => *b.FullName == bar.FullName*);Output:–I suppose it is related to the fields “with the same name” between classes, in this case Foo.Name and Bar.Name and for some reason our Schema Map or MongoDB Driver cannot differentiate what is an Name field from the main class Foo to another of Bar class, and this way the filter does not works.Our Schema Map can be translated to:The field is name: {{...}} in Schema Map and this “looks generic” to the driver(I think).–Does my assumption make sense? Why the method UpdateOneAsync can’t update the “array items” with my initial configuration?(Foo.Name & Bar.Name)Thanks in advance.", "username": "Igor_N_A" }, { "code": "{ }", "text": "Hey @Igor_N_A , I can’t reproduce your issue.\nCan you please provide a full repo with a small console application? Also, can you check the following cases:", "username": "Dmitry_Lukyanov" }, { "code": "JsonSchemaHelper{ }", "text": "Hy @Dmitry_Lukyanov, sorry for the late reply…I made this detailed repo with all configurations to simulate the situation, when you run it you will be able to see the problem.To make it “work as desired”(without reported problem), you only need to comment the line 45 of JsonSchemaHelper class.–Yes.Same reported result.Cheers. ", "username": "Igor_N_A" }, { "code": "", "text": "Hey @Igor_N_A , thanks for your reports, it looks like we have a server issue that I reported here that is a reason of behavior you’re seeing. I will recheck this issue as soon as this ticket will be resolved.", "username": "Dmitry_Lukyanov" }, { "code": "", "text": "Thank’s @Dmitry_Lukyanov!", "username": "Igor_N_A" } ]
CSFLE - Problem when try update "array field" filtering field by the same “field name” present in “CSFLE Schema Map”
2022-08-08T21:52:14.627Z
CSFLE - Problem when try update “array field” filtering field by the same “field name” present in “CSFLE Schema Map”
2,776
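For the CSFLE thread above, a rough mongosh translation of the failing UpdateOneAsync call may make the behaviour easier to reason about. The collection name and concrete values below are assumptions; the .NET driver camel-cases `Name` to `name`, and `foo.Bars[-1]` is the driver's spelling of the positional `$` operator.

```javascript
// Hypothetical mongosh equivalent of the C# filter + update shown in the thread.
const fooId = ObjectId("62f16d12b118b4057859d709");       // taken from the sample document
const bar = { name: "bar1", createdAt: new Date() };       // assumed payload

db.foos.updateOne(
  { _id: fooId, bars: { $elemMatch: { name: bar.name } } }, // ElemMatch(foo => foo.Bars, ...)
  { $set: { "bars.$": bar } }                                // Set(foo => foo.Bars[-1], bar)
)
```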
null
[ "aggregation", "queries", "python" ]
[ { "code": " {\n \"_id\": 1024,\n \"data\": [\n {\"foo\": \"one\", \"bar\": \"lorem\"},\n {\"foo\": \"two\", \"bar\": \"ipsum\"},\n {\"foo\": \"three\", \"bar\": \"asdf\"},\n {\"foo\": \"four\", \"bar\": \"ljkls\"},\n {\"foo\": \"five\", \"bar\": \"afsdfk\"},\n {\"foo\": \"six\", \"bar\": \"lksflkj\"},\n ...\n ]\n }\nfoofoobardb.collection.aggregate([\n {\"$match\": {\"_id\": 1024}},\n {\"$project\": {\n \"data\": {\n \"$map\": {\n \"input\": {\n \"$filter\": {\n \"input\": \"$data\",\n \"as\": \"sub\",\n \"cond\": {\"$in\": [\"$$sub.foo\", [\"one\", \"three\", \"six\", ...]]}\n }\n },\n \"as\": \"sub\",\n \"in\": {\n \"k\": \"$$sub.foo\",\n \"v\": \"$$sub.baar\"\n }\n }\n }\n }},\n {'$project': {\n \"final_data\": {\"$arrayToObject\": \"$data\"},\n \"_id\": 0\n }}\n])\ndata = db.collection.find_one({\"_id\": 1024})['data']\nfinal_data = {}\nfor sub_doc in data:\n if sub_doc['foo'] in ['one', 'three', 'six', ...]:\n final_data[sub_doc['foo']] = sub_doc['bar']\n", "text": "I have been using MongoDB for quite some time now and was exploring many powerful operators available in the aggregation pipelines.With the increasing capabilities of aggregation piplelines, a dilemma is created about whether to,For example, consider the below document,After finding the document, I want to filter the sub-documents based on if the value of foo is in an array of values, and then finally get an object with the value of foo as key and bar as value.We can achieve this using the below aggregation query,OR I can just get the document and process it in python (I am using pyMongo driver),In the first approach, I need to know the mongo-db specific operators and syntax. But once the query is ready I can run it in shell or other languages with minimal changes.But with the second, I am already comfortable with python or other programming languages which are designed to do such stuff. Going ahead, if I change the data source which doesn’t support such operators, I need not worry about migrating the logic back to the programming language.How to decide between the both? Does the approach change based on the number of sub-documents, may be from 100 to 100 thousand ?", "username": "Gowtham_Bhat" }, { "code": "", "text": "Very interesting topic.See some of my previous posts that are related.There are times where it is better to do things on the server. One such time is when it reduce the amount of data transferred. Using $map and $filter to only returned the subset of data of interest is definitively such a case.", "username": "steevej" } ]
Complex aggregation pipelines vs complex programming logic
2022-08-18T03:34:28.744Z
Complex aggregation pipelines vs complex programming logic
2,712
null
[ "aggregation" ]
[ { "code": "{\n\n \"_id\" : ObjectId(\"62d59f2bd6f3809e6588fda4\"),\n \"ClientNavision\" : \"51\",\n \"Version\" : 18,\n \"Fields\" : [\n {\n \"I18List\" : [\n {\n \"I18Code\" : \"es-ES\",\n \"I18Name\" : \"Cargo\"\n },\n \"I18Code\" : \"en-US\",\n \"I18Name\" : \"Occupation\"\n }\n ],\n \"Id\" : \"1\",\n \"Description\" : \"Descripcion de Cargo\",\n \"ParentId\" : \"4\",\n \"Type\" : \"text\",\n \"Size\" : NumberLong(\"100\"),\n \"Default\" : true,\n \"Order\" : 3,\n \"Members\" : null\n },\n \"I18Name\" : \"Extension\"\n \"Id\" : \"2\",\n \"Description\" : \"Descripcion de Extension\",\n \"Order\" : 1,\n }\n ],\n \"Clients\" : [\n \"Active\" : true,\n \"ClientName\" : \"Giuseppe Pepponi\",\n \"ClientI18\" : \"es-ES\",\n \"Fields\" : [\n \"Id\" : \"1\",\n \"Value\" : [ \"Supervisor\" ]\n \"Id\" : \"2\",\n \"Value\" : [ \"234\" ]\n \"Modified\" : ISODate(\"2022-03-24T16:29:28.656+01:00\"),\n \"Created\" : ISODate(\"2021-09-03T12:50:11.000+02:00\"),\n \"ClientFolder\" : \"\",\n \"ClientPhone\" : \"\"\n \"Cif\" : \"f98765422\",\n \"ClientName\" : \"Puigdémont Indultos S.L.\",\n \"Value\" : [ \"Jefe\" ]\n \"Value\" : [ \"123\" ]\n \"Created\" : ISODate(\"2021-09-09T19:01:31.758+02:00\"),\n \"Modified\" : ISODate(\"2021-09-09T19:01:31.758+02:00\"),\n ]\n}\n{\n\n \"_id\" : ObjectId(\"62d59f2bd6f3809e6588fda4\"),\n \"ClientNavision\" : \"51\",\n \"Version\" : 18,\n \"Clients\" : [\n {\n \"Id\" : \"1\",\n \"Active\" : true,\n \"ClientName\" : \"Giuseppe Pepponi\",\n \"ClientI18\" : \"es-ES\",\n \"Fields\" : [\n {\n \"I18List\" : [\n {\n \"I18Code\" : \"es-ES\",\n \"I18Name\" : \"Cargo\"\n },\n \"I18Code\" : \"en-US\",\n \"I18Name\" : \"Occupation\"\n }\n ],\n \"Id\" : \"1\",\n \"Description\" : \"Descripcion de Cargo\",\n \"ParentId\" : \"4\",\n \"Type\" : \"text\",\n \"Size\" : NumberLong(\"100\"),\n \"Default\" : true,\n \"Order\" : 3,\n \"Members\" : null,\n \"Value\" : [ \"Supervisor\" ]\n },\n \"Id\" : \"2\",\n \"Value\" : [ \"234\" ],\n \"I18Name\" : \"Extension\"\n \"Description\" : \"Descripcion de Extension\",\n \"Order\" : 1,\n \"Members\" : null\n }\n ],\n \"Modified\" : ISODate(\"2022-03-24T16:29:28.656+01:00\"),\n \"Created\" : ISODate(\"2021-09-03T12:50:11.000+02:00\"),\n \"ClientFolder\" : \"\",\n \"ClientPhone\" : \"\"\n },\n \"Id\" : \"2\",\n \"Cif\" : \"f98765422\",\n \"ClientName\" : \"Puigdémont Indultos S.L.\",\n \"Value\" : [ \"Jefe\" ],\n \"Value\" : [ \"123\" ],\n \"Created\" : ISODate(\"2021-09-09T19:01:31.758+02:00\"),\n \"Modified\" : ISODate(\"2021-09-09T19:01:31.758+02:00\"),\n }\n ]\n}\n", "text": "HelloI´m a collection of document like this:I need recover the collection of Fields into the Clients.Fields collection by the Field.Id and recover the especific value selected by the clients (clients.Fields.Value)I tried with graphlookup, but i´m not find the correct solutionThis is and example of the desired result:// collection: Customersthanks in advance", "username": "amende" }, { "code": "", "text": "I tried with graphlookup, but i´m not find the correct solutionPlease share what you tried so that we know not to pursue a solution in the same direction. Also share the results and indicate where it differs from the expected result. 
Sometimes a slight adjustment to what you tried might solve the issue.At first look, I think a $lookup is more appropriate than $graphLookup.", "username": "steevej" }, { "code": "{\n\n '$lookup': {\n 'from': 'Customers',\n 'localField': 'Clients.Fields.Id', \n 'foreignField': 'Fields.Id',\n 'let': {\n 'domains': '$Domains',\n 'client_fields_id': '$Clients.Fields.Id',\n 'client_fields_value': '$Clients.Fields.Value'\n },\n 'pipeline': [\n {\n '$match': {\n '$expr': {\n '$eq': [\n '$Fields.Id', '$$client_fields_id'\n ]\n }\n }\n }, {\n '$project': {\n 'Fields': 1,\n 'Domains': 1,\n '_id': 1\n }\n }\n ],\n 'as': 'fields_complete'\n }\n }\n", "text": "send my trie with lookup:i tried:but the result is empty", "username": "amende" }, { "code": "{\n '$graphLookup': {\n 'from': 'Customers', \n 'startWith': '$Customers.Fields.Id\"', \n 'connectFromField': 'Customers.Clients.Fields.Id', \n 'connectToField': 'Customers.Fields.Id', \n 'as': 'FieldsConnection', \n 'depthField': 'prof', \n 'restrictSearchWithMatch': {}\n }\n },\n", "text": "I tried withwith identical wrong result", "username": "amende" }, { "code": "", "text": "I have tried to cut-n-paste your sample input and result documents from your first post and they both gives me syntax error.Make sure we can cut-n-paste your documents because it makes it impossible to experiment with erroneous documents.Make sure you identify your collection names correctly. In your original post you identify the desired result as the collection Customers but in your lookup code it looks like it is your input collection.", "username": "steevej" }, { "code": "[\n {\n '$match': {\n 'Version': 18, \n 'ClientNavision': '51', \n 'Clients.Id': '1', \n 'Clients.Active': true\n }\n }, {\n '$lookup': {\n 'from': 'Customers', \n 'localField': 'Clients.Fields.Id', \n 'foreignField': 'Fields.Id', \n 'let': {\n 'domains': '$Domains', \n 'client_fields_id': '$Clients.Fields.Id', \n 'client_fields_value': '$Clients.Fields.Value'\n }, \n 'pipeline': [\n {\n '$match': {\n '$expr': {\n '$eq': [\n '$Fields.Id', '$$client_fields_id'\n ]\n }\n }\n }, {\n '$project': {\n 'Fields': 1, \n 'Domains': 1, \n '_id': 1\n }\n }\n ], \n 'as': 'fields_complete'\n }\n }, {\n '$project': {\n '_id': 0, \n 'ClientNavision': 1, \n 'Site': 1, \n 'Relations': 1, \n 'Fields': 1, \n 'Clients.Fields': 1, \n 'Clients.Id': 1, \n 'Clients.Active': 1, \n 'Clients.Local': 1, \n 'Clients.Cp': 1, \n 'Clients.ClientI18': 1, \n 'Clients.ContactPhone': 1, \n 'Clients.ClientName': 1, \n 'Clients.Direction': 1, \n 'fields_complete': 1\n }\n }, {\n '$unwind': {\n 'path': '$Clients', \n 'preserveNullAndEmptyArrays': true\n }\n }, {\n '$match': {\n 'Clients.Id': '1', \n 'Clients.Active': true\n }\n }, {\n '$unwind': {\n 'path': '$Clients.Fields', \n 'preserveNullAndEmptyArrays': true\n }\n }, {\n '$project': {\n 'ClientNavisionId': '$ClientNavision', \n 'Site': '$Site', \n 'Relations': '$Relations', \n 'Value': '$Clients.Fields.Value', \n 'Id': '$Clients.Fields.Id', \n 'Description': '$Clients.Fields.Description', \n 'ParentId': '$Clients.Fields.ParentId', \n 'Type': '$Clients.Fields.Type', \n 'Size': '$Clients.Fields.Size', \n 'Default': '$Clients.Fields.Default', \n 'Order': '$Clients.Fields.Order', \n 'Members': '$Clients.Fields.Members', \n 'I18List': '$Clients.Fields.I18List', \n 'ClientId': '$Clients.Id', \n 'ClientI18': '$Clients.ClientI18', \n 'FieldsMatch': '$FieldsConnection', \n 'Stockdata': '$stockdata'\n }\n }, {\n '$sort': {\n 'Created': 1\n }\n }, {\n '$limit': 100\n }\n]\n", "text": "sorry, send you 
tall thes test:", "username": "amende" }, { "code": "", "text": "the reason is, the two subdocuments collection are in the same document collection (Customers), i need join Customers.Fields (array of subdocuments of Customer) with Customer.Clients.Fields (array of subdocuments of Customer)", "username": "amende" }, { "code": "", "text": "Please read Formatting code and log snippets in posts and update your post. We cannot use that the way it is formatted.", "username": "steevej" }, { "code": "", "text": "If they are in the same document it will be easier to do on the client side with your language of choice.", "username": "steevej" }, { "code": "", "text": "ok, sorry, its my first post, later format and resend the data", "username": "amende" }, { "code": "", "text": "If you really want to do it on the server and the you are working within the same document, you will need a $set stage that uses $map on Clients.Fields that $reduce Fields into the appropriate value.", "username": "steevej" }, { "code": "[\n {\n '$match': {\n 'Version': 18, \n 'ClientNavision': '51', \n 'Clients.Id': '1', \n 'Clients.Active': true\n }\n }, {\n '$lookup': {\n 'from': 'Customers', \n 'localField': 'Clients.Fields.Id', \n 'foreignField': 'Fields.Id', \n 'let': {\n 'domains': '$Domains', \n 'client_fields_id': '$Clients.Fields.Id', \n 'client_fields_value': '$Clients.Fields.Value'\n }, \n 'pipeline': [\n {\n '$match': {\n '$expr': {\n '$eq': [\n '$Fields.Id', '$$client_fields_id'\n ]\n }\n }\n }, {\n '$project': {\n 'Fields': 1, \n 'Domains': 1, \n '_id': 1\n }\n }\n ], \n 'as': 'fields_complete'\n }\n }, {\n '$project': {\n '_id': 0, \n 'ClientNavision': 1, \n 'Site': 1, \n 'Relations': 1, \n 'Fields': 1, \n 'Clients.Fields': 1, \n 'Clients.Id': 1, \n 'Clients.Active': 1, \n 'Clients.Local': 1, \n 'Clients.Cp': 1, \n 'Clients.ClientI18': 1, \n 'Clients.ContactPhone': 1, \n 'Clients.ClientName': 1, \n 'Clients.Direction': 1, \n 'fields_complete': 1\n }\n }, {\n '$unwind': {\n 'path': '$Clients', \n 'preserveNullAndEmptyArrays': true\n }\n }, {\n '$match': {\n 'Clients.Id': '1', \n 'Clients.Active': true\n }\n }, {\n '$unwind': {\n 'path': '$Clients.Fields', \n 'preserveNullAndEmptyArrays': true\n }\n }, {\n '$project': {\n 'ClientNavisionId': '$ClientNavision', \n 'Site': '$Site', \n 'Relations': '$Relations', \n 'Value': '$Clients.Fields.Value', \n 'Id': '$Clients.Fields.Id', \n 'Description': '$Clients.Fields.Description', \n 'ParentId': '$Clients.Fields.ParentId', \n 'Type': '$Clients.Fields.Type', \n 'Size': '$Clients.Fields.Size', \n 'Default': '$Clients.Fields.Default', \n 'Order': '$Clients.Fields.Order', \n 'Members': '$Clients.Fields.Members', \n 'I18List': '$Clients.Fields.I18List', \n 'ClientId': '$Clients.Id', \n 'ClientI18': '$Clients.ClientI18', \n 'FieldsMatch': '$FieldsConnection', \n 'Stockdata': '$stockdata'\n }\n }, {\n '$sort': {\n 'Created': 1\n }\n }, {\n '$limit': 100\n }\n]\n", "text": "", "username": "amende" }, { "code": "", "text": "OK thank you very muchIn short, I understand that my approach was wrong, the ways to do it when we work on the same document would be:I’ll try the second option and if I don’t get it, I’ll do it in code", "username": "amende" }, { "code": "", "text": "I encourage you to follow the thread Complex aggregation pipelines vs complex programming logic because it is related to yours.It is about deciding whether doing data transformation on the data server or in the application code.", "username": "steevej" } ]
Compound data array subdocuments with other array of subdocuments
2022-08-16T12:47:57.480Z
Compound data array subdocuments with other array of subdocuments
4,550
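A minimal sketch of the server-side join suggested in the thread above, using `$set` with nested `$map` plus `$filter`/`$arrayElemAt` rather than `$reduce`. Collection and field names are taken from the sample document and may need adjusting: each entry of `Clients.Fields` is merged with the matching definition from the top-level `Fields` array, so the client keeps its `Value` and gains the field metadata.

```javascript
db.Customers.aggregate([
  { $set: {
      Clients: {
        $map: {
          input: "$Clients", as: "client",
          in: {
            $mergeObjects: [
              "$$client",
              { Fields: {
                  $map: {
                    input: "$$client.Fields", as: "cf",
                    in: {
                      $mergeObjects: [
                        // definition with the same Id from the top-level Fields array
                        { $arrayElemAt: [
                            { $filter: {
                                input: "$Fields", as: "f",
                                cond: { $eq: [ "$$f.Id", "$$cf.Id" ] } } },
                            0 ] },
                        // the client's own entry (Id, Value) wins on conflicts
                        "$$cf"
                      ]
                    }
                  }
              } }
            ]
          }
        }
      }
  } }
])
```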
null
[ "replication", "mongodb-shell" ]
[ { "code": "", "text": "Hi all\nI hope this is the right place to post.\nI have a atlas organization with two projects (stage/prod)\nIn prod the Operation Execution Times fluctuate between 50ms and 150ms while in staging i have between 2 and 4ms.\nthey are more or less the same definition\nreplica set (2 regions for stg and 3 for prod)\nstg is m10 and prod is M30\nDefault Read Concern is Local and Default Write Concern is Majority in both cases\nThe writes occurs from the same region in stg and prod\nin mongosh stg and prod i ran the following\nfor (var i = 0; i <= 1000; ++i) { db.Test.insertOne({“id”: “Olivia-157.0-89.0-81.0-102.0-0.3-0.1”, “gender”: “F”, “measurements”: “measurements.json”}) }\nin stg it run fast\nin prod took more than 5 minutes\nI’m open to suggestions \nThank you\nbenjamin", "username": "Benjamin_Elharrar" }, { "code": "", "text": "Hi @Benjamin_Elharrar welcome to the community!In prod the Operation Execution Times fluctuate between 50ms and 150ms while in staging i have between 2 and 4ms.This is unusual, doubly so since as you mentioned prod is using M30 and staging is M10. I’m not sure if the region numbers matter, though.However, troubleshooting this would require access to the logs and performance indicators of both systems, and Atlas support would have access to both. If this is still an issue, could you contact Atlas support about this?Best regards\nKevin", "username": "kevinadi" }, { "code": "", "text": "Yes thank you\nI’ll contact them", "username": "Benjamin_Elharrar" } ]
Low performance Operation Execution Times
2022-07-13T10:14:59.376Z
Low performance Operation Execution Times
1,744
null
[ "queries", "performance", "atlas-online-archive" ]
[ { "code": "", "text": "Hi there!In my app I have history collection. I moved old records (older than 1 year) into DataLake archive, changed connection string to read data from both archive and common collection.Is it ok that performance decreased even when reading unarchived data? Despite the fact that the query is executed using an indexed field and this datetime field is the key for archiving.", "username": "Denis_Stogniy" }, { "code": "", "text": "Hi @Denis_Stogniy,Welcome to MongoDB communityI would suggest with specific atlas workload problems to open a support case. Our team has better visibility into your clusters config and logs.When opening a case provide run timings and specific cluster details as well as query explain plans.Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Hey @Denis_Stogniy ,Thanks for raising this, I can probably shed a bit of light on this, but it also makes sense to open a case if you’d like some deeper analysis.Regarding the performance on the “federated collection” (i.e. targeting archived and cluster data together), you should expect to see lower performance than connecting directly to your cluster but the degree of the performance impact is based on the type of query and how you optimized the archive.One example would be a “streaming query”, something like a “find()”. We’ll start returning data as soon as the underlying storage returns it, so data coming back from the cluster will be immediately returned to you, and then data coming from the archive will be next (most likely). There will be a minor increase in latency as the data has to go from the cluster to the federated endpoint but it should be minimal.On the other hand, a “blocking query” like a “sort” that requires all relevant data from the cluster and the archive to be brought together is going to be as slow as the slowest tier of storage queried which will most likely be the archive and that can be significantly slower than your cluster.The last piece to remember is that when you setup Online Archive you select “Query Fields”. Queries that utilize those fields will have improved performance on the archival data, so a “find” on a field that was identified as a query field should perform better than a find on a field that was not identified as a query field.I’m the PM for Online Archive and am happy to discuss further if it’s helpful, you can reach me at [email protected],\nBen", "username": "Benjamin_Flast" }, { "code": "", "text": "even without DataLake archive (I just connect to the cluster through the federation) i get the same performance as with DataLakefor example request through the federation 2 sec(without DataLake archive), same directly 100 ms", "username": "Max_Virchenko" }, { "code": "", "text": "Hey @Max_Virchenko that is expected behavior. When connecting to your cluster through the federation layer we see a bit of additional latency due to additional network hops and various other steps that occur. We see somewhere between 1 and 2 additional seconds of additional latency for any basic query through data federation, and that can go higher when combining data from multiple clusters.", "username": "Benjamin_Flast" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Low performance while reading data from collection with archive
2021-01-19T08:45:51.476Z
Low performance while reading data from collection with archive
5,152
null
[ "queries", "mongodb-shell", "schema-validation" ]
[ { "code": "MongoServerError: Document failed validation\nAdditional information: {\n failingDocumentId: ObjectId(\"62e4068f266bd3be3f21150b\"),\n details: {\n operatorName: '$jsonSchema',\n schemaRulesNotSatisfied: [\n {\n operatorName: 'properties',\n propertiesNotSatisfied: [\n { propertyName: 'color', details: [ [Object] ] },\n { propertyName: 'age', details: [ [Object] ] },\n { propertyName: 'gender', details: [ [Object] ] }\n ]\n },\n {\n operatorName: 'required',\n specifiedAs: {\n required: [ 'name', 'title', 'occupation' ]\n },\n missingProperties: [ 'name' ]\n }\n ]\n }\n}\n", "text": "Hi everyone, I am trying to read the details object, how can I achieve this via mongo sh?", "username": "Abejide_N_A" }, { "code": "mongoshmongosh:\n inspectDepth: 10\nconfig.set(\"inspectDepth\", 10)\n", "text": "Hi @Abejide_N_A, welcome to the community.\nThe default level of inner elements that mongosh outputs is set to 6. That means elements that are less than 6 levels deep will only be shown in the shell.\nHowever, you can change this behavior in your mongosh config file by specifying the level till which output gets printed.\nFor example, you want to print the output till level 10, you can specify the same in your config file like this:Alternatively, you can also utilize the config API in mongosh by executing the following command to achieve the same:Please note that settings specified using the config API will override the default configuration as well as the configuration mentioned in your mongosh config file for that corresponding key.If you have any doubts, please feel free to reach out to us.Thanks and Regards.\nSourabh Bagrecha,\nMongoDB", "username": "SourabhBagrecha" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Read Mongodb shell error details
2022-07-29T16:15:43.409Z
Read Mongodb shell error details
3,132
null
[ "c-driver" ]
[ { "code": "", "text": "Hi, I have noticed the commit aa940294031177b5faa8623f81dd433a6ab72ed3 is merged into github, but when to upload the 1.22.1-2 to Debian archive?I ask here just because I want to build rsyslog, but it give me:\nsbuild-build-depends-main-dummy : Depends: libmongoc-dev but it is not installableyes, I am building it on riscv64:)", "username": "Bo_YU" }, { "code": "", "text": "Hi @Bo_YU , that should be available in the 1.22.2 release. 1.22.2 is planned to release in early September.Sincerely,\nKevin", "username": "Kevin_Albertson" }, { "code": "", "text": "Ok,thanks. I thought we would upload a minor version for Debian. Anyway, Let’s look forward to September.", "username": "Bo_YU" } ]
When to upload version on Debian?
2022-08-17T01:46:42.911Z
When to upload version on Debian?
2,288
null
[ "python", "cxx" ]
[ { "code": "/opt/third_party/debug/mongo_c_driver/lib/cmake/libbson-static-1.0/libbson-static-1.0-config.cmake, version: 0.0.0\n", "text": "build mongo cxx driver…\n– The CXX compiler identification is GNU 9.4.0\n– Detecting CXX compiler ABI info\n– Detecting CXX compiler ABI info - done\n– Check for working CXX compiler: /usr/bin/c++ - skipped\n– Detecting CXX compile features\n– Detecting CXX compile features - done\n– Found PythonInterp: /usr/bin/python (found version “2.7.18”)\n– The C compiler identification is GNU 9.4.0\n– Detecting C compiler ABI info\n– Detecting C compiler ABI info - done\n– Check for working C compiler: /usr/bin/cc - skipped\n– Detecting C compile features\n– Detecting C compile features - done\n– Auto-configuring bsoncxx to use MNMLSTC for polyfills since C++17 is inactive\nbsoncxx version: 0.0.0\nCMake Error at src/bsoncxx/CMakeLists.txt:107 (find_package):\nCould not find a configuration file for package “libbson-static-1.0” that\nis compatible with requested version “1.13.0”.The following configuration files were considered but not accepted:– Configuring incomplete, errors occurred!\nSee also “/home/hgc/workspace/gerrit/hilton/third_party/archives/debug/mongo-cxx-driver-r3.6.7/debug_build/CMakeFiles/CMakeOutput.log”.\nninja: error: loading ‘build.ninja’: No such file or directory\nninja: error: loading ‘build.ninja’: No such file or directory\nninja: error: loading ‘build.ninja’: No such file or directorycmake command:\n/usr/bin/cmake -G “${CMAKE_GENERATOR}” -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE_VALUE} -DCMAKE_INSTALL_PREFIX=${MONGO_CXX_DRIVER_INSTALL_DIR} \n-DBUILD_SHARED_LIBS_WITH_STATIC_MONGOC=ON -DCMAKE_PREFIX_PATH=${MONGO_C_DRIVER_INSTALL_DIR} …and the MONGO_C_DRIVER_INSTALL_DIR=/opt/third_party/debug/mongo_c_driver/", "username": "BrentHuang_N_A" }, { "code": "", "text": "c driver build ok:\n/usr/bin/cmake -G “${CMAKE_GENERATOR}” -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE_VALUE} -DCMAKE_INSTALL_PREFIX=${MONGO_C_DRIVER_INSTALL_DIR} \n-DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_SNAPPY=OFF -DENABLE_TESTS=OFF -DENABLE_EXAMPLES=OFF …\n/usr/bin/cmake --build .\nsudo /usr/bin/cmake --build . --target install", "username": "BrentHuang_N_A" }, { "code": "", "text": "hgc@hgc-virtual-machine:~/workspace/gerrit/hilton/third_party/build$ gcc --version\ngcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\nCopyright (C) 2019 Free Software Foundation, Inc.\nThis is free software; see the source for copying conditions. There is NO\nwarranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.hgc@hgc-virtual-machine:~/workspace/gerrit/hilton/third_party/build$ g++ --version\ng++ (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\nCopyright (C) 2019 Free Software Foundation, Inc.\nThis is free software; see the source for copying conditions. There is NO\nwarranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.hgc@hgc-virtual-machine:~/workspace/gerrit/hilton/third_party/build$ ninja --version\n1.11.0\nhgc@hgc-virtual-machine:~/workspace/gerrit/hilton/third_party/build$ cmake --version\ncmake version 3.24.0CMake suite maintained and supported by Kitware (kitware.com/cmake).\nhgc@hgc-virtual-machine:~/workspace/gerrit/hilton/third_party/build$", "username": "BrentHuang_N_A" } ]
C:1.22.1 build ok. cxx:3.6.7 build failed. os:Linux Mint 20.3 Cinnamon
2022-08-18T09:17:10.933Z
C:1.22.1 build ok. cxx:3.6.7 build failed. os:Linux Mint 20.3 Cinnamon
3,349
null
[ "aggregation" ]
[ { "code": "", "text": "I have this error:db.datos_sensores.aggregate([{$project:{timestamp:{$dateFromString:{dateString:’$timestamp’}}},\"_id\":0, “medidas”:{$slice:[\"$medidas\",-1]},“location_id”:1}, {$addFields:{Hora:{$hour:\"$timestamp\"},Diadelasemana:{$dayOfWeek:\"$timestamp\"}}}, {$match:{‘Diadelasemana’:{$in:[‘1’,‘7’]},$and:[{‘Diadelasemana’:‘6’},{‘Hora’:{$gte:16}}],$and:[{‘Diadelasemana’:‘2’},{‘Hora’:{$lt:‘8’}}]}}])\nuncaught exception: Error: command failed: {\n“ok” : 0,\n“errmsg” : “A pipeline stage specification object must contain exactly one field.”,\n“code” : 40323,\n“codeName” : “Location40323”\n} : aggregate failed :\n_getErrorWithCode@src/mongo/shell/utils.js:25:13\ndoassert@src/mongo/shell/assert.js:18:14\n_assertCommandWorked@src/mongo/shell/assert.js:618:17\nassert.commandWorked@src/mongo/shell/assert.js:708:16\nDB.prototype._runAggregate@src/mongo/shell/db.js:266:5\nDBCollection.prototype.aggregate@src/mongo/shell/collection.js:1046:12\n@(shell):1:1Could you help me?", "username": "Cesar_Garcia_Garcia" }, { "code": ".aggregate()db.datos_sensores.aggregate(\n [\n {\n $project:{\n timestamp:{\n $dateFromString:{\n dateString:’$timestamp’\n }\n }\n },\n \"_id\":0, \n \"medidas\":{\n $slice:[\"$medidas\",-1]\n },\n \"location_id\":1\n }, \n {\n $addFields:{\n Hora:{\n $hour:\"$timestamp\"\n },\n Diadelasemana:{\n $dayOfWeek:\"$timestamp\"\n }\n }\n }, \n {\n $match:{\n ‘Diadelasemana’:{\n $in:[‘1’,‘7’]\n },\n $and:[\n {‘Diadelasemana’:‘6’},\n {‘Hora’:{$gte:16}}\n ],\n $and:[\n {‘Diadelasemana’:‘2’},\n {‘Hora’:{$lt:‘8’}}\n ]\n }\n }\n ]\n)\n$project", "text": "In cases like this where you’re getting an error from a .aggregate() call, the first thing to do while troubleshooting it to format your query so you can easily see how the brackets line up. Your query lines up as follows:Notice that there is an extra closing brace in your $project so a lot of the files for projection have been moved outside that stage. This has also thrown everything else off.", "username": "Doug_Duncan" }, { "code": "[\n { $project :\n {\n timestamp : { $dateFromString:{dateString:’$timestamp’}}\n } ,\n \"_id\":0,\n “medidas” : {$slice:[\"$medidas\",-1]},\n “location_id”:1\n },\n { $addFields :\n {\n Hora:{$hour:\"$timestamp\"},\n Diadelasemana:{$dayOfWeek:\"$timestamp\"}\n }\n },\n { $match:\n {\n ‘Diadelasemana’:{$in:[‘1’,‘7’]},\n $and:[{‘Diadelasemana’:‘6’},{‘Hora’:{$gte:16}}],\n $and:[{‘Diadelasemana’:‘2’},{‘Hora’:{$lt:‘8’}}]\n }\n }\n]\nmongosh> match_stage = {$match:{Diadelasemana:{$in:[1,7]},$and:[{Diadelasemana:6},{Hora:{$gte:16}}],$and:[{Diadelasemana:2},{Hora:{$lt:8}}]}}\n/* produce the output */\n{$match:{Diadelasemana:{$in:[1,7]},$and:[{Diadelasemana:2},{Hora:{$lt:8}}]}}\n", "text": "If we try to reformat your code to understandwe can see the you are closing your $project too soon.Read Formatting code and log snippets in posts before posting code or documents. It is hard to help when the markup is wrong and all quotes are screwed up.Using variables for stages help in finding errors, it is adaptation of Divide And Conquer. For example:Because with JSON you cannot have an object with duplicate key. Well, you can but you only get the last occurrence. Anyway in this case the logic is wrong since you cannot have\nboth Diadelasemana:2 and Diadelasemana:6 both true.", "username": "steevej" }, { "code": "", "text": "Sorry for reply the almost the same at almost the same time.", "username": "steevej" }, { "code": "", "text": "Why you write what you wrote and then delete it. 
I feel you are, if not smarter then more knowledgeable (if we can say than in English) than me.", "username": "steevej" }, { "code": "", "text": "I deleted my post because it was not correct. Not the part about you being smarter than me (I do feel this is correct and I’ve learned a lot from you over the years here), but the part about my having the brackets incorrect.", "username": "Doug_Duncan" }, { "code": "", "text": "I’ve learned a lot from you over the years hereDitto over here. Every post is bringing some light.", "username": "steevej" }, { "code": "", "text": "Thanks a lot guys. I have got to work fine!!", "username": "Cesar_Garcia_Garcia" } ]
Error aggregate $match $and
2022-08-17T23:17:42.607Z
Error aggregate $match $and
5,192
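Two details are worth spelling out for the `$match` in the thread above: a document cannot hold two `$and` keys (only the last one survives), and `$hour`/`$dayOfWeek` return integers, so string comparisons such as `{ $lt: '8' }` never match. If the intent was to match any of the three time windows, a hedged rewrite combines them with `$or`:

```javascript
// Assumed intent: weekend days, Friday from 16:00, or Monday before 08:00
// ($dayOfWeek: 1 = Sunday ... 7 = Saturday).
{ $match: {
    $or: [
      { Diadelasemana: { $in: [ 1, 7 ] } },                        // Sunday or Saturday
      { $and: [ { Diadelasemana: 6 }, { Hora: { $gte: 16 } } ] },  // Friday evening
      { $and: [ { Diadelasemana: 2 }, { Hora: { $lt: 8 } } ] }     // Monday early morning
    ]
} }
```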
null
[ "aggregation", "queries" ]
[ { "code": "{\n name: \"Example 1\",\n year: \"2012\",\n}\n\n{\n name: \"Example 2\",\n year: \"2012\",\n}\n\n{\n name: \"Example 3\",\n year: \"2013\",\n}\n\n{\n name: \"Example 4\",\n year: \"2014\",\n}\n[\n {\n _id: \"2012\",\n count: 4 // years 2012-2014\n },\n {\n _id: \"2013\",\n count: 2 // years 2013-2014\n },\n {\n _id: \"2014\",\n count: 1 // only year 2014\n }\n]\n$group$sumyearCounts: [ \n { $group: { _id: \"$year\", count: { $sum: 1 } } }\n]\nconst yearCounts: { _id: string, count: number }[] = aggregationResult[0].yearCounts || [];\nconst yearCountsSummed = yearCounts.map((yearCount: { _id: string, count: number }) => {\n const yearsUntil = yearCounts.filter(year => year._id >= yearCount._id);\n const countSummed = yearsUntil.map(yearCount => yearCount.count).reduce((a, b) => a + b) || 0;\n return countSummed;\n});\n", "text": "Let’s say my documents look like this:Using an aggregation, is there a way to group by year and sum the document count, but additionally add the sum of all later years?The result I want is this:Right now, I’m using a normal $group + $sum, which gives me the counts for each year individually and then I sort them in JavaScript. I was hoping that there was a simpler way that gets rid of the additional JS code:", "username": "Florian_Walther" }, { "code": "$setWindowFieldsyearCounts: [\n { $group: { _id: \"$year\", count: { $sum: 1 } } },\n {\n $setWindowFields: {\n sortBy: { _id: -1 },\n output: {\n count: {\n $sum: '$count',\n window: { documents: ['unbounded', 'current'] }\n }\n }\n }\n }\n],\nMongoServerError: PlanExecutor error during aggregation :: caused by :: Requested document not in SpillableCache. Expected range was 0--1 but got 0\n[...]\ncode: 5643004,\ncodeName: 'Location5643004'\n", "text": "I found out that I can do this by using $setWindowFields:However, I’m getting an error that I can’t find any results for in Google:", "username": "Florian_Walther" }, { "code": "yearCountsfacetyearCountsfacet({\n // If yearcounts is placed here, I get an error\n yearCounts: [ \n { $group: { _id: \"$year\", count: { $sum: 1 } } },\n {\n $setWindowFields: {\n sortBy: { _id: -1 },\n output: {\n count: {\n $sum: '$count',\n window: { documents: ['unbounded', 'current'] }\n }\n }\n }\n }\n],\nuniqueLanguages: [\n { $match: { approved: true } },\n { $unwind: '$languages' },\n { $group: { _id: null, all: { $addToSet: \"$languages\" } } }\n],\nlanguageCounts: [\n ...matchFilters.filter(matchFilter => !matchFilter.$match.languages),\n { $unwind: '$languages' },\n { $group: { _id: \"$languages\", count: { $sum: 1 } } }\n]\n})\n", "text": "yearCounts is part of a facet. There are two more facet stages coming after it. The error disappears when I put yearCounts after these two. But why is that happening? I thought these facet stages are independent of each other?", "username": "Florian_Walther" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
$group and sum + add all greater than
2022-08-18T05:32:28.188Z
$group and sum + add all greater than
1,485
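As the thread above notes, the SpillableCache error went away once the stage order inside `$facet` changed; one way to sidestep it entirely is to compute the cumulative counts in their own aggregation rather than inside `$facet`. A minimal sketch, with the collection name assumed:

```javascript
// Count documents per year, where each year also includes all later years.
db.items.aggregate([
  { $group: { _id: "$year", count: { $sum: 1 } } },
  { $setWindowFields: {
      sortBy: { _id: -1 },                                  // newest year first
      output: { count: { $sum: "$count",
                         window: { documents: [ "unbounded", "current" ] } } }
  } },
  { $sort: { _id: 1 } }
])
```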
null
[ "queries", "node-js", "mongoose-odm", "transactions" ]
[ { "code": "await session.withTransaction(async () => {\n const existingVote = await ResourceVote.findByIdAndDelete(voteId).session(session).exec();\n\n if (existingVote?.upDown === 'up') {\n await Resource.findByIdAndUpdate(resourceId, { $inc: { votePoints: -1 } }).session(session).exec();\n\n const deletedKarmaLogEntry = await KarmaLogEntry.findOneAndDelete({ upvotedResource: resource._id, upvotingUser: authenticatedUserId }).session(session).exec();\n if (deletedKarmaLogEntry) {\n await UserModel.findByIdAndUpdate(resource.submittingUser, { $inc: { karma: -upVoteKarmaPoints } }).session(session).exec();\n }\n } else if (existingVote?.upDown === 'down') {\n await Resource.findByIdAndUpdate(resourceId, { $inc: { votePoints: 1 } }).session(session).exec();\n }\n\n if (existingVote?.upDown !== upDown) {\n await ResourceVote.create([{\n _id: voteId,\n userId: authenticatedUserId,\n resourceId: resourceId,\n upDown: upDown,\n }], { session: session });\n\n if (upDown === 'up') {\n await Resource.findByIdAndUpdate(resourceId, { $inc: { votePoints: 1 } }).session(session).exec();\n\n await KarmaLogEntry.create([{\n user: resource.submittingUser,\n points: upVoteKarmaPoints,\n upvotedResource: resource._id,\n upvotingUser: authenticatedUserId,\n }], { session: session });\n\n await UserModel.findByIdAndUpdate(resource.submittingUser, { $inc: { karma: upVoteKarmaPoints } }).session(session).exec();\n } else if (upDown === 'down') {\n await Resource.findByIdAndUpdate(resourceId, { $inc: { votePoints: -1 } }).session(session).exec();\n }\n }\n});\nexistingVotedownResourceVoteResourceVote.create", "text": "I want to make sure that a transaction I’m running is safe from race conditions. I’m using Mongoose’s withTransaction.Are operations within MongoDB transactions executed one after another (so another caller running in parallel can see the first half of the transaction already applied to the DB, but not the second half), or are the changes only visible once the whole transaction is done?\nI want to make sure that this endpoint can be called in parallel (or very fast succession) without causing any wrong updates.Here’s a potential situation that I am worried about: Let’s say the existingVote is down and we send 2 upvotes in parallel. The first upvote deletes the existing downvote and executes the block below . The second caller skips the deletion and therefore the block below it. Now the second caller overtakes the first caller, creates a new ResourceVote, and applies all the points and creates the documents. Now the first caller fails at the ResourceVote.create because the unique id constraint kicks in, and therefore rolls back all previous changes, recreating the originally deleted existing vote.", "username": "Florian_Walther" }, { "code": "", "text": "Hi @Florian_WaltherAre operations within MongoDB transactions executed one after anotherMongoDB’s transaction is using the usual ACID guarantees (see What are ACID Properties in Database Management Systems?)so another caller running in parallel can see the first half of the transaction already applied to the DB, but not the second halfNo. This is counter to the ACID property explained in the article linked above. It’s a pretty broken database system if this is allowed to happen.I want to make sure that this endpoint can be called in parallel (or very fast succession) without causing any wrong updates.With the ACID guarantees of a multi-document transaction, you’ll be able to safely do this. 
The whole transaction is an all-or-nothing proposition, so either all operations in the transaction succeed, or none of them, and it’s as if that transaction was never performed. However:Here’s a potential situation that I am worried aboutIf I understand the scenario correctly, you’re worried about stale reads (when the transaction is seeing a snapshot of the previous data state at the time when the transaction starts, and makes decisions based on outdated information). Is this correct? If yes, I think this is a valid concern. In this case, you may want to check out In-progress Transactions and Stale Reads on how to cater for this scenario.Best regards\nKevin", "username": "kevinadi" }, { "code": "", "text": "@kevinadiThank you for the explanation! The worry-case I described can only happen if one caller can overtake the other (and see only parts of the changes the other caller is in the process of doing right now). From your explanation, I understand that this can’t happen.I only change points in reaction to a successful document deletion (with a return value) or creation (where the primary key should make duplicates impossible).", "username": "Florian_Walther" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Is this transaction safe from race conditions?
2022-08-11T06:38:13.677Z
Is this transaction safe from race conditions?
3,683
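A hedged sketch of how the worry in the thread above can be handled at the call site: if two transactions race, one of them aborts (write conflict or duplicate `_id` on the vote) and is rolled back in full, so the remaining decision is what to do with that error. The error-code handling below is an assumption about how the duplicate surfaces through Mongoose.

```javascript
// Assumption: voteId is deterministic per (user, resource), so a concurrent
// duplicate vote surfaces as a duplicate-key error (code 11000) and the losing
// transaction is rolled back completely. withTransaction already retries
// transient transaction errors on its own.
try {
  await session.withTransaction(async () => {
    // ... the vote logic shown above ...
  });
} catch (err) {
  if (err.code === 11000) {
    // A parallel request already recorded the same vote; safe to treat as a no-op.
  } else {
    throw err;
  }
}
```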
null
[ "mongodb-shell" ]
[ { "code": "", "text": "mongodb://127.0.0.1:27017/?directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+1.5.4mongod service is running and can connect with mongo ipaddress:27017 but not by mongosh? Why is it?", "username": "Rajitha_Hewabandula" }, { "code": "", "text": "its working as mongosh ipaddress:27017 not as mongosh 127.0.0.1:27017 ?", "username": "Rajitha_Hewabandula" }, { "code": "", "text": "What is your bindIp value?\nMay be you are not allowing it to connect with localhost", "username": "Ramachandra_Tummala" }, { "code": "", "text": "HI @Ramachandra_Tummala this was able to fix by deleting the .sock file in /tmp folder. Many thnx", "username": "Rajitha_Hewabandula" } ]
Mongosh service error
2022-08-17T10:15:48.998Z
Mongosh service error
1,842
null
[ "aggregation", "change-streams" ]
[ { "code": "const pipeline = [\n { $match: { 'fullDocument.username': 'alice' } },\n { $addFields: { newField: 'this is an added field!' } }\n];\nconst collection = db.collection('inventory');\nconst changeStream = collection.watch(pipeline);\nchangeStream.on('change', next => {\n // process next document\n});\nnewField const pipeline = [\n {\n $match: { operationType: { $in: ['insert'] } },\n },\n {\n $addFields: { fullDocument: { createdAt: new Date(), handled: true } },\n },\n { $project: { fullDocument: 1 } },\n ];```\n\n\nIs there something wrong, or did I get it wrong?\n\nThanks in advance.", "text": "Hi, according with Change Stream documentationThere is an exampleThis means that the document will be modified and a new saved attribute newField will be added right?I performed this operation, and it didn’t happen, I capture the event, I added a field , I even needed to add it inside fullDocument.i put log, and the new attribute appears in the log, but it does not persist on database.", "username": "navarro_ferreira" }, { "code": "changeStreamfield: value$matchchangeStream.on('change', next => {\n // To persist the changeStram output, one needs to add code here according to their use-case.\n});\n", "text": "Hello @navarro_ferreira ,I notice you haven’t had a response to this topic yet - were you able to find a solution?\nIf not, could you confirm if my understanding for your use-case is correct?By using changeStream event, you are trying to insert a new field: value back in the original collection that matches your $match criteria. If this is correct, then I’m afraid changestream does not have this feature yet. From the changestream documentation page:Change streams allow applications to access real-time data changes without the complexity and risk of tailing the oplog. Applications can use change streams to subscribe to all data changes on a single collection, a database, or an entire deployment, and immediately react to them. Because change streams use the aggregation framework, applications can also filter for specific changes or transform the notifications at will.The pipeline in the changestream only allows you to modify the changestream output from the default output but it can not update the data in database. This is so that if you’re only interested in part of a document in the changestream (e.g. if you only need to know the _id of the changed document), the changestream doesn’t need to return the whole document and instead project only the _id using the defined pipeline.However, should you need to persist the changestream output, you might need to do it manually by modifying the code example you posted as below:Let me know if you have any more questions.Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Update document during changeStream event
2022-07-28T21:34:04.507Z
Update document during changeStream event
3,256
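A minimal sketch of the manual persistence step described in the thread above (Node.js driver, collection name taken from the example): the change stream pipeline only reshapes the event it emits, it never writes back to the collection, so the handler has to issue its own update.

```javascript
const changeStream = db.collection('inventory')
  .watch([ { $match: { operationType: 'insert' } } ]);

changeStream.on('change', async (event) => {
  // Persist the extra fields on the inserted document ourselves.
  await db.collection('inventory').updateOne(
    { _id: event.documentKey._id },
    { $set: { createdAt: new Date(), handled: true } }
  );
});
```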
null
[ "connector-for-bi" ]
[ { "code": "2022-06-20 13:36:28-04 hgs-MacBook-Pro Installer[3004]: Opened from: /Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg\n\n2022-06-20 13:36:28-04 hgs-MacBook-Pro Installer[3004]: Failed to load specified background image\n\n2022-06-20 13:36:28-04 hgs-MacBook-Pro Installer[3004]: Product archive /Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg trustLevel=350\n\n2022-06-20 13:36:28-04 hgs-MacBook-Pro Installer[3004]: Could not load resource readme: (null)\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: ================================================================================\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: User picked Standard Install\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: Choices selected for installation:\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: Install: \"MongoDB ODBC Driver 1.4.1\"\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: Install: \"ODBC Manager\"\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#odbc-manager-component.pkg : odbc_manager : 0\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: Install: \"MongoDB ODBC\"\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#mongodb-odbc-component.pkg : mongodb_odbc : 0\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: ================================================================================\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: It took 0.00 seconds to summarize the package selections.\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: -[IFDInstallController(Private) _buildInstallPlanReturningError:]: location = file://localhost\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: -[IFDInstallController(Private) _buildInstallPlanReturningError:]: file://localhost/Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#odbc-manager-component.pkg\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: -[IFDInstallController(Private) _buildInstallPlanReturningError:]: file://localhost/Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#mongodb-odbc-component.pkg\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: Set authorization level to root for session\n\n2022-06-20 13:36:33-04 hgs-MacBook-Pro Installer[3004]: Authorization is being checked, waiting until authorization arrives.\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Administrator authorization granted.\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Packages have been authorized for installation.\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Will use PK session\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Using authorization level of root for IFPKInstallElement\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Install request is requesting Rosetta translation.\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Starting installation:\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Configuring volume \"Macintosh HD\"\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Preparing disk for local booted install.\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Free space on \"Macintosh HD\": 367.96 GB (367964041216 bytes).\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Create temporary directory 
\"/var/folders/f1/kn3r7dvn45b8gp0t81hm1frw0000gn/T//Install.3004OvHcor\"\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: IFPKInstallElement (2 packages)\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: Current Path: /System/Library/CoreServices/Installer.app/Contents/MacOS/Installer\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro installd[7782]: PackageKit: Adding client PKInstallDaemonClient pid=3004, uid=501 (/System/Library/CoreServices/Installer.app/Contents/MacOS/Installer)\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro Installer[3004]: PackageKit: Enqueuing install with framework-specified quality of service (utility)\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro installd[7782]: PackageKit: Set reponsibility for install to 3004\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro installd[7782]: PackageKit: ----- Begin install -----\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro installd[7782]: PackageKit: request=PKInstallRequest <2 packages, destination=/>\n\n2022-06-20 13:36:35-04 hgs-MacBook-Pro installd[7782]: PackageKit: packages=(\n\n\"PKLeopardPackage <id=ODBC Manager, version=0, url=file://localhost/Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#odbc-manager-component.pkg>\",\n\n\"PKLeopardPackage <id=MongoDB ODBC, version=0, url=file://localhost/Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#mongodb-odbc-component.pkg>\"\n\n)\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: Extracting file://localhost/Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#odbc-manager-component.pkg (destination=/Library/InstallerSandboxes/.PKInstallSandboxManager/F815995D-60D8-426F-9D2D-78D881FFD014.activeSandbox/Root, uid=0)\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: Extracting file://localhost/Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#mongodb-odbc-component.pkg (destination=/Library/InstallerSandboxes/.PKInstallSandboxManager/F815995D-60D8-426F-9D2D-78D881FFD014.activeSandbox/Root, uid=0)\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: prevent user idle system sleep\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: suspending backupd\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: Using trashcan path /var/folders/zz/zyxvpxvq6csfxvn_n0000000000000/T/PKInstallSandboxTrash/F815995D-60D8-426F-9D2D-78D881FFD014.sandboxTrash for sandbox /Library/InstallerSandboxes/.PKInstallSandboxManager/F815995D-60D8-426F-9D2D-78D881FFD014.activeSandbox\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: PKInformSystemPolicyInstallOperation failed with error:An error occurred while registering installation with Gatekeeper.\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: Shoving /Library/InstallerSandboxes/.PKInstallSandboxManager/F815995D-60D8-426F-9D2D-78D881FFD014.activeSandbox/Root (2 items) to /\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro install_monitor[3009]: Temporarily excluding: /Applications, /Library, /System, /bin, /private, /sbin, /usr\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit (package_script_service): Preparing to execute script \"./postinstall\" in /private/tmp/PKInstallSandbox.XL9hIu/Scripts/MongoDB ODBC.O0fBYz\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro package_script_service[7792]: PackageKit: Executing script \"postinstall\" in /tmp/PKInstallSandbox.XL9hIu/Scripts/MongoDB ODBC.O0fBYz\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro 
package_script_service[7792]: Set responsibility to pid: 3004, responsible_path: /System/Library/CoreServices/Installer.app/Contents/MacOS/Installer\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro package_script_service[7792]: Preparing to execute with Rosetta Intel Translation: '/tmp/PKInstallSandbox.XL9hIu/Scripts/MongoDB ODBC.O0fBYz/postinstall'\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro package_script_service[7792]: ./postinstall: arch: posix_spawnp: /tmp/PKInstallSandbox.XL9hIu/Scripts/MongoDB ODBC.O0fBYz/postinstall: No such file or directory\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro package_script_service[7792]: Responsibility set back to self.\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro install_monitor[3009]: Re-included: /Applications, /Library, /System, /bin, /private, /sbin, /usr\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: releasing backupd\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: allow user idle system sleep\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: Install Failed: Error Domain=PKInstallErrorDomain Code=112 \"An error occurred while running scripts from the package “mongodb-connector-odbc-1.4.1-macos-x86-64.pkg”.\" UserInfo={NSFilePath=./postinstall, NSURL=file://localhost/Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#mongodb-odbc-component.pkg, PKInstallPackageIdentifier=MongoDB ODBC, NSLocalizedDescription=An error occurred while running scripts from the package “mongodb-connector-odbc-1.4.1-macos-x86-64.pkg”.} {\n\nNSFilePath = \"./postinstall\";\n\nNSLocalizedDescription = \"An error occurred while running scripts from the package \\U201cmongodb-connector-odbc-1.4.1-macos-x86-64.pkg\\U201d.\";\n\nNSURL = \"file://localhost/Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#mongodb-odbc-component.pkg\";\n\nPKInstallPackageIdentifier = \"MongoDB ODBC\";\n\n}\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: Cleared responsibility for install from 3004.\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: Cleared permissions on Installer.app\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: Running idle tasks\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: Done with sandbox removals\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro Installer[3004]: install:didFailWithError:Error Domain=PKInstallErrorDomain Code=112 \"An error occurred while running scripts from the package “mongodb-connector-odbc-1.4.1-macos-x86-64.pkg”.\" UserInfo={NSFilePath=./postinstall, NSURL=file://localhost/Volumes/mongodb-odbc/mongodb-connector-odbc-1.4.1-macos-x86-64.pkg#mongodb-odbc-component.pkg, PKInstallPackageIdentifier=MongoDB ODBC, NSLocalizedDescription=An error occurred while running scripts from the package “mongodb-connector-odbc-1.4.1-macos-x86-64.pkg”.}\n\n2022-06-20 13:36:36-04 hgs-MacBook-Pro installd[7782]: PackageKit: Removing client PKInstallDaemonClient pid=3004, uid=501 (/System/Library/CoreServices/Installer.app/Contents/MacOS/Installer)\n\n2022-06-20 13:36:37-04 hgs-MacBook-Pro Installer[3004]: Install failed: The Installer encountered an error that caused the installation to fail. 
Contact the software manufacturer for assistance.\n\n2022-06-20 13:36:37-04 hgs-MacBook-Pro Installer[3004]: IFDInstallController 351F320 state = 8\n\n2022-06-20 13:36:37-04 hgs-MacBook-Pro Installer[3004]: Displaying 'Install Failed' UI.\n\n2022-06-20 13:36:38-04 hgs-MacBook-Pro Installer[3004]: 'Install Failed' UI displayed message:'The Installer encountered an error that caused the installation to fail. Contact the software manufacturer for assistance.'.\n\n2022-06-20 13:36:40-04 hgs-MacBook-Pro Installer[3004]: Package Removal: Package is not writable. Not offering removal.\n\n2022-06-20 13:36:40-04 hgs-MacBook-Pro Installer[3004]: Package Removal: Package cannot be removed.\n", "text": "Whenever I try to install the BI ODBC driver I get this error. Would appreciate any help with this.MongoDB ODBC Driver 1.4.1 Installation Log", "username": "Harun_Gunasekaran" }, { "code": "", "text": "Did you try to reboot your system?\nIs gatekeeper enabled?", "username": "Ramachandra_Tummala" }, { "code": "", "text": "hey @Ramachandra_Tummala , I have rebooted. I tried installing both with disabling gatekeeper and by enabling it. I have the error during installation.", "username": "Harun_Gunasekaran" }, { "code": "", "text": "Hi, I am also facing same issue. Mongo ODBC does not work on Mac Montery. Anyone did successfully?", "username": "Ramabathiran_Arumuga" } ]
Installation errors out with MongoBI ODBC Driver on macOS (Monterey)
2022-06-20T17:48:38.371Z
Installation errors out with MongoBI ODBC Driver on macOS (Monterey)
2,678
null
[ "queries" ]
[ { "code": "", "text": "is there any way to replace null values during indexing ? with the help of mapping ?", "username": "varaprasad_kodali" }, { "code": "", "text": "Hi @varaprasad_kodali welcome to the community, and sorry for the delay!In short, no. There’s no method to pre-process a document before indexing. If it can, then it’s not an index anymore, isn’t it Without knowing your use case, if you want to map the null values to something else, why not do it in the document itself? Alternatively, if you don’t want to index certain part of a collection, you might be able to use partial indexes. However please note that partial indexes is a special index type and it comes with caveats (see the linked page for more details).Best regards\nKevin", "username": "kevinadi" } ]
Is there any way to replace null values during indexing?
2022-07-04T06:57:01.519Z
Is there any way to replace null values during indexing?
1,680
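Following on from the partial-index suggestion in the thread above, here is a minimal sketch of that approach using the MongoDB .NET driver. The connection string, database, collection and field names ("test", "items", "category") are placeholders for illustration, not values taken from the thread.

```csharp
using MongoDB.Bson;
using MongoDB.Driver;

var client = new MongoClient("mongodb://localhost:27017");   // placeholder connection string
var collection = client.GetDatabase("test").GetCollection<BsonDocument>("items");

// Index "category" only where it actually holds a string, so documents in which
// the field is null or missing never enter the index.
var keys = Builders<BsonDocument>.IndexKeys.Ascending("category");
var options = new CreateIndexOptions<BsonDocument>
{
    PartialFilterExpression = Builders<BsonDocument>.Filter.Type("category", BsonType.String)
};
await collection.Indexes.CreateOneAsync(new CreateIndexModel<BsonDocument>(keys, options));

// Caveat from the thread: the planner only chooses this index when the query
// predicate is guaranteed to fall inside the partial filter, e.g. { category: "books" }.
```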
null
[ "aggregation" ]
[ { "code": "[\n{\n\"$match\": {\"field\": { \"$in\" : [a list of around 1k+ elements(one_k_list)] }}\n},\n{\"$group\", .....},\n{\"$group\", .....}\n]\n", "text": "I have to perform aggregation on a very large data.The flow is:-This is taking a lot of time, but if I divide the list into small chunks of let’s say 50 elements then i can make around 20+ async db calls and get my result. My question is will combining those 20+ db call result give me same output as the one with 1K+ list.", "username": "Saujanya_Tiwari" }, { "code": "$group$group$sort$group", "text": "Hi @Saujanya_Tiwari welcome to the community!Apologize for the delay, but have you found a solution for your use case yet?I tend to think that the double $group is the main issue here. In an aggregation, certain stages like $group or $sort without an index is termed a “blocking” stage. That is, this stage will need all the documents to be present to be able to do its work, and not operate on a document-per-document basis. In other words, it “blocks” the whole pipeline.will combining those 20+ db call result give me same output as the one with 1K+ list.That depends on what the $group operation does. If it’s mathematically equal (e.g. if you’re adding or multiplying numbers), then yes. If it’s not equal (e.g. some statistical functions concerning populations), then you’ll need to do more things to make them mathematically equalBest regards\nKevin", "username": "kevinadi" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Group aggregations on chunks of a list
2022-06-30T07:27:43.661Z
Group aggregations on chunks of a list
1,451
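As a sketch of the chunking idea discussed in the thread above, the following .NET driver snippet splits the large $in list into chunks, runs the $match/$group per chunk, and merges the partial results client-side. It assumes the $group is a simple $sum (which, as noted in the reply, can be merged safely) and that "field" and "category" hold strings; all names and the chunk size are illustrative placeholders.

```csharp
using System;
using System.Collections.Generic;
using System.Linq;
using MongoDB.Bson;
using MongoDB.Driver;

var collection = new MongoClient("mongodb://localhost:27017")    // placeholder connection string
    .GetDatabase("test").GetCollection<BsonDocument>("events");   // placeholder names

// Stand-in for the 1k+ element list from the question.
var oneKList = Enumerable.Range(0, 1000).Select(i => $"key-{i}").ToList();
const int chunkSize = 50;

var merged = new Dictionary<string, int>();
for (var i = 0; i < oneKList.Count; i += chunkSize)
{
    var chunk = oneKList.Skip(i).Take(chunkSize);
    var pipeline = PipelineDefinition<BsonDocument, BsonDocument>.Create(new[]
    {
        new BsonDocument("$match",
            new BsonDocument("field", new BsonDocument("$in", new BsonArray(chunk)))),
        new BsonDocument("$group",
            new BsonDocument { { "_id", "$category" }, { "count", new BsonDocument("$sum", 1) } })
    });

    // The per-chunk calls could also be issued concurrently and awaited together.
    foreach (var doc in await collection.Aggregate(pipeline).ToListAsync())
    {
        var key = doc["_id"].AsString;
        // $sum is associative, so partial counts from each chunk can simply be added.
        merged[key] = merged.GetValueOrDefault(key) + doc["count"].AsInt32;
    }
}
// A non-decomposable accumulator (e.g. $stdDevPop) could not be merged this way.
```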
null
[ "aggregation" ]
[ { "code": "", "text": "Hi Team,I have scenario where I need to insert a document when there is no match found in the roles array. I tried but with the In condition it checks in any document but not in single document all the condition.\nI can not use normal update with element match as I will end up calling db multiple times.Sample mongo playgroundMongo playground: a simple sandbox to test and share MongoDB queries online", "username": "Shyam_Sohane" }, { "code": "", "text": "I think i got it. let me know if there is any other idea.Mongo playground: a simple sandbox to test and share MongoDB queries online", "username": "Shyam_Sohane" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to update nested array with aggregation pipeline
2022-08-17T19:42:10.203Z
How to update nested array with aggregation pipeline
1,690
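The playground links in the thread above carry the actual solution; as a hedged illustration of the same pattern (append an element to an embedded array only when no element with the same name already exists), here is one way to express it as an aggregation-pipeline update with the .NET driver, assuming a 4.2+ server and a driver version that supports pipeline updates. The collection, filter and field names ("accounts", "roles", "name", "accountId") are placeholders, not values from the thread.

```csharp
using System;
using MongoDB.Bson;
using MongoDB.Driver;

var collection = new MongoClient("mongodb://localhost:27017")
    .GetDatabase("test").GetCollection<BsonDocument>("accounts");

var newRole = new BsonDocument { { "name", "editor" }, { "grantedAt", DateTime.UtcNow } };

// $set roles to: the existing array if a role with that name is already present,
// otherwise the existing array (or []) with the new role appended.
var stage = new BsonDocument("$set", new BsonDocument("roles",
    new BsonDocument("$cond", new BsonArray
    {
        new BsonDocument("$in", new BsonArray
        {
            newRole["name"],
            new BsonDocument("$ifNull", new BsonArray { "$roles.name", new BsonArray() })
        }),
        "$roles",
        new BsonDocument("$concatArrays", new BsonArray
        {
            new BsonDocument("$ifNull", new BsonArray { "$roles", new BsonArray() }),
            new BsonArray { newRole }
        })
    })));

var update = Builders<BsonDocument>.Update.Pipeline(
    PipelineDefinition<BsonDocument, BsonDocument>.Create(new[] { stage }));

await collection.UpdateOneAsync(
    Builders<BsonDocument>.Filter.Eq("accountId", "A-1"),   // placeholder filter
    update);
```

Because the whole decision happens inside one update, only a single round trip to the database is needed, which is the constraint raised in the question.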
null
[ "swift" ]
[ { "code": "\n2022-06-11 14:08:29.896260-0400 Foody22[5530:81226] *** Terminating app due to uncaught exception 'RLMException', reason: 'Keychain returned unexpected status code: -34018'\n*** First throw call stack:\n(\n\t0 CoreFoundation 0x0000000180406ec4 __exceptionPreprocess + 172\n\t1 libobjc.A.dylib 0x000000018004c08c objc_exception_throw + 56\n\t2 Foody22 0x000000010084b6d0 _ZL17RLMTranslateErrorIZ49-[RLMApp initWithId:configuration:rootDirectory:]E3$_0EDaOT_ + 128\n\t3 Foody22 0x000000010084b4ec -[RLMApp initWithId:configuration:rootDirectory:] + 428\n\t4 Foody22 0x000000010084b9f8 +[RLMApp appWithId:configuration:rootDirectory:] + 288\n\t5 Foody22 0x000000010084bb4c +[RLMApp appWithId:configuration:] + 92\n\t6 Foody22 0x000000010084bc08 +[RLMApp appWithId:] + 68\n\t7 Foody22 0x00000001006d928c $sSo6RLMAppC2idABSS_tcfCTO + 56\n\t8 Foody22 0x00000001006d9188 $s7Foody2210FoodyRealmVACycfC + 1008\n\t9 Foody22 0x00000001007c64b4 $s7Foody220A3AppVACycfC + 76\n\t10 Foody22 0x00000001007c6ca4 $s7Foody220A3AppV7SwiftUI0B0AadEPxycfCTW + 12\n\t11 SwiftUI 0x0000000109cd2e88 OUTLINED_FUNCTION_9 + 76\n\t12 Foody22 0x00000001007c63f4 $s7Foody220A3AppV5$mainyyFZ + 40\n\t13 Foody22 0x00000001007c6cb8 main + 12\n\t14 dyld 0x000000010471df94 start_sim + 20\n\t15 ??? 0x000000010487dc10 0x0 + 4370979856\n\t16 ??? 0x1848000000000000 0x0 + 1749648455233437696\n)\nlibc++abi: terminating with uncaught exception of type NSException\n*** Terminating app due to uncaught exception 'RLMException', reason: 'Keychain returned unexpected status code: -34018'\nterminating with uncaught exception of type NSException\nCoreSimulator 842.7 - Device: iPhone 12 (4B30EC69-0DEF-46B0-890B-AAA0FC6B380E) - Runtime: iOS 16.0 (20A5283p) - DeviceType: iPhone 12\n", "text": "I’m trying out the new macOS, iOS, and Xcode from WWDC22.I’m getting the error:Thread 1: “Keychain returned unexpected status code: -34018”when I’m creating a RealmSwift.app with:let ra = RealmSwift.App(id: “foody23realm-jfsbk”)this line has worked in earlier versions.I don’t really expect Realm to fully support Apple’s beta software, but I thought you could use a heads up about the error and I could warn other users contemplating testing it at this time.Stack trace below:", "username": "Adam_Ek" }, { "code": "", "text": "One more note: Package version…let coreVersionStr = “12.1.0”let cocoaVersionStr = “10.28.1”", "username": "Adam_Ek" }, { "code": "", "text": "One more note:This error occurs with iPhone and iPad simulators.\nIt also occurs with a target of My Mac (Designed for iPad)However, it does not occur with a target of My Mac.", "username": "Adam_Ek" }, { "code": "", "text": "It’s now working with an iPhone 12 running iOS 16.However it’s still failing in simulators and in My Mac (Designed for iPad).", "username": "Adam_Ek" }, { "code": "", "text": "I’m seeing the same behaviour. Works on device, not on simulators.", "username": "Paul_P" }, { "code": "libc++abi: terminating with uncaught exception of type NSException\n*** Terminating app due to uncaught exception 'RLMException', reason: 'Keychain returned unexpected status code: -34018'\nterminating with uncaught exception of type NSException\n", "text": "Xcode 14 beta 3.\nIt now works on iPad simulator, but not on", "username": "Adam_Ek" }, { "code": "", "text": "I had to add Keychain Sharing capability to the target to make the app works on iPhone simulators (beta 4), not sure why though…", "username": "Sonisan" }, { "code": "", "text": "I am having the same issue in beta 4. 
Adding “Keychain sharing” as a capability in targets fixed the issue for me.", "username": "Tyler_Collins" }, { "code": "", "text": "I filed an issue on the GitHub repo for reference: https://github.com/realm/realm-swift/issues/7915", "username": "Tyler_Collins" } ]
RealmSwift Problem in Xcode 14 beta. Showstopper
2022-06-11T18:11:58.611Z
RealmSwift Problem in Xcode 14 beta. Showstopper
4,381
null
[ "server" ]
[ { "code": "", "text": "I just started using this mac mini M1, 2020 (macOs Monterey 12.5.1). I’m facing some issues trying to install the mongodb community version. First when trying to install @6.0 the “[…]/bin/mongo” was not being created so I give up and installed @5.0. The last error I’m facing is that I can’t start the mongodb service.\nmongo log has this error:\n“{“t”:{”$date\":“2022-08-17T15:27:05.396-03:00”},“s”:“F”, “c”:“CONTROL”, “id”:20573, “ctx”:“initandlisten”,“msg”:“Wrong mongod version”,“attr”:{“error”:“UPGRADE PROBLEM: Found an invalid featureCompatibilityVersion document (ERROR: Location4926900: Invalid featureCompatibilityVersion document in admin.system.version: { _id: \"featureCompatibilityVersion\", version: \"6.0\" }. See https://docs.mongodb.com/master/release-notes/4.4-compatibility/#feature-compatibility. :: caused by :: Invalid value for featureCompatibilityVersiondocument in admin.system.version, found 6.0, expected ‘4.4’ or ‘4.9’ or '5.0. See https://docs.mongodb.com/master/release-notes/4.4-compatibility/#feature-compatibility.). If the current featureCompatibilityVersion is below 4.4, see the documentation on upgrading at https://docs.mongodb.com/master/release-notes/4.4/#upgrade-procedures.”}}.\nSince I can’t run “mongo”, I don’t know how to set the compatibility version. Can anyone help?", "username": "Gabriel_Cardoso" }, { "code": "mongoshdb.adminCommand( { setFeatureCompatibilityVersion: \"5.0\" } )", "text": "Hi @Gabriel_Cardoso, it looks like you started a MongoDB version 6.0 version on your computer. Then you started a 5.0 version pointed at the same database files but as you found out that doesn’t work.I just tried starting 6.0 using a new database path which will set the FCV to 6.0 as yours is. I then tried to start MongoDB 5.0.10 and got the same error as you are getting.For a test, I restarted 6.0 and connected to the database with mongosh and ran db.adminCommand( { setFeatureCompatibilityVersion: \"5.0\" } ). This will downgrade the FCV to 5.0 on the data files. I was then able to start the 5.0.10 version of MongoDB. Note I wouldn’t do this on any production system without MongoDB support telling me to do it, and making sure I had a solid backup/recovery plan in place before in case I needed to recover my database files. You really shouldn’t change the FCV back to a previous version except under extreme situations, and even then there could be issues that arise from doing that.", "username": "Doug_Duncan" }, { "code": "", "text": "I reinstalled 6.0 (without uninstalling 5.0) and I think everything is working. Thanks so much for the advice @Doug_Duncan , I’ll keep that in mind as I work.", "username": "Gabriel_Cardoso" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Can't start [email protected] service in Apple M1
2022-08-17T18:53:29.136Z
Can&rsquo;t start [email protected] service in Apple M1
3,085
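For completeness, the setFeatureCompatibilityVersion command shown in the reply above can also be issued through a driver; a minimal sketch with the .NET driver follows. The connection string is a placeholder, and, as cautioned in the thread, this should only be done with a backup and a clear reason.

```csharp
using System;
using MongoDB.Bson;
using MongoDB.Driver;

// Connect while the newer binary (6.0 here) is still serving the data files,
// then lower the FCV so a 5.0 mongod can start against them.
var admin = new MongoClient("mongodb://localhost:27017").GetDatabase("admin");
var result = await admin.RunCommandAsync<BsonDocument>(
    new BsonDocument("setFeatureCompatibilityVersion", "5.0"));
Console.WriteLine(result);
```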
null
[ "server" ]
[ { "code": "~ brew services start [email protected] \n==> Successfully started `[email protected]` (label: [email protected])\n~ mongo \nMongoDB shell version v4.2.21\nconnecting to: mongodb://127.0.0.1:27017/?compressors=disabled&gssapiServiceName=mongodb\n2022-08-16T16:25:03.024-0500 E QUERY [js] Error: couldn't connect to server 127.0.0.1:27017, connection attempt failed: SocketException: Error connecting to 127.0.0.1:27017 :: caused by :: Connection refused :\nconnect@src/mongo/shell/mongo.js:353:17\n@(connect):2:6\n2022-08-16T16:25:03.029-0500 F - [main] exception: connect failed\n2022-08-16T16:25:03.029-0500 E - [main] exiting with code 1\n~ sudo lsof -i :27017\nmongod", "text": "It ran successful. But:Andreturned nothing.So this concludes that mongod is running on some unknown PORT.", "username": "Big_Cat_Public_Safety_Act" }, { "code": "sudo ss -tlp\nsudo netstat -tlp\nsudo ps -aef | grep [m]ongo\n", "text": "Share the configuration file and we might help.Also share the output oforThe output of the following is also needed:", "username": "steevej" }, { "code": "", "text": "where do I find the configuration file?", "username": "Big_Cat_Public_Safety_Act" }, { "code": "systemLog:\n destination: file\n path: /opt/homebrew/var/log/mongodb/mongo.log\n logAppend: true\nstorage:\n dbPath: /opt/homebrew/var/mongodb\nnet:\n bindIp: 127.0.0.1, ::1\n ipv6: true\n/opt/homebrew/etc/mongod.conf", "text": "This is what is in my /opt/homebrew/etc/mongod.conf file", "username": "Big_Cat_Public_Safety_Act" }, { "code": "ss~ sudo netstat -tlp \nnetstat: option requires an argument -- p\nUsage:\tnetstat [-AaLlnW] [-f address_family | -p protocol]\n\tnetstat [-gilns] [-f address_family]\n\tnetstat -i | -I interface [-w wait] [-abdgRtS]\n\tnetstat -s [-s] [-f address_family | -p protocol] [-w wait]\n\tnetstat -i | -I interface -s [-f address_family | -p protocol]\n\tnetstat -m [-m]\n\tnetstat -r [-Aaln] [-f address_family]\n\tnetstat -rs [-s]\n~ sudo ps -aef | grep mongo \n 501 4445 854 0 8:51AM ttys002 0:00.00 grep mongo\n", "text": "ss is not a recognized command.", "username": "Big_Cat_Public_Safety_Act" }, { "code": "", "text": "The ps output shows that mongod has not been started.Share the log file.", "username": "steevej" }, { "code": "", "text": "where is the log file located?", "username": "Big_Cat_Public_Safety_Act" }, { "code": "systemLog:\n destination: file\n path: /opt/homebrew/var/log/mongodb/mongo.log\n logAppend: true\n", "text": "It is specified in the configuration file you shared.", "username": "steevej" }, { "code": "", "text": "The file is too enormous:2022-08-15T13:21:43.753-0500 I CONTROL [main] ***** SERVER RESTARTED *****\n2022-08-15T13:21:43.759-0500 I CONTROL [main] Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols ‘none’\n2022-08-15T13:21:43.762-0500 W ASIO [main] No TransportLayer configured during NetworkInterface startup\n2022-08-15T13:21:43.763-0500 I CONTROL [initandlisten] MongoDB starting : pid=85422 port=27017 dbpath=/opt/homebrew/var/mongodb 64-bit host=Junlues-MacBook-Pro.local\n2022-08-15T13:21:43.763-0500 I CONTROL [initandlisten] db version v4.2.21\n2022-08-15T13:21:43.763-0500 I CONTROL [initandlisten] git version: b0aeed9445ff41af07449fa757e1f231bce990b3\n2022-08-15T13:21:43.763-0500 I CONTROL [initandlisten] allocator: system\n2022-08-15T13:21:43.763-0500 I CONTROL [initandlisten] modules: none\n2022-08-15T13:21:43.763-0500 I CONTROL [initandlisten] build environment:\n2022-08-15T13:21:43.763-0500 I CONTROL 
[initandlisten] distarch: x86_64\n2022-08-15T13:21:43.763-0500 I CONTROL [initandlisten] target_arch: x86_64\n2022-08-15T13:21:43.763-0500 I CONTROL [initandlisten] machdep.cpu.extfeatures unavailable\n2022-08-15T13:21:43.763-0500 I CONTROL [initandlisten] options: { config: “/opt/homebrew/etc/mongod.conf”, net: { bindIp: “127.0.0.1, ::1”, ipv6: true }, storage: { dbPath: “/opt/homebrew/var/mongodb” }, systemLog: { destination: “file”, logAppend: true, path: “/opt/homebrew/var/log/mongodb/mongo.log” } }\n2022-08-15T13:21:43.764-0500 E STORAGE [initandlisten] Failed to set up listener: SocketException: Address already in use\n2022-08-15T13:21:43.764-0500 I REPL [initandlisten] Stepping down the ReplicationCoordinator for shutdown, waitTime: 10000ms\n2022-08-15T13:21:43.765-0500 I SHARDING [initandlisten] Shutting down the WaitForMajorityService\n2022-08-15T13:21:43.765-0500 I NETWORK [initandlisten] Shutting down the global connection pool\n2022-08-15T13:21:43.765-0500 I INDEX [initandlisten] Shutting down the IndexBuildsCoordinator\n2022-08-15T13:21:43.765-0500 I NETWORK [initandlisten] Shutting down the ReplicaSetMonitor\n2022-08-15T13:21:43.765-0500 I CONTROL [initandlisten] Shutting down free monitoring\n2022-08-15T13:21:43.765-0500 I FTDC [initandlisten] Shutting down full-time data capture\n2022-08-15T13:21:43.765-0500 I STORAGE [initandlisten] Shutting down the HealthLog\n2022-08-15T13:21:43.765-0500 I - [initandlisten] Dropping the scope cache for shutdown\n2022-08-15T13:21:43.765-0500 I CONTROL [initandlisten] now exiting\n2022-08-15T13:21:43.765-0500 I CONTROL [initandlisten] shutting down with code:48", "username": "Big_Cat_Public_Safety_Act" }, { "code": "sudo netstat -l\nbrew services list\n", "text": "Failed to set up listener: SocketException: Address already in useMeans a process is already using the same address/port combination.Share the output of", "username": "steevej" }, { "code": "\n~ brew services start [email protected] \n==> Successfully started `[email protected]` (label: [email protected])\n~ brew services list \nName Status User File\[email protected] error 3584 gani ~/Library/LaunchAgents/[email protected]\nrabbitmq started gani ~/Library/LaunchAgents/homebrew.mxcl.rabbitmq.plist\nredis started gani ~/Library/LaunchAgents/homebrew.mxcl.redis.plist\n", "text": "", "username": "Big_Cat_Public_Safety_Act" }, { "code": "sudo netstat -l\nProto Recv-Q Send-Q Local Address Foreign Address (state) \ntcp4 0 0 10.0.0.183.49770 ec2-54-92-199-18.https ESTABLISHED\ntcp4 31 0 10.0.0.183.49769 ec2-35-163-77-22.https CLOSE_WAIT \ntcp4 0 0 10.0.0.183.49765 20.189.173.7.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49764 2001:558:feed:443::12.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49763 2606:4700:90:0:f22e:fbec:5bed:a9b9.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49759 ord08s13-in-x03.1e100.net.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49755 ord30s22-in-x05.1e100.net.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49753 2001:558:feed:443::12.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49752 2600:9000:2350:fa00:8:4923:b2c0:21.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49751 server-65-8-55-1.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49749 52.46.154.73.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49744 2a04:4e42:84::272.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49732 160.35.184.35.bc.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49696 
ec2-54-217-83-24.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49695 162.219.225.118.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49618 ec2-54-145-94-22.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49617 ec2-54-145-94-22.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49614 ec2-54-147-76-65.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49600 ec2-54-145-94-22.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49581 2a04:4e42:83::272.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49475 stackoverflow.co.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49390 stackoverflow.co.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49389 ec2-54-175-191-2.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49385 ec2-3-214-160-18.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49381 ec2-34-227-4-145.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49377 ec2-54-175-191-2.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49376 stackoverflow.co.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49364 stackoverflow.co.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49359 2600:9000:2350:4400:7:7859:3840:93a1.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49305 104.192.142.18.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49301 2606:4700:90:0:f22e:fbec:5bed:a9b9.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49274 ga-in-f188.1e100.net.5228 ESTABLISHED\ntcp4 0 0 localhost.epmd localhost.49271 ESTABLISHED\ntcp4 0 0 localhost.49271 localhost.epmd ESTABLISHED\ntcp4 24 0 10.0.0.183.49229 static.59.20.181.https CLOSE_WAIT \ntcp4 0 0 10.0.0.183.49216 ec2-3-225-113-73.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49201 2001:558:feed:443::12.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49197 2001:558:feed:443::12.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49196 2001:558:feed:443::12.https ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.49195 2001:558:feed:443::12.https ESTABLISHED\ntcp4 0 0 10.0.0.183.49178 ec2-3-215-27-40..https ESTABLISHED\ntcp4 0 0 10.0.0.183.49152 ec2-52-44-179-25.https ESTABLISHED\ntcp4 0 0 localhost.epmd localhost.49771 TIME_WAIT \ntcp4 0 0 localhost.49773 localhost.epmd TIME_WAIT \ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.64362 2620:149:a42:903::e.443 ESTABLISHED\ntcp4 0 0 10.0.0.183.49279 17.248.190.204.443 ESTABLISHED\ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.64361 2620:149:a42:903::e.443 TIME_WAIT \ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.64360 2620:149:a42:903::e.443 TIME_WAIT \ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.64359 2620:149:a42:903::e.443 TIME_WAIT \ntcp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.63787 2603:1036:304:2853::2.443 ESTABLISHED\ntcp4 0 0 10.0.0.183.65454 17.57.144.134.5223 ESTABLISHED\nudp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.61814 ord38s33-in-x0e.1e100.net.https \nudp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.61608 ord37s08-in-x03.1e100.net.https \nudp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.58947 ord38s29-in-x03.1e100.net.https \nudp6 0 0 2601:241:8202:54b0:461:1336:75bf:e2af.62043 ord38s28-in-x0a.1e100.net.https \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \nudp4 0 0 *.* *.* \n", "text": "", "username": "Big_Cat_Public_Safety_Act" }, { "code": "logAppend: truelogAppend: false\nbrew services start [email protected]", "text": "To solveThe file is too 
enormousReplacelogAppend: truewithand then redobrew services start [email protected] the log file, we should get the real reason why it fails. The other error from the log you shared is from 2 days ago. Most likely you had another mongod running at that time.", "username": "steevej" }, { "code": "2022-08-17T10:55:27.046-0500 I CONTROL [main] Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'\n2022-08-17T10:55:27.048-0500 W ASIO [main] No TransportLayer configured during NetworkInterface startup\n2022-08-17T10:55:27.049-0500 I CONTROL [initandlisten] MongoDB starting : pid=30240 port=27017 dbpath=/opt/homebrew/var/mongodb 64-bit host=Junlues-MacBook-Pro.local\n2022-08-17T10:55:27.049-0500 I CONTROL [initandlisten] db version v4.2.21\n2022-08-17T10:55:27.049-0500 I CONTROL [initandlisten] git version: b0aeed9445ff41af07449fa757e1f231bce990b3\n2022-08-17T10:55:27.049-0500 I CONTROL [initandlisten] allocator: system\n2022-08-17T10:55:27.049-0500 I CONTROL [initandlisten] modules: none\n2022-08-17T10:55:27.049-0500 I CONTROL [initandlisten] build environment:\n2022-08-17T10:55:27.049-0500 I CONTROL [initandlisten] distarch: x86_64\n2022-08-17T10:55:27.049-0500 I CONTROL [initandlisten] target_arch: x86_64\n2022-08-17T10:55:27.049-0500 I CONTROL [initandlisten] machdep.cpu.extfeatures unavailable\n2022-08-17T10:55:27.049-0500 I CONTROL [initandlisten] options: { config: \"/opt/homebrew/etc/mongod.conf\", net: { bindIp: \"127.0.0.1, ::1\", ipv6: true }, storage: { dbPath: \"/opt/homebrew/var/mongodb\" }, systemLog: { destination: \"file\", logAppend: false, path: \"/opt/homebrew/var/log/mongodb/mongo.log\" } }\n2022-08-17T10:55:27.050-0500 I STORAGE [initandlisten] Detected data files in /opt/homebrew/var/mongodb created by the 'wiredTiger' storage engine, so setting the active storage engine to 'wiredTiger'.\n2022-08-17T10:55:27.050-0500 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=15872M,cache_overflow=(file_max=0M),session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000,close_scan_interval=10,close_handle_minimum=250),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress],\n2022-08-17T10:55:27.166-0500 E STORAGE [initandlisten] WiredTiger error (-31802) [1660751727:166653][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error Raw: [1660751727:166653][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error\n2022-08-17T10:55:27.219-0500 E STORAGE [initandlisten] WiredTiger error (-31802) [1660751727:219207][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error Raw: [1660751727:219207][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error\n2022-08-17T10:55:27.243-0500 E STORAGE [initandlisten] WiredTiger error (-31802) [1660751727:243834][30240:0x20969b600], connection: __log_open_verify, 925: 
unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error Raw: [1660751727:243834][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error\n2022-08-17T10:55:27.278-0500 E STORAGE [initandlisten] WiredTiger error (-31802) [1660751727:278461][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error Raw: [1660751727:278461][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error\n2022-08-17T10:55:27.315-0500 E STORAGE [initandlisten] WiredTiger error (-31802) [1660751727:315348][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error Raw: [1660751727:315348][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error\n2022-08-17T10:55:27.337-0500 W STORAGE [initandlisten] Failed to start up WiredTiger under any compatibility version.\n2022-08-17T10:55:27.337-0500 F STORAGE [initandlisten] Reason: -31802: WT_ERROR: non-specific WiredTiger error\n2022-08-17T10:55:27.337-0500 F - [initandlisten] Fatal Assertion 28595 at src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp 928\n2022-08-17T10:55:27.337-0500 F - [initandlisten] \\n\\n***aborting after fassert() failure\\n\\n\n\n", "text": "", "username": "Big_Cat_Public_Safety_Act" }, { "code": "2022-08-17T10:55:27.166-0500 E STORAGE [initandlisten] WiredTiger error (-31802) [1660751727:166653][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error Raw: [1660751727:166653][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error\n2022-08-17T10:55:27.219-0500 E STORAGE [initandlisten] WiredTiger error (-31802) [1660751727:219207][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error Raw: [1660751727:219207][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error\n2022-08-17T10:55:27.243-0500 E STORAGE [initandlisten] WiredTiger error (-31802) [1660751727:243834][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error Raw: [1660751727:243834][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: 
non-specific WiredTiger error\n2022-08-17T10:55:27.278-0500 E STORAGE [initandlisten] WiredTiger error (-31802) [1660751727:278461][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error Raw: [1660751727:278461][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error\n2022-08-17T10:55:27.315-0500 E STORAGE [initandlisten] WiredTiger error (-31802) [1660751727:315348][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error Raw: [1660751727:315348][30240:0x20969b600], connection: __log_open_verify, 925: unsupported WiredTiger file version: this build only supports versions up to 4, and the file is version 5: WT_ERROR: non-specific WiredTiger error\n", "text": "It looks like you had a more recent version of mongod using the same data directory.I see 3 ways out:1 - If the data that is there IS IMPORTANT, install and run a 5.x version rather than 4.2or2 - If the data that is there IS IMPORTANT and you MUST run 4.2,or3 - If the data that is there IS NOT IMPORTANT and you must run 4.2", "username": "steevej" }, { "code": "", "text": "I chose solution 3 and it worked.But I am not sure what you mean by data directory", "username": "Big_Cat_Public_Safety_Act" }, { "code": "storage:\n dbPath: /opt/homebrew/var/mongodb\n", "text": "It means the directory where mongod store the data. In your case:", "username": "steevej" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
On my Mac, `brew services start [email protected]` starts mongod process on an unknown port
2022-08-17T13:27:49.412Z
On my Mac, `brew services start [email protected]` starts mongod process on an unknown port
8,813
null
[]
[ { "code": "", "text": "I’m just starting out with Realm and device sync so these are quite basic questions.\nI have an existing app that contains models that I want to use to create my collection schemas. So I’m in Developer mode letting the app set up the schemas. This means there are no query fields that are selectable at the server when enabling flexible sync.", "username": "John_Atkins" }, { "code": "", "text": "Hi, you can select fields after the app has created the collections and you can even “add them” before they have been created using dev mode (you just type them in). Either works.Im not totally sure what you are asking in this second part. The queryable field names are just field names “user_id”, “category”, “tags”, etc. They can be present in any/all/none Schemas and that is valid.Thanks,\nTyler", "username": "Tyler_Kaye" }, { "code": "", "text": "Thank you @Tyler_Kaye you’ve actually answered both questions. My second question was down to a misunderstanding so that’s fine.", "username": "John_Atkins" } ]
Two questions about setting up flexible sync
2022-08-16T21:34:00.895Z
Two questions about setting up flexible sync
1,143
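To make the idea of queryable fields a little more concrete, here is a hedged sketch of a flexible sync subscription that filters on one, written with the Realm .NET SDK (the Swift SDK exposes an equivalent subscriptions API). The app id, the Item class and its OwnerId field are invented for illustration; OwnerId would need to be listed as a queryable field on the server side.

```csharp
using System.Linq;
using MongoDB.Bson;
using Realms;
using Realms.Sync;

var app = App.Create("your-app-id");                        // placeholder App Services app id
var user = await app.LogInAsync(Credentials.Anonymous());   // any auth provider works here

var realm = await Realm.GetInstanceAsync(new FlexibleSyncConfiguration(user));

// Subscribe to the documents this user owns; "OwnerId" must be a queryable field.
realm.Subscriptions.Update(() =>
{
    realm.Subscriptions.Add(realm.All<Item>().Where(i => i.OwnerId == user.Id));
});
await realm.Subscriptions.WaitForSynchronizationAsync();

// Hypothetical model class, invented for this sketch.
public class Item : RealmObject
{
    [PrimaryKey, MapTo("_id")]
    public ObjectId Id { get; set; } = ObjectId.GenerateNewId();

    public string OwnerId { get; set; }
}
```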
null
[ "dot-net" ]
[ { "code": " public async Task<IHttpActionResult> DataStorageDestroyAsync(int companyId, string userEmail)\n {\n Trace.TraceInformation(\"DataStorageAPI > UtilityController > DataStorageDestroyAsync Started.\");\n string retStatus = \"Failed\";\n\n try\n {\n Trace.TraceInformation(\"DataStorageAPI > UtilityController > DataStorageDestroyAsync - companyId:\" + companyId + \";\");\n Trace.TraceInformation(\"DataStorageAPI > UtilityController > DataStorageDestroyAsync - userEmail:\" + userEmail + \";\");\n }\n catch (Exception) { }\n\n try\n {\n string databaseName = \"DataStorage\" + companyId.ToString();\n try\n {\n Trace.TraceInformation(\"DataStorageAPI > UtilityController > DataStorageDestroyAsync - databaseName:\" + databaseName + \";\");\n }\n catch (Exception) { }\n\n if (!string.IsNullOrEmpty(databaseName))\n {\n var connectionString = CryptoUtility.GetDecryptedString(System.Configuration.ConfigurationManager.ConnectionStrings[\"MongoDBContext\"].ConnectionString);\n\n MongoClientSettings settings = MongoClientSettings.FromUrl(\n new MongoUrl(connectionString)\n );\n\n settings.ApplicationName = \"DHC_DataStorage.API.DataStorageDestroy\";\n settings.ConnectTimeout = TimeSpan.FromSeconds(1);\n settings.HeartbeatInterval = TimeSpan.FromSeconds(7);\n settings.HeartbeatTimeout = TimeSpan.FromSeconds(8);\n settings.MaxConnectionIdleTime = TimeSpan.FromSeconds(2);\n settings.MaxConnectionLifeTime = TimeSpan.FromSeconds(3);\n settings.MaxConnectionPoolSize = 10000;\n settings.MinConnectionPoolSize = 1;\n settings.LocalThreshold = TimeSpan.FromMilliseconds(20);\n settings.ServerSelectionTimeout = TimeSpan.FromSeconds(5);\n settings.SocketTimeout = TimeSpan.FromSeconds(4);\n settings.WaitQueueTimeout = TimeSpan.FromSeconds(6);\n\n\n MongoClient mongoClient = new MongoClient(settings);\n\n var db = mongoClient.GetDatabase(databaseName);\n\n //Deleting all users in the database.\n var command = @\"{ dropAllUsersFromDatabase: 1, writeConcern: {w: \"\"majority\"\" }}\";\n await db.RunCommandAsync<BsonDocument>(command);\n\n //Drop a MongoDB database and contained collections.\n CancellationToken cancellationToken = new CancellationToken();\n mongoClient.DropDatabase(databaseName, cancellationToken);\n\n retStatus = \"Success\";\n }\n }\n catch (Exception ex)\n {\n Trace.TraceError(\"DataStorageAPI > UtilityController > DataStorageDestroyAsync Failed. 
Error:\" + ex.ToString());\n retStatus = \"Failed\";\n }\n\n //********************************************************************************\n //End DataStorageDestroy\n //********************************************************************************\n Trace.TraceInformation(\"DataStorageAPI > UtilityController > DataStorageDestroyAsync Ended.\");\n\n return Ok(retStatus);\n }\n2022-08-10T09:33:39.825+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39127 (3 connections now open)\n2022-08-10T09:33:39.826+0000 I NETWORK [conn39127] received client metadata from 52.153.231.234:43777 conn39127: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:33:39.888+0000 I ACCESS [conn39127] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:34:39.883+0000 I NETWORK [conn39127] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:34:39.930+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39128 (3 connections now open)\n2022-08-10T09:34:39.931+0000 I NETWORK [conn39128] received client metadata from 52.153.231.234:43776 conn39128: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:34:39.996+0000 I ACCESS [conn39128] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:35:39.991+0000 I NETWORK [conn39128] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:35:39.999+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39129 (3 connections now open)\n2022-08-10T09:35:40.000+0000 I NETWORK [conn39129] received client metadata from 52.153.231.234:43777 conn39129: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:35:40.064+0000 I ACCESS [conn39129] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:36:40.054+0000 I NETWORK [conn39129] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:36:40.069+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39130 (3 connections now open)\n2022-08-10T09:36:40.070+0000 I NETWORK [conn39130] received client metadata from 52.153.231.234:43776 conn39130: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:36:40.141+0000 I ACCESS [conn39130] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:37:40.135+0000 I NETWORK [conn39130] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:37:40.151+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39131 (3 
connections now open)\n2022-08-10T09:37:40.152+0000 I NETWORK [conn39131] received client metadata from 52.153.231.234:43777 conn39131: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:37:40.217+0000 I ACCESS [conn39131] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:38:40.211+0000 I NETWORK [conn39131] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:38:40.228+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39132 (3 connections now open)\n2022-08-10T09:38:40.228+0000 I NETWORK [conn39132] received client metadata from 52.153.231.234:43776 conn39132: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:38:40.295+0000 I ACCESS [conn39132] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:39:40.287+0000 I NETWORK [conn39132] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:39:40.299+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39133 (3 connections now open)\n2022-08-10T09:39:40.300+0000 I NETWORK [conn39133] received client metadata from 52.153.231.234:43777 conn39133: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:39:40.366+0000 I ACCESS [conn39133] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:40:40.363+0000 I NETWORK [conn39133] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:40:40.381+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39134 (3 connections now open)\n2022-08-10T09:40:40.382+0000 I NETWORK [conn39134] received client metadata from 52.153.231.234:43776 conn39134: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:40:40.449+0000 I ACCESS [conn39134] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:41:40.446+0000 I NETWORK [conn39134] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:41:40.474+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39135 (3 connections now open)\n2022-08-10T09:41:40.474+0000 I NETWORK [conn39135] received client metadata from 52.153.231.234:43777 conn39135: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:41:40.539+0000 I ACCESS [conn39135] Successfully 
authenticated as principal emmongoadmin on admin\n2022-08-10T09:42:40.551+0000 I NETWORK [conn39135] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:42:40.567+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39136 (3 connections now open)\n2022-08-10T09:42:40.568+0000 I NETWORK [conn39136] received client metadata from 52.153.231.234:43776 conn39136: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:42:40.629+0000 I ACCESS [conn39136] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:43:40.639+0000 I NETWORK [conn39136] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:43:40.653+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39137 (3 connections now open)\n2022-08-10T09:43:40.653+0000 I NETWORK [conn39137] received client metadata from 52.153.231.234:43777 conn39137: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:43:40.714+0000 I ACCESS [conn39137] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:44:40.718+0000 I NETWORK [conn39137] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:44:40.732+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39138 (3 connections now open)\n2022-08-10T09:44:40.733+0000 I NETWORK [conn39138] received client metadata from 52.153.231.234:43776 conn39138: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:44:40.795+0000 I ACCESS [conn39138] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:45:40.816+0000 I NETWORK [conn39138] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:45:40.832+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39139 (3 connections now open)\n2022-08-10T09:45:40.832+0000 I NETWORK [conn39139] received client metadata from 52.153.231.234:43777 conn39139: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:45:40.895+0000 I ACCESS [conn39139] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:46:40.907+0000 I NETWORK [conn39139] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:46:40.927+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39140 (3 connections now open)\n2022-08-10T09:46:40.927+0000 I NETWORK [conn39140] received client metadata from 52.153.231.234:43776 conn39140: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: 
\"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:46:40.990+0000 I ACCESS [conn39140] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:47:40.993+0000 I NETWORK [conn39140] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:47:41.012+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39141 (3 connections now open)\n2022-08-10T09:47:41.017+0000 I NETWORK [conn39141] received client metadata from 52.153.231.234:43777 conn39141: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:47:41.090+0000 I ACCESS [conn39141] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:48:41.098+0000 I NETWORK [conn39141] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:48:41.114+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39142 (3 connections now open)\n2022-08-10T09:48:41.114+0000 I NETWORK [conn39142] received client metadata from 52.153.231.234:43776 conn39142: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:48:41.179+0000 I ACCESS [conn39142] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:49:41.183+0000 I NETWORK [conn39142] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:49:41.198+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39143 (3 connections now open)\n2022-08-10T09:49:41.199+0000 I NETWORK [conn39143] received client metadata from 52.153.231.234:43777 conn39143: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:49:41.263+0000 I ACCESS [conn39143] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:50:41.267+0000 I NETWORK [conn39143] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:50:41.290+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39144 (3 connections now open)\n2022-08-10T09:50:41.291+0000 I NETWORK [conn39144] received client metadata from 52.153.231.234:43776 conn39144: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:50:41.354+0000 I ACCESS [conn39144] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:51:41.348+0000 I NETWORK [conn39144] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:51:41.365+0000 I NETWORK [listener] connection 
accepted from 52.153.231.234:43777 #39145 (3 connections now open)\n2022-08-10T09:51:41.366+0000 I NETWORK [conn39145] received client metadata from 52.153.231.234:43777 conn39145: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:51:41.431+0000 I ACCESS [conn39145] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:52:41.426+0000 I NETWORK [conn39145] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:52:41.443+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39146 (3 connections now open)\n2022-08-10T09:52:41.444+0000 I NETWORK [conn39146] received client metadata from 52.153.231.234:43776 conn39146: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:52:41.509+0000 I ACCESS [conn39146] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:53:41.504+0000 I NETWORK [conn39146] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:53:41.519+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39147 (3 connections now open)\n2022-08-10T09:53:41.520+0000 I NETWORK [conn39147] received client metadata from 52.153.231.234:43777 conn39147: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:53:41.603+0000 I ACCESS [conn39147] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:54:41.609+0000 I NETWORK [conn39147] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:54:41.624+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39148 (3 connections now open)\n2022-08-10T09:54:41.625+0000 I NETWORK [conn39148] received client metadata from 52.153.231.234:43776 conn39148: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:54:41.689+0000 I ACCESS [conn39148] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:55:41.690+0000 I NETWORK [conn39148] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:55:41.709+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39149 (3 connections now open)\n2022-08-10T09:55:41.710+0000 I NETWORK [conn39149] received client metadata from 52.153.231.234:43777 conn39149: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" 
}\n2022-08-10T09:55:41.783+0000 I ACCESS [conn39149] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:56:41.794+0000 I NETWORK [conn39149] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:56:41.809+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39150 (3 connections now open)\n2022-08-10T09:56:41.810+0000 I NETWORK [conn39150] received client metadata from 52.153.231.234:43776 conn39150: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:56:41.882+0000 I ACCESS [conn39150] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:57:41.878+0000 I NETWORK [conn39150] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:57:41.895+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39151 (3 connections now open)\n2022-08-10T09:57:41.895+0000 I NETWORK [conn39151] received client metadata from 52.153.231.234:43777 conn39151: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:57:41.958+0000 I ACCESS [conn39151] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:58:41.955+0000 I NETWORK [conn39151] end connection 52.153.231.234:43777 (2 connections now open)\n2022-08-10T09:58:41.969+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43776 #39152 (3 connections now open)\n2022-08-10T09:58:41.969+0000 I NETWORK [conn39152] received client metadata from 52.153.231.234:43776 conn39152: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:58:42.036+0000 I ACCESS [conn39152] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T09:59:42.040+0000 I NETWORK [conn39152] end connection 52.153.231.234:43776 (2 connections now open)\n2022-08-10T09:59:42.055+0000 I NETWORK [listener] connection accepted from 52.153.231.234:43777 #39153 (3 connections now open)\n2022-08-10T09:59:42.055+0000 I NETWORK [conn39153] received client metadata from 52.153.231.234:43777 conn39153: { application: { name: \"DHC_DataStorage.API.DataStorageDestroy\" }, driver: { name: \"mongo-csharp-driver\", version: \"2.17.1.0\" }, os: { type: \"Windows\", name: \"Microsoft Windows 10.0.14393\", architecture: \"x86_32\", version: \"10.0.14393\" }, platform: \".NET Framework 4.8.4480.0\" }\n2022-08-10T09:59:42.117+0000 I ACCESS [conn39153] Successfully authenticated as principal emmongoadmin on admin\n2022-08-10T10:00:42.141+0000 I NETWORK [conn39153] end connection 52.153.231.234:43777 (2 connections now open)\ndate\tlevel\tapplicationName\tinstanceId\teventTickCount\teventId\tpid\ttid\tmessage\n2022-08-09T21:47:25\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t84\tDHC_DataStorage > Api > Providers > CustomOAuthProvider > GrantResourceOwnerCredentials 
numRetries: 0\n2022-08-09T21:47:25\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t84\tDH_Common > Services > ManagementAPIService > ValidateApiClientUser - Start;\n2022-08-09T21:47:25\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t84\tDH_Common > Services > ManagementAPIService > ValidateApiClientUser - 406 - secret:CE7BA5CD-40AF-4D94-A39D-0D183A9F2EE5;\n2022-08-09T21:47:25\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t84\tDH_Common > Services > ManagementAPIService > ValidateApiClientUser - 406 - clientId:AccountAdmin;\n2022-08-09T21:47:25\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t82\tDH_Common > Services > ManagementAPIService > ValidateApiClientUser - Successful;\n2022-08-09T21:47:25\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t7\tDataStorageAPI > UtilityController > DataStorageCreateAsync Started.\n2022-08-09T21:47:25\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t7\tDataStorageAPI > UtilityController > DataStorageCreateAsync - companyId:159;\n2022-08-09T21:47:25\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t7\tDataStorageAPI > UtilityController > DataStorageCreateAsync - userEmail:[email protected];\n2022-08-09T21:47:26\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t9\tDHC_DataStorage > Data > MongoDataContext > Mongo MaxConnectionPoolSize:10000;\n2022-08-09T21:47:26\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t9\tDHC_DataStorage > Data > MongoDataContext > Mongo MinConnectionPoolSize:1;\n2022-08-09T21:47:26\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t27688\t81\tDataStorageAPI > UtilityController > DataStorageCreateAsync Ended.\n2022-08-09T22:03:36\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t17260\t10\tDHC_DataStorage > Api > Providers > CustomOAuthProvider > GrantResourceOwnerCredentials numRetries: 0\n2022-08-09T22:03:36\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t17260\t10\tDH_Common > Services > ManagementAPIService > ValidateApiClientUser - Start;\n2022-08-09T22:03:36\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t17260\t10\tDH_Common > Services > ManagementAPIService > ValidateApiClientUser - 406 - secret:CE7BA5CD-40AF-4D94-A39D-0D183A9F2EE5;\n2022-08-09T22:03:36\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t17260\t10\tDH_Common > Services > ManagementAPIService > ValidateApiClientUser - 406 - clientId:AccountAdmin;\n2022-08-09T22:03:36\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t17260\t8\tDH_Common > Services > ManagementAPIService > ValidateApiClientUser - Successful;\n2022-08-09T22:03:37\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t17260\t5\tDataStorageAPI > UtilityController > DataStorageDestroyAsync Started.\n2022-08-09T22:03:37\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t17260\t5\tDataStorageAPI > UtilityController > DataStorageDestroyAsync - companyId:159;\n2022-08-09T22:03:37\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t17260\t5\tDataStorageAPI > UtilityController > DataStorageDestroyAsync - userEmail:;\n2022-08-09T22:03:37\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t17260\t5\tDataStorageAPI > UtilityController > DataStorageDestroyAsync - databaseName:DataStorage159;\n2022-08-09T22:03:37\tInformation\tQADHCDataStorageAPI\t2.17E+15\t6.37957E+17\t0\t17260\t10\tDataStorageAPI > UtilityController > 
DataStorageDestroyAsync Ended.\n", "text": "I am fairly new to using MongoDB Community edition and I am hoping someone can tell me that this can be corrected with a simple MongoClientSettings change. What I am seeing in the mongo log file is what looks like a keep alive login once a second that goes on forever until I stop the Web Service in Azure, long after the C#.net code has executed and the garbage collection has happened. My Azure Logging for the application only shows the code executed once. I have tried this with MongoDB Community version 4.0, 4.2 and 5.0.9 and MongoDB.Driver versions 2.6.1 to 2.17.1and it happens on all of them. Below is a copy of the C#.Net code and you can see it is deleting users from a database and then the database however this infinite logging is happening even on simple record fetches as well. I can only assume it is some ClientSetting I am missing to tell the MongoDB.Driver not to try and keep the thread alive past a certain point but, as you can see from the code, I have almost run out of settings to try at this point. Any suggestions are welcome. This has become a major problem in the size of my logs and I am having to cycle my Web Services every night because of this. Thanks in advance for your input.C#.Net CodeA snippet of the MongoDB Log that is filled with continuous Login callsAzure Log for the web service", "username": "steve_mauldin" }, { "code": "", "text": "If you increase logging on the server side you would probably see what this connection was doing while it’s connected to the server, my guess is that it’s probably complying with MongoDB driver spec by monitoring the state of the server/replica set.Asya", "username": "Asya_Kamsky" }, { "code": "MongoClientSettingsMaxConnectionIdleTimeMaxConnectionLifeTimeMinConnectionPoolSizeMaxConnectionIdleTimeMaxConnectionLifeTimeMongoClientSettingsMaxConnectionPoolSizeMongoClientSettings", "text": "Hi, @steve_mauldin,You are modifying your default MongoClientSettings, notably setting MaxConnectionIdleTime to 2 seconds and MaxConnectionLifeTime to 3 seconds as well as MinConnectionPoolSize to 1. The net effect is that each connection pool - of which there will be one per replset cluster member - will maintain a minimum pool size of 1. You have also configured that connections can only remain idle for 2 seconds (MaxConnectionIdleTime) and MUST be reaped after 3 seconds regardless of how much or little they are used (MaxConnectionLifeTime). The net effect of these settings is that you are aggressively reaping connections in the pool, but requiring at least 1 connection in the pool resulting in rapid turnover of connections.It is a bit curious that connections are turning over every second rather than every two seconds, but maybe those logs are from slightly different MongoClientSettings.Settings MaxConnectionPoolSize to 10,000 is also not recommended. If you’re connecting to a 3-member replica set, you could potentially create over 30,000 connections from your app server to the cluster. I would encourage you to leave this value at the default of 100 unless you have a strong reason not to.Overall I would suggest leaving the MongoClientSettings at their default values unless performance and scalability testing indicates better values for your application workload.Sincerely,\nJames", "username": "James_Kovacs" }, { "code": "", "text": "James,Thanks you for the reply. I initially was running with the default settings and was seeing the infinite pinging. 
What you are seeing in the settings now is the result of about 200+ attempts over 30 days trying to figure out how to stop the thread from logging in every minute. I have found little documentation on the different ClientSettings options. I am running a single instance of Community MongoDB, with no replication and no sharding. The connections are once per minute, not once per second, but over the course of a day the number of connections starts to bloat the log file. I have set the client settings back to default and I am still seeing a once-a-minute login even though the calling code and object were long since garbage collected. I am at a loss for how to proceed at this point. Any advice on how to tell the C# MongoDB.Driver to stop the keep-alive login ping would be greatly appreciated.", "username": "steve_mauldin" }, { "code": "", "text": "James, I reread your post a second time and have reset EVERYTHING back to the defaults, and the issue went away. The MinConnectionPoolSize=1 is what was causing what looked like an infinite keep-alive, because the driver was maintaining at minimum one thread alive even after my program had garbage collected the MongoClient object. I was thinking, incorrectly, that once the MongoClient object got garbage collected it would no longer exist in memory, but that is not the case for the Mongo.Driver, at least on Azure. Only stopping and restarting the App Service would stop the Mongo.Driver from keeping that thread alive. The reason I had set MinConnectionPoolSize to 1 was after reading posts about improving MongoDB performance. We were having some throughput issues early on, and that is also where the MaxConnectionPoolSize=10000 came from: our load testing of very large datasets into our MongoDB. We have since created a front-end queuing system that only allows a limited number of connections at a time, and MaxConnectionPoolSize just never got set back to 100. Thanks again for your reply.", "username": "steve_mauldin" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
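The thread above concerns the .NET driver, but the pool options James describes (MaxConnectionPoolSize, MinConnectionPoolSize, idle/lifetime limits) have close analogues in most MongoDB drivers. As an illustration only, and not the poster's code, here is a minimal PyMongo sketch showing where the equivalent settings live; the URI and database name are placeholders, and the values shown are the defaults that the replies above recommend keeping.

from pymongo import MongoClient

# Illustrative only: analogous pool options in PyMongo (the thread uses the .NET driver).
# Leaving these at their defaults, as suggested above, is usually the right choice.
client = MongoClient(
    "mongodb://localhost:27017",  # placeholder URI
    maxPoolSize=100,              # default; values like 10,000 are rarely needed
    minPoolSize=0,                # default; a non-zero value keeps idle connections open
    # idle-time / connection-lifetime limits are best left unset unless testing shows otherwise
)

db = client["example_db"]         # placeholder database name
print(db.command("ping"))         # simple round trip to verify connectivity
client.close()                    # close the client once, when the application shuts down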
Infinite number of logins being generated to the MongoDB from a single call from a C#.net MongoDB.Driver version 2.17.1 application
2022-08-10T15:07:01.512Z
Infinite number of logins being generated to the MongoDB from a single call from a C#.net MongoDB.Driver version 2.17.1 application
2,324
null
[ "configuration" ]
[ { "code": "", "text": "Hi, i am trying to enable ipv6 like in this setting: https://www.mongodb.com/docs/v5.0/reference/configuration-options/#mongodb-setting-net.ipv6\nbut i can’t figure out a way to access the configuration file in a Atlas database.\nHow can i access the file in a AWS / M0 cluster?", "username": "Enrico" }, { "code": "", "text": "How can i access the file in a AWS / M0 cluster?You simply cannot do that.Atlas clusters are configured with the Atlas web interface and an API. But for shared M0 you cannot change the configuration.", "username": "steevej" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Edit Configuration File in Atlas
2022-08-17T14:33:20.755Z
Edit Configuration File in Atlas
2,420
null
[ "swift" ]
[ { "code": "partitionValue.environment(\\.partitionValue, newValue)already opened with different partitionValueAsyncOpen@AsyncOpen", "text": "hey everyone, I am currently working with realm sync and use partition based app service.on my main screen I have x items with different partitionValue. when I select an item I am changing .environment(\\.partitionValue, newValue). this is working for the first start.if I go back to this list and select the same item again, I am able to open the view again, but my sync is disconnecting.if I select a different one, my app is crashing and saying already opened with different partitionValue1.st question: is there a way to not disconnect from sync when I am opening again the same AsyncOpen view?2.nd question: how am I allowed to remove the partition to setup a different one for my view with @AsyncOpen", "username": "Alexander_Puchta" }, { "code": "", "text": "Can you clarify what you mean by ‘sync is disconnecting’? Realm data is always stored locally first and then Realm sync’s it to the server at a later time - the sync connecting, sync’ing and disconnecting is a normal process.On the partition part of the question; if you need to change partitions, simply set your results to nil (if you store them as a class var), and then async open with the new partition and load the new results.", "username": "Jay" }, { "code": "2022-08-17 09:38:53.436796+0200 XYZ[xxxxx: xxxxx] Sync: Connected to endpoint 'ip.address' (from 'ip.address')\n@AsyncOpen2022-08-17 09:38:59.890651+0200 XYZ[xxxxx: xxxxx] Sync: Connection[5]: Disconnected\[email protected](\\.partitionValue, value)", "text": "i am connecting to my sync database and open partition value “xyz”after I leave my view with @AsyncOpen wrapper, and go back to the same partition, this is what I receive:reagarding my point to switch partition, i don’t store any realm locally, we just use @AsyncOpen and set .environment(\\.partitionValue, value)", "username": "Alexander_Puchta" }, { "code": "", "text": "Realm is an offline first database.Realm Database is offline-first. You always read from and write to the local database, not over the network. When Device Sync is enabled, Realm Database synchronizes data with App Services over the network in a background thread. The sync protocol resolves conflicts consistently on each client and in the linked Atlas cluster.Are you perhaps accessing Atlas directly using the Swift Driver?", "username": "Jay" }, { "code": "app.login(credentials:)@AsyncOpenpartitionValuexyzyzaalready opened with another partition@AsyncOpen", "text": "ok thats right, but I am a bit confused why its disconnecting when we select it for the 2nd time - on 1st selection, its not disconnecting (seems to me that there is something wrong)not sure what you mean with swift driver?I am only logging in on app start app.login(credentials:) and after I am logged in I am selecting my partition.\nthere is a view which has @AsyncOpen property and I am providing to that view my partitionValue.but like already explained, once I want to switch from partition xyz to partition yza its crashing with error already opened with another partition - but I don’t have any realm property anywhere.am I forced to use realm instances in a manager or something like that and provide different realms to my views? 
I was thinking that the @AsyncOpen wrapper will manage this for me.", "username": "Alexander_Puchta" }, { "code": "let config = user.configuration(partitionValue: whichPartition)\nRealm.asyncOpen(configuration: config) { result in\n switch result {\n case .failure(let error):\n print(\"Failed to open realm: \\(error.localizedDescription)\")\n case .success(let realm):\n print(\"Successfully opened realm: \\(realm)\")\n //do something with realm - maybe query for tasks and store them in a class var\n }\n}\nself.taskResults = nilwhichPartition", "text": "You don’t have control over connecting and disconnecting - that’s not how Realm works; it does all of that for you. Your focus is to write the data to Realm and then it does the heavy lifting in the background when it syncs.AsyncOpen is used to ‘tell’ Realm you’re using Sync, instead of only writing locally. It establishes the initial connection and provides a status so you can take action while it’s connecting - progress bars, user auth and then to let you know it’s ready for use.not sure what you mean with swift driver?There is an option to read/write directly to Atlas from Swift (not using Realm) but I don’t think that’s what you’re doing.In general, the async function would look something like thiswhen switching partitions, if there’s a class var holding our realm data we nil itself.taskResults = niland then pass a new partition to the above code whichPartition which then connects to the new partition. That process works well.Perhaps your process for switching partitions needs to be investigated.", "username": "Jay" } ]
@AsyncOpen + partition value
2022-08-16T12:12:51.212Z
@AsyncOpen + partition value
1,581
null
[]
[ { "code": "", "text": "Hi Team,we have 3 node mongodb cluster. Recently we had an issue where one of the mongodb node did not take connections causing intermittent issues to the application.\nReviewing the logs, we observed below errors.socket errors for 9001 for multiple app instances.\n2022-08-05T16:42:53.664+0000 [conn137308] SocketException handling request, closing client connection: 9001 socket exception [SEND_ERROR] serverconnection to other two nodes failed.\n2022-08-05T17:07:25.455+0000 [rsHealthPoll] couldn’t connect to prod-cirrusmongo-eu05: couldn’t connect to server prod-cirrusmongo-eu05-xxxxx27017 failed, connection attempt failed2022-08-05T17:07:25.455+0000 [rsHealthPoll] warning: Failed to connect to 10.0.3.21:27017, reason: errno:106 Transport endpoint is already connectedsyslog is showing below error.\n[LIVE] root@prod-cirrusmongo-eu06 [/var/log]$ zcat syslog.5.gz | grep mongodb\nAug 5 16:42:52 prod-cirrusmongo-eu06 mongodb_exporter[1760]: E0805 16:42:52.923749 1760 connection.go:48] Cannot connect to server using url mongodb://localhost:27017: no reachable serversThe issue is finally resolved after restarting mongod service on the instance and taking connections now.\nThe node was primary earlier after restart it became secondary.I would like to know the root cause of above errors to avoid the issue again.Please suggest,", "username": "Abhinav_Avanisa" }, { "code": "", "text": "I have the same issue. I have also stored the log. I get“Error sending response to client. Ending connection from remote”errmsg: “Broken pipe”“code”:9001,“codeName”:“SocketException”And I don’t know how to fix it.", "username": "Anton_Tonchev" } ]
SocketException handling request, closing client connection: 9001 socket exception [SEND_ERROR] server
2022-08-10T17:10:54.384Z
SocketException handling request, closing client connection: 9001 socket exception [SEND_ERROR] server
2,329
null
[ "flutter" ]
[ { "code": "flutter run -d <some-device>", "text": "I have a Flutter application that uses a pre-populated Realm database. Running flutter run -d <some-device> only install the app without the pre-populated database. How can I install the pre-populated Realm database into a mobile device together with the Flutter app?", "username": "Tembo_Nyati" }, { "code": "var config = Configuration.local([Dog.schema], initialDataCallback: (realm) {\n realm.add(Dog('fido'));\n });\n", "text": "Hi Tembo_Nyati,\nyou can try to add the realm file to the Flutter app assets. But it is also possible to use initialDataCallback to populate your initial data once the Realm is opened for the first time.I hope it will help.", "username": "Desislava_Stefanova" }, { "code": "", "text": "Cross post over to SOThe answer above mentions the same solution as on StackOverflow; the Realm file needs to be included in your project assets - we may be more help if we knew about your development environment. XCode? Android Studio? Something else?", "username": "Jay" }, { "code": "", "text": "@Jay, I’m on Windows 11 x64, Flutter 3.0.5, Realm 0.3.1+beta, Visual Studio Code with Flutter and Dart extensions.", "username": "Tembo_Nyati" }, { "code": "realm:masterrealm:flutter_bundle_realm", "text": "Hi @Tembo_Nyati,\nI prepared a Flutter app that demonstrates bundling a pre-polluted realm in Flutter assets.Opened because of Community request: [how-can-i-bundle-a-pre-populated-realm-dat…abase-in-a-flutter-app](https://www.mongodb.com/community/forums/t/how-can-i-bundle-a-pre-populated-realm-database-in-a-flutter-app/180290)\nYou can find out some other approach also.\nI’m sorry about the delay.\nHave a nice week!", "username": "Desislava_Stefanova" } ]
How can I bundle a pre-populated Realm database in a Flutter app?
2022-08-12T05:29:12.457Z
How can I bundle a pre-populated Realm database in a Flutter app?
3,035
null
[ "python", "schema-validation" ]
[ { "code": "from pymongo import MongoClient\nfrom collections import OrderedDict\nimport sys\n\ndef create_collection(coll_name):\n db = MongoClient('mongodb://localhost:27017/')['dbname']\n result = db.create_collection(coll_name, validator={\n '$jsonSchema': {\n 'bsonType': 'object',\n 'additionalProperties': True,\n 'required': ['component', 'path'],\n 'properties': {\n 'component': {\n 'bsonType': 'string'\n },\n 'path': {\n 'bsonType': 'string',\n 'description': 'Set to default value'\n }\n \n \n }\n }\n })\n \n print(result)\n\nif __name__ == '__main__':\n create_collection('collectionname')\n \ndb.dbname.insertOne({name: \"Jason\", status:\"updated\"})\n", "text": "I want to add objects that I don’t define in the schema to my database.What if I wanted to add :I know ı can add this to schema but is there a way to do this with python and pymongo. How can ı add name and status to my schema using python and pymongo.", "username": "Furkan_Arslan" }, { "code": "", "text": "Altering a collection’s validator does not seem to be supported by any public API.", "username": "Jack_Woehr" }, { "code": "", "text": "Do you know about Javascript? Just like python, it is not a strongly-typed language, and its object format allows to add-remove properties and methods to object freely. JSON is the serialized version of this object system, and you can do the same with it: add anything you want.MongoDB follows this JS/JSON freedom extending some more types and saving the file as binary making the BSON type.adding the types and schemas to this freedom is mainly for other languages’ expectations to have a defined shape like from a relational database. you do not need them unless you want strict rules.so your schema here in python, you can say, is extracting data in a defined shape and easily validating data with that shape.you can at any time start using schemaless free-style for reading/inserting/removing data that does not conform to a shape, less or more data. Although not difficult in python, it will be your responsibility to validate and convert data without a schema.as for using this in python with pymongo, just know that the method names follow snake case, where words are connected with underscore: “insertOne” becomes “insert_one”. check pymongo documentation for more verbs.", "username": "Yilmaz_Durmaz" }, { "code": "validationLeveladditionalProperties:trueadditionalProperties:truebypassDocumentValidationbypass_document_validationvalidatorcollMod", "text": "Welcome to the MongoDB community @Furkan_Arslan!Schema validation is optional in MongoDB and you can choose a validationLevel for how strictly documents are validated.However, in your example validation you have set additionalProperties:true so should already be able to add fields that are not part of your declared schema validation. Fields names/paths that match your schema rules will still be validated, but additionalProperties:true does not validate properties that aren’t listed.If you do have strict validation enabled, there is also the option for database users with the bypassDocumentValidation permission to bypass validation per operation. In PyMongo this can be set with a bypass_document_validation boolean parameter for supported operations.Altering a collection’s validator@Jack_Woehr You can add or update the JSON Schema validator for an existing collection using the collMod command: Add Document Validation to an Existing Collection. 
Validator updates only apply to future inserts or updates (existing documents are unaffected).Regards,\nStennie", "username": "Stennie_X" }, { "code": "validatorcollMod", "text": "@Jack_Woehr You can add or update the JSON Schema validator for an existing collection using the collMod command: Add Document Validation to an Existing Collection.True dat, @Stennie_X , but I didn’t find an exposed API to do this in pymongo, which was the original question.", "username": "Jack_Woehr" }, { "code": "additionalPropertiescollModdb.command()", "text": "Hi @Jack_Woehr,Modifying the validator wouldn’t be needed for the original question because it already allows additionalProperties.However, the collMod command can be invoked from PyMongo using db.command() similar to this example: collMod validator usage in python - #2 by Mark_SmithRegards,\nStennie", "username": "Stennie_X" }, { "code": "collModdb.command()", "text": "However, the collMod command can be invoked from PyMongo using db.command() similar to this example: collMod validator usage in python - #2 by Mark_SmithTIL !! Thanks, @Stennie_X", "username": "Jack_Woehr" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
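The collMod route Stennie mentions can be sketched in PyMongo as below. This is an illustrative example (not the linked post), reusing the collection name and schema from the question above and assuming a local server.

from pymongo import MongoClient

# Illustrative sketch: updating a collection's validator from PyMongo via the collMod
# command, as discussed above. Assumes a local server and the names from the question.
db = MongoClient("mongodb://localhost:27017/")["dbname"]

new_schema = {
    "bsonType": "object",
    "additionalProperties": True,
    "required": ["component", "path"],
    "properties": {
        "component": {"bsonType": "string"},
        "path": {"bsonType": "string", "description": "Set to default value"},
        # extra fields can be declared here if you want them validated too
        "name": {"bsonType": "string"},
        "status": {"bsonType": "string"},
    },
}

result = db.command(
    "collMod",
    "collectionname",                       # collection to modify
    validator={"$jsonSchema": new_schema},  # replaces the existing validator
)
print(result)

As noted above, only future inserts and updates are checked against the new rules; existing documents are left alone.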
Is there a way to add objects that we do not define in the schema to the database using pymongo?
2022-08-15T14:25:58.410Z
Is there a way to add objects that we do not define in the schema to the database using pymongo?
4,464
https://www.mongodb.com/…e_2_1024x512.png
[]
[ { "code": "type=PROCTITLE msg=audit(1660560763.000:4626): proctitle=2F7573722F62696E2F6D6F6E676F64002D66002F6574632F6D6F6E676F642E636F6E66\ntype=PATH msg=audit(1660560763.000:4626): item=0 name=\"/proc/sys/fs/binfmt_misc\" nametype=UNKNOWN cap_fp=0 cap_fi=0 cap_fe=0 cap_fver=0 cap_frootid=0\ntype=CWD msg=audit(1660560763.000:4626): cwd=\"/\"\ntype=SYSCALL msg=audit(1660560763.000:4626): arch=c000003e syscall=137 success=no exit=-13 a0=55c721f04d00 a1=7f0f3c107000 a2=7f0f3c1071c0 a3=0 items=1 ppid=1 pid=3065 auid=4294967295 uid=993 gid=990 euid=993 suid=993 fsuid=993 egid=990 sgid=990 fsgid=990 tty=(none) ses=4294967295 comm=\"ftdc\" exe=\"/usr/bin/mongod\" subj=system_u:system_r:mongod_t:s0 key=(null)\ntype=AVC msg=audit(1660560763.000:4626): avc: denied { search } for pid=3065 comm=\"ftdc\" name=\"fs\" dev=\"proc\" ino=315 scontext=system_u:system_r:mongod_t:s0 tcontext=system_u:object_r:sysctl_fs_t:s0 tclass=dir permissive=0\nallow mongod_t sysctl_fs_t:dir search;\n", "text": "Hi.We are testing an upgrade of MongoDB 5.0 to MongoDB 6.0 on Rocky Linux 8, using the revised SELinux instructions:Having removed the old policies and installed the new policies from mongodb-selinux, we are getting one denial:I can’t see anything in mongodb-selinux that grants:like the old “mongodb_proc_net” policy used to.Should this be included, or do we need to manually add a policy ourselves?Thanks in advance.", "username": "INVADE_International" }, { "code": "", "text": "For info, I have raised the following bug:\nhttps://jira.mongodb.org/browse/SERVER-68892", "username": "INVADE_International" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MongoDB 6.0 and SELinux
2022-08-15T11:05:27.665Z
MongoDB 6.0 and SELinux
2,295
null
[ "data-modeling", "many-to-many-relationship" ]
[ { "code": "Linked tablesmany-to-manymany-to-manyDBRefsStudentsCoursesconst StudentSchema = new Schema({\n name: String,\n courses: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Course' }]\n});\n\nconst CoursesSchema = new Schema({\n name: String,\n studends: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Student' }]\n});\nstudentscourseslinked tablescollectionlinked collection", "text": "Hi there,I’m new to NoSQL databases and MongoDB and have some troubles understanding some core schema-design principles, despite the very well structured MongoDB’s docs and similar answers all around the i-net.In SQL world, there are Linked tables, mostly used as many-to-many references between other tables, in MongoDB, I didn’t find any doc entry or tutorial equivalent to them. The only many-to-many solution I found is to link each connected tables to each other via DBRefs:\nSo, for example if I have Students and Courses, I will do(using mongoose):And for me it’s a very bad schema-design, because it’s not scalable for the reason the array of students and courses can grow very large(student-courses was just an example). So from scalability perspective, it’s bad.Are there any alternatives to that?\nWhat if I’ll use a linked tables (collection) to connect between the two, how should I implement it on the application level: should I first make a call to the linked collection, await it and only then perform another query? Can I do it in one go(make one request to the DB server), because awaiting for the first response on the application level can take time?Hopefully, you could assist with the above concerns of mine.\nThanks", "username": "John_D1" }, { "code": "{\n _id: <number>,\n name: : <string>,\n otherDetails: { ... },\n courses: [\n { courseId: <number>, courseName: <string> },\n { courseId: <number>, courseName: <string> },\n ...\n ]\n} \n{\n _id: <number>,\n name: <string>,\n description: <string>,\n otherDetails: { ... }\n}\ndb.students.find( { name: \"John Doe\" }, { courses: 1, name: 1 } )db.students.find( { \"courses.courseName\": \"Database Design\" } )coursestudentstudentcourses{ courseId: <number>, courseName: <string> }courseIdcourseIdcourses$lookup", "text": "Hello @John_D1, welcome to the MongoDB Community forum!I have some information for you and hope it is useful. I will assume there are a few thousand students and few hundred courses, and I could model something like this:student:course:Now, your application has some queries. To start with some queries I can think about is, get all students in a particular course and get all courses for a particular student. These are simple queries.To get all courses for a specific student, the query would be:db.students.find( { name: \"John Doe\" }, { courses: 1, name: 1 } )To get all students enrolled for a specific course, your query can be like this:db.students.find( { \"courses.courseName\": \"Database Design\" } )Note that the data related to the course is duplicated - the course name and id are present in both collections. You can also note that these details will not change often (or may not change at all). This is feasible, though you have the same data in both collections. 
And, this also makes querying simpler.Some important aspects that influence database design are the amount of data in each entity, the size of each document, the relationships between entities, the application functionality (note that all this is part of an app, and the app has various functions, like displaying certain data in browser window, for example), and the other queries you write for the application (the CRUD operations).The “linked table” is not needed in the MongoDB data design - which allows denormalized data through embedding or referencing. Based upon the kind of queries you write you can have the course referenced within the student or vice-versa.In the above student entity, the courses field is an array of { courseId: <number>, courseName: <string> } embedded documents. You can also include more course details in this embedded document, or just the courseId field. If you have just the courseId in the courses array field, then you will have to use an Aggregation Query with $lookup to get more details about the courses.", "username": "Prasad_Saya" }, { "code": "CoursesStudentslinked table", "text": "Thanks @Prasad_Saya for such a detailed and quick reply.The thing is, Courses and Students was just an example and the point was to properly address the issue of many-to-many relationship between two(or more) collections in large, scalable systems.Just imagine the amount of courses per student, but also students per courses can be very, very high. Holding such a huge data in each document will lead inevitably to serious performance issues.In SQL world, this is pretty common scenario and you can make a linked table and query whatever you need in one go.MongoDB made for scale, as far as I read, I honestly can’t believe the very common scenario isn’t address properly yet. Or perhaps I’m mistaken? Please prove me wrong.", "username": "John_D1" }, { "code": "", "text": "Hello @John_D1, you need to provide some realistic data and application to discuss what and how this can be designed. Real data and real applications have many attributes and relationships and requirements - that is where the data modeling as a process can help.", "username": "Prasad_Saya" }, { "code": "ProductsCustomersProductsCustomers", "text": "@Prasad_Saya, say you have Products and Customers. There are millions of Products and there’s millions of Customers.", "username": "John_D1" }, { "code": "ProductsCustomersProductsCustomers", "text": "say you have Products and Customers . There are millions of Products and there’s millions of Customers .@John_D1, it is a very broad and vague description of a problem. What is it you are doing (or want to do) with those products and customers? What is the nature of their attributes?Designing, again, requires a lot of information; thorough investigation of various aspects. I think you can start with information I had already posted, and build on that. All kinds of “large amounts” have some solution - but they are not on their own - lot of “other factors” influence the design in such cases.", "username": "Prasad_Saya" }, { "code": "", "text": "A post was split to a new topic: Is there any method for modifications like “OnCascade” in SQL?", "username": "Stennie_X" }, { "code": "", "text": "@John_D1 Have you found any solution for this issue?The document increased dramatically. 
Is there any other way to create a pivot table and access the data in a single query?", "username": "MUHAMMAD_ILYAS" }, { "code": "", "text": "Let me give an example.\nLet’s say there are social media posts and let there be users.\nNow each user can like tons of posts and each post can be liked by thousands of users.\nSo how am I supposed to model this many-to-many relationship in MongoDB?", "username": "Satya_Narayan_Goswami" } ]
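To make the referencing approach discussed above concrete, here is an illustrative PyMongo sketch of a many-to-many model that stores only course ids on each student and resolves the other side with $lookup. The database, collection and field names are assumptions for the example, not from a real deployment.

from pymongo import MongoClient

# Illustrative sketch: many-to-many via references plus $lookup, per the discussion above.
db = MongoClient("mongodb://localhost:27017/")["school"]

db.courses.insert_many([
    {"_id": 1, "name": "Database Design"},
    {"_id": 2, "name": "Algorithms"},
])
db.students.insert_many([
    {"_id": 10, "name": "John Doe", "courseIds": [1, 2]},
    {"_id": 11, "name": "Jane Roe", "courseIds": [2]},
])

# All courses for one student, resolved in a single aggregation.
pipeline = [
    {"$match": {"name": "John Doe"}},
    {"$lookup": {
        "from": "courses",
        "localField": "courseIds",
        "foreignField": "_id",
        "as": "courses",
    }},
    {"$project": {"name": 1, "courses.name": 1}},
]
for doc in db.students.aggregate(pipeline):
    print(doc)

# The reverse direction (all students who take a given course) is a plain find on the array.
print(list(db.students.find({"courseIds": 1}, {"name": 1})))

Whether the ids live on the student, on the course, or in a separate junction collection is a judgment call driven by which side stays small and which queries dominate.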
Many to many relationship and linked table/collection
2021-11-01T08:07:21.096Z
Many to many relationship and linked table/collection
29,087
https://www.mongodb.com/…e_2_1024x512.png
[]
[ { "code": "Command 'mongo' not found, but can be installed with:\nsudo apt install mongodb-clients\n", "text": "I used the following link to install mongodb on my system Ubuntu 20.04 LTS focal.After installation, I ran “mongo” commad and got the following:I checked this “whereis mongo”. And the result was empty. I also noticed that the folder “mongo” is missing in /usr/bin.Please help. Have already wasted 2 hour in this. How do I fix this?", "username": "SAURAV_KUMAR" }, { "code": "can be installed with:\nsudo apt install mongodb-clients\n", "text": "The command mongo, in principal,I wrote in principal because mongo has been replaced by mongosh, so the command above probably install mongosh rather than mongo.", "username": "steevej" }, { "code": "mongomongoshmongomongosh", "text": "As steevej states, the mongo tool is not included in the download of MongoDB 6.0. You will want to download the mongosh shell now if you wish to interface with the database from the command line. For the most part anything that you could do in the older mongo command line can be done in the newer mongosh tool.", "username": "Doug_Duncan" }, { "code": "", "text": "Got it. Thanks for the help. Used mongosh.", "username": "SAURAV_KUMAR" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" }, { "code": "mongodb-mongoshmongodb-orgmongodb-mongosh", "text": "Hi folks,In addition to the Download Center option mentioned by @Doug_Duncan, there is a mongodb-mongosh package that can be installed per Install MongoDB Community Edition on Ubuntu.If you installed the default mongodb-org Ubuntu metapackage for MongoDB 5.0+ (per the installation steps above), I believe mongodb-mongosh should be included by default.Regards,\nStennie", "username": "Stennie_X" } ]
Installation issue for mongodb 6
2022-08-09T16:56:51.197Z
Installation issue for mongodb 6
4,492
null
[]
[ { "code": "", "text": "i have been facing the same problem, mongo.exe file is missing in bin folder, not getting any way to find the solution to this problem", "username": "ketan_k" }, { "code": "mongomongoshmongosh", "text": "Welcome to the MongoDB community @ketan_k !Can you provide some more details on your environment:If you are installing MongoDB 6.0 or newer, the legacy mongo shell is no longer included. Depending on your O/S and install options you may have mongosh (the new MongoDB shell) and/or MongoDB Compass (an admin GUI including an embedded version of mongosh).You can also Download MongoDB Tools from the MongoDB Download Centre if they are not already installed.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Similar issue:", "username": "SAURAV_KUMAR" }, { "code": "*PS C:\\Program Files\\MongoDB\\Server\\6.0\\bin> ls*\n*Directory: C:\\Program Files\\MongoDB\\Server\\6.0\\bin*\n\n*Mode LastWriteTime Length Name*\n*---- ------------- ------ ----*\n*-a---- 05-07-2022 18:40 1558 InstallCompass.ps1*\n*-a---- 12-08-2022 13:41 605 mongod.cfg*\n*-a---- 05-07-2022 20:15 55887872 mongod.exe*\n*-a---- 05-07-2022 20:15 832090112 mongod.pdb*\n*-a---- 05-07-2022 20:22 34573824 mongos.exe*\n*-a---- 05-07-2022 20:22 464392192 mongos.pdb*\n", "text": "Same issue here, using VS Code and wants to connect to the mongo shell. Unable to find mongosh.exe in version 6.0. These are files I could find in the “bin” folder:", "username": "DEEPAK_KUMAR12" }, { "code": "InstallCompass.ps1mongoshmongomongodb-mongosh", "text": "Hi @DEEPAK_KUMAR12,You can open the InstallCompass.ps1 PowerShell script to install MongoDB Compass (which includes an embedded version of mongosh, the new MongoDB Shell) or download the MongoDB Shell from the the MongoDB Download Center: MongoDB Shell Download | MongoDBSimilar issue:@SAURAV_KUMAR The referenced issue is a similar problem (the legacy mongo shell is no longer included with MongoDB 6.0) for a different O/S environment. Per Install MongoDB Community Edition on Ubuntu, there is a mongodb-mongosh Ubuntu package for the new MongoDB shell.Regards,\nStennie", "username": "Stennie_X" } ]
Mongo.exe file is missing in bin folder
2022-08-11T07:21:03.735Z
Mongo.exe file is missing in bin folder
33,387
null
[ "replication", "indexes" ]
[ { "code": "", "text": "Hi Guys.With Topic Subject, U might see there are similar topic already exists but all those existing topics didn’t have the topic related information at all. Hence I’m creating new topic.Question : In the Replica set of 3 nodes (1 primary, 1 secondary and 1 Arbiter), Is it good Idea to have different set of indexes for Primary and Secondary node. ?In my replica set, its more like Primary will always remain as primary and so as the secondary. So I was thinking to create all write related indexes on primary node and same write related indexes + additional read related indexes on secondary node (as My reporting related queries always run on secondary).Is this good approach ? Will this have any impact on the performance on entire replica set that will eventually introduce to latency on the primary nodeThanks in advance", "username": "Dilip_D" }, { "code": "Secondary", "text": "HI Dilip, while you could do that, since you’ve only got a single secondary it would be promoted to primary state should something happen to the current primary.As for having your reporting only querying the secondary node, I’m assuming you’ve set the read preference to Secondary which means that there will be issues should your primary fail and the current secondary gets promoted. At that time you have no secondary to serve those reads.I would only recommend having different indexes on a cluster that has a hidden secondary member. Since it’s hidden then it can never become the primary and differing indexes are of less concern in that case. However, since you’re in a PSA set up you cannot do this. Is there a reason for going with PSA?", "username": "Doug_Duncan" }, { "code": "", "text": "Hi Doug.Thanks for the response.I understand, In PSA, Different Indexing between the primary and secondary would impact in case of Fail-over switch. We actually had a plan of having Primary, Secondary and Read-only node, but with budget constraint, we got into PSA architecture.However, You answered my question. If we get an additional Read-Only node which will never be involved in the primary election, then we can create reporting specific indexes on the Read-Only node which will not have any impact during primary secondary switch.Thank you for your time on providing this clarity.", "username": "Dilip_D" }, { "code": "secondary", "text": "If we get an additional Read-Only node which will never be involved in the primary election, then we can create reporting specific indexes on the Read-Only node which will not have any impact during primary secondary switch.Hi @Dilip_D,If you want a more robust set up, I’d strongly recommend replacing the arbiter with a hidden secondary that can be used to address your reporting requirements and provide data redundancy in the event one of your data bearing replica set members is unavailable.Arbiters add significant operational caveats per Replica set with 3 DB Nodes and 1 Arbiter - #8 by Stennie.I would also evaluate whether distinct indexes on a hidden secondary are actually required for your current usage. The hidden secondary introduces a single point of failure for your analytics queries.Starting with a standard Primary-Secondary-Secondary (PSS) replica set with your analytics queries using secondary read preferences would provide fault tolerance for your primary and for your data analytics queries. 
You can always evolve your deployment resources to suit your workload requirements.In my replica set, its more like Primary will always remain as primary and so as the secondary.If you definitely do not want any fault tolerance (i.e. your secondary can never be elected as a primary) and you also want unique indexes on a secondary for reporting, you could also deploy an atypical Primary-Hidden replica set. Setting up zero fault tolerance is not a recommended configuration as the typical goals of a replica set are to support data redundancy and high availability. However, in this scenario you would only have a single member eligible to be primary and have another member dedicated for reporting purposes.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
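A hidden, priority-0 member of the kind Stennie describes is set up through a replica set reconfiguration. The sketch below is illustrative only: it uses PyMongo against the current primary, the host name is a placeholder, and the member index must be adjusted to match your own configuration.

from pymongo import MongoClient

# Illustrative sketch: mark one member as hidden with priority 0 so it can carry
# reporting-only indexes without ever being elected primary, per the discussion above.
# Host name and member index are placeholders - adjust them to your replica set.
client = MongoClient("mongodb://primary.example.net:27017/?replicaSet=rs0")

cfg = client.admin.command("replSetGetConfig")["config"]

member = cfg["members"][2]   # assumption: the third member is the reporting node
member["priority"] = 0
member["hidden"] = True
cfg["version"] += 1          # a reconfig requires a bumped config version

client.admin.command("replSetReconfig", cfg)
print("Member", member["host"], "is now hidden")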
Replica Nodes with Different Indexes
2022-08-12T16:26:22.653Z
Replica Nodes with Different Indexes
2,363
null
[ "aggregation", "transactions" ]
[ { "code": "\"TaskId\" : 1234Status\": \"Processed\".", "text": "Hi Team,I’m using mongo 4.2, trying to perform an update using aggregation pipeline. Mongo transaction management is in place to roll back the updates for failure scenarios.For Mongo transactions, it is recommended to limit the number of documents to 1000 and for documents greater than the limit, developer is expected to split the document in batches.In my case, I run an update using a generic field. Example: Update all documents with \"TaskId\" : 1234 with a new field \"Status\": \"Processed\".\nThere can be “N” documents with TaskId as 1234, at times below 1000 and at times greater than 1000. In such case, is it recommended to use mongo transactions, If yes, how can I split them as batches and run an update within transaction management.", "username": "Laks" }, { "code": "\"TaskId\" : 1234Status\": \"Processed\".", "text": "Hello @Laks ,I notice you haven’t had a response to this topic yet - were you able to find a solution?\nIf not, could you confirm if my understanding for your use-case is correct?Example: Update all documents with \"TaskId\" : 1234 with a new field \" Status\": \"Processed\".\nThere can be “N” documents with TaskId as 1234, at times below 1000 and at times greater than 1000. In such case, is it recommended to use mongo transactions, If yes, how can I split them as batches and run an update within transaction management.I think you are referring to this post of best practices for transactions in MongoDB. This article explains the recommended best practices for transactions. However, please note that those are separate from the actual operational limitations of transactions as described in this documentation page of operational limits of Multi-document Transactions. Note that best practices may change from version to version due to improvements in MongoDB. However, keeping document modifications less than 1000 per transaction is still recommended, as far as I know. Having said that, have you tried running transactions in which more than 1000 document modifications were involved and did you see any errors/issues?Coming back to your use case, I understand that sometimes if TaskId cannot be split into less than 1000 documents, are there any other fields that could be used in conjunction with TaskId (by acting as a secondary key)? For example: date, other ID sequences, names, etc. so that you can always split a transaction batch in a sub-TaskId level?If you need more information, please provide an example document.Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
1000 transactions per batch for Mongo Transaction Management
2022-07-28T10:01:36.159Z
1000 transactions per batch for Mongo Transaction Management
2,700
null
[]
[ { "code": "", "text": "Hi all,I have a collection with status field (Pending,Completed,Failed) & createdAt field.\nI want to delete records that were created more than a month and status is not equal to “Completed”Is there a way to do that?\nIt seem partialFilterExpression doesn’t support thatThanks", "username": "Guy" }, { "code": "{createdAt:{$lt:<some-date>}, status:{$ne:\"Completed\"}}$instatus:{$in:[\"Pending\", \"Failed\"]}{$ne:\"Completed\"}$in$or", "text": "partialFilterExpressionPartial filter expression is about indexes, when you want to index only a subset of your collection. You are asking about deleting records, that would be based on a query like {createdAt:{$lt:<some-date>}, status:{$ne:\"Completed\"}}Are you maybe thinking about creating a partial TTL index to automatically do these deletions? You could create a partial index but it doesn’t support dynamic date expressions (more than a month) and you’re correct that it won’t allow negation in the partialFilterExpression. In the subject you mention $in - is that because for you status:{$in:[\"Pending\", \"Failed\"]} is equivalent to {$ne:\"Completed\"}?Good news is that partial indexes now support $in and $or - the docs haven’t been updated yet but you can see it here: https://jira.mongodb.org/browse/SERVER-59508Asya", "username": "Asya_Kamsky" }, { "code": "", "text": "Great new, this is exactly the feature I needed. Thank you for your quick and detailed response", "username": "Guy" }, { "code": "", "text": "When 5.1 with [quote=“Asya_Kamsky, post:2, topic:180823”]\nhttps://jira.mongodb.org/browse/SERVER-59508\n[/quote] will be released?Does “Support $or, $in and multi-level expressions in partial indexes” feature is already implemented in mondogdb v6?", "username": "Guy" }, { "code": "", "text": "Yep, this is already out as part of version 6.0.", "username": "Asya_Kamsky" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
partialFilterExpression alternative to $in
2022-08-15T18:11:33.236Z
partialFilterExpression alternative to $in
2,057
null
[ "golang", "change-streams" ]
[ { "code": "func (cs *ChangeStream) TryNext(ctx context.Context) bool {\n\treturn cs.next(ctx, true)\n}\nfunc (cs *ChangeStream) loopNext(ctx context.Context, nonBlocking bool) {\n\tfor {\n\t\tif cs.cursor == nil {\n\t\t\treturn\n\t\t}\n\n\t\tif cs.cursor.Next(ctx) {\n\t\t\t// non-empty batch returned\n\t\t\tcs.batch, cs.err = cs.cursor.Batch().Documents()\n\t\t\treturn\n\t\t}\n", "text": "I am using TryNext to fetch documents as they are available in change stream feed. From the API description, if there are no documents available it should return immediately. I added latency trace around tryNext call and I am observing close to 1000ms latency if no documents are available. I looked at the go code for tryNext callit calls next with non-blocking set to true. After following the code loopNext call makes a Next call on cursor which is a blocking callThis can potentially block the call. Is there any alternative available to the non-blocking TryNext?", "username": "Amol" }, { "code": "TryNextChangeStreamnonBlockingloopNextChangeStreamTryNext", "text": "Hey @Amol thanks for the question and sorry about the slow response! You’re correct that calling TryNext on a ChangeStream with no available local documents does always reach out to the database to check for more documents.The nonBlocking parameter in the underlying call to loopNext is unfortunately a moderate misnomer and only indicates that the call doesn’t block indefinitely until a document is available, but does make a network call to the database. There is currently no way to check if more ChangeStream documents are available without potentially making a network call. If no local documents are available, calling TryNext will always make a network call.", "username": "Matt_Dale" } ]
Changestream TryNext is blocking when no documents are available
2022-07-28T18:13:00.210Z
Changestream TryNext is blocking when no documents are available
2,340
null
[]
[ { "code": "", "text": "Hi, I created a simple GUI tool for building MongoDB URI https://manage.mingo.io/tool/mongodb-uri-builderI would love to hear your feedback on it!", "username": "David_Durika" }, { "code": "", "text": "Very cool! I am new to Mongodb and already see a lot of use for this", "username": "Tim_Belvin" } ]
MongoDB URI builder
2022-08-12T03:45:55.403Z
MongoDB URI builder
1,729
null
[ "security" ]
[ { "code": "", "text": "HiI am working on a university project where I am supposed to find out how to approach the following topic. I am not actually programming something, it is only a matter of working out a concept.A company has set up sensor stations to send data to a server via an LTE connection. The server uses MongoDB. Due to the fact that most LTE connections provide dynamic IP adresses these stations never have the same IP adress.\nThis means that only the sensor station can access the server, but the server cannot access the sensor station - which is fine.The stations must be able to down- and upload data, so I came across the option the set the IP binding to 0.0.0.0\nI think this is the only option since the IP adresses randomly change.The question now is what security measures are necessary to prevent unauthorized users from accessing the server.\nThe sensor stations have a computer build in that runs Windows, same as the server.This is only a small side task of the project and therefore there is not so much time to deal with the topic intensively. When the system runs later, then of course more time is invested there.\nInternet security is definitely not my speciality and this is only about getting a rough overview on what we will have to deal with later.Thanks for the help\nTesterino", "username": "Testerino" }, { "code": "", "text": "Hi @Testerino welcome to the community!Security is always a tricky subject. To help you, you might find the Security Checklist useful for this.Note that even with this, systems can still be compromised in other ways that may or may not be due to the database (vulnerable OS, or vulnerable processes that run in the server, for example), so the checklist is a starting point instead of the only thing you should care about. Limiting the network exposure of the server is a big first line of defense, so opening it up to 0.0.0.0 should be done only as a last resort (in my opinion).Instead of opening the server up to 0.0.0.0, you might also want to investigate other potential solutions, such as:Best regards\nKevin", "username": "kevinadi" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Securing a 0.0.0.0 database
2022-08-16T17:58:28.352Z
Securing a 0.0.0.0 database
4,411
null
[ "react-native", "realm-web" ]
[ { "code": "", "text": "Hello,I have tried the recent examples of using expo + mongodb realm + react native, and my goal is to create the same app for mobile and web. I have the mobile version up and running, but struggling with the web version. Does anyone have any direct experience here? My error message is as following when I try to “Open web” using expo.Blockquote ./node_modules/@realm/react/dist/useObject.js 5:96\nModule parse failed: Unexpected token (5:96)\nYou may need an appropriate loader to handle this file type, currently no loaders are configured to process this file. See Concepts | webpack\n| return function useObject(type, primaryKey) {\n| const realm = useRealm();\nconst [object, setObject] = useState(() => realm.objectForPrimaryKey(type, primaryKey) ?? null);\n| useEffect(() => {\n| const listenerCallback = (_, changes) => {", "username": "Jehans_Jr_Storvik" }, { "code": "", "text": "Same thing happens to me.Web Bundling complete 111740ms\n./node_modules/@realm/react/dist/useObject.js 5:96\nModule parse failed: Unexpected token (5:96)\nYou may need an appropriate loader to handle this file type, currently no loaders are configured to process this file. See Concepts | webpack\n| return function useObject(type, primaryKey) {\n| const realm = useRealm();\nconst [object, setObject] = useState(() => realm.objectForPrimaryKey(type, primaryKey) ?? null);\n| useEffect(() => {\n| const listenerCallback = (_, changes) => {", "username": "Christopher_Hewitt" }, { "code": "", "text": "I’m experiencing the same problem.\nAnyone knows how to solve this?", "username": "Gilberto_Catarino" } ]
Expo mongodb realm for web and mobile - Module parse failed
2022-05-13T12:27:11.617Z
Expo mongodb realm for web and mobile - Module parse failed
3,831
null
[ "atlas-cluster" ]
[ { "code": "./bin/mongomirror --host \"mydb/source-devtestmocha.tnbhs.mongodb.net\" \\\n --ssl \\\n --username \"admin\" \\\n --password \"password\" \\\n --authenticationDatabase \"mydb\" \\\n --destination \"mydb/dest-backend-cluster.tnbhs.mongodb.net\" \\\n --destinationUsername \"admin\" \\\n --destinationPassword \"password\"\n", "text": "I was supposed to sync two clusters in MongoAtlas with mongomirror tools with following syntax. Unfortunately, it said source cluster is not able to connect even I checked connectivity and it was working.Error initializing mongomirror: could not initialize source connection: could not connect to server: server selection error: server selection timeout, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: source-devtestmocha.tnbhs.mongodb.net:27017, Type: Unknown, Last error: connection() error occured during connection handshake: dial tcp: lookup source-devtestmocha.tnbhs.mongodb.net: No address associated with hostname }, ] }", "username": "Pyae_Phyoe_Shein" }, { "code": "", "text": "Are you trying to sync two Atlas clusters ? or are you trying to migrate a self-hosted MongoDB instance to Atlas?As you stated it looks like a connectivity issue. Make sure your IP ports are open, if in atlas you can set the network open to 0.0.0.0 (open to the world) just to see if that is the issue, it is not a best practice to keep them this way.", "username": "Robert_Walters" }, { "code": "", "text": "Can you connect to your source from shell?\nTry by adding all nodes after replset/n1,n2,n3 in your command", "username": "Ramachandra_Tummala" }, { "code": "mongomirror --host \"RS-ResourceDB-0/SG-ResourceDB-32534.servers.domain.com:27017,SG-SourceDB-32533.servers.domain.com:27017,SG-ResourceDB-32535.servers.domain.com:27017\" \\\n --username \"admin\" \\\n --password \"<REDACTED>\" \\\n --authenticationDatabase \"admin\" \\\n --authenticationMechanism \"SCRAM-SHA-1\" \\\n --destination \"mongodb/mongodb-0.mongodb-svc.mongodb.svc.cluster.local:27017,mongodb-1.mongodb-svc.mongodb.svc.cluster.local:27017,mongodb-2.mongodb-svc.mongodb.svc.cluster.local:27017\" \\\n --destinationUsername \"admin\" \\\n --destinationPassword \"<REDACTED>\" \\\n --destinationAuthenticationDatabase \"admin\" \\\n --destinationAuthenticationMechanism \"SCRAM-SHA-256\" \\\n --tlsInsecure\ngit version: 6e5a5489944845758420e8762dd5e5a89d2e8654\nGo version: go1.16.9\n os: linux\n arch: amd64\n compiler: gc\n2022-08-16T23:49:05.004+0000\tSource isMaster output: {IsMaster:true MinWireVersion:0 MaxWireVersion:7 Hosts:[SourceDB-32533.servers.domain.com:27017 SourceDB-32534.servers.domain.com:27017 SourceDB-32535.servers.domain.com:27017] SetName:RS-Staging-0 SetVersion:5 Me:SourceDB-32533.servers.domain.com:27017}\n2022-08-16T23:49:05.005+0000\tSource buildInfo output: {Version:4.0.12 VersionArray:[4 0 12 0] GitVersion:5776e3cbf9e7afe86e6b29e22520ffb6766e95d4 OpenSSLVersion: SysInfo: Bits:64 Debug:false MaxObjectSize:16777216}\n2022-08-16T23:51:05.008+0000\tError initializing mongomirror: could not initialize destination connection: could not connect to server: server selection error: server selection timeout, current topology: { Type: ReplicaSetNoPrimary, Servers: [{ Addr: mongodb-0.mongodb-svc.mongodb.svc.cluster.local:27017, Type: Unknown, Last error: connection() error occured during connection handshake: EOF }, { Addr: mongodb-1.mongodb-svc.mongodb.svc.cluster.local:27017, Type: Unknown, Last error: connection() error occured during connection handshake: EOF }, { Addr: 
mongodb-2.mongodb-svc.mongodb.svc.cluster.local:27017, Type: Unknown, Last error: connection() error occured during connection handshake: EOF }, ] }\n", "text": "Any updates on this? I have run into a similar issue. In my case, the source can be connected but the destination cannot. I can access the nodes via mongosh with no issues and can telnet to port 27017, but mongomirror throws this error. Output:I'd highly appreciate it if anyone has any comments on this. Thanks!", "username": "mhmtsvr" } ]
Mongomirror cluster to cluster migration does not work
2022-04-22T12:49:22.071Z
Mongomirror cluster to cluster migration does not work
2,232
null
[ "mongodb-shell" ]
[ { "code": "/var/log/mongodb/mongod.log{\n \"t\": {\"$date\":\"2022-08-16T12:45:18.536-04:00\"},\n \"s\": \"F\",\n \"c\": \"CONTROL\",\n \"id\": 20573,\n \"ctx\": \"initandlisten\",\n \"msg\": \"Wrong mongod version\",\n \"attr\": {\n \"error\": \"UPGRADE PROBLEM: Found an invalid featureCompatibilityVersion document (ERROR: Location4926900: Invalid featureCompatibilityVersion document in admin.system.version: { _id: \\\"featureCompatibilityVersion\\\", version: \\\"4.4\\\" }. See https://docs.mongodb.com/master/release-notes/5.0-compatibility/#feature-compatibility. :: caused by :: Invalid feature compatibility version value, expected '5.0' or '5.3' or '6.0. See https://docs.mongodb.com/master/release-notes/5.0-compatibility/#feature-compatibility.). If the current featureCompatibilityVersion is below 5.0, see the documentation on upgrading at https://docs.mongodb.com/master/release-notes/5.0/#upgrade-procedures.\"\n }\n}\nmongo$ mongo\n-bash: mongo: command not found\nmongosh$ mongosh\nCurrent Mongosh Log ID: [redacted]\nConnecting to:\t mongodb://127.0.0.1:27017/?directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+1.5.4\nMongoNetworkError: connect ECONNREFUSED 127.0.0.1:27017\n$ ls -l /var/lib/mongodb/mongod.lock\n-rw------- 1 mongodb daemon 0 Aug 16 12:45 /var/lib/mongodb/mongod.lock\n", "text": "Since the last reboot of my server I can longer access mongo at all. When I attempt to run the mongodb service I see the following error logged in /var/log/mongodb/mongod.log:The instructions for upgrading at the provided link require me to run an adminCommand from the mongo shell, which I cannot do because I cannot access it at all.\nThe mongo shell is gone:And the mongosh shell doesn’t work:My lock file is empty, but I attempted to repair mongod anyway to no avail.I don’t really care about recovering my databases, just getting mongo back into a working state. However, I don’t even know how I can wipe my databases without being able to start the mongo service at all. How can I get mongo running again?", "username": "Altay" }, { "code": "mongod", "text": "Is the mongod process running? It looks like that it’s not since you’re getting a connection refused.The FCV in the error message shows that it’s 4.4. Are you trying to upgrade from 4.4, or does your 5.0 database still have the FCV set to 4.4?", "username": "Doug_Duncan" }, { "code": "mongod", "text": "Is the mongod process running?No, it immediately terminates with the fatal featureCompatibilityVersion error whenever I attempt to start it.Are you trying to upgrade from 4.4, or does your 5.0 database still have the FCV set to 4.4?I haven’t attempted to perform an update before, so I’m assuming my databases still have FCV set to 4.4. Is there a way I can check?", "username": "Altay" }, { "code": "./bin/mongod --dbpath <current database path>mongoshdb.adminCommand( { setFeatureCompatibilityVersion: \"5.0\" } )mongoshmongod", "text": "I have been able to replicate this error by starting up MongoDB 4.4.15. Once it was running I stopped it. I then started up MongoDB 6.0.0 and pointed it at the same path that the 4.4.15 version wrote its database files to and I got the FCV error and the process stopped. This seems to be what you are seeing.If you want to save your data, you can fix this, by downloading the compressed archive of 5.0.x and extract it. From this extracted folder you can run ./bin/mongod --dbpath <current database path>. 
Connect to this instance with mongosh and run db.adminCommand( { setFeatureCompatibilityVersion: \"5.0\" } ). This will change the FCV for you. You can then exit mongosh and then shutdown the mongod instance and finally start your version 6.0.0 server. You will want to change the FCV here as well to be 6.0.If you don’t care about any of your data, you can just delete the entire database directory that the current data is stored in.", "username": "Doug_Duncan" }, { "code": "./bin/mongod --dbpath <current database path>mongoshdb.adminCommand( { setFeatureCompatibilityVersion: \"5.0\" } )mongoshmongod", "text": "If you want to save your data, you can fix this, by downloading the compressed archive of 5.0.x and extract it. From this extracted folder you can run ./bin/mongod --dbpath <current database path> . Connect to this instance with mongosh and run db.adminCommand( { setFeatureCompatibilityVersion: \"5.0\" } ) . This will change the FCV for you. You can then exit mongosh and then shutdown the mongod instance and finally start your version 6.0.0 server. You will want to change the FCV here as well to be 6.0 .Thanks! This worked like a charm. Since I’m on Arch Linux I just needed to install mongodb50-bin from the AUR to downgrade, ran the adminCommand, then installed mongodb-bin again to upgrade my binary back to 6.0.0.Is it good practice to update the FCV after each major release of mongo? Is there a reason it doesn’t happen automatically?If you don’t care about any of your data, you can just delete the entire database directory that the current data is stored in.Good to know in case I run into similar issues in the future.", "username": "Altay" }, { "code": "", "text": "Is it good practice to update the FCV after each major release of mongo? Is there a reason it doesn’t happen automatically?I think the reason for that is that if you need to roll back due to a failed upgrade having the FCV automatically change could cause even more problems. It’s better to leave the FCV at the upgraded from version for a bit to make sure things work as planned and then only change that to the upgraded to version.", "username": "Doug_Duncan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Mongo v6.0.0 immediately exits with featureCompatibilityVersion error
2022-08-16T20:30:43.273Z
Mongo v6.0.0 immediately exits with featureCompatibilityVersion error
6,772
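Editor's note on the FCV thread above: the fix Doug describes boils down to three commands. A minimal sketch, assuming the data files live at /var/lib/mongodb and the extracted 5.0 binaries sit in ./bin — both paths are assumptions, adjust them for your install:

```sh
# 1. Start the temporary 5.0 binary against the existing data files
./bin/mongod --dbpath /var/lib/mongodb --port 27017

# 2. In another shell: raise the FCV, then stop the temporary instance
#    (mongosh will report the connection closing after shutdown — that is expected)
mongosh --eval 'db.adminCommand({ setFeatureCompatibilityVersion: "5.0" })'
mongosh --eval 'db.adminCommand({ shutdown: 1 })'

# 3. Start the regular 6.0 server as usual, then raise the FCV once more
mongosh --eval 'db.adminCommand({ setFeatureCompatibilityVersion: "6.0" })'
```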
null
[ "indexes" ]
[ { "code": "Some indexes for collection 'rocketchat_read_receipts' could not be created:\n Index build failed: 92e953ad-cab0-4a72-9aac-2f7ff42b728c: Collection wow.rocketchat_read_receipts ( 56973a08-1e09-4b39-baf5-3f48627e51ce ) :: caused by :: E11000 duplicate key error collection: wow.rocketchat_read_receipts index: roomId_1_userId_1_messageId_1 dup key: { roomId: \"5P57dW2WfpDghDdKw\", userId: \"EuR8TtKnHMNgKjBni\", messageId: \"Pj8Qfu5zaoez5Rpmu\" }\nwow-rocketchat-1 | MessageType.render is deprecated. Use MessageType.message instead. livechat_webrtc_video_call\nwow-rocketchat-1 | {\"level\":40,\"time\":\"2022-08-16T21:05:25.034Z\",\"pid\":1,\"hostname\":\"cd8fa92844a7\",\"name\":\"VoIPService\",\"msg\":\"Voip is not enabled. Cant start the service\"}\nwow-rocketchat-1 | {\"level\":40,\"time\":\"2022-08-16T21:05:26.511Z\",\"pid\":1,\"hostname\":\"cd8fa92844a7\",\"name\":\"VoIPService\",\"msg\":\"VoIP service already stopped\"}\nwow-rocketchat-1 | {\"level\":51,\"time\":\"2022-08-16T21:05:26.654Z\",\"pid\":1,\"hostname\":\"cd8fa92844a7\",\"name\":\"Migrations\",\"msg\":\"Not migrating, already at version 279\"}\n", "text": "Hey,MongoDB totally newb here. Here’s an error I’m constantly noticing when running an app (Rocket.Chat):Also I’m looking to fix this:Can you please give me some hints?Thank you!", "username": "Razvan_Rosca" }, { "code": "", "text": "Hello @Razvan_Rosca and welcome to the MongoDB community forums. The first error is because an index is trying to be created as unique but there are duplicate records for the three fields. You would need to figure out how duplicate entries are getting entered into the database from the application. I’ve not used the app before so not sure what might be going on.As for the second error, that looks like an error with the app itself and nothing to do with MongoDB.", "username": "Doug_Duncan" } ]
Some indexes for collection 'X' could not be created. Index build failed:
2022-08-16T21:10:32.669Z
Some indexes for collection 'X' could not be created. Index build failed:
2,195
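Following up on the duplicate-key thread above: before the unique index can build, the duplicate documents have to be found and resolved. A hedged mongosh sketch — the database, collection, and field names are taken from the error message; which duplicate to keep is up to you:

```js
// list every (roomId, userId, messageId) combination that appears more than once
const wow = db.getSiblingDB("wow");   // database name taken from the error message
wow.rocketchat_read_receipts.aggregate([
  { $group: {
      _id: { roomId: "$roomId", userId: "$userId", messageId: "$messageId" },
      ids: { $push: "$_id" },      // the _ids of the duplicated documents
      count: { $sum: 1 }
  } },
  { $match: { count: { $gt: 1 } } }
])
```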
null
[ "mongodb-shell" ]
[ { "code": "mongomongoshmongosh --version\n1.5.4\n", "text": "I have mongodb community 6.0.0 installed.The command mongo is not found.The command mongosh is working.", "username": "Big_Cat_Public_Safety_Act" }, { "code": "", "text": "Mongosh is the way to the future. The mongo command line interface is being phased out.", "username": "steevej" }, { "code": "mongomongoshmongomongoshmongomongoshmongomongoshmongosh", "text": "The mongo command line interface is being phased out.mongo is not being included with 6.0 release.As Steeve states, mongosh is the tool to use now and (almost) anything you were able to do with the older mongo command line you can do with `mongosh. The documentation does have this message on it:Currently mongosh supports a subset of the mongo shell methods. Achieving feature parity between mongosh and the mongo shell is an ongoing effort.Most, if not all, of the functionality you use in day to day interactions. I’ve not run into anything that I couldn’t do in mongosh, but I’m sure there are still some lesser used items that haven’t made their way into mongosh.", "username": "Doug_Duncan" } ]
On my Mac, mongosh is found, but mongo is not
2022-08-16T17:47:44.515Z
On my Mac, mongosh is found, but mongo is not
7,100
https://www.mongodb.com/…669e6a56ecc6.png
[ "python" ]
[ { "code": "client = MongoClient(\n SOME_URI,\n user=user,\n pass=pass\n uuidRepresentation=UuidRepresentation.PYTHON_LEGACY\n )\n", "text": "Hi community!\nI have stumbled upon an issue working with uuidRepresentations in python module. I am updating an application to run the last version of pymongo (3.12.3 to 4.2.0). In the code snippet I am initializing a MongoClient object passing username, password, uri and uuidRepresentation as kwargs.However, when I check the properties of client object in debug I could see that the uuidRepresentation in codec_options is equal to 0 (UNSPECIFIED) and it looks like it is being set to default value somewhere inside pymongo, regardless of what uuidRepresentation parameter I passed to MongoClient constructor.\n.\nWhat could be the cause of this issue and how to fix it properly?Thanks in advance!", "username": "Oleksii_Anoshyn" }, { "code": "uuidRepresentation=UuidRepresentation.PYTHON_LEGACYValueError: 3 is an invalid UUID representation. Must be one of ('unspecified', 'standard', 'pythonLegacy', 'javaLegacy', 'csharpLegacy')uuidRepresentation='pythonLegacy'client.codec_options.uuid_representationmongodb://localhost:27107/?uuidRepresentation=standard", "text": "Hi @Oleksii_Anoshyn, I’m having a difficult time reproducing your problem. When I try with uuidRepresentation=UuidRepresentation.PYTHON_LEGACY, I get a traceback:ValueError: 3 is an invalid UUID representation. Must be one of ('unspecified', 'standard', 'pythonLegacy', 'javaLegacy', 'csharpLegacy')because it is expecting a string value and not an enum.When I use uuidRepresentation='pythonLegacy', I get “3” for the value of client.codec_options.uuid_representation.I also verified that a URI argument does not override the value given as a keyword (e.g. mongodb://localhost:27107/?uuidRepresentation=standard).Are you able to give fully self-contained example code that reproduces the problem?", "username": "Steve_Silvester" } ]
uuidRepresentation issue in pymongo when passing a parameter to MongoClient
2022-08-16T15:42:37.881Z
uuidRepresentation issue in pymongo when passing a parameter to MongoClient
1,748
https://www.mongodb.com/…_2_1024x640.jpeg
[]
[ { "code": "", "text": "\nScreenshot 2022-08-04 at 5.32.59 PM1920×1200 172 KB\n\nmongo comman not found", "username": "Prajwal_Janbandhu" }, { "code": "$PATH", "text": "Hi @Prajwal_Janbandhu and welcome to the MongoDB community.This portion of your screenshot looks to provide some answers:\nimage1574×242 66.8 KB\nDid you update your $PATH variable as recommended? If you did, do you see the path mentioned?MongoDB 6.0 was recently released. Is there a reason you’re using MongoDB 4.4? While 4.4 will be supported for about 6 more months, would it make sense for you to upgrade to a newer version of the database to get the most recent security updates and features?", "username": "Doug_Duncan" }, { "code": "", "text": "\nScreenshot 2022-08-09 at 12.14.24 PM1920×1200 247 KB\n\ni had installed version 5.0 and also i want to run the mongod but it’s showing this error i had included the path also.sudo export PATH=$PATH:/usr/local/Cellar/[email protected]/5.0.10/bin\nthis path i have uploaded in bash profile and i have tried to open the executable file from that path but the same error occurs. please help regards with this.", "username": "Prajwal_Janbandhu" }, { "code": "", "text": "You might have installed mongod as service\nWhen you issued mongo looks like it did connect to default mongod running on port 27017\nJust connect and issue db,show dbs\nYou don’t have to start mongod again\nThat may be the reason it is not allowing mongod to come up as you already have a mongod up and running", "username": "Ramachandra_Tummala" }, { "code": "mongod/tmp/mongodb-27017.lockls -alh /tmp/mongodb-27017.locksudo rm /tmp/mongodb-27017.lockmongod", "text": "The error in your log file states that the mongod process cannot unlink the file /tmp/mongodb-27017.lock because permission is denied. My guess here is that the file is owned by the root user. You can check that by running ls -alh /tmp/mongodb-27017.lock.To correct this issue, you can delete the file sudo rm /tmp/mongodb-27017.lock and then start the mongod process once more, which will recreate the file.", "username": "Doug_Duncan" }, { "code": "sudo rm /tmp/mongodb-27017.lock", "text": "After running this command\nsudo rm /tmp/mongodb-27017.lock\nIt says rm: /tmp/mongodb-27017.lock: No such file or directory.\n\nScreenshot 2022-08-16 at 9.20.03 PM1920×1200 356 KB\n", "username": "Prajwal_Janbandhu" }, { "code": "/data/dbbrew/usr/local/etc/mongodb.conf/usr/local/var/mongodbmongod -f /usr/local/etc/mongodb.conf", "text": "Half way down the screenshot you will see the following line:\nimage3250×80 85.1 KB\nThis states that the MongoDB engine cannot find the path /data/db. This is the default location to use for the data files if an alternate path is not provided on the command line or a config file. Since the path doesn’t exist, the process cannot create the database files that are needed and the process then shuts down.If you installed MongoDB with brew, you should have a config file at /usr/local/etc/mongodb.conf that overrides the default location and writes the database files to /usr/local/var/mongodb. If this is the case, try running mongod -f /usr/local/etc/mongodb.conf", "username": "Doug_Duncan" }, { "code": "", "text": "it says that no such file or directory after running\nmongod -f /usr/local/etc/mongodb.conf\nthis command", "username": "Prajwal_Janbandhu" }, { "code": "brewbrew", "text": "Hi @Prajwal_Janbandhu can you explain how you installed MongoDB on your MacBook? I assumed you used brew but maybe you just downloaded the archive. 
If you just downloaded the archive, then obviously none of the configuration files or paths will be set up as that’s something that brew does for you.", "username": "Doug_Duncan" }, { "code": "", "text": "I had installed mongodb using brew only and followed the instructions given on the official page of mongodb", "username": "Prajwal_Janbandhu" }, { "code": "brew servicesplist~/Library/LaunchAgents/homebrew.mxcl.mongodb-community.plistbrewmongod.confmongodb.conf", "text": "If you run brew services it should show the path to the plist file that brew uses to run the service. For my machine this points to ~/Library/LaunchAgents/homebrew.mxcl.mongodb-community.plist. This file will contain, among other things, the program arguments which controls how brew would start things up:I see that I made a typo and the config file is mongod.conf and not mongodb.conf. Sorry about that.", "username": "Doug_Duncan" } ]
Mongo command not found
2022-08-04T21:57:46.831Z
Mongo command not found
11,191
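A short follow-up to the Homebrew thread above — two commands that surface the information Doug mentions. The config path is an assumption: Intel Macs typically use /usr/local/etc/mongod.conf, Apple Silicon uses /opt/homebrew/etc/mongod.conf.

```sh
# show the services brew manages and the plist files behind them
brew services list

# start mongod manually with the brew-installed config file (adjust the path for your machine)
mongod --config /usr/local/etc/mongod.conf
```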
null
[ "queries" ]
[ { "code": "", "text": "Hello,What is the proper filter document to be used in mongodb atlas to filter documents in a collection based on a field of type UUID? I have been unable to find any documentation or examples on this.", "username": "Michael_Fyffe" }, { "code": "", "text": "Use $uuid instead of UUID().{“id”:{\"$uuid\": “2a7c560c-bb1a-466a-84f8-94c98e273179”}}", "username": "Rene_Hartmann" }, { "code": "", "text": "Not working. =/(BadValue) unknown operator: $uuid", "username": "Daniel_Oliveira" }, { "code": "", "text": "Type issues like your uuid problem are hard(1) to find without real non-redacted example documents and real non-redacted code.Please share documents and code.hard(1) - because sometimes, the data is stored as the string representation and the query is done with the binary data or the other way around, it is store in the binary format and the query is done using the string representation.", "username": "steevej" }, { "code": "{\n \"_id\": {\n \"$binary\": {\n \"base64\": \"ZJZwgzlEQeyU0wJ7Uhhtuw==\",\n \"subType\": \"04\"\n }\n },\n \"Name\": \"STEPHANIE STONE\"\n}\nexports = async function (changeEvent) {\n const collection = context.services\n .get(\"Cluster0\")\n .db(\"my-database\")\n .collection(\"my-collection\");\n\n const doc = await collection.findOne({\n \"_id\": { $uuid: \"64967083-3944-41EC-94D3-027B52186DBB\" }\n });\n\n return doc;\n}\n", "text": "Hi, @steevej! Thank you for your reply and explanation!Here is my document:And here is my Trigger code:It throws: (BadValue) unknown operator: $uuidHere is a print of what i am trying to do:\n\nshare1683×641 56.2 KB\nThank you again!", "username": "Daniel_Oliveira" }, { "code": "", "text": "Take a look at $convert.I am not sure it could be used to convert to UUID, but it is where I would start my quest.", "username": "steevej" } ]
Unable to search documents in atlas by UUID
2020-08-20T04:33:52.126Z
Unable to search documents in atlas by UUID
6,406
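For the UUID-filter thread above: the stored value is a BSON Binary with subtype 4, which the mongosh UUID() helper produces directly. A minimal sketch — the database, collection, and UUID string come from the posts; as the thread shows, the same helper is not available inside an Atlas Function, which is a separate problem:

```js
// mongosh: UUID() builds a Binary with subType 4, matching the stored _id
db.getCollection("my-collection").findOne({
  _id: UUID("64967083-3944-41ec-94d3-027b52186dbb")
})
```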
https://www.mongodb.com/…_2_1024x640.jpeg
[ "mongodb-shell", "mongodb-world-2022" ]
[ { "code": "", "text": "\nScreenshot 2022-08-16 at 9.20.03 PM1920×1200 356 KB\n", "username": "Prajwal_Janbandhu" }, { "code": "", "text": "See some clues in your other post Mongo command not found - #6 by Prajwal_Janbandhu", "username": "steevej" }, { "code": "", "text": "I have responded at Mongo command not found - #7 by Doug_Duncan.", "username": "Doug_Duncan" } ]
Mongod command is not working on mac m1
2022-08-16T15:58:27.427Z
Mongod command is not working on mac m1
3,170
null
[ "python", "containers" ]
[ { "code": "from pymongo import MongoClient\n\nclient = MongoClient(host=\"test_mongodb\",\n port = 27018,\n username = \"root\",\n password = \"rootpassword\",\n authSource = \"admin\"\n )\n\n#db is same as directory created to identify database\n#default port is 27017\n\ndb = client.aNewDB\n#db is a new database \nUserNum = db[\"UserNum\"]\n#UserNum is a new Collection\nUserNum.insert_one({'num_of_users':0})\nversion: '3'\n\nservices:\n web:\n build: ./Web\n ports: \n - \"5000:5000\"\n links:\n - db #Web is dependent on db\n db:\n image: mongo:latest\n hostname: test_mongodb\n environment:\n - MONGO_INITDB_ROOT_USERNAME=admin\n - MONGO_INITDB_ROOT_PASSWORD=password\n ports:\n - 27018:27018\nweb_1 | Traceback (most recent call last):\nweb_1 | File \"app.py\", line 21, in <module>\nweb_1 | UserNum.insert_one({'num_of_users':0})\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/collection.py\", line 628, in insert_one\nweb_1 | comment=comment,\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/collection.py\", line 562, in _insert_one\nweb_1 | self.__database.client._retryable_write(acknowledged, _insert_command, session)\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/mongo_client.py\", line 1447, in _retryable_write\nweb_1 | with self._tmp_session(session) as s:\nweb_1 | File \"/usr/local/lib/python3.7/contextlib.py\", line 112, in __enter__\nweb_1 | return next(self.gen)\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/mongo_client.py\", line 1729, in _tmp_session\nweb_1 | s = self._ensure_session(session)\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/mongo_client.py\", line 1712, in _ensure_session\nweb_1 | return self.__start_session(True, causal_consistency=False)\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/mongo_client.py\", line 1657, in __start_session\nweb_1 | self._topology._check_implicit_session_support()\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/topology.py\", line 538, in _check_implicit_session_support\nweb_1 | self._check_session_support()\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/topology.py\", line 555, in _check_session_support\nweb_1 | readable_server_selector, self.get_server_selection_timeout(), None\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/topology.py\", line 240, in _select_servers_loop\nweb_1 | % (self._error_message(selector), timeout, self.description)\nweb_1 | pymongo.errors.ServerSelectionTimeoutError: test_mongodb:27018: timed out, Timeout: 30s, Topology Description: <TopologyDescription id: 62fa0685c58c2b61f79ea52e, topology_type: Unknown, servers: [<ServerDescription ('test_mongodb', 27018) server_type: Unknown, rtt: None, error=NetworkTimeout('test_mongodb:27018: timed out')>]>\nflask_project_web_1 exited with code 1\n", "text": "I’ve been trying to connect flask with mongodb over docker but constantly get the timeout error. Here’s my code and error below. Please let me know where I’ve gone wrong? 
Thanks.Also, I’ve intentionally chosen port 27018 instead of 27017app.py code:docker-compose.ymlError during docker-compose up:", "username": "RCane" }, { "code": "", "text": "Try with the default port 27017 and share the result.", "username": "steevej" }, { "code": "ERROR: for flask_project_db_1 Cannot start service db: driver failed programming external connectivity on endpoint flask_project_db_1 (41241e60d62271a299edab7d1ef56cb9d5fa3328cb9028c13f093972d796b915): Error starting userland proxy: listen tcp4 0.0.0.0:27017: bind: address already in use\n\nERROR: for db Cannot start service db: driver failed programming external connectivity on endpoint flask_project_db_1 (41241e60d62271a299edab7d1ef56cb9d5fa3328cb9028c13f093972d796b915): Error starting userland proxy: listen tcp4 0.0.0.0:27017: bind: address already in use\nERROR: Encountered errors while bringing up the project.\nweb_1 | Traceback (most recent call last):\nweb_1 | File \"app.py\", line 25, in <module>\nweb_1 | UserNum.insert_one({'num_of_users':0})\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/collection.py\", line 628, in insert_one\nweb_1 | comment=comment,\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/collection.py\", line 562, in _insert_one\nweb_1 | self.__database.client._retryable_write(acknowledged, _insert_command, session)\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/mongo_client.py\", line 1447, in _retryable_write\nweb_1 | with self._tmp_session(session) as s:\nweb_1 | File \"/usr/local/lib/python3.7/contextlib.py\", line 112, in __enter__\nweb_1 | return next(self.gen)\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/mongo_client.py\", line 1729, in _tmp_session\nweb_1 | s = self._ensure_session(session)\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/mongo_client.py\", line 1712, in _ensure_session\nweb_1 | return self.__start_session(True, causal_consistency=False)\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/mongo_client.py\", line 1657, in __start_session\nweb_1 | self._topology._check_implicit_session_support()\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/topology.py\", line 538, in _check_implicit_session_support\nweb_1 | self._check_session_support()\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/topology.py\", line 555, in _check_session_support\nweb_1 | readable_server_selector, self.get_server_selection_timeout(), None\nweb_1 | File \"/usr/local/lib/python3.7/site-packages/pymongo/topology.py\", line 240, in _select_servers_loop\nweb_1 | % (self._error_message(selector), timeout, self.description)\nweb_1 | pymongo.errors.ServerSelectionTimeoutError: test_mongodb:27017: timed out, Timeout: 30s, Topology Description: <TopologyDescription id: 62fa397096b8563268d159b9, topology_type: Unknown, servers: [<ServerDescription ('test_mongodb', 27017) server_type: Unknown, rtt: None, error=NetworkTimeout('test_mongodb:27017: timed out')>]>\n", "text": "Hi @steevej I tried running the same with port 27017.I have mongodb running locally on that port so I got the following message:I performed db.shutdownServer() within mongo to close the same and retried to obtain the response as below:", "username": "RCane" }, { "code": "", "text": "Could you try to split your setup in 2 parts? One for the web part and one for the db part.It would be easier to find what is the issue.Make sure you start the db part first, then try to connect manually with mongosh. 
Once you know that part is working, try the web part. If you really want to keep both in the same compose file, you can put them back together once you know they each work separately.", "username": "steevej" } ]
MongoDB shows timeout error when connected with flask over docker
2022-08-15T10:00:50.797Z
MongoDB shows timeout error when connected with flask over docker
4,134
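Two details in the compose thread above are worth spelling out: the ports: mapping is host:container, and the mongo image listens on 27017 inside the container regardless of what is published, so the Flask container should connect to test_mongodb:27017; the credentials in app.py (root/rootpassword) also do not match MONGO_INITDB_ROOT_USERNAME/PASSWORD (admin/password) in the compose file. A hedged sketch of a compose file consistent with steevej's suggestion:

```yaml
version: '3'

services:
  web:
    build: ./Web
    ports:
      - "5000:5000"
    depends_on:
      - db
  db:
    image: mongo:latest
    hostname: test_mongodb
    environment:
      - MONGO_INITDB_ROOT_USERNAME=admin
      - MONGO_INITDB_ROOT_PASSWORD=password
    ports:
      - "27018:27017"   # host port 27018 -> container port 27017 (mongod's default)
```

The application should then connect to host "test_mongodb", port 27017, with the username and password set in MONGO_INITDB_ROOT_USERNAME/PASSWORD.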
null
[]
[ { "code": "", "text": "HiAfter migrating to a recent version of MongoDB, we noticed that the log is in json format and it’s not easy to ready it, is there any free tool (in windows env) we can use to parse and read the log in more readable formatthanx", "username": "Mohamed_B" }, { "code": "", "text": "Opening a JSON with firefox usually allows easy reading.The jq tool is also nice to have\nhttps://stedolan.github.io/jq/", "username": "steevej" } ]
Read MongoDB Log file on windows
2022-08-16T15:16:02.742Z
Read MongoDB Log file on windows
1,057
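To make steevej's jq suggestion above concrete — a couple of one-liners, assuming the default log location (on Windows, point jq at whatever path is configured under systemLog.path instead):

```sh
# pretty-print every entry
jq . /var/log/mongodb/mongod.log

# keep only warning/error/fatal entries (the "s" field is the severity)
jq -c 'select(.s == "W" or .s == "E" or .s == "F")' /var/log/mongodb/mongod.log
```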
null
[ "monitoring" ]
[ { "code": "", "text": "I want to create third party integration to fetch metrics from MongoDB atlas which will help in monitoring mongodb instances. where can i find metrics which are helpfull.\nThanks.", "username": "Niraj_Rathod" }, { "code": "", "text": "Hi @Niraj_Rathod,Thank you for posting and welcome to the MongoDB developer forums! Please take a look at the documentation here, which will provide more information on how to get monitoring data for a host.Thanks!\nFrank", "username": "Frank_Sun" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Fetching MongoDB Atlas metrics for monitoring
2022-08-16T05:10:01.166Z
Fetching MongoDB Atlas metrics for monitoring
2,000
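For the metrics-integration thread above, the documentation Frank links describes the process measurements endpoint of the Atlas Administration API. A hedged sketch of a call — every value in braces is a placeholder you supply, and the query parameters shown are only a subset of what the endpoint accepts:

```sh
# digest-authenticated request for one hour of per-minute measurements for a single process
curl --user "{PUBLIC_KEY}:{PRIVATE_KEY}" --digest \
  --header "Accept: application/json" \
  "https://cloud.mongodb.com/api/atlas/v1.0/groups/{GROUP_ID}/processes/{HOSTNAME}:{PORT}/measurements?granularity=PT1M&period=PT1H"
```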
null
[ "aggregation", "queries", "node-js", "dot-net" ]
[ { "code": "[\n {\n \"id\": 1,\n \"name\": \"Crime and punishment\"\n },\n {\n \"id\": 2,\n \"name\": \"Atomic habits\"\n },\n {\n \"id\": 3,\n \"name\": \"Demons\"\n },\n {\n \"id\": 4,\n \"name\": \"C# for beginners\"\n }\n]\n[\n {\n \"id\": 1,\n \"userId\": 75,\n \"books\": [\n {\n \"id\": 1,\n \"price\": 50\n },\n {\n \"id\": 2,\n \"price\": 20\n }\n ]\n },\n {\n \"id\": 2,\n \"userId\": 184,\n \"books\": [\n {\n \"id\": 3,\n \"price\": 10\n },\n {\n \"id\": 4,\n \"price\": 99\n }\n ]\n }\n]\n {\n $lookup: {\n from: 'Purchases',\n let: { id: \"$id\", \n pipeline: [\n { $match: \n { $expr: { \n $and: [\n { $eq: ['$userId', user.id] },\n { $in: ['$books.id', '$id'] }\n ], } } },\n ],\n as: 'shopData',\n },\n}\n", "text": "Hello everyone.\nI have a collection of books.And I have a shopping list(The id in books is the id of the book in books collection):I want a field for each product to inform me if the product is purchased or not.\nThere I tried to get the shopData list and if length !== 0 => we purchased the book.But i always get empty shopData.\nTried through $addFields, to immediately on the database side to calculate whether it is in the lists or not, and not pull the entire list, but also to no avail.\nWhat i’am doing wrong ?", "username": "librain" }, { "code": "SyntaxError: Unexpected token, expected \",\" (15:1)\n\n 13 | as: 'shopData',\n 14 | },\nlet : { \"id\" : \"$_id\" }\n", "text": "The lookup you share generates a syntax error.I think you are missing a closing brace for your let: object.Are you sure that your collection of books have books with id:1?Do you manage an id field that is different from the default _id field?If not then you might want to try with", "username": "steevej" }, { "code": "_id: ObjectId(\"68728313db115d9555e\")\"id\":ObjectId(\"68728313db115d9555e\")Shopping list -> books\"_id\": ObjectId(\"4354qweq1312489ewq\")[\n {\n $sort: { _id: -1 },\n },\n {\n $lookup: {\n from: 'Purchases',\n let: { id: '$_id' },\n pipeline: [\n {\n $match: {\n $expr: {\n $and: [{ $in: ['$$id', '$books.id'] }, { $eq: ['$userId', user.id] }],\n },\n },\n },\n ],\n as: 'shopData',\n },\n },\n {\n $project: {\n _id: 1,\n name: 1,\n shopData: 1,\n },\n },\n { $skip: take * (pageParam - 1) },\n { $limit: take },\n ] as any[];\n", "text": "I think you are missing a closing brace for your let: object.Misspelled.Are you sure that your collection of books have books with id:1 ?For ease of code exchange I made this data like this, in fact the id there is _id: ObjectId(\"68728313db115d9555e\") and in shopping list book id is \"id\":ObjectId(\"68728313db115d9555e\")Do you manage an id field that is different from the default _id field?No. In Shopping list -> books I think the id is custom, but for all others the id is like this : \"_id\": ObjectId(\"4354qweq1312489ewq\")\nFull query:", "username": "librain" }, { "code": "", "text": "Please share real non-redacted documents that should be processed with your real non-redacted pipeline.Try $eq rather than $in.Also share the code that uses the pipeline.", "username": "steevej" } ]
How to join data from two collections to find purchases?
2022-08-15T13:47:14.184Z
How to join data from two collections to find purchases?
2,192
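For the purchases thread above, the end goal was a boolean per book. A sketch that combines the corrected $lookup with an $addFields stage — note the $in arguments are ordered (value, array), and the userId constant is a placeholder the application would supply:

```js
const userId = ObjectId("62d66bc24c17ee18272f97ad");   // placeholder — substitute the real user id

db.books.aggregate([
  { $lookup: {
      from: "Purchases",
      let: { bookId: "$_id" },
      pipeline: [
        { $match: { $expr: { $and: [
            { $eq: ["$userId", userId] },
            { $in: ["$$bookId", "$books.id"] }   // is this book's _id in the purchase's books array?
        ] } } }
      ],
      as: "shopData"
  } },
  // purchased is true whenever at least one purchase referenced this book
  { $addFields: { purchased: { $gt: [{ $size: "$shopData" }, 0] } } }
])
```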
null
[]
[ { "code": "", "text": "I have a problem with UUID() generation in atlas trigger. Any calls of UUID() throw → ReferenceError: ‘UUID’ is not defined.I can’t find any useful references for this problem…", "username": "Damian_S" }, { "code": "", "text": "Hi there!Could you find any help on this?", "username": "Daniel_Oliveira" } ]
ReferenceError: 'UUID' is not defined
2021-12-01T13:16:00.900Z
ReferenceError: 'UUID' is not defined
2,283
null
[ "app-services-cli" ]
[ { "code": "", "text": "Hi,i’m not able to push with “realm-cli push --remote APPNAME -y” while having a breaking change.\n“push failed: breaking change not allowed: update is missing property:”\nIs there any parameter that allow me to force the push ?I also tried to pause the sync and push, but didn’t work eather.Kind Regards\nBjörn", "username": "Bjorn_Hempel" }, { "code": "/sync/config.jsonstate\"\"realm-cli pushrealm-cli pushconfig.jsonstateenabledrealm-cli push", "text": "Hi Björn,First of all, welcome to the MongoDB Developer Community! Regarding your question, there are no parameters in Realm CLI in order for you to force the push.When a destructive change happens, pausing Sync won’t work. You have to execute a couple of extra steps:You will need to terminate Device Sync. You can do that by going to your /sync/config.json file in the project directory and setting the state field to an empty string \"\". After that please execute your realm-cli push command.Make the desired schema changes ensuring that the existing data is compliant, and execute realm-cli push again to push your changes.Enable Device Sync again by going to the config.json file and change the state field to enabled. After that use therealm-cli push command as the final step.Please be aware that If you are re-enabling Atlas Device Sync after you have terminated it, you must perform a manual Client Reset in your client application.I hope this helps.Mar", "username": "Mar_Cabrera" }, { "code": "", "text": "Hi Mar,thanks for the explanation. I will try that out next time and maybe integrate it in our pipeline.", "username": "Bjorn_Hempel" }, { "code": "", "text": "No problem at all! Glad I could help.", "username": "Mar_Cabrera" } ]
Realm CLI push with breaking Changes
2022-08-16T07:51:51.578Z
Realm CLI push with breaking Changes
3,048
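Condensing Mar's steps above into a runnable sequence — the app name and the manual edits to sync/config.json are the only inputs, and each push is the same command from the original question:

```sh
# 1. terminate sync: set "state": "" in sync/config.json, then push
realm-cli push --remote APPNAME -y

# 2. make the breaking schema change in the exported app files, then push again
realm-cli push --remote APPNAME -y

# 3. re-enable sync: set "state": "enabled" in sync/config.json, then push a final time
realm-cli push --remote APPNAME -y
```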
https://www.mongodb.com/…1_2_1024x486.png
[]
[ { "code": " author: {\n type: Schema.Types.ObjectId,\n ref: 'authors'\n },\n", "text": "hello everyone, I refer from one collection to another in atlas via ObjectID , but it does not work, what else needs to be done in order to work correctly\nimage1806×858 90.2 KB\nField author in Product schema:", "username": "Kostya_N_A" }, { "code": "", "text": "i add ObjectID field manually via UI atlas", "username": "Kostya_N_A" } ]
ObjectID doesn't work in atlas
2022-08-16T09:16:34.661Z
ObjectID doesn't work in atlas
1,162
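The thread above did not get an answer, so a brief note: Atlas only stores the ObjectId — ref is a Mongoose concept that is resolved at query time with populate(). A minimal sketch, assuming a Product model built from the schema in the question and an author model registered under the name used in ref:

```js
// ref does nothing at write time; it tells populate() which model to load at query time
const product = await Product.findById(productId).populate("author");
console.log(product.author);   // now a full author document instead of a bare ObjectId
```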
null
[]
[ { "code": "", "text": "Hi! I would like to lock a batch of documents for processing in an application consisting of multiple instances. While one instance has selected a batch of documents for further processing, other instances should not be able to select it.findAndModify appears to be a perfect solution, but it only chooses a single document from the query, then updates it and returns the original version.What I need, is a similar method for a batch of documents. It should in particular:Is there any way to accomplish this challenge?", "username": "Piotr_Kwiatkowski" }, { "code": "selectfindAndModifynew: <boolean>true", "text": "Hello @Piotr_Kwiatkowski,Welcome to MongoDB community! I notice you haven’t had a response to this topic yet - were you able to find a solution?\nIf not, could you please help me understand below things from your use-case?other instances should not be able to select itWhat is select in this context?\nis this a read operation (other transactions should not be able to read)?\nor are you referring to a write operation here?further processingCan you elaborate on this?I think what you are describing so far can be solved using Transactions, where a series of operations are executed in its entirety, or none of them, and they are treated as a singular operation. Note that multiple operations (including findAndModify) can be executed within a transaction. For more information, please see:it only chooses a single document from the query, then updates it and returns the original version.This is correct but you can get the updated version instead of origina, by setting new: <boolean> to true, the default is false.If you need further information, could you please provide:Regards,\nTarun", "username": "Tarun_Gaur" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
findAndModify multiple documents
2022-08-01T11:36:42.344Z
findAndModify multiple documents
2,989
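One pattern that fits the batch-locking question above (independent of transactions): claim documents by stamping them with a per-worker token, then read back exactly what was claimed. A hedged mongosh sketch — the jobs collection and the lockedBy/lockedAt fields are invented for the example:

```js
const batchId = ObjectId();   // token identifying this worker's claim

// pick candidate documents that are not yet claimed
const candidates = db.jobs.find({ lockedBy: null }).limit(100).map(d => d._id);

// stamp only documents that are still unclaimed; workers racing on the same
// candidates simply skip each other's documents because of the lockedBy re-check
db.jobs.updateMany(
  { _id: { $in: candidates }, lockedBy: null },
  { $set: { lockedBy: batchId, lockedAt: new Date() } }
);

// the worker's batch is whatever ended up carrying its token
const batch = db.jobs.find({ lockedBy: batchId }).toArray();
```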
null
[ "aggregation", "queries", "performance", "time-series" ]
[ { "code": "timestamptimeFieldtimestampdb.collection(\"iot-time-series\")\n .find({}, { sort: { timestamp: -1 }, limit: 1 })\n .toArray()\ntimestamp{\n \"explainVersion\": \"1\",\n \"stages\": [\n {\n \"$cursor\": {\n \"queryPlanner\": {\n \"namespace\": \"REDACTED.system.buckets.iot-time-series\",\n \"indexFilterSet\": false,\n \"parsedQuery\": {},\n \"queryHash\": \"8B3D4AB8\",\n \"planCacheKey\": \"D542626C\",\n \"maxIndexedOrSolutionsReached\": false,\n \"maxIndexedAndSolutionsReached\": false,\n \"maxScansToExplodeReached\": false,\n \"winningPlan\": { \"stage\": \"COLLSCAN\", \"direction\": \"forward\" },\n \"rejectedPlans\": []\n },\n \"executionStats\": {\n \"executionSuccess\": true,\n \"nReturned\": 825,\n \"executionTimeMillis\": 1232,\n \"totalKeysExamined\": 0,\n \"totalDocsExamined\": 825,\n \"executionStages\": {\n \"stage\": \"COLLSCAN\",\n \"nReturned\": 825,\n \"executionTimeMillisEstimate\": 0,\n \"works\": 827,\n \"advanced\": 825,\n \"needTime\": 1,\n \"needYield\": 0,\n \"saveState\": 41,\n \"restoreState\": 41,\n \"isEOF\": 1,\n \"direction\": \"forward\",\n \"docsExamined\": 825\n },\n \"allPlansExecution\": []\n }\n },\n \"nReturned\": 825,\n \"executionTimeMillisEstimate\": 90\n },\n {\n \"$_internalUnpackBucket\": {\n \"exclude\": [],\n \"timeField\": \"timestamp\",\n \"metaField\": \"metadata\",\n \"bucketMaxSpanSeconds\": 3600\n },\n \"nReturned\": 294628,\n \"executionTimeMillisEstimate\": 1060\n },\n {\n \"$sort\": { \"sortKey\": { \"timestamp\": -1 }, \"limit\": 1 },\n \"totalDataSizeSortedBytesEstimate\": 0,\n \"usedDisk\": false,\n \"nReturned\": 1,\n \"executionTimeMillisEstimate\": 1227\n }\n ],\n \"serverInfo\": {\n \"host\": \"REDACTED\",\n \"port\": 27017,\n \"version\": \"5.0.6\",\n \"gitVersion\": \"212a8dbb47f07427dae194a9c75baec1d81d9259\"\n },\n \"serverParameters\": {\n \"internalQueryFacetBufferSizeBytes\": 104857600,\n \"internalQueryFacetMaxOutputDocSizeBytes\": 104857600,\n \"internalLookupStageIntermediateDocumentMaxSizeBytes\": 104857600,\n \"internalDocumentSourceGroupMaxMemoryBytes\": 104857600,\n \"internalQueryMaxBlockingSortMemoryUsageBytes\": 104857600,\n \"internalQueryProhibitBlockingMergeOnMongoS\": 0,\n \"internalQueryMaxAddToSetBytes\": 104857600,\n \"internalDocumentSourceSetWindowFieldsMaxMemoryBytes\": 104857600\n },\n \"command\": {\n \"aggregate\": \"system.buckets.iot-time-series\",\n \"pipeline\": [\n {\n \"$_internalUnpackBucket\": {\n \"timeField\": \"timestamp\",\n \"metaField\": \"metadata\",\n \"bucketMaxSpanSeconds\": 3600,\n \"exclude\": []\n }\n },\n { \"$sort\": { \"timestamp\": -1 } },\n { \"$limit\": 1 }\n ],\n \"cursor\": {},\n \"maxTimeMS\": 60000,\n \"collation\": { \"locale\": \"simple\" }\n },\n \"ok\": 1,\n \"$clusterTime\": {\n \"clusterTime\": { \"$timestamp\": \"7068709110501343233\" },\n \"signature\": {\n \"hash\": \"/scbPt2f8gVBc/Jpq0GEGEc6Ze4=\",\n \"keyId\": { \"low\": 2, \"high\": 1633075851, \"unsigned\": false }\n }\n },\n \"operationTime\": { \"$timestamp\": \"7068709110501343233\" }\n}\n", "text": "Hi MongoDB community,I’m using a time series collection to store IoT data and would like to get the most recent document in the collection to find out the current state of the IoT device that’s pushing the data.My collection has about 300,000 documents, uses a timestamp field as the timeField, and has an index on timestamp, as suggested in the documentation.I assumed that the following query would be efficient:However, it takes about two seconds to execute on an M10 cluster. 
If I interpret the explain output for this query correctly, MongoDB doesn’t use the index on timestamp and loads all 300,000 documents into memory to find the one with the most recent timestamp.Is there another, more efficient way to get the latest document in a time series collection? Does MongoDB make any guarantees regarding the order of time series documents?", "username": "christiank" }, { "code": "weather> db.weather.find()\n{ \"timestamp\" : ISODate(\"2022-03-01T01:49:59.226Z\"), \"metadata\" : \"x\", \"temp\" : 10, \"_id\" : ObjectId(\"621d7bc74cf7a4c8c6fc47cd\") }\n{ \"timestamp\" : ISODate(\"2022-03-01T01:50:00.342Z\"), \"metadata\" : \"x\", \"temp\" : 10, \"_id\" : ObjectId(\"621d7bc84cf7a4c8c6fc47ce\") }\n{ \"timestamp\" : ISODate(\"2022-03-01T01:50:01.269Z\"), \"metadata\" : \"x\", \"temp\" : 10, \"_id\" : ObjectId(\"621d7bc94cf7a4c8c6fc47cf\") }\n{ \"timestamp\" : ISODate(\"2022-03-01T01:50:02.065Z\"), \"metadata\" : \"x\", \"temp\" : 10, \"_id\" : ObjectId(\"621d7bca4cf7a4c8c6fc47d0\") }\n{ \"timestamp\" : ISODate(\"2022-03-01T01:50:02.865Z\"), \"metadata\" : \"x\", \"temp\" : 10, \"_id\" : ObjectId(\"621d7bca4cf7a4c8c6fc47d1\") }\n> show collections\n...\nweather [time-series]\nsystem.buckets.weather\n...\n\n> db.system.buckets.weather.find()\n[\n {\n _id: ObjectId(\"621d7b8c681c5835f9a3e0fa\"),\n control: {\n version: 1,\n min: {\n timestamp: ISODate(\"2022-03-01T01:49:00.000Z\"),\n temp: 10,\n _id: ObjectId(\"621d7bc74cf7a4c8c6fc47cd\")\n },\n max: {\n timestamp: ISODate(\"2022-03-01T01:50:02.865Z\"),\n temp: 10,\n _id: ObjectId(\"621d7bca4cf7a4c8c6fc47d1\")\n }\n },\n meta: 'x',\n data: {\n timestamp: {\n '0': ISODate(\"2022-03-01T01:49:59.226Z\"),\n '1': ISODate(\"2022-03-01T01:50:00.342Z\"),\n '2': ISODate(\"2022-03-01T01:50:01.269Z\"),\n '3': ISODate(\"2022-03-01T01:50:02.065Z\"),\n '4': ISODate(\"2022-03-01T01:50:02.865Z\")\n },\n temp: { '0': 10, '1': 10, '2': 10, '3': 10, '4': 10 },\n _id: {\n '0': ObjectId(\"621d7bc74cf7a4c8c6fc47cd\"),\n '1': ObjectId(\"621d7bc84cf7a4c8c6fc47ce\"),\n '2': ObjectId(\"621d7bc94cf7a4c8c6fc47cf\"),\n '3': ObjectId(\"621d7bca4cf7a4c8c6fc47d0\"),\n '4': ObjectId(\"621d7bca4cf7a4c8c6fc47d1\")\n }\n }\n }\n]\ntimestamp:1", "text": "Hi @christiank welcome to the community!Since time-series collection is a view into a separate bucketed collection, the usual index improvements do not really apply to them. This is because indexes ties into a physical location of a document. With the underlying bucketed collection, individual time-series measurement do not really have a physical presence that can be tracked by an index.One example: I have a weather time-series collection with a content like:However this is only a view into the actual physical collection holding the data:Those 5 documents inside the weather (view) collection is in reality only 1 document, physically.Thus having a secondary index on timestamp:1 does not work the same way as a non time-series collection, and in this case cannot help to speed up your particular query. The underlying bucketed collection was designed to be aggregated instead of selecting individual “documents” inside the collection.There is an ongoing effort to optimize your use case of querying the recent measurements though: SERVER-4507. 
Please watch/upvote the ticket!Best regards\nKevin", "username": "kevinadi" }, { "code": "", "text": "I voted !Cheers!", "username": "MaBeuLux88" }, { "code": "control.max.timestampasync function findMostRecent() {\n const bucket = await db.collection(\"system.buckets.weather\").findOne(\n {},\n {\n sort: { \"control.max.timestamp\": -1 },\n projection: { \"control.max.timestamp\": 1 }\n }\n );\n\n if (!bucket) return;\n\n return db.collection(\"weather\").findOne({\n timestamp: bucket.control.max.timestamp\n });\n}\n", "text": "@kevinadi Thank you so much for your detailed answer and the example!I found a workaround that uses the internal collection you mentioned to get the most recent document without loading all other documents into memory. The first query finds the bucket that contains the most recent timestamp (which is fast because the index on control.max.timestamp is used), and the second query uses the timestamp to look up the document in the “virtual” time series collection:Do you think this limitation could be mentioned in the documentation (here)? The introduction to time series collections says: “Time series collections behave like normal collections. You can insert and query your data as you normally would.” And the first query example on that page is a lookup of a single document, so I think it’s fair to assume that an index on the timestamp is used when sorting.", "username": "christiank" }, { "code": "", "text": "Is there any update on this question? The performance of time series collections is in this regard very disappointing. Those queries work pretty well with normal collections and now you have to use a workaround?\nEven though the data is stored internally in buckets, it will be sufficient to get the most recent bucket by sorting the buckets based on the time index. 
I tried with version 6.0, the performance is still very poor.Regards,\nMartin", "username": "Martin_Prodanov" }, { "code": " // prepare options\n\ttso := &options.TimeSeriesOptions{\n\t\tTimeField: \"timestamp\",\n\t}\n\ttso.SetMetaField(\"metadata\")\n\ttso.SetGranularity(\"minutes\")\n\tco := &options.CreateCollectionOptions{\n\t\tDefaultIndexOptions: nil,\n\t\tMaxDocuments: nil,\n\t\tStorageEngine: nil,\n\t}\n\tco.SetTimeSeriesOptions(tso)\n // events\n err = srv.storeDB.CreateCollection(ctx, srv.eventCollectionName, co)\n// AddEvent adds a new event to the history store\n// The event 'created' field will be used as timestamp after parsing it using time.RFC3339\nfunc (srv *HistoryStoreServer) AddEvent(ctx context.Context, args *thing.ThingValue) (*emptypb.Empty, error) {\n\t// Name and ThingID are required fields\n\tif args.Name == \"\" || args.ThingID == \"\" {\n\t\terr := fmt.Errorf(\"missing name or thingID\")\n\t\tlogrus.Warning(err)\n\t\treturn nil, err\n\t}\n\tif args.Created == \"\" {\n\t\targs.Created = time.Now().UTC().Format(time.RFC3339)\n\t}\n\n\t// It would be nice to simply use bson marshal, but that isn't possible as the\n\t// required timestamp needs to be added in BSON format.\n\t//createdTime, err := time.Parse(\"2006-01-02T15:04:05-07:00\", args.Created)\n\tcreatedTime, err := time.Parse(time.RFC3339, args.Created)\n\ttimestamp := primitive.NewDateTimeFromTime(createdTime)\n\tevBson := bson.M{\n\t\tTimeStampField: timestamp,\n // the metadata on thingID and name speeds up aggregate query by factor 5\n\t\t\"metadata\": bson.M{\"thingID\": args.ThingID, \"name\": args.Name},\n\t\t\"name\": args.Name,\n\t\t\"thingID\": args.ThingID,\n\t\t\"valueID\": args.ValueID,\n\t\t\"value\": args.Value,\n\t\t\"created\": args.Created,\n\t\t\"actionID\": args.ActionID,\n\t}\n\tres, err := srv.eventCollection.InsertOne(ctx, evBson)\n\t_ = res\n\treturn nil, err\n}\n// GetLatestValues returns the last received event/properties of a Thing\nfunc (srv *HistoryStoreServer) GetLatestValues(ctx context.Context,\n\targs *svc.GetLatest_Args) (*svc.ThingValueMap, error) {\n\tvalues := &svc.ThingValueMap{PropValues: make(map[string]*thing.ThingValue)}\n\tmatchStage := bson.D{\n\t\t{\"$match\",\n\t\t\tbson.D{\n\t\t\t\t{\"thingID\", args.ThingID},\n\t\t\t},\n\t\t},\n\t}\n\tsortStage := bson.D{\n\t\t{\"$sort\",\n\t\t\tbson.D{\n\t\t\t\t{\"timestamp\", -1},\n\t\t\t\t//{\"control.max.timestamp\", -1},\n\t\t\t},\n\t\t},\n\t}\n\tgroupStage := bson.D{\n\t\t{\"$group\",\n\t\t\tbson.D{\n\t\t\t\t{\"_id\", \"$name\"},\n\t\t\t\t{\"name\", bson.M{\"$first\": \"$name\"}},\n\t\t\t\t{\"created\", bson.M{\"$first\": \"$created\"}},\n\t\t\t\t{\"value\", bson.M{\"$first\": \"$value\"}},\n\t\t\t\t{\"valueID\", bson.M{\"$first\": \"$valueID\"}},\n\t\t\t\t{\"thingID\", bson.M{\"$first\": \"$thingID\"}},\n\t\t\t},\n\t\t},\n\t}\n\tpipeline := mongo.Pipeline{matchStage, sortStage, groupStage}\n\taggOptions := &options.AggregateOptions{}\n\tcursor, err := srv.eventCollection.Aggregate(ctx, pipeline, aggOptions)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\treturn nil, err\n\t}\n\tcount := 0\n\tfor cursor.Next(ctx) {\n\t\tvalue := thing.ThingValue{}\n\t\terr = cursor.Decode(&value)\n\t\tvalues.PropValues[value.Name] = &value\n\t\tcount++\n\t}\n\tlogrus.Infof(\"Iterated %d values\", count)\n\treturn values, nil\n}\n", "text": "Thank you for this post. I was struggling with the same question. Your post steered me in the right direction on how to solve this, as did this post. 
In my case I need to get the most recent sensor values of a device. The sensor is identified by the ‘name’ field and the device by the thingID field. So aggregate using $match by thingID and $group on name. The records are added with ‘metadata’ set to “thingID” and “name” as was recommended elsewhere. This speeds up the aggregate query by factor 5 or so. (add ‘name’ didn’t make a difference though)I found that the sort on timestamp worked as expected but using control.max.timestamp didn’t return the most recent record. Also, both have similar performance. Getting the most recent sensors from 100K records with 10 devices and 5 sensor types takes 20msec on my desktop PC, which is a i5-4570S CPU @ 2.90GHz. 300K records takes 63msec, so it looks linear.My code below. This is in golang. I’m new to mongodb so please forgive the ugliness. It feels like a rather cludgy way to get the results. For example, how to get all fields in the result instead of listing each one using $first?Creating the collection:Adding an event:Get the most recent sensor values of a device:", "username": "zaai" }, { "code": "", "text": "Some additional findings:", "username": "zaai" }, { "code": "", "text": "Hi,thanks for the explanation. My personal speculation - It seems that the optimizer does not really now how many actual “documents” are stored in a bucket in order to push the limit step up in the query plan. In a normal collection, the index size corresponds to the the number of “documents” and it is enough to scan the index only to get the number of required document as specified by the limit op in the query. In a time-series collection, the number of documents in a bucket varies and this metadata is probably not present to the optimizer. Therefore it cannot push the step up in the plan but requires to first unpack all the buckets that match the query criteria and then perform the limit step.\nThis is a significant difference in the behavior and IMHO it has to be listed in the limitations of time series collections. In a normal collection the query predicate can match a billion documents but if you only want 10, it will return pretty fast. Whereas doing the same query on a time series collection may lead to an unresponsive state of the database for some time.Regards,\nMartin", "username": "Martin_Prodanov" } ]
Time series: How to get the most recent document?
2022-02-25T18:10:02.176Z
Time series: How to get the most recent document?
40,132
null
[ "aggregation" ]
[ { "code": "{\n \"_id\": ObjectId(),\n \"dayoffs\" : [\n\t\t{\n\t\t\t\"kind\" : \"A\",\n\t\t\t\"from\" : ISODate(\"2022-01-01T03:00:00Z\"),\n\t\t\t\"to\" : ISODate(\"2022-01-01T03:00:00Z\"),\n\t\t\t\"reason\" : \"B\",\n\t\t\t\"_id\" : ObjectId(\"62d66bc24c17ee18272f97ad\"),\n\t\t\t\"approvers\" : [ ]\n\t\t},\n\t\t{\n\t\t\t\"kind\" : \"A\",\n\t\t\t\"from\" : ISODate(\"2022-01-01T03:00:00Z\"),\n\t\t\t\"to\" : ISODate(\"2022-01-01T03:00:00Z\"),\n\t\t\t\"reason\" : \"B\",\n\t\t\t\"_id\" : ObjectId(\"62d66be73844cc213cf958cf\"),\n\t\t\t\"approvers\" : [ ]\n\t\t},\n\t\t{\n\t\t\t\"kind\" : \"A\",\n\t\t\t\"from\" : ISODate(\"2022-01-02T01:30:00Z\"),\n\t\t\t\"to\" : ISODate(\"2022-01-02T10:30:00Z\"),\n\t\t\t\"reason\" : \"B\",\n\t\t\t\"_id\" : ObjectId(\"62e238306821bfc1ee2bd308\")\n\t\t},\n\t\t{\n\t\t\t\"kind\" : \"A\",\n\t\t\t\"from\" : ISODate(\"2022-01-03T01:30:00Z\"),\n\t\t\t\"to\" : ISODate(\"2022-01-03T10:30:00Z\"),\n\t\t\t\"reason\" : \"B\",\n\t\t\t\"_id\" : ObjectId(\"62e2386dda2753fad5ca65ce\")\n\t\t}\n\t]\n}\n{\n \"$match\": {\n \"dayoffs.from\": {\n \"$gte\": new Date(\"2022/01/03\")\n },\n \"dayoffs.to\": {\n \"$lte\": new Date(\"2022/01/03 23:59:00\")\n }\n }\n}\n", "text": "I have a document like this:I try to query like this:Why can’t I get dayoffs with from and to within a day 2022/01/03?", "username": "MAY_CHEAPER" }, { "code": "ISODate$matchdayoffs$filterfilteredEntriesdb.test.aggregate(\n{\n '$addFields': {\n filteredEntries: {\n '$filter': {\n input: '$dayoffs',\n as: 'entry',\n cond: {\n '$and': [\n {\n '$gte': [ '$$entry.from', ISODate(\"2022-01-03T00:00:00.000Z\") ]\n },\n {\n '$lte': [ '$$entry.to', ISODate(\"2022-01-03T23:59:59.000Z\") ]\n }\n ]\n }\n }\n }\n }\n}\n$projectfilteredEntriesfilteredEntries[\n {\n _id: ObjectId(\"62fb04b5ab5d901302a457e5\"),\n filteredEntries: [\n {\n kind: 'A',\n from: ISODate(\"2022-01-03T01:30:00.000Z\"),\n to: ISODate(\"2022-01-03T10:30:00.000Z\"),\n reason: 'B',\n _id: ObjectId(\"62e2386dda2753fad5ca65ce\")\n }\n ]\n }\n]\n", "text": "Hi @MAY_CHEAPER,There appears to be some formatting differences in the date values you’ve provided in the match stage compared to that noted in the Date() documentation.I believe the way you have it formatted would:specify the datetime in the client’s local timezone and returns the ISODate with the specified datetime in UTC.I note this as MongoDB stores times in UTC, and will convert any local time representations into this form.In saying so, can you advise what your expected output is? I ran the same $match stage on a test environment and was able to retrieve the same document you provided in the post back.Why can’t I get dayoffs with from and to within a day 2022/01/03?If you’re specifically wanting on elements inside of the dayoffs array field that match the date range, perhaps use of the $filter operator may work for you.Please see a simple example below. I have only tested this on a single document but you can see the additional field filteredEntries contains only entries that match the date range:Output (with an additional $project on filteredEntries just to keep post smaller and to verify that only entries within the range were included in the filteredEntries array field):If you believe this may work for you, please test thoroughly in a test environment to verify it meets your use case and requirements.Hope this helps.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. 
New replies are no longer allowed.", "username": "system" } ]
Filter date with $lte and $gte not working
2022-07-28T08:12:37.076Z
Filter date with $lte and $gte not working
15,593
null
[ "queries", "node-js", "transactions" ]
[ { "code": "const getStoreReceivableBalance = async (storeId, companyId) => {\n try {\n\n const storeBalance = await mycollection.find({ storeId, companyId }).sort({ 'createdAt': -1 }).limit(1);\n if (storeBalance.length > 0) {\n return storeBalance[0].balance;\n } else {\n return 0\n }\n\n } catch (error) {\n console.log('error', error);\n }\n}\n// calculate new balance of store\nlet currentStoreReceivableBalance = await transactionServices.getStoreReceivableBalance(storeId, companyId);\ncurrentStoreReceivableBalance = currentStoreReceivableBalance + totalPrice;\n/* 4 createdAt:7/2/2022, 8:37:44 PM*/\n{\n\t\"_id\" : ObjectId(\"62c02e08c5779f54ce56e42f\"),\n\t\"paid\" : 232820000,\n\t\"received\" : 0,\n\t\"total\" : Double(\"7479949874\"),\n\t\"balance\" : Double(\"7479949874\"),\n\t\"isDeleted\" : false,\n\t\"reference\" : \"1574\",\n\t\"storeId\" : ObjectId(\"61cf4afdea853a367998f6e4\"),\n\t\"companyId\" : ObjectId(\"60e5c27b5dd888474611b6a4\"),\n\t\"createdBy\" : ObjectId(\"61d2cb4f3e5a0f1e3f01c42d\"),\n\t\"description\" : \"Order\",\n\t\"prefix\" : \"FC\",\n\t\"createdAt\" : ISODate(\"2022-07-02T20:37:44.665+09:00\")\n},\n\n/* 5 createdAt:7/2/2022, 3:08:47 AM*/\n{\n\t\"_id\" : ObjectId(\"62bf382fc5779f83bd56dc6f\"),\n\t\"paid\" : 0,\n\t\"received\" : 114957600,\n\t\"total\" : Double(\"7132172274\"),\n\t\"balance\" : Double(\"7132172274\"),\n\t\"isDeleted\" : false,\n\t\"reference\" : \"1571\",\n\t\"storeId\" : ObjectId(\"61cf4afdea853a367998f6e4\"),\n\t\"companyId\" : ObjectId(\"60e5c27b5dd888474611b6a4\"),\n\t\"createdBy\" : ObjectId(\"61b86f6e3d35436056f97522\"),\n\t\"recordType\" : \"storeToStoreSaleReceipt\",\n\t\"description\" : \"Payment\",\n\t\"prefix\" : \"FS\",\n\t\"createdAt\" : ISODate(\"2022-07-02T03:08:47.789+09:00\")\n},\n\n/* 6 createdAt:7/2/2022, 2:59:12 AM*/\n{\n\t\"_id\" : ObjectId(\"62bf35f0c5779f926256db25\"),\n\t\"paid\" : 0,\n\t\"received\" : 54700000,\n\t\"total\" : Double(\"7247129874\"),\n\t\"balance\" : Double(\"7247129874\"),\n\t\"isDeleted\" : false,\n\t\"reference\" : \"1570\",\n\t\"storeId\" : ObjectId(\"61cf4afdea853a367998f6e4\"),\n\t\"companyId\" : ObjectId(\"60e5c27b5dd888474611b6a4\"),\n\t\"createdBy\" : ObjectId(\"61b86f6e3d35436056f97522\"),\n\t\"recordType\" : \"storeToStoreSaleReceipt\",\n\t\"description\" : \"Payment\",\n\t\"prefix\" : \"FS\",\n\t\"createdAt\" : ISODate(\"2022-07-02T02:59:12.689+09:00\")\n},\n", "text": "Hello,\nI am using mongodb atlas in my project.\nI have a scenario where I always must return the newest document in my query. But I noticed in some cases the newest document is not returned due to which I am getting wrong account balances.\nBelow is the function I am using.With the above flow there are cases where the new balance is wrong. I am showing an output where I got a wrong balance.I am trying to calculate the new balance of the store when the store makes a payment or an invoice by getting the most recent balance of the store from the collection.The balance for document with id “_id” : ObjectId(“62c02e08c5779f54ce56e42f”) should be calculated by getting the latest balance from document with id “_id” : ObjectId(“62bf382fc5779f83bd56dc6f”), but instead the balance from document with id “_id” : ObjectId(“62bf35f0c5779f926256db25”) was used. 
Therefore, the most recent balance is wrong.Could you help me figure out what is wrong with my approach or whether there is another way to achieve this without having to make big changes in my code?", "username": "Bachir_Diallo" }, { "code": "", "text": "If you are running a replica set and you are reading on a secondary, it is possible that you do not get the latest document if the secondary is lagging the oplog of the primary.Make sure you read from primary only.Look at https://www.mongodb.com/docs/manual/applications/replication/ to see how to setup your writes and reads to make sure you get the data you want.", "username": "steevej" } ]
Find & sort is not always returning the most recent record
2022-08-14T05:18:28.846Z
Find &amp; sort is not always returning the most recent record
1,527
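The advice in the thread above, "make sure you read from primary only", can be made explicit in the driver rather than relying on defaults. A minimal sketch with the official Node.js driver (the thread uses Mongoose, which accepts the same read-preference option); the database and collection names here are illustrative:

```javascript
const { MongoClient, ReadPreference } = require('mongodb');

async function latestBalance(uri, storeId, companyId) {
  const client = new MongoClient(uri);
  try {
    await client.connect();
    // readPreference: 'primary' guarantees the query is served by the primary,
    // so a lagging secondary can never return a stale "latest" document.
    const coll = client
      .db('mydb') // illustrative name
      .collection('mycollection', { readPreference: ReadPreference.PRIMARY });

    const [doc] = await coll
      .find({ storeId, companyId })
      .sort({ createdAt: -1 })
      .limit(1)
      .toArray();

    return doc ? doc.balance : 0;
  } finally {
    await client.close();
  }
}
```

Primary is already the default read preference, but stating it explicitly documents the intent and protects the query from a connection string or client option that routes reads to secondaries.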
null
[ "queries", "atlas-search" ]
[ { "code": "?*", "text": "From the documentation of queryString query https://www.mongodb.com/docs/atlas/atlas-search/queryString/, there is no mention of wildcard ?* or fuzzy search. I was wondering if those are supported or if there are plans to add these features. It’s quite natural since the Apache Lucene (which Atlas Search is based on) has those in their syntax Apache Lucene - Query Parser SyntaxWe are moving our project from Elasticseach to Atlas, and an important part of the project depends on the Elasticsearch queryString query with wildcard and fuzzy search.", "username": "Yi_Wang" }, { "code": "", "text": "I’m curious to learn more about your use case and what you are trying to achieve. In more scenarios where you would want to combine multiple search behaviors, I’d actually recommend using Compound over queryString. In general, it is much easier to maintain and way more flexible.", "username": "Elle_Shwer" }, { "code": "compoundfacetfilters?*~salesdataset:sales*", "text": "Hi Elle, thanks for the response. To give context, our data product provides two search experiences to customers and is currently built on top of Elasticsearch, and we what to migrate to Atlas search. For general search, we already build in business logic using compound, facet, filters, etc. But for advanced users (eng, analytics, etc), we provide the capability to allow them to write queries using Lucene syntax and get exactly what they want in ad hoc search. Lucene natively supports wildcard ?* and fuzzy search ~ in query string, and so does Elastic. Our customer finds it very useful, e.g. they can grab all datasets starting with sales by querying dataset:sales*. We certainly don’t want regression in this feature during our Atlas search migration. This is blocking us right now.", "username": "Yi_Wang" }, { "code": "", "text": "Ref: Lucene query syntax: Apache Lucene - Query Parser Syntax", "username": "Yi_Wang" }, { "code": "", "text": "I do not believe that we have support for what you are describing out of the box today. I’d encourage you to vote on this feedback item though so we are able to keep you updated on progress for the feature.", "username": "Elle_Shwer" } ]
Does queryString query support wildcard and fuzzy search?
2022-08-12T05:41:36.231Z
Does queryString query support wildcard and fuzzy search?
2,425
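For readers weighing the compound suggestion above against the Lucene query-string syntax, a rough sketch of the `dataset:sales*` example expressed with Atlas Search's wildcard operator, plus a fuzzy text clause, might look like the following; the index name and field names are illustrative, and translating arbitrary user-typed Lucene strings would still need parsing on the application side:

```javascript
db.datasets.aggregate([
  {
    $search: {
      index: 'default', // illustrative index name
      compound: {
        must: [
          {
            // rough equivalent of the Lucene query string "dataset:sales*"
            wildcard: {
              path: 'dataset',
              query: 'sales*',
              allowAnalyzedField: true
            }
          }
        ],
        should: [
          {
            // fuzzy matching comparable to "description:analytics~"
            text: {
              path: 'description',
              query: 'analytics',
              fuzzy: { maxEdits: 1 }
            }
          }
        ]
      }
    }
  },
  { $limit: 10 }
])
```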
null
[ "node-js", "compass" ]
[ { "code": "", "text": "I setup my mongodb to my server. I can access the server and data from a uri and i save this uri at mongodb compass. Everything was working fine untill last night. I get “Connect ETIMEDOUT” error while connecting to database on server via mongodb compass and even though I made any changes.\nHow can i fix this? Please HELP", "username": "Ahmet_Gurel" }, { "code": "", "text": "I’m getting the same ETIMEOUT error. It was working fine at one time, now I haven’t been able to connect for weeks.", "username": "Joseph_Brown" }, { "code": "", "text": "Hi, @Ahmet_Gurel\nThe most probable cause is the strict IP access list on Atlas, and an IP change on your router.If you don’t have a static IP contract with your internet provider, then any reset on your router will get a new IP, by whether a purposed reset or power outage. In that case, if you have set restricted IP access on your cluster, then since your IP has changed, you will not be able to access your cluster@Joseph_Brown , Do you think this might be your case? please check your IP and cluster’s access list.", "username": "Yilmaz_Durmaz" }, { "code": "", "text": "That’s not the issue. I’ve asked a question at Getting ETIMEOUT error when attempting to connect and included the actual connection string.", "username": "Joseph_Brown" }, { "code": "", "text": "Yes, you were right. I wanted to access away from home internet. When I added the current IP as strict IP, I was connected it. Thanks.", "username": "Ahmet_Gurel" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Although I connected before, I can't connect now and I get the connect ETIMEDOUT error
2022-08-11T05:53:23.089Z
Although I connected before, I can&rsquo;t connect now and I get the connect ETIMEDOUT error
8,454
null
[]
[ { "code": "00001 JJ000", "text": "I have a string field which contains alphanumerics like 00001 J I need to enable search on this field so I am trying to use the full text index to optimize the searching but it will only allows me to search the string content. So If I try to search the field with J am getting the document but while trying to search with 000 am not getting that document. Is there any other way to achieve my goal?", "username": "Bhargavi_Kasi" }, { "code": "db.coll1.find({text:/000/i})\n[\n { _id: ObjectId(\"62f45b301bbceb4cf25ff2ab\"), text: '00001 J' },\n { _id: ObjectId(\"62f45d981bbceb4cf25ff2ac\"), text: '00001J' }\n]\n", "text": "Hi @Bhargavi_Kasi - Welcome to the community.Is there any other way to achieve my goal?Would the below example suit your use case?If so, please see the following documentation for more details.Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "", "username": "Jason_Tran" } ]
Text Index for numeric content of string field
2022-07-22T12:08:28.574Z
Text Index for numeric content of string field
926
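One caveat worth adding to the regex answer above: an unanchored, case-insensitive pattern such as /000/i cannot use an ordinary index efficiently and will effectively scan, whereas a prefix-anchored, case-sensitive pattern can use a bounded index range. A small mongosh sketch on the same single-field collection:

```javascript
// An ordinary ascending index on the string field.
db.coll1.createIndex({ text: 1 })

// Prefix-anchored and case-sensitive: can use the index as a bounded range scan.
db.coll1.find({ text: /^000/ })

// Unanchored or case-insensitive: still correct, but typically examines
// every index key or document.
db.coll1.find({ text: /000/i })
```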
null
[ "aggregation", "queries", "compass" ]
[ { "code": "[{\n $geoNear: {\n near: {\n type: 'Point',\n coordinates: [\n 5.092047,\n 51.984694\n ]\n },\n key: 'geo',\n spherical: true,\n distanceField: 'dist',\n query: {\n access: {\n $ne: 'quests_only'\n }\n }\n }\n}, {\n $match: {\n geo: {\n $geoWithin: {\n $geometry: {\n type: 'Polygon',\n coordinates: [\n [\n [\n 7.02324,\n 52.3444473\n ],\n [\n 6.5975198,\n 52.3712859\n ],\n [\n 6.4958963,\n 52.1763364\n ],\n [\n 6.9133767,\n 52.1544352\n ],\n [\n 7.02324,\n 52.3444473\n ]\n ]\n ]\n }\n }\n }\n }\n}, {\n $limit: 12\n}]\n", "text": "I’m struggling with the follow problem.I’m making an aggregation pipeline (testing in Compass) to find 10 places near a geolocation (geoNear).I also would like to know if those places fit within a polygon (geoWithin), however the result shouldn’t be limited by the fact that those places fit in that polygon.That’s the struggle I have.\nMy need:In the case nothing is within the polygon I still would like to get 12 results back.\nIn the case only 6 out of 12 are within the polygon, I would like to get 12 results back, the first 6 with is_in_polygon: true, the other 6 with is_in_polygon: falseI would like to do this in one pipeline.\nI was thinking in working with $facet, however $geoNear cannot be used within $facet, it always has to be the first stage in the pipeline.Is it possible to use $addField (or $set, $project) based on a $match (query) outcome?\nIs it possible to sort on that boolean field (so results with that specific field come first.Below my current pipeline.", "username": "Kasper_N_A" }, { "code": "", "text": "I don’t have a solution (at least not yet) but moving the “limit” stage just after “geoNear” will help much as you don’t need any more data than those 12 for the remaining stages.You may then try the “facet” after that and have 2 new lists that are “all” and “within the shape”.", "username": "Yilmaz_Durmaz" }, { "code": "[{\n $geoNear: {\n near: {\n type: 'Point',\n coordinates: [\n 5.092047,\n 51.984694\n ]\n },\n key: 'geo',\n spherical: true,\n distanceField: 'dist',\n query: {\n access: {\n $ne: 'quests_only'\n }\n }\n }\n}, {\n $facet: {\n inside: [\n {\n $match: {\n geo: {\n $geoWithin: {\n $geometry: {\n type: 'Polygon',\n coordinates: [\n [\n [\n 7.02324,\n 52.3444473\n ],\n [\n 6.5975198,\n 52.3712859\n ],\n [\n 6.4958963,\n 52.1763364\n ],\n [\n 6.9133767,\n 52.1544352\n ],\n [\n 7.02324,\n 52.3444473\n ]\n ]\n ]\n }\n }\n }\n }\n },\n {\n $limit: 12\n }\n ],\n outside: [\n {\n $match: {\n geo: {\n $not: {\n $geoWithin: {\n $geometry: {\n type: 'Polygon',\n coordinates: [\n [\n [\n 7.02324,\n 52.3444473\n ],\n [\n 6.5975198,\n 52.3712859\n ],\n [\n 6.4958963,\n 52.1763364\n ],\n [\n 6.9133767,\n 52.1544352\n ],\n [\n 7.02324,\n 52.3444473\n ]\n ]\n ]\n }\n }\n }\n }\n }\n },\n {\n $limit: 12\n }\n ]\n }\n}, {\n $project: {\n both: {\n $concatArrays: [\n '$inside',\n '$outside'\n ]\n }\n }\n}, {\n $unwind: {\n path: '$both'\n }\n}, {\n $replaceRoot: {\n newRoot: '$both'\n }\n}, {\n $limit: 14\n}]\n", "text": "I think I have a working solution. This topic in stackoverflow helped me: MongoDB: adding fields based on partial match query - expression vs query - Stack OverflowAlthough I’m wondering if I can use the size of one $facet array to define the length of the other $facet array.If there is a more optimized way to do it, please let me know. I would like to get at least 12 results back. 
It could be that inside has 0 or 1 result, hence I also put that 12 limit on the outside polygon.", "username": "Kasper_N_A" }, { "code": "", "text": "this seems pretty plausible but there are two things in this version of your query (let’s call v1 if we need later )1- you are using every single document in your facet stage. you just need 12 of them. that is why I told you to move the limit stage after the geoNear.2- you find inside/outside documents and later combine them together, but then you do not add any in/out indicator so it becomes the same result geoNear gives.", "username": "Yilmaz_Durmaz" }, { "code": "[{\n $geoNear: {\n near: {\n type: 'Point',\n coordinates: [\n 5.092047,\n 51.984694\n ]\n },\n key: 'geo',\n spherical: true,\n distanceField: 'dist',\n query: {\n access: {\n $ne: 'quests_only'\n }\n }\n }\n}, {\n $facet: {\n inside: [\n {\n $match: {\n geo: {\n $geoWithin: {\n $geometry: {\n type: 'Polygon',\n coordinates: [\n [\n [\n 7.02324,\n 52.3444473\n ],\n [\n 6.5975198,\n 52.3712859\n ],\n [\n 6.4958963,\n 52.1763364\n ],\n [\n 6.9133767,\n 52.1544352\n ],\n [\n 7.02324,\n 52.3444473\n ]\n ]\n ]\n }\n }\n }\n }\n },\n {\n $limit: 12\n },\n {\n $set: {\n inside: 1\n }\n }\n ],\n outside: [\n {\n $match: {\n geo: {\n $not: {\n $geoWithin: {\n $geometry: {\n type: 'Polygon',\n coordinates: [\n [\n [\n 7.02324,\n 52.3444473\n ],\n [\n 6.5975198,\n 52.3712859\n ],\n [\n 6.4958963,\n 52.1763364\n ],\n [\n 6.9133767,\n 52.1544352\n ],\n [\n 7.02324,\n 52.3444473\n ]\n ]\n ]\n }\n }\n }\n }\n }\n },\n {\n $limit: 12\n },\n {\n $set: {\n inside: 0\n }\n }\n ]\n }\n}, {\n $project: {\n both: {\n $concatArrays: [\n '$inside',\n '$outside'\n ]\n }\n }\n}, {\n $unwind: {\n path: '$both'\n }\n}, {\n $replaceRoot: {\n newRoot: '$both'\n }\n}, {\n $limit: 12\n}]\n", "text": "1- you are using every single document in your facet stage. you just need 12 of them. that is why I told you to move the limit stage after the geoNear.I tried this, however I just get the nearest results based on distance. In my scenario there are places that are further away that fit within the polygon. In polygon results have to appear first, however if I limit after geoNear I exclude them.I would love the limit, because I think it could make it more performant.2- you find inside/outside documents and later combine them together, but then you do not add any in/out indicator so it becomes the same result geoNear gives.Thanks for the idea, indeed I need to add an identifier.V2 with an addedField (used $set, alias of $addField):Is there a way to get all the documents that are not in the first facet array. Now I do a double geoWithin query (although the limit is 12 I don’t know if this has performance impacts).", "username": "Kasper_N_A" }, { "code": "", "text": "In the case nothing is within the polygon I still would like to get 12 results back.This was your description for why I was telling to use limit after geoNear Your v2 has no problem if you have changed mind. But still be aware that although you use limits in them facet stages will possibly use all documents in this shape. I am not sure if Mongodb optimizes these queries somehow.for combining the two facet results, check if “setUnion” operator does better than “concatArrays”. I haven’t used it for this kind of operation but seem promising.", "username": "Yilmaz_Durmaz" } ]
Aggregation pipeline geoNear, geoWithin question
2022-08-15T15:24:36.948Z
Aggregation pipeline geoNear, geoWithin question
2,486
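Following up on the v2 pipeline in the thread above, the "in-polygon results must appear first" requirement can be made explicit instead of relying on concatenation order: after the two facet arrays are merged and unwound, a $sort on the added `inside` flag (then on distance) fixes the ordering. A minimal sketch of just the tail stages, assuming the same field names as v2:

```javascript
// Tail of the v2 pipeline: merge both facet arrays, flatten them, then sort so
// documents flagged inside: 1 come before inside: 0, nearest first within each group.
[
  { $project: { both: { $concatArrays: ['$inside', '$outside'] } } },
  { $unwind: '$both' },
  { $replaceRoot: { newRoot: '$both' } },
  { $sort: { inside: -1, dist: 1 } },
  { $limit: 12 }
]
```

Note that $setUnion, suggested as an alternative to $concatArrays, removes duplicates and does not promise element order, so an explicit sort like this is the safer way to guarantee the ordering.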
null
[ "aggregation", "queries", "atlas-search" ]
[ { "code": "exports.userNameCitySearchAutocomplete = async function (req, res) {\n\n try {\n\n const { userNameCityQueryparam } = req.query;\n\n console.log(\"search query param\", userNameCityQueryparam);\n\n const agg = [\n\n {\n\n $search: {\n\n index: 'userNameCity',\n\n 'compound': {\n\n \n\n \"filter\": [{\n\n \"text\": {\n\n \"query\": [\"6271f2bb79cd80194c81f631\",\"62cf35ce62effdc429efa9b0\"],\n\n \"path\": \"_id\",\n\n }\n\n }],\n\n \"should\": [\n\n {\n\n //search on user name\n\n \n\n autocomplete: {\n\n query: userNameCityQueryparam,\n\n path: 'name',\n\n fuzzy: {\n\n maxEdits: 2,\n\n prefixLength: 3\n\n }\n\n }},\n\n //search on user city\n\n \n\n {\n\n autocomplete: {\n\n query: userNameCityQueryparam,\n\n path: 'city',\n\n fuzzy: {\n\n maxEdits: 2,\n\n prefixLength: 3\n\n }\n\n },\n\n }\n\n ,\n\n //search on user contact first name\n\n \n\n {\n\n autocomplete: {\n\n query: userNameCityQueryparam,\n\n path: 'contactfirstname',\n\n fuzzy: {\n\n maxEdits: 2,\n\n prefixLength: 3\n\n }\n\n },\n\n }\n\n ,\n\n //search on user contact last name\n\n \n\n {\n\n autocomplete: {\n\n query: userNameCityQueryparam,\n\n path: 'contactlastname',\n\n fuzzy: {\n\n maxEdits: 2,\n\n prefixLength: 3\n\n }\n\n },\n\n }\n\n \n\n ],\n\n \"minimumShouldMatch\": 1\n\n }\n\n }\n\n }\n\n ]\n\n const response = await User.aggregate(agg);\n\n return res.json(response);\n\n // res.send(response);\n\n } catch (error) {\n\n console.log(\"autocomplete search error\", error);\n\n return res.json([]);\n\n }\n\n};\n{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"_id\": {\n \"type\": \"string\"\n },\n \"city\": {\n \"type\": \"autocomplete\"\n },\n \"contactfirstname\": {\n \"type\": \"autocomplete\"\n },\n \"contactlastname\": {\n \"type\": \"autocomplete\"\n },\n \"name\": {\n \"type\": \"autocomplete\"\n }\n }\n }\n}\n", "text": "I want to search autocomplete on different fields but filtered by a different fieldIssue: am getting empty array when applied filter , if removed filter clause autocomplete search is working", "username": "Manoranjan_Bhol" }, { "code": "\"_id\"$match\"_id\"filter", "text": "Hi @Manoranjan_Bhol,Thanks for providing the aggregation pipeline and search index details.Are you able to provide a few sample documents as well as showing the expected output?I would like to see how the \"_id\" values just to verify some theories I have whilst also attempting to re-produce the same behaviour.In the mean time, if it suits your environment / use case(s), would using the pipeline without the filter whilst using a $match for the \"_id\" values work for you as a temporary workaround whilst trying to figure out what might be causing the issue you’re experiencing with filter usage? 
There’s an example of this here.Regards,\nJason", "username": "Jason_Tran" }, { "code": "compoundfilterautocompletedb.collection.find()\n[\n {\n _id: '62e9b5ee86a51031fba35a17',\n title: 'JavaScript beginner course',\n url: 'https://youtube.com/xyz'\n },\n {\n _id: '62e9b5ee86a51031fba35a18',\n title: 'Java beginner course',\n url: 'https://youtube.com/xyz0'\n },\n {\n _id: '62e9b5ee86a51031fba35a19',\n title: 'JavaScript',\n url: 'https://youtube.com/123'\n },\n {\n _id: '62e9b5ee86a51031fba35a20',\n title: 'beginner course - JavaScript',\n url: 'https://youtube.com/xyz123'\n }\n]\n\"_id\"{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"_id\": {\n \"type\": \"string\"\n },\n \"title\": {\n \"type\": \"autocomplete\"\n }\n }\n }\n}\ndb.collection.aggregate({\n '$search': {\n index: 'default',\n compound: {\n should: [ { autocomplete: { query: 'Java', path: 'title' } } ],\n filter: [\n {\n text: {\n query: [ '62e9b5ee86a51031fba35a17', '62e9b5ee86a51031fba35a18' ],\n path: '_id'\n }\n }\n ]\n }\n }\n})\n[\n {\n _id: '62e9b5ee86a51031fba35a17',\n title: 'JavaScript beginner course',\n url: 'https://youtube.com/xyz'\n },\n {\n _id: '62e9b5ee86a51031fba35a18',\n title: 'Java beginner course',\n url: 'https://youtube.com/xyz0'\n }\n]\n", "text": "Based off a quick test on my test environment, I was able to create a similar working search aggregation using the compound operator with use of filter and autocomplete.Sample documents from my test environment:Note: \"_id\" is of string typeSearch index details:Search aggregation operation and output:Hopefully this can help in some manner. However, if not, please provide the sample documents and expected output Regards,\nJason", "username": "Jason_Tran" }, { "code": " filter: [\n {\n text: {\n query: [ '62e9b5ee86a51031fba35a17', '62e9b5ee86a51031fba35a18' ],\n path: '_id'\n }\n }\n ]\n", "text": "Document Collection details:\n\nimage1831×1199 148 KB\nIndex details:\n\nimage1626×574 24.9 KB\n\n\nimage1551×1083 54.5 KB\n\n\nimage1533×861 65.9 KB\nThanks @Jason_Tran . I tried using your suggestion however still did not get any result\nScenario 1: Applied filter by _id—> did not get result i.e. got empty array\n\nimage1893×1507 217 KB\nScenario 2: Did not apply filter by _id—> got result i.e. got actual array\nimage1672×696 48.8 KB\n\n\nimage1888×714 80.7 KB\n", "username": "Manoranjan_Bhol" }, { "code": "\"_id\"\"_id\"\"_id\"$match$searchfilter$match$in", "text": "Thanks @Manoranjan_Bhol,I tried using your suggestion however still did not get any result\nScenario 1: Applied filter by _id—> did not get result i.e. got empty arrayI presumed based off the search index details you provided for \"_id\", that it was of string type. In my example, the \"_id\" is a string but based off your images it appears the \"_id\" is of ObjectId type.Would the $match stage suggestion work for you instead? 
I.e, perform the $search without the filter clause then perform a $match using $in for the object id’s (as there appears to be a few object ID’s you’re attempting to filter for).Can you provide me 3-5 sample documents as well as the expected / desired output from those 3-5 sample documents?Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "@Marcus\nPer your suggestion, posted this issue here\nWould you be able to help please", "username": "Manoranjan_Bhol" }, { "code": "", "text": "{\nindex: ‘userNameCity’,\ncompound:{\nshould:{equals: {\nvalue: ObjectId(“6271f2bb79cd80194c81f631”),\npath: ‘_id’\n},\nautocomplete: {\nquery: ‘tamp’,\npath: ‘name’\n}\n}}", "username": "Manoranjan_Bhol" }, { "code": "", "text": "@Jason_Tran\nI checked mongodb documentation.\nShould apply equal operator to filter objectId type\nHowever am not able to get the compound operator work\n{\nindex: ‘userNameCity’,\ncompound:{\nshould:{equals: {\nvalue: ObjectId(“6271f2bb79cd80194c81f631”),\npath: ‘_id’\n},\nautocomplete: {\nquery: ‘tamp’,\npath: ‘name’\n}\n}}", "username": "Manoranjan_Bhol" }, { "code": "\"_id\"\"string\"{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"_id\": {\n \"type\": \"string\"\n }\n...\nobjectId\"_id\"\"_id\"DB>db.collection.aggregate({\n '$search': {\n index: 'default',\n compound: {\n should: [ { autocomplete: { query: 'Java', path: 'title' } } ],\n filter: [\n {\n equals: {\n value: ObjectId(\"62eb060bbae0364f8a222e72\"),\n path: '_id'\n }\n }\n ]\n }\n }\n})\n/// <--- /// Nothing returned\nDB> \n\"_id\"DB>db.collection.aggregate({\n '$search': {\n index: 'default',\n compound: {\n should: [ { autocomplete: { query: 'Java', path: 'title' } } ],\n filter: [\n {\n equals: {\n value: ObjectId(\"62eb060bbae0364f8a222e72\"),\n path: '_id'\n }\n }\n ]\n }\n }\n})\n/// Output document:\n[\n {\n _id: ObjectId(\"62eb060bbae0364f8a222e72\"),\n title: 'JavaScript beginner course',\n url: 'https://youtube.com/xyz'\n }\n]\nDB>\n", "text": "Can you try changing your index definition, more specifically the \"_id\" data type? From your initial post (from the index definition), it is stated the “_id” is of data type \"string\":I believe this needs to be objectId. Please refer to my example below from my test environment (the 2 same aggregations but the index definition field mapping changed for \"_id\").Using search index definition where field mapping for \"_id\" is data type string:Using search index definition where field mappign for \"_id\" is data type objectId:However am not able to get the compound operator workIf you’re still having issues, please send the following:Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "@Jason_Tran , Thank you a ton . Finally your solution works. 
I was able to filter by single objectiId using your solution.Would you be able to guide me how to filter by array of objectId / multiple objectiId.The array will have the list of objectId dynamically from the frontend.Tried with following however does not work.\nfilter: [\n{\nequals: {\nvalue: [ObjectId(“62eb060bbae0364f8a222e72”),ObjectId(“62cf35ce62effdc429efa9b0”)]\npath: ‘_id’\n}", "username": "Manoranjan_Bhol" }, { "code": "$search$matchdb.collection.aggregate({\n '$search': {\n index: 'default',\n compound: {\n should: [ { autocomplete: { query: 'Java', path: 'title' } } ]\n }\n }\n},\n{\n '$match': {\n _id: {\n '$in': [ \n ObjectId(\"62eb060bbae0364f8a222e72\"),\n ObjectId(\"62eb060bbae0364f8a222e73\")\n ] /// <---- Array of Object ID's.\n }\n }\n})\n[\n {\n _id: ObjectId(\"62eb060bbae0364f8a222e72\"),\n title: 'JavaScript beginner course',\n url: 'https://youtube.com/xyz'\n },\n {\n _id: ObjectId(\"62eb060bbae0364f8a222e73\"),\n title: 'Java beginner course',\n url: 'https://youtube.com/xyz0'\n }\n]\n", "text": "Thanks for confirming the solution to the post. I can see you’ve raised another post about this where this is currently ongoing discussion. I will marked this post as close as the original question has been answered.In terms of filtering for the array of object ID’s, I have not tested on my own environment yet but one alternative, if it suits your use case, would be to run the $search without the object ID filter and then performing a $match stage after to filter on for an array of object ID’s.A quick example would be something like:This gives the following output:I’ve not tested this thoroughly but hopefully this helps.Regards,\nJason", "username": "Jason_Tran" }, { "code": "bjectId(\"62eb060bbae0364f8a222e72\"),\n ObjectId(\"62\n", "text": "@Jason_Tran\nThank you so much for the help", "username": "Manoranjan_Bhol" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Autocomplete with filter compound query
2022-07-29T05:13:08.765Z
Autocomplete with filter compound query
4,722
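If the ObjectId filtering needs to stay inside $search rather than in a trailing $match, one pattern built only from the operators already used in this thread is a nested compound of equals clauses, one per ObjectId, with minimumShouldMatch: 1 acting like an "in" condition. The index name, field paths, and IDs below are illustrative:

```javascript
db.users.aggregate([
  {
    $search: {
      index: 'userNameCity',
      compound: {
        should: [
          { autocomplete: { query: 'tamp', path: 'name' } }
        ],
        minimumShouldMatch: 1,
        filter: [
          {
            // "in"-style filter: the document must satisfy at least one equals clause
            compound: {
              should: [
                { equals: { path: '_id', value: ObjectId('62eb060bbae0364f8a222e72') } },
                { equals: { path: '_id', value: ObjectId('62eb060bbae0364f8a222e73') } }
              ],
              minimumShouldMatch: 1
            }
          }
        ]
      }
    }
  }
])
```

As in the accepted answer, this relies on the "_id" field being mapped as objectId in the search index. Newer Atlas Search releases also document an `in` operator that accepts an array of values, which may be simpler where it is available.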
null
[ "dot-net" ]
[ { "code": " var mockCollection = new Mock<IMongoCollection<Dto>>();\n mockCollection\n .Setup(x => x.BulkWriteAsync(\n It.IsAny<List<WriteModel<HierarchyDto>>>(),\n It.IsAny<BulkWriteOptions>(),\n It.IsAny<CancellationToken>()))\n .Returns(Task.FromResult(response));\n", "text": "I am having trouble writing a unit test for a method that uses Mongo Driver BulkWriteAsync().\nThe return for BulkWriteAsync() is BulkWriteResult which is an abstract class and I am not able to mock it. So I create a concrete class call MockBulkWriteResult which inherits BulkWriteResult.Here is a sample code:\nvar response = new MockBulkWriteResult(5);Any suggestions?", "username": "Supriya_Bansal" }, { "code": " var returnBulkResponse =(BulkWriteResult<HierarchyDto>) new BulkWriteResult<HierarchyDto>.Acknowledged(200,0,0,0,\n 0,new List<WriteModel<HierarchyDto>>(), new List<BulkWriteUpsert>());\n\n _mongoCollectionMock.Setup(s\n => s.BulkWriteAsync(It.IsAny<IEnumerable<WriteModel<HierarchyDto>>>(), null,\n new CancellationToken()))\n .Returns(Task.FromResult(returnBulkResponse));\n", "text": "I am just pasting code which might help", "username": "Rabi_shankar_Sathua" }, { "code": "", "text": "Outstanding. Thanks very much, as I had hit a similar issue and was puzzling how to create the BulkWriteResult for the unit test.", "username": "Roy_Berger" } ]
Unit Test BulkWriteAsync using C#
2020-11-16T22:40:19.036Z
Unit Test BulkWriteAsync using C#
5,629
null
[ "swift" ]
[ { "code": "", "text": "How can I show an alert in an iOS SwiftUI app that the user does not have permissions to access a particular collection?", "username": "Manjinder_Sandhu" }, { "code": "", "text": "This question is a bit off topic as it has nothing to do with Realm and is really a coding question about SwiftUI.A good place to start leaning about SwiftUI is an introduction to SwiftUI tutorial - I suggest Ray Wenderlich’s site as their tutorials are very thorough.If you have a specific coding question about Realm, please include a short code example of what you’ve attempted so we can get a feel for what you’re attempting to do. Also include more details as well as if this is a local or sync’d environment and if it’s single or multi-user.", "username": "Jay" } ]
Show alert to users that they do not have permissions to access a collection
2022-08-14T13:28:45.571Z
Show alert to users that they do not have permissions to access a collection
1,352
null
[ "security" ]
[ { "code": "", "text": "Hi,As mongodb generate individual database keys using master key (project level KMS->master key per cluster->individual database key), how to mitigate the risk of all different DB (tenant) data on the same cluster may subject to breach if the master key is breached?", "username": "Jie_Long_15309" }, { "code": "", "text": "If you configured a project level KMS and if you detect that the master key in your KMS has been breached, as a best practice you should consider immediately rotating it using the procedure below.", "username": "Salman_Baset" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Encryption key at database level
2022-07-18T20:12:28.730Z
Encryption key at database level
2,310
null
[]
[ { "code": "", "text": "Hi, I’ve fairly recently started writing a MAUI app that is using SQLite with Azure SQL in the back end. Azure SQL was always very temporary to allow the API’s to be written and we will be replacing that with a MongoDB Atlas database any time now. I watched Luce Carter’s great presentation in the MAUI conference last week and realised that Device Sync using Realm is perfect for our application. Are there any write ups or tutorials on replacing SQLite or at least adding Realm and Device Sync to an existing MAUI app? There is currently very little SQLite in the app so I’m happy to manually convert that to Realm. I’m just not sure where to start with adding Realm and setting up Device Sync and the MongoDB database. I’ve got as far as adding the Nuget packages. Thank you.", "username": "John_Atkins" }, { "code": "", "text": "This is a great article by our own @papafe which goes over how to use Realm effectively in a Xamarin.Forms app. Considering MAUI is mostly an evolution of XF, a lot of the concepts in the article apply to MAUI apps.", "username": "nirinchev" }, { "code": "", "text": "Thank you. I’ll take a look.", "username": "John_Atkins" }, { "code": "", "text": "That was a very good and useful article. It shows that it should be relatively simple to change my app from SQLite to Realm because it is using the MVVM pattern and making use of DTO’s.\nUseful as this article is, it shows that my question could have been better and was in the wrong forum category. My initial problem is in setting up Realm Sync where I already have models in my app. I see the author of the article says there will be a follow-up showing how to use Realm Sync.\nPerhaps it is just a case of creating an app in App Services, getting the AppId for use in the app and then the initial sync will automatically create the required collections in MongoDB.", "username": "John_Atkins" }, { "code": "", "text": "Hey John,So glad to hear you enjoyed my talk and were inspired to move to Realm!I wasn’t able to talk through Sync and how to handle this kind of use case (it is very common) as much as I would have liked due to time.But my advice is to do as you suggested and setup Sync. Atlas will create documents and collections, if they don’t exist, the first time you add documents. If you leave Developer Mode on, it allows your client data to define the structure of your documents as documents are added. This is to support the ever changing world of prototyping. It will handle your existing models no problem.Once you are happy that the documents match the schema that you want because your models are finalised and you have some data in your Atlas collection, you can turn Developer Mode off. This requires a schema but Atlas can generate that for you based on the existing data in the collection so really easy!Once", "username": "Luce_Carter" }, { "code": "", "text": "Hi Luce, Thank you for the useful answer. I’ll do as you suggest and no doubt I’ll report back or have further questions. Special thanks though for your talk at the conference. Without that, I would not have known about Realm or Sync or how easily it can be used in a MAUI MVVM app. The company I work for are already starting to use MongoDB and it now looks like it is a perfect fit with mobile apps.\nJohn.", "username": "John_Atkins" }, { "code": "", "text": "Hi @Luce_Carter\nI’ve made a little progress with this but I seem to have something configured wrongly in App Services. 
I suspect it is to do with me choosing Flexible Sync rather than Partition Based. In fact, I get the same problem if I run your HouseMovingAssistant app (with my AppId) as when I run my own app. Yours, of course, was configured for Partition Based, and my App Services is set to Flexible.\nThe inner exception I get in both apps is\nWrong wire protocol, switch to the flexible sync wire protocol\nso that pretty much says it all! What I don’t know is how to change my app so it is using Flexible Sync. My code was copied from yours with the exception on the second line of this:config = new PartitionSyncConfiguration($\"{App.RealmApp.CurrentUser.Id}\", App.RealmApp.CurrentUser);\nrealm = await Realm.GetInstanceAsync(config);John.", "username": "John_Atkins" }, { "code": "FlexibleSyncConfigurationconfig = new FlexibleSyncConfiguration(App.RealmApp.CurrentUser);\nrealm = await Realm.GetInstanceAsync(config);\n\n// Add subscriptions and read/write data\n", "text": "Hey John, if you want to use flexible sync on the backend, you should open a Realm using FlexibleSyncConfiguration:Be sure to check out the docs for adding subscriptions.", "username": "nirinchev" }, { "code": "", "text": "Thank you Nikola. I got there a few minutes before your post!Sorry to have wasted your time with such a basic question. It is starting to make sense now. I will check out adding subscriptions.John.", "username": "John_Atkins" }, { "code": "", "text": "No worries, we’re here to help ", "username": "nirinchev" } ]
Convert SQLite MAUI app to Realm with Device Sync
2022-08-14T21:46:19.215Z
Convert SQLite MAUI app to Realm with Device Sync
3,243
null
[ "replication", "python", "mongodb-shell" ]
[ { "code": "", "text": "Within a replica set the primary node can read and write data and the secondaries can only read data from a client application. How can you set up which member to read data from (primary and secondary)?Let’s say a replica set has 3 nodes and on another node, there is the client application using python. How can the client application query data to the replicate set? So essentially, the client app is not a replica set member but needs to query data from it. How can this be achievable?How can you access DBs and the contents of multiple nodes from a single node? I know mongosh is one way, are there other options for this? Can the DBs and contents still be accessible if the node is not a member of the replicate set, let’s say when the member has an error and is disconnected from the replicate set? Can the disconnected member still be accessible remotely?", "username": "Master_Selcuk" }, { "code": "readPreferencesecondarysecodaryPreferred", "text": "Within a replica set the primary node can read and write data and the secondaries can only read data from a client application. How can you set up which member to read data from (primary and secondary)?You would set the readPreference to either secondary or secodaryPreferredLet’s say a replica set has 3 nodes and on another node, there is the client application using python. How can the client application query data to the replicate set?The application uses the MongoDB URI to connect to the MongoDB instance. The application does not have to be on the same machine, and in general it shouldn’t be so the app and database do not compete for the same resources (CPU, RAM, etc).The client app has nothing to do with the replicaset. It is just an interface to get the data. Your app would make the necessary calls to the database to get the data needed.Can the DBs and contents still be accessible if the node is not a member of the replicate set, let’s say when the member has an error and is disconnected from the replicate set? Can the disconnected member still be accessible remotely?You can connect to the disconnected node directly by using the machine’s name/IP address instead of the replica set connecting. However it this machine cannot communicate with the other members of the replica set, it might have old or missing data. Only members that can connect to a majority of the replica set members will be kept in sync with the primary. Once the disconnected member can communicate with the rest of the replica set it will start replicating the operations again to get back into sync, as long as the oplog has not rolled over older data that this node needs.It might be worth reading up on replication to better understand how it works.", "username": "Doug_Duncan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Replica Set Questions
2022-08-15T14:02:42.128Z
Replica Set Questions
1,148
null
[ "node-js" ]
[ { "code": "{\n company_id: Number, \n company_name : String,\n company_email : String,\n .....some other info \n}\n{\n job_id: Number, \n job_title : String,\n job_role : String,\n job_location : String,\n .....some other info \n}\n", "text": "Hello guys,I have two collection.1.Company schema2. Job SchemaI want to search a text in following fieldI have some regular expression and I want to search itHow can I do it ?", "username": "Praveen_Gupta" }, { "code": "", "text": "I’d recommend using Atlas Search.Here is a tutorial for how to search across multiple collections.If there collections live in different databases, here is a tutorial for cross-cluster search.", "username": "Elle_Shwer" } ]
Search regular expression in multiple collections
2022-08-09T04:20:10.201Z
Search regular expression in multiple collections
1,279
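Besides the Atlas Search route recommended above, the regex requirement in the question can also be met with a plain aggregation: $unionWith pulls both collections into one result set, and $match with $regex covers the listed fields. A rough sketch, assuming the company and job documents from the question; the collection names and pattern are illustrative:

```javascript
const pattern = /acme/i; // illustrative regular expression

db.companies.aggregate([
  { $match: { $or: [
      { company_name:  { $regex: pattern } },
      { company_email: { $regex: pattern } }
  ] } },
  { $addFields: { source: 'company' } },
  {
    $unionWith: {
      coll: 'jobs',
      pipeline: [
        { $match: { $or: [
            { job_title:    { $regex: pattern } },
            { job_role:     { $regex: pattern } },
            { job_location: { $regex: pattern } }
        ] } },
        { $addFields: { source: 'job' } }
      ]
    }
  }
])
```

Unanchored, case-insensitive regexes scan rather than use indexes efficiently, which is why Atlas Search remains the better fit at larger scale.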
null
[ "queries", "rust" ]
[ { "code": "pub struct MongDbRepository<T> {\n pub url: String,\n pub collection: Collection<T>,\n}\n\nimpl<T> MongDbRepository<T> {\n pub fn new(url: String, collection: Collection<T>) -> Self {\n MongDbRepository { url, collection }\n }\n\n pub async fn insert_one(\n &self,\n to_insert: &T,\n ) -> Result<InsertOneResult, mongodb::error::Error>\n where\n T: Serialize,\n {\n let result = self.collection.insert_one(to_insert, None).await;\n\n result\n }\n\n pub async fn query_one(&self, id: &Uuid) -> Option<T>\n where\n T: DeserializeOwned + Unpin + Send + Sync,\n {\n let bson_id = mongodb::bson::uuid::Uuid::from_bytes(id.into_bytes());\n\n let query_result = self\n .collection\n .find_one(Some(doc! {\"_id\": bson_id}), None)\n .await\n .expect(\"Query result not found\");\n\n query_result\n }\n}\n\n#[derive(Deserialize, Serialize)]\npub struct NewsarticleDto {\n pub name: String, \n}\n\n#[derive(Serialize, Clone, Deserialize)]\npub struct Newsarticle {\n pub id: Uuid,\n pub name: String,\n}\n\nimpl Display for Newsarticle {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n write!(f, \"{} {}\", self.id, self.name)\n }\n}\n\nimpl From<NewsarticleDto> for Newsarticle {\n fn from(article: NewsarticleDto) -> Self {\n Newsarticle {\n id: Uuid::new_v4(),\n name: article.name,\n }\n }\n}\n", "text": "Hi,\nI am trying to connect my actix web server with my mondob database and tried to implement a find by Id and simple insert endpoint. The insertion works but it inserts 2 id fields (id and _id) is there any way to specify my UUID as the only id to be used for MongoDB when inserting. And how do I query by Id with this driver I tried a lot of things and nothing seems to work. Code for the repository functions below. Generic T should be replaced with NewsArticle for example. Thanks for the help!", "username": "Thomas_Mages" }, { "code": "idNewsarticle_id_id_idid_idquery_one_idid_id_idbson::uuid::UuidUuidfrom_uuid_0_8from_uuid_1", "text": "Hi @Thomas_Mages -is there any way to specify my UUID as the only id to be used for MongoDB when insertingTo accomplish this, you can just rename the id field in your Newsarticle struct to _id. If there is already a field named _id present in the document, the driver won’t add one. Currently the driver adds one for you because MongoDB requires there is some field named _id.Alternatively, if you need the field to be named id on your Rust type for some reason you could use the Serde rename attribute to specify that the field should be serialized/deserialized to/from BSON under the name _id.how do I query by Id with this driverI think the problem with your query_one code sample is that you are trying to filter on the _id field but using the UUID value that is stored under id, not _id. If you make the change I suggest above so that the UUID is stored under the name _id then I think your code should work as expected.I also want to point out that rather than round-tripping through bytes to create a bson::uuid::Uuid from a Uuid you could use one of the conversion helper functions provided by the driver: either from_uuid_0_8 if you are using v0.8 of the uuid crate, or from_uuid_1 if you are using v1 of the uuid crate. (Each of these will require turning on a corresponding feature, as noted in the linked docs.)", "username": "kmahar" } ]
MongoDB Rust driver find one by id
2022-08-14T14:34:30.901Z
MongoDB Rust driver find one by id
4,702
null
[ "mongodb-shell", "schema-validation" ]
[ { "code": "db.createCollection(\"post\", {\n validator: {\n $jsonSchema: {\n bsonType: \"object\",\n required: [\"title\", \"creator\"],\n properties: {\n title: {\n bsonType: \"string\",\n description: \"must be a string and is required\",\n },\n text: { bsonType: \"string\", description: \"must be a string\" },\n creator: {\n bsonType: \"objectId\",\n description: \"must be an object id and is required\",\n },\n comments: {\n bsonType: \"array\",\n description: \"must be an array\",\n items: {\n bsonType: \"object\",\n required: [\"text\", \"author\"],\n properties: {\n text: { bsonType: \"string\", description: \"must be a string\" },\n author: {\n bsonType: \"objectId\",\n description: \"must be an objectId and is required\",\n },\n },\n },\n },\n },\n },\n },\n});\n\n db.post.insertOne({title:\"My First Post\",text:\"Lorem ipsum dolor\",tags:[\"new\",\"tech\"],creator:ObjectId(\"62ebe7c819cfdcc24a7f5017\"),comments:[{author:ObjectId(\"62ebe7c819cfdcc24a7f5017\")}]})\nMongoServerError: Document failed validation\nAdditional information: {\n failingDocumentId: ObjectId(\"62ebf4fb19cfdcc24a7f501f\"),\n details: {\n operatorName: '$jsonSchema',\n schemaRulesNotSatisfied: [\n {\n operatorName: 'properties',\n propertiesNotSatisfied: [\n {\n propertyName: 'comments',\n description: 'must be an array',\n details: [ [Object] ]\n }\n ]\n }\n ]\n }\n}\n", "text": "hi I am testing out the schema validation for documents. I am using mongosh v1.5.4 in ubuntu 20.using the above I created a collection with validation. When I try to add a document with wrong structure it fails as expected but the details field in the error is not readablecan someone help me with this?", "username": "Siddharth_Badola" }, { "code": "mongosh100Infinityconfig.set('inspectDepth', 100)\n", "text": "@Siddharth_Badola by default, mongosh only expands objects 6 levels down. You can configure it to go deeper, e.g. 100 or even Infinity potentially:", "username": "Massimiliano_Marcon" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Details properties for schema validation in mongosh
2022-08-04T17:06:45.023Z
Details properties for schema validation in mongosh
2,848
null
[ "swift" ]
[ { "code": "/**\n This will crash with error\n Object cannot be inserted unless the schema is initialized. This can happen if you try to insert objects\n into a RLMArray / List from a default value or from an overriden unmanaged initializer (`init()`)\n**/\nlet listOfObjects = secondBacklinkObject?.dynamicList(\"linksToFirstObject\")\nlet copy = DynamicObject(value: objectToCopyCopy)\nlistOfObjects?.replace(index:indexOfChildWithMultipleBacklinks, object: copy)\n/**\n This will crash with error: Embedded objects cannot be created directly\n*/\nlet listOfObjects = secondBacklinkObject?.dynamicList(\"linksToFirstObject\")\nlet copy = migration.create(\"EmbeddedObject\", value: objectToCopy)\nlistOfObjects?.replace(index:indexOfChildWithMultipleBacklinks, object: copy)\n", "text": "I am in the process of migrating a few classes from using objects and relations to embedded objects. There is a lot of edge cases when converting doing this migration. I need to ensure that there is exactly one backlink for each object that will be converted otherwise the app will crash at startup.After a lot of work, I have gotten the first case to work. I can successfully delete all orphaned objects. Now I need to handle the second case. Whenever there is an object that has multiple backlinks I want to keep the first backlink and for all subsequent backlinks I want to create a new object and copy all values over to the new object, which brings me to the question.How can I create embedded objects during migration?Here are two approaches I have triedOption 1: Create an unattached object by value and replace the object in the second backlinkOption 2: Create a managed object and replace it in the second backlinkIs there an api for creating an embedded object during migration? If not, how can I prevent data loss in cases where there are multiple backlinks when converting “object” to embeddedobject? The example is in swift, but I will have the same question for Kotlin as well as the app is available on both platforms.", "username": "Simon_Persson" }, { "code": "", "text": "On Android there does seem to be an API for this in the DynamicRealm.createEmbedded. But this doesn’t seem to be available in the Swift SDK?", "username": "Simon_Persson" }, { "code": "", "text": "As there doesn’t seem to be a public API in Swift for creating embedded objects during migration, I created a feature request for it here Add API for creating embedded objects during migration · Issue #7508 · realm/realm-swift · GitHub.Without this feature available in Swift I am not sure how to proceed with the migration ", "username": "Simon_Persson" }, { "code": "", "text": "Anyone from realm side care to explain what the public API for this is supposed to be like? Still waiting for the bug fix for this since november 2021 ", "username": "Simon_Persson" }, { "code": "", "text": "Anyone? I did see 15 people click to the linked issue, so I assume I am not alone in having this problem.", "username": "Simon_Persson" } ]
How can I create embedded objects during migration?
2021-10-31T14:36:12.734Z
How can I create embedded objects during migration?
3,293
null
[]
[ { "code": "rs.add(HOST_NAME:PORT NUMBER)HOST NAME ", "text": "How can you setup a Replicate Set where the members are in different networks? Since the rs.add(HOST_NAME:PORT NUMBER) command to add members will not work since the HOST NAME cannot be recognized because the network is not private but a public one.", "username": "Master_Selcuk" }, { "code": "", "text": "If the members are reachable, and they must be, despite being in different network, you may add them to your replica set.But the question is how do you reach them, right now, if HOST_NAME is not recognized?If you do it with IP address, then use the IP address in your rs.add().How come HOST_NAME is not recognized if the network is public?Note that your private network hosting one or more members also needs to be reachable from the public network that is hosting the other members.The rule is simple:All members of a replica set must be reachable from all members of the replica set and from all clients using the same host names/IP.", "username": "steevej" }, { "code": "rs.add()dbpathdbpathdbpath", "text": "Thank you got it, so I can just use the public IP address to identify and add a member in another network with the rs.add() command. May I also ask another question, for the command:mongod --port “PORT” --dbpath “YOUR_DB_DATA_PATH” --replSet “REPLICA_SET_INSTANCE_NAME”The dbpath is where all of the DBs are stored right, so if I am setting up the primary node for example it would write and read the data to the specified DBs from the dbpath and then would forward the data to the Secondary nodes for replication. In the Secondary nodes, the replicated data forwarded from the Primary node will be stored in the dbpath as well a reading of the dataset will be made from it by the client application.", "username": "Master_Selcuk" }, { "code": "", "text": "SeeA must to take isDiscover our MongoDB Database Management courses and begin improving your CV with MongoDB certificates. Start training with MongoDB University for free today.", "username": "steevej" }, { "code": "", "text": "Thanks for the recommendations.", "username": "Master_Selcuk" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Setting up a Replica Set on different networks
2022-08-11T11:21:26.731Z
Setting up a Replica Set on different networks
1,574
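To make the reachability rule at the end of this thread concrete, a minimal mongosh sketch of initiating a three-member set where every member is addressed by a name or public IP that all members and all clients can resolve; the host names are illustrative:

```javascript
// Run once, connected to the member that should start as primary.
// Every host below must be reachable on its port from the other members
// and from every client, using exactly these names.
rs.initiate({
  _id: "rs0",
  members: [
    { _id: 0, host: "db0.example.com:27017" },
    { _id: 1, host: "db1.example.com:27017" },
    { _id: 2, host: "203.0.113.10:27017" }   // a member addressed by public IP
  ]
})

// Adding a further member later, again by a name/IP everyone can resolve:
rs.add("db3.example.com:27017")
```

Exposing members on public addresses also calls for appropriate bindIp, authentication, and TLS settings, per the security checklist referenced in the next thread.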
null
[]
[ { "code": "27017replSet mongodrs.add(HOST_NAME:PORT NUMBER)HOST NAME ", "text": "When setting up the replication the command:mongod --port “PORT” --dbpath “YOUR_DB_DATA_PATH” --replSet \"REPLICA_SET_INSTANCE_NAMEis issued. Questions relating to the command:How can you setup a Replicate Set where the members are in different networks? Since the rs.add(HOST_NAME:PORT NUMBER) command to add members will not work since the HOST NAME cannot be recognized because the network is not private but a public one.", "username": "Master_Selcuk" }, { "code": "local27017replSet mongod", "text": "Hi @Master_Selcuk ,Replica sets are designed for high availability and. failover, so each replica set member includes the same set of data (with the exception of the local system database, which is used for instance-specific data like the replication oplog). In a failover/election scenario, you want to have multiple members of a replica set eligible to become primary so there is some fault tolerance allowing some members of your replica set to be temporarily unavailable while still maintaining a primary.If you only want to copy a subset of collections or databases, you will have to work out a separate solution from the built-in replication. For example, you could have multiple MongoDB deployments and Back up & Restore selected databases with MongoDB tools. You could also implement custom sync in application code using Change Streams to follow and relay data changes of interest.Besides Port number 27017 what other ports can be used to create the replSet mongod instance. What ports can be used fo Replication ?You can use any available port on your local server:Ports 1024-49151 (“registered ports”) are used for applications. Popular applications often have registered standard ports to avoid conflicts for default installations. The default MongoDB ports are 27017-27020. You generally want to avoid using a port commonly used by another service (for example, MySQL’s default port is 3306) but the main technical limitation is that you can’t have two different processes listening to the same IP:port combination.Ports 0-1023 (“welll-known ports”) are usually used for system services (DNS, SSH, LDAP, …) and typically require superuser access (so are not recommended for applications like MongoDB).Ports 49152-65535 are typically used for dynamic or ephemeral ports.Your replica set members will need to be able to communicate with each other via the configured IP:port combinations, so you will have to set up appropriate firewall and networking configuration. The MongoDB Security Checklist has more information on recommended security measures.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Thanks for the detailed answers, to further iterate the first question all the DBs available will be replicated to the Secondary nodes is my understanding.", "username": "Master_Selcuk" }, { "code": "", "text": "to further iterate the first question all the DBs available will be replicated to the Secondary nodes is my understanding.Hi @Master_Selcuk,Yes, replication has two general stages:Initial Sync: a newly added secondary copies all of the data from another memberReplication: ongoing continuous replication after initial sync. 
Secondaries either pull their updates from the current primary or from another more up-to-date secondary that has less network latency.For more details please review Replica Set Data Synchronization.If you want a deeper dive into MongoDB administration I also recommend the free online courses in the MongoDB University Learning Path for DBAs.M103: Basic Cluster Administration has some useful background and exercises for different on-premises deployment types (standalone, replica set, sharded cluster).Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "thank you I will check the courses out.", "username": "Master_Selcuk" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Setting up Replication
2022-08-10T20:06:57.796Z
Setting up Replication
1,422
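The reply above mentions implementing custom sync of selected collections with Change Streams without showing it. A minimal sketch with the official Node.js driver, using illustrative database and collection names, that follows one collection and relays its changes to another deployment:

```javascript
const { MongoClient } = require('mongodb');

async function relayChanges(sourceUri, targetUri) {
  const source = await MongoClient.connect(sourceUri);
  const target = await MongoClient.connect(targetUri);

  const watched = source.db('app').collection('orders');      // illustrative names
  const mirror  = target.db('app_copy').collection('orders');

  // fullDocument: 'updateLookup' makes update events carry the whole document,
  // so the mirror can be upserted without a second read.
  const stream = watched.watch([], { fullDocument: 'updateLookup' });

  for await (const change of stream) {
    if (['insert', 'update', 'replace'].includes(change.operationType)) {
      await mirror.replaceOne(
        { _id: change.documentKey._id },
        change.fullDocument,
        { upsert: true }
      );
    } else if (change.operationType === 'delete') {
      await mirror.deleteOne({ _id: change.documentKey._id });
    }
  }
}
```

Change streams require a replica set, and a production version would persist the resume token so the relay can pick up where it left off after a restart.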
null
[]
[ { "code": "", "text": "Hi, I found it that need to stop the mongod service first and then start the service instead restart the service at once. is this a normal or am i doing something wrong?", "username": "Rajitha_Hewabandula" }, { "code": "", "text": "that is normal for mongod. it won’t ever start if an error is present in settings, and will just work unless something unexpected happens. you can also have multiple instances running for different purposes, so a single stop/restart command would just not work.for that reason, you need to login individually to each instance and invoke “shutdown” from “admin” database if you need to stop the server. or you may just kill the process if it is just a test server.", "username": "Yilmaz_Durmaz" }, { "code": "mongodsudo service mongod start\nsudo service mongod restart\nmongodmongod", "text": "Hi @Rajitha_Hewabandula ,You should be able to restart the service as long as you started the mongod process using a service wrapper. For example, on Linux:If you are having difficulties restarting, please provide some more details:Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "you can also have multiple instances running for different purposes, so a single stop/restart command would just not work.You could start/stop/restart multiple instances individually with systemctl if you create the appropriate systemd unit files.\nYou could even start/stop/restart multiple instances all at once with an appropriate systemd unit file.you may just kill the process if it is just a test serverMake sure you use the appropriate signal when you kill the process. Some files, like /tmp/mongodb-27017.sock and some lock files in the db directory, might remains if the wrong signal is used with kill. And you might not be able to restart. The appropriate signal SIGTERM.", "username": "steevej" }, { "code": "", "text": "@steevej @Yilmaz_Durmaz Thanks for the prompt reply. its totally fine with the production server. this was happened in the VM machine. ", "username": "Rajitha_Hewabandula" } ]
Mongod service restart
2022-08-09T18:00:00.985Z
Mongod service restart
23,201
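To complement the service-wrapper commands above, the "invoke shutdown from the admin database" step mentioned for individually managed instances looks like this in mongosh:

```javascript
// Connected with mongosh to the specific mongod instance you want to stop:
db.getSiblingDB('admin').shutdownServer()
```

This performs a clean shutdown, equivalent to sending SIGTERM, so the socket and lock files are removed and the instance can be started again without manual cleanup.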
null
[ "node-js", "mongoose-odm", "compass", "connecting", "atlas-cluster" ]
[ { "code": "", "text": "MongoDB Atlas was working fine at one time. Now I have not been able to connect for a time. I’ve looked at other solutions here on this forum as well as others including Stackoverflow.I’m getting the aforementioned error using the application connection string, the MongoDB Compass app connection string as well as the MongoDB shell connection string.\nHere are the things that I’ve tried:Here is my software list:\nnodejs: v16.13.0\nexpress: v4.18.1\nmongoose: v6.5.2I’m placing my MongoDB Compass connection string here. I invite you to use it to try and connect. There is no sensitive data here so it’s not an issue to me…I just want to find out what preventing a connection.\nI did read something about lowering the node version to fix it. If that’s the case, I’m willing to do that but what should it be lowered to?We are planning a major rollout of a new application involving millions of potential hits per day, but if I can’t connect, I might as use another solution besides MongoDB.MongoDB Compass connection string:\nmongodb+srv://brohjoe1:[email protected]/auth (password masked…confirmed to be working by Yilmaz_Durmaz but I still can’t connect.", "username": "Joseph_Brown" }, { "code": "", "text": "you may hide the connection string because it is working from my computer right now.Since you say it was working before, I suspect your router/vpn/proxy restricts you for some reason, or your DNS settings have a problem and cannot resolve the name of the server.I don’t think it is a Node.js or library version. Have you had any upgrades when this problem started?By the way, it would be better to see the full error log. can you provide it?", "username": "Yilmaz_Durmaz" }, { "code": "", "text": "In addition to my above post, do you try connecting from your home/work pc, or your app is on a cloud-hosted pc?\nIf you use a work network, it is possible the network admins have put some port restrictions on the network at that time. in that case, have you contacted them?\nWhat else can you say about your project’s network structure?", "username": "Yilmaz_Durmaz" }, { "code": "", "text": "by the way, on your connection string, add something like this: “confirmed to be working, masked the password”", "username": "Yilmaz_Durmaz" }, { "code": "", "text": "I’m working from home using an Xfinity router. What do I need to do about the DNS settings? Here is the full errror log:App is listening at http://localhost:3001\nC:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongoose\\lib\\connection.js:824\nconst serverSelectionError = new ServerSelectionError();\n^MongooseServerSelectionError: Could not connect to any servers in your MongoDB Atlas cluster. One common reason is that you’re trying to access the database from an\nIP that isn’t whitelisted. 
Make sure your current IP address is on your Atlas cluster’s IP whitelist: https://docs.atlas.mongodb.com/security-whitelist/\nat NativeConnection.Connection.openUri (C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongoose\\lib\\connection.js:824:32)\nat C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongoose\\lib\\index.js:380:10\nat C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongoose\\lib\\helpers\\promiseOrCallback.js:41:5\nat new Promise ()\nat promiseOrCallback (C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongoose\\lib\\helpers\\promiseOrCallback.js:40:10)\nat Mongoose._promiseOrCallback (C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongoose\\lib\\index.js:1225:10)\nat Mongoose.connect (C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongoose\\lib\\index.js:379:20)\nat Object. (C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\app.js:11:10)\nat Module._compile (node:internal/modules/cjs/loader:1101:14)\nat Object.Module._extensions…js (node:internal/modules/cjs/loader:1153:10) {\nreason: TopologyDescription {\ntype: ‘ReplicaSetNoPrimary’,\nservers: Map(3) {\n‘ac-v2tectr-shard-00-01.4ross7o.mongodb.net:27017’ => ServerDescription {\n_hostAddress: HostAddress {\nisIPv6: false,\nhost: ‘ac-v2tectr-shard-00-01.4ross7o.mongodb.net’,\nport: 27017\n},\naddress: ‘ac-v2tectr-shard-00-01.4ross7o.mongodb.net:27017’,\ntype: ‘Unknown’,\nhosts: ,\npassives: ,\narbiters: ,\ntags: {},\nminWireVersion: 0,\nmaxWireVersion: 0,\nroundTripTime: -1,\nlastUpdateTime: 1097611983,\nlastWriteDate: 0,\nerror: MongoNetworkError: connect ETIMEDOUT 35.239.246.213:27017\nat connectionFailureError (C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongodb\\lib\\cmap\\connect.js:379:20)\nat TLSSocket. (C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongodb\\lib\\cmap\\connect.js:302:22)\nat Object.onceWrapper (node:events:510:26)\nat TLSSocket.emit (node:events:390:28)\nat emitErrorNT (node:internal/streams/destroy:157:8)\nat emitErrorCloseNT (node:internal/streams/destroy:122:3)\nat processTicksAndRejections (node:internal/process/task_queues:83:21) {\n[Symbol(errorLabels)]: Set(0) {}\n}\n},\n‘ac-v2tectr-shard-00-02.4ross7o.mongodb.net:27017’ => ServerDescription {\n_hostAddress: HostAddress {\nisIPv6: false,\nhost: ‘ac-v2tectr-shard-00-02.4ross7o.mongodb.net’,\nport: 27017\n},\naddress: ‘ac-v2tectr-shard-00-02.4ross7o.mongodb.net:27017’,\ntype: ‘Unknown’,\nhosts: ,\npassives: ,\narbiters: ,\ntags: {},\nminWireVersion: 0,\nmaxWireVersion: 0,\nroundTripTime: -1,\nlastUpdateTime: 1097611977,\nlastWriteDate: 0,\nerror: MongoNetworkError: connect ETIMEDOUT 35.232.109.33:27017\nat connectionFailureError (C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongodb\\lib\\cmap\\connect.js:379:20)\nat TLSSocket. 
(C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongodb\\lib\\cmap\\connect.js:302:22)\nat Object.onceWrapper (node:events:510:26)\nat TLSSocket.emit (node:events:390:28)\nat emitErrorNT (node:internal/streams/destroy:157:8)\nat emitErrorCloseNT (node:internal/streams/destroy:122:3)\nat processTicksAndRejections (node:internal/process/task_queues:83:21) {\n[Symbol(errorLabels)]: Set(0) {}\n}\n},\n‘ac-v2tectr-shard-00-00.4ross7o.mongodb.net:27017’ => ServerDescription {\n_hostAddress: HostAddress {\nisIPv6: false,\nhost: ‘ac-v2tectr-shard-00-00.4ross7o.mongodb.net’,\nport: 27017\n},\naddress: ‘ac-v2tectr-shard-00-00.4ross7o.mongodb.net:27017’,\ntype: ‘Unknown’,\nhosts: ,\npassives: ,\narbiters: ,\ntags: {},\nminWireVersion: 0,\nmaxWireVersion: 0,\nroundTripTime: -1,\nlastUpdateTime: 1097612045,\nlastWriteDate: 0,\nerror: MongoNetworkError: connect ETIMEDOUT 35.238.72.187:27017\nat connectionFailureError (C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongodb\\lib\\cmap\\connect.js:379:20)\nat TLSSocket. (C:\\Users\\brohj\\WebstormProjects\\Firebase\\redEyeMedia\\node_modules\\mongodb\\lib\\cmap\\connect.js:302:22)\nat Object.onceWrapper (node:events:510:26)\nat TLSSocket.emit (node:events:390:28)\nat emitErrorNT (node:internal/streams/destroy:157:8)\nat emitErrorCloseNT (node:internal/streams/destroy:122:3)\nat processTicksAndRejections (node:internal/process/task_queues:83:21) {\n[Symbol(errorLabels)]: Set(0) {}\n}\n}\n},\nstale: false,\ncompatible: true,\nheartbeatFrequencyMS: 10000,\nlocalThresholdMS: 15,\nsetName: ‘atlas-bt7sor-shard-0’,\nlogicalSessionTimeoutMinutes: undefined\n},\ncode: undefined\n}", "username": "Joseph_Brown" }, { "code": "telnet ac-v2tectr-shard-00-00.4ross7o.mongodb.net 27017\ntelnet 35.238.72.187 27017\nmongo 35.238.72.187:27017\nmongo ac-v2tectr-shard-00-00.4ross7o.mongodb.net:27017\n", "text": "do you have any telnet program installed? I hope you do because the next commands will tell you if it is related to DNS or not, while also testing your connection to your cluster.From your error log, the following address and IP are associated. telnet is used if the target MongoDB server can be connected. Check if you get any errors. an error from only first means a DNS problem. if both errors out that might be a bit complicated (or easy, couldn’t anticipate for now) as any broken firewall, antivirus program, router itself, network adapter settings can cause issues. (I keep nodejs and driver problems as the last scenario as your app was working)If both connects, then first try these two directly to connect to this server of the cluster. and then try the same but by adding your username and password. if you fail, that might be a messy IP access list, so If you come to this step I would advise to clear and redefine all security setting.please report your findings about these commands.by the way, it is late night here. if no one else shows up to help further I will be back tomorrow.", "username": "Yilmaz_Durmaz" }, { "code": "", "text": "I changed the DNS to Google’s IPv4 address: 8.8.8.8 and alternate 8.8.4.4. Still getting the same errors. I’m working your telnet solution next.", "username": "Joseph_Brown" }, { "code": "", "text": "I used Putty to connect. Got timed out on both the host address and port and the IP address and port. By the way, I used Google’s DNS’s and my original settings (Obtain DNS server address automatically). 
I added Telnet to Windows programs and got ‘Could not open connection to the host, on port 27017: Connect failed.’ For the IP address, Windows telnet said, ‘Could not open connection to the host, on port 35.238.72.187 27017: Connect Failed.’", "username": "Joseph_Brown" }, { "code": "", "text": "Could not sleep yet, so back here I have just noticed I took the IP address from your error log, which means your DNS is working correctly and associates your server address to its IP.Then, the telnet giving a connection error eliminates the possibility of app and library problems. with putty, you should get either “connection closed” or window just closing without error. (set connection type to telnet, and “close window on exit” to “never” on the bottom).Your firewall or virus protection may somehow have been set to block communication toward Atlas. Try disabling them for a while and test again.Also, check if you have set some proxy or VPN and forgot you did so, and they might be blocking your access.", "username": "Yilmaz_Durmaz" }, { "code": "plink.exeplink.exe -telnet ac-v2tectr-shard-00-00.4ross7o.mongodb.net -P 27017 -vLooking up host \"ac-v2tectr-shard-00-00.4ross7o.mongodb.net\" for Telnet connection\nConnecting to 35.238.72.187 port 27017\nclient negotiation: WILL NAWS\nclient negotiation: WILL TSPEED\nclient negotiation: WILL TTYPE\nclient negotiation: WILL NEW_ENVIRON\nclient negotiation: DO ECHO\nclient negotiation: WILL SGA\nclient negotiation: DO SGA\nConnected to 35.238.72.187\nFailed to connect to 35.238.72.187: Network error: Connection timed out\nNetwork error: Connection timed out\nFATAL ERROR: Network error: Connection timed out\n", "text": "there is plink.exe on putty’s page. you can have plink.exe -telnet ac-v2tectr-shard-00-00.4ross7o.mongodb.net -P 27017 -v command and the output should be the following (it will just exit proving connection):this log one is from my pc showing Atlas is fine along with my connection to it.In your case, this should end with the following until you find the culprit:", "username": "Yilmaz_Durmaz" }, { "code": "", "text": "I finally connected. I used my cell phone to tether to my computer and I was able to connect. That let me know that the problem was with my ISP. I have to ‘port forward’ port 27017 on my gateway. Thanks so much for helping me.", "username": "Joseph_Brown" }, { "code": "", "text": "I don’t know how your service provider or router works, but the connection towards Atlas should work without port forwarding, which you were already having some time ago.It is good you have at least a connection now, but I recommend you investigate some more into the issue as you may experience worse problems in the future.", "username": "Yilmaz_Durmaz" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Getting ETIMEOUT error when attempting to connect
2022-08-14T21:10:05.041Z
Getting ETIMEOUT error when attempting to connect
19,349
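For anyone reproducing the port-27017 diagnosis from this thread in code rather than with telnet or plink, a plain TCP probe from Node.js gives the same answer. This is a generic sketch, not part of the original discussion; the hostname is the one that appears in the error log above.

```js
// tcp-probe.js - minimal reachability check for an Atlas node (illustrative sketch)
const net = require("net");

const host = "ac-v2tectr-shard-00-00.4ross7o.mongodb.net"; // hostname taken from the error log above
const port = 27017;

const socket = net.createConnection({ host, port, timeout: 10000 });

socket.on("connect", () => {
  console.log(`TCP connection to ${host}:${port} succeeded, so outbound 27017 is not blocked`);
  socket.end();
});

socket.on("timeout", () => {
  console.error("Connection timed out: a firewall, router or ISP is likely dropping port 27017");
  socket.destroy();
});

socket.on("error", (err) => {
  console.error("Connection failed:", err.message);
});
```

If this probe times out while the same probe over a phone tether succeeds, the conclusion is the same one the thread reached: the local network, not Atlas or the driver, is blocking the port.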
null
[ "aggregation" ]
[ { "code": "{\n \"_id\": {\n \"$oid\": \"61416551dc536c566dd17cb0\"\n },\n \"name\": \"Store A\",\n \"number\": 345,\n \"location\": {\n \"type\": \"Point\",\n \"coordinates\": [\n -87.05365159,\n 42.67955354\n ]\n }\n}\n{\n \"_id\": {\n \"$oid\": \"61416551dc536c566dd17cb1\"\n },\n \"name\": \"Table\",\n \"storeNumber\": 345\n}\n", "text": "Say I have two collections: “stores” and “products”.Example store document:Example product documentMy goal involves two steps:I understand how to implement these steps using separate pipelines/queries, but I am hoping to complete both of these steps in one trip to the database.What are some possible approaches?", "username": "Suray_T" }, { "code": "$geoNear$lookup$lookup$geoNeardb.stores.aggregate([ \n{\n '$geoNear': {\n near: { type: 'Point', coordinates: [ -87.05, 42.67 ] },\n distanceField: 'distance'\n }\n},\n{\n '$lookup': {\n from: 'products',\n localField: 'number',\n foreignField: 'storeNumber',\n as: 'storeItems'\n }\n}\n])\n[\n {\n _id: ObjectId(\"62f9d1fbab5d901302a457e4\"),\n name: 'Store A',\n number: 345,\n location: { type: 'Point', coordinates: [ -87.05365159, 42.67955354 ] },\n distance: 1104.6830501177644,\n storeItems: [\n {\n _id: ObjectId(\"62f9d1afab5d901302a457e1\"),\n name: 'Table',\n storeNumber: 345\n },\n {\n _id: ObjectId(\"62f9d1b5ab5d901302a457e2\"),\n name: 'Chair',\n storeNumber: 345\n },\n {\n _id: ObjectId(\"62f9d1c5ab5d901302a457e3\"),\n name: 'Cushions',\n storeNumber: 345\n }\n ]\n }\n]\n", "text": "Hi @Suray_T,I understand how to implement these steps using separate pipelines/queries, but I am hoping to complete both of these steps in one trip to the database.\nMy goal involves two steps:Have you attempted using $geoNear with a $lookup in a pipeline?What are some possible approaches?I have a simple example below which uses data from my test environment below which uses the $lookup stage after $geoNear. You may need to alter this accordingly and test thoroughly to determine if it suits your use case / requirements:Output:If this is not what you are after or are still having troubles, please advise:Regards,\nJason", "username": "Jason_Tran" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Querying with results from $geoNear
2022-08-09T23:02:59.927Z
Querying with results from $geoNear
990
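A detail the accepted answer in the $geoNear thread above takes for granted: $geoNear must be the first stage of the pipeline and the collection needs a geospatial index on the queried field. A minimal mongosh sketch, reusing the collection and field names from that example:

```js
// $geoNear requires a 2dsphere index on the location field and must be the first pipeline stage
db.stores.createIndex({ location: "2dsphere" });

db.stores.aggregate([
  {
    $geoNear: {
      near: { type: "Point", coordinates: [-87.05, 42.67] },
      distanceField: "distance",
      maxDistance: 5000 // optional: cap matches at 5 km (metres, because the points are GeoJSON)
    }
  },
  {
    $lookup: {
      from: "products",
      localField: "number",
      foreignField: "storeNumber",
      as: "storeItems"
    }
  }
]);
```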
null
[]
[ { "code": "", "text": "Hi,I’m unable to download x509 certificates through the Atlas Access Management Interface for either a new user or an existing one. A new tab opens with the following error:{ status: 404,\nmessage: “Not Found” }I tried on 2 different projects with he same result.Is someone else experiencing this issue?Thanks in advance", "username": "Alexandre_Leite" }, { "code": "", "text": "Same here. Can anyone look into this issue? It would be very much appreciated, as my old cert is invalid now.", "username": "Anton_Wissmer" }, { "code": "atlas dbusers certs create --projectId <projectID> --username <username>\natlas dbusers certs create", "text": "Hi,Thanks for reporting this.Our MongoDB Atlas Engineering team has been made aware of this issue, and is currently working on the fix. Once the fix has been deployed and confirmed working I will update this thread.As a workaround until this is fixed you can use the Atlas CLI to create new users, create certs for users, etc.For example, once the Atlas CLI is installed and a connection to Atlas has been made, a certificate can be created usingFor additional info have a look at the atlas dbusers certs create documentation.Thanks and regards,\nMarkus", "username": "Markus_Thielsch" }, { "code": "", "text": "Thank you for your reply.I managed to download the certificate with the help of atlas support.Thanks", "username": "Alexandre_Leite" }, { "code": "", "text": "Thank you, good sir, it worked for me as well. Very much appreciated.", "username": "Anton_Wissmer" }, { "code": "", "text": "Hi all,Our MongoDB Atlas Engineering team has deployed and verified a fix for the creation of certificates using the Atlas UI.Thanks and regards,\nMarkus", "username": "Markus_Thielsch" }, { "code": "", "text": "I agree with you. Would anyone be able to investigate this problem? Considering my former certificate is no longer valid. I’d really appreciate it.", "username": "Usman_581_N_A" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Cannot download x509 certificate
2022-08-08T11:51:43.618Z
Cannot download x509 certificate
2,010
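Once a certificate exists again (via the UI fix or the Atlas CLI workaround above), connecting with it from Node.js looks roughly like this sketch. The cluster hostname and the certificate path are placeholders rather than values from this thread.

```js
// Connecting with an Atlas-managed X.509 user certificate (Node.js driver sketch)
const { MongoClient } = require("mongodb");

const uri =
  "mongodb+srv://cluster0.example.mongodb.net/?authSource=%24external&authMechanism=MONGODB-X509&tls=true";

const client = new MongoClient(uri, {
  // .pem downloaded from Atlas or generated with `atlas dbusers certs create`
  tlsCertificateKeyFile: "./X509-cert-user.pem"
});

async function run() {
  try {
    await client.connect();
    const { databases } = await client.db().admin().listDatabases();
    console.log(databases.map((d) => d.name));
  } finally {
    await client.close();
  }
}

run().catch(console.error);
```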
null
[ "data-modeling" ]
[ { "code": "", "text": "Hello,I’d like to create a good schema that allows a fast change of the (sort) order - the client should be able to add, remove and rearrange the order of the document(s) within a group (perhaps with a group identifier or set).Also every single document can get often called directly by it’s ID - and only this data is needed, not the complete group.It’s recommend to return the current order index with each single document.Using a key {…, order: 1} for every document in the group results in updating every single document with a new query (I am new to mongoDB - please correct me if I am wrong) to change and/or increment the order value.Or using a set [‘id1’, ‘id2’, …] or just inserting the whole document (they aren’t big - this post has nearly the same size ;-)) in this set.Not sure what’s the best solution and how to update this data.\nAlso I think I will have limit the group size for example to 999 items. But this is open and will depend on the performance.Thanks for your help!", "username": "Xairoo" }, { "code": "", "text": "I also want to know how to implement what @Xairoo is looking after. Can any humble person help me out as well please?", "username": "Venkatesh_Nadar" }, { "code": "", "text": "Hi @Venkatesh_Nadar,Can you provide some examples and details on what the current use case is? Please also elaborate on any examples provided with regards to schema. Additionally, please provide as much information on the use case as possible. For example - MongoDB version, requirements, etc.In saying so, some of the following pages may be of use regarding schemas:Regards,\nJason", "username": "Jason_Tran" } ]
Schema for a changeable ordering
2021-04-19T10:37:57.250Z
Schema for a changeable ordering
2,963
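The ordering thread above never settles on a concrete pattern, so here is one common approach sketched in mongosh: keep each item as its own document (so it can still be fetched directly by _id) and store the ordering as an array of item ids on the group document, which turns any reordering into a single update. Collection and field names are invented for illustration.

```js
// The group document owns the ordering; items remain independent documents.
db.groups.insertOne({ _id: "group1", itemOrder: ["id1", "id2", "id3"] });

// Insert an item at a specific position (index 1) in one update:
db.groups.updateOne(
  { _id: "group1" },
  { $push: { itemOrder: { $each: ["id4"], $position: 1 } } }
);

// Remove an item from the ordering:
db.groups.updateOne({ _id: "group1" }, { $pull: { itemOrder: "id2" } });

// If a caller needs an item's current index, compute it from the small order array:
db.groups.aggregate([
  { $match: { _id: "group1" } },
  { $project: { index: { $indexOfArray: ["$itemOrder", "id4"] } } }
]);
```

Capping the group size, as the question already suggests, keeps the order array small enough that rewriting it on each change stays cheap.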
https://www.mongodb.com/…654cf07b0d86.png
[ "atlas-functions" ]
[ { "code": "use dt3_api\ndb[\"LCP Production\"].find({dt_name:\"kaplan\"}).collation({locale: 'en_US', strength: 1})\nexports= function (payload) {\n \n // const query={\"dt_name\":(payload.query.testparam)};\n const mongodb=context.services.get(\"mongodb-atlas\");\n const mycollection = mongodb.db(\"dt3_api\").collection(\"LCP Production\");\n \n return mycollection.find({dt_name:\"kaplan\"}).collation({locale: 'en_US', strength: 1})\n\n}; \n", "text": "In the Mongo shell this works correctly to create a case-insensitive search of a database for which I have created a case-insensitive index:I am trying to do the same thing as a Realm function as follows:But when I run this I get this error:I am using Mongo 4.4.8 so I believe the collation function should work (as indeed it works in the shell).Is there some alternate way to run a search on a collated index as a Realm function?", "username": "Richard_Kaplan" }, { "code": "", "text": "Later - does this mean there is no way to use a collated index with a Realm function?", "username": "Richard_Kaplan" }, { "code": " return mycollection.find ( {$text: { $search: payload.query.testparam, $caseSensitive:false}})", "text": "A workaround seems to be too use a text index search -this does work - however it searches globally in all fields which have a text index. Is there a way to restrict this to only specific fields? return mycollection.find ( {$text: { $search: payload.query.testparam, $caseSensitive:false}})", "username": "Richard_Kaplan" }, { "code": "{\n '$addFields': {\n 'sort_title': {\n '$toLower': '$title_data'\n }\n }\n},\n{\n '$sort': {\n 'sort_title': 1\n }\n}\n", "text": "i had this same error with collation except i was trying to use aggregate to sort text alphabetically (mongodb 4.4)\nmy remedy:use $addFields to add a new lowercase field of the target field’s text, then sort by the new lowercase field", "username": "Inspecta_Tech" }, { "code": "", "text": "I was surprise not to find a solution here. It’s so weird that it works on mongodb shell but not in Realm.They need to fix thisAlso, I tried @Richard_Kaplan solution and it works great if I write findOne instead of find (which doesn’t work)", "username": "Timothee_Wright" } ]
Collation is not a function
2021-08-22T16:11:17.042Z
Collation is not a function
4,435
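If .collation() stays unavailable inside an Atlas Function, a workaround in line with the replies above is to normalise case in an aggregation instead. Note that this bypasses the case-insensitive index, so it is a fallback sketch rather than the supported pattern.

```js
// Case-insensitive equality match without .collation(), callable as an Atlas Function
exports = function (payload) {
  const mongodb = context.services.get("mongodb-atlas");
  const coll = mongodb.db("dt3_api").collection("LCP Production");

  const term = (payload.query.testparam || "").toLowerCase();

  return coll
    .aggregate([{ $match: { $expr: { $eq: [{ $toLower: "$dt_name" }, term] } } }])
    .toArray();
};
```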
null
[ "aggregation", "queries", "atlas-cluster", "kafka-connector" ]
[ { "code": "connect | [2022-07-27 17:51:46,250] ERROR Failed to put into the sink some records, see log entries below for the details (com.mongodb.kafka.connect.sink.MongoSinkTask)\nconnect | com.mongodb.MongoBulkWriteException: Bulk write operation error on server cluster0-shard-00-02.ebt7p.mongodb.net:27017. Write errors: [BulkWriteError{index=0, code=2, message='unknown operator: $oid', details={}}]. \n{\"name\": \"mongo-ts-sink\",\n \"config\": {\n \"connector.class\":\"com.mongodb.kafka.connect.MongoSinkConnector\",\n \"tasks.max\":\"1\",\n \"topics\": \"ChatData.socketIo-MongoDb.chat\",\n \"connection.uri\":\"\",\n \"database\":\"socketIo-MongoDb\",\n \"collection\":\"chatfeed\",\n \"key.converter\":\"org.apache.kafka.connect.json.JsonConverter\",\n \"key.converter.schemas.enable\":false,\n \"value.converter\":\"org.apache.kafka.connect.json.JsonConverter\",\n \"value.converter.schemas.enable\":false,\n \"publish.full.document.only\": true\n } \n}\ndocker exec -ti redpanda rpk topic consume ChatData.socketIo-MongoDb.chat\n\n{\n \"topic\": \"ChatData.socketIo-MongoDb.chat\",\n \"key\": \"{\\\"_id\\\": {\\\"_data\\\": \\\"8262E17ECA000000092B022C0100296E5A1004FC2D06E10EF64CA2967AEFB29F6E510B46645F6964006462E17ECA1F262FEED5EFB6520004\\\"}}\",\n \"value\": \"{\\\"_id\\\": {\\\"$oid\\\": \\\"62e17eca1f262feed5efb652\\\"}, \\\"name\\\": \\\"John Johnson\\\", \\\"message\\\": \\\"Bonjour\\\"}\",\n \"timestamp\": 1658945232072,\n \"partition\": 2,\n \"offset\": 0\n}\nAtlas atlas-7j1r6x-shard-0 [primary] socketIo-MongoDb> db.chatfeed.insertOne( { 'test' : 1 } )\n", "text": "Hi All,I am just trying to get data from mongodb atlas collection via mongodb-kafka-source-connector to redpanda then back to a different collection in mongodb atlas via mongodb-kafka-sink-connector.I managing to consume the data but then unable to send to mongodb atlas - getting exception —>Although when i run– > works fineThank you for your help.", "username": "Onesmus_Nyakotyo" }, { "code": "\"key.converter\": \"org.apache.kafka.connect.storage.StringConverter\",\"value.converter\": \"org.apache.kafka.connect.storage.StringConverter\",", "text": "try changing the key and value converter to\"key.converter\": \"org.apache.kafka.connect.storage.StringConverter\",and\"value.converter\": \"org.apache.kafka.connect.storage.StringConverter\",", "username": "Robert_Walters" }, { "code": "const express = require('express')\nconst app = express()\nconst { MongoClient } = require(\"mongodb\");\n// const { Kafka, KafkaJSBrokerNotFound, CompressionTypes, logLevel, Partitioners } = require('kafkajs')\n// const cors = require('cors')\n// const dotenv = require('dotenv')\nconst http = require('http');\nconst server = http.createServer(app);\n\n\n\nconst { Server } = require('socket.io')\n// const io = require('socket.io')\n\nconst io = new Server(server, {\n cors: {\n origin: \"http://localhost:3000\" \n }\n});\n\n\nconsole.log('MongoDB connected...');\n\n\n\n//Connect to MongoDB\nconst uri = \"mongodb+srv://<username/[email protected]/?retryWrites=true&w=majority\";\n\nconst client = new MongoClient(uri);\n\n\nconst simulateAsyncPause = () =>\n new Promise(resolve => {\n setTimeout(() => resolve(), 1000);\n });\n\n\nlet changeStream;\n\n\nasync function run() {\n\n try {\n\n // Select DB and Collection\n await client.connect();\n \n const database = client.db(\"socketIo-MongoDb\");\n\n const chat = database.collection(\"chat\");\n\n const chatfeed= database.collection(\"chatfeed\");\n\n \n 
chatfeed.find().limit(100).sort({_id:1}).toArray((err, res) => {\n if(err){\n throw err;\n }\n\n \n io.on('connection', socket => {\n\n // Emit the messages\n socket.emit('output', res);\n\n socket.on('disconnect', () => {\n console.log(`disconnect: ${socket.id}`);\n });\n\n })\n \n })\n \n\n // open a Change Stream on the \"chatfeed\" collection\n changeStream = chatfeed.watch();\n\n // set up a listener when change events are emitted\n changeStream.on(\"change\", async next => {\n // process any change next\n console.log(\"received a change to the collection: \\t\", next);\n\n // process any change next\n switch (next.operationType) {\n case 'insert':\n\n\n await simulateAsyncPause();\n \n io.emit('output', next.fullDocument.message);\n\n await simulateAsyncPause();\n\n\n console.log( 'INSERT',next.fullDocument.message);\n\n\n break;\n\n\n case 'update':\n\n \n io.emit('output', next.updateDescription.updatedFields.message);\n\n \n\n console.log('UPDATE', next.updateDescription.updatedFields.message);\n }\n });\n\n\n await simulateAsyncPause();\n\n await chat.insertOne({\n name: \"John Johnson\",\n message: \"No bytes, no problem. Just insert a document, in MongoDB\",\n });\n\n await simulateAsyncPause();\n\n \n\n io.on('connection', socket => {\n // Handle input events\n socket.on('input', async data => {\n\n \n\n let name = data.name;\n let message = data.message;\n\n \n\n await chat.insertOne({name: name, message: message}, () => {\n \n socket.emit('output', [data])\n\n console.log('OUTPUT', [data])\n \n })\n\n \n\n });\n\n socket.on('disconnect', () => {\n console.log(`disconnect: ${socket.id}`);\n });\n\n\n \n })\n \n await changeStream.close();\n \n console.log(\"closed the change stream\");\n\n \n \n\n\n \n\n\n\n } finally {\n\n // Ensures that the client will close when you finish/error\n await client.close();\n\n }\n}\nrun().catch(console.dir);\n\nconst PORT = 4000;\n\n\nserver.listen(PORT, console.log('IO RUNNING on PORT 4000')) ```\n\nThank you for your help", "text": "Thank you Robert_Walters,its now working, now encountering problems with inserting data into mongodbMy watch of the stream is not working, returning nothing, no error displayed and can’t insert data into the chat collection either but can get data from the chatfeed collection.", "username": "Onesmus_Nyakotyo" }, { "code": "", "text": "it might be something with how you are leveraging Change Stream, check out this article Change Streams & Triggers with Node.js Tutorial | MongoDB", "username": "Robert_Walters" }, { "code": "", "text": "Hi Ribert_Walters, please can you point me to any docs or resources on multiple kafka source/sink topics - can’t find any from the community or mongodb docs or stackoverflow.Thank you", "username": "Onesmus_Nyakotyo" }, { "code": "", "text": "What do you mean by multiple source/sink topics?", "username": "Robert_Walters" }, { "code": "", "text": "I mean how can I add more than one topic in the kafka sink connector?Is it good design to have more than one sink from the one source?", "username": "Onesmus_Nyakotyo" }, { "code": "", "text": "Yes see the Dynamic namespace and topic mapping described here\nMongoDB Connector for Apache Kafka 1.4 Available Now | MongoDB BlogYou can scale the sink by increasing the workers read the tuning the sink connector portion of this blogCode, content, tutorials, programs and community to enable developers of all skill levels on the MongoDB Data Platform. 
Join or follow us here to learn more!", "username": "Robert_Walters" }, { "code": " \"config\": {\n \"connector.class\":\"com.mongodb.kafka.connect.MongoSinkConnector\",\n \"tasks.max\":\"1\",\n \"topics\": \"ChatData.socketIo-MongoDb.chat\",\n \"connection.uri\":\"mongodb+srv://username/[email protected]/?retryWrites=true\",\n \"database\":\"socketIo-MongoDb\",\n \"collection\":\"chatfeed\",\n \"key.converter\":\"org.apache.kafka.connect.json.JsonConverter\",\n \"key.converter.schemas.enable\":false,\n \"value.converter\":\"org.apache.kafka.connect.json.JsonConverter\",\n \"value.converter.schemas.enable\":false,\n \"publish.full.document.only\": true,\n \"namespace.mapper\":\"com.mongodb.kafka.connect.sink.namespace.mapping.FieldPathNamespaceMapper\",\n \"namespace.mapper.value.collection.field\":{\"chat\": \"ChatData.socketIo-MongoDb.chat\", \"userProfile\": \"ChatData.socketIo-MongoDb.userProfile\"}\n } \n}```\n\nThank you", "text": "Thank you Robert_Walters, have tried mapping the topics but not working well for me, maybe i have configured the sink incorrectly → ", "username": "Onesmus_Nyakotyo" } ]
BulkWriteError - MongoBulkWriteException
2022-07-27T18:20:14.930Z
BulkWriteError - MongoBulkWriteException
5,670
null
[ "connecting", "atlas" ]
[ { "code": "Error: queryTxt ETIMEOUT test-cluster-wt6zx.mongodb.net\n at QueryReqWrap.onresolve [as oncomplete] (dns.js:202:19) {\n errno: 'ETIMEOUT',\n code: 'ETIMEOUT',\n syscall: 'queryTxt',\n hostname: 'test-cluster-wt6zx.mongodb.net'\n}\n", "text": "I’m facing this connection Error while conncecting with Atlas. It worked fine till yesterday.\nI’ve double checked my username and password.\nI’ve whitelisted all the IP’s.I’m stuck in the middle of a project and cannot connect to the DB itself.", "username": "Arpan_Adarsh" }, { "code": "", "text": "It is timing out at name resolution time. Try using different DNS servers. You may try with google’s 8.8.8.8 and 8.8.4.4.", "username": "steevej" }, { "code": "", "text": "A post was split to a new topic: ‘ETIMEOUT’ error connecting to Atlas", "username": "Stennie_X" }, { "code": "", "text": "", "username": "Stennie_X" } ]
Error: queryTxt ETIMEOUT :: Error while connecting with Atlas
2020-06-24T11:00:36.194Z
Error: queryTxt ETIMEOUT :: Error while connecting with Atlas
15,444
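Because the failing syscall in this thread is queryTxt, the lookup that breaks is the DNS TXT/SRV resolution of the +srv address. Besides switching DNS servers, a common workaround is the long-form (non-SRV) connection string that Atlas also provides, which skips those lookups entirely. The hostnames and replica-set name below are placeholders; copy the real ones from the Atlas connect dialog.

```js
// Fallback: a standard (non-SRV) connection string avoids the DNS TXT/SRV queries.
const mongoose = require("mongoose");

const uri =
  "mongodb://<username>:<password>@" +
  "host-shard-00-00.example.mongodb.net:27017," +
  "host-shard-00-01.example.mongodb.net:27017," +
  "host-shard-00-02.example.mongodb.net:27017" +
  "/test?ssl=true&replicaSet=myReplicaSet&authSource=admin&retryWrites=true&w=majority";

mongoose
  .connect(uri)
  .then(() => console.log("connected without an SRV lookup"))
  .catch((err) => console.error(err));
```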
null
[ "app-services-user-auth" ]
[ { "code": "", "text": "Hi all,i enabled Authentication via Google in MongoDB Realm. And the authentication works well. But the App User in Realm is missing the email. I dont receive the email as Provider Data. Any idea whats wrong?", "username": "Julien_Rensch-Furste" }, { "code": "", "text": "Hi @Julien_Rensch-Furste , I faced the same issue and can not find any solution for it. Do you find the way to fix this issue?", "username": "Tai_Nguyen1" }, { "code": "", "text": "did anyone find out why?", "username": "Sohibe_Alhabroosh" }, { "code": "", "text": "I’m running into this issue as well. My guess is it has something to do with using OpenID. The JWT returned from Google contains all the metadata (so that can be decoded on the frontend with a library) but when passing that JWT to Realm App Services then everything except the name property gets stripped. This preventing me from associating custom data, like email and picture, with user in Atlas.Help!", "username": "J_W" } ]
Email with Google Auth missing
2021-04-23T21:00:03.726Z
Email with Google Auth missing
3,900
null
[ "aggregation" ]
[ { "code": "", "text": "How can I search for similar text value in both parent collection and its child relation collection?I have two collection like Product and Category. I want to search for product name and its relative category name. How can I do it with aggregation?", "username": "Sai_Main" }, { "code": "</>", "text": "Hi @Sai_Main and welcome to the MongoDB Community forums. Can you provide some sample documents that has the data you’re working with? Please keep them as small as possible and make sure to use the Preformatted text block (click on the </> icon in the tool bar) when pasting the documents into your post (if pasted into regular text, the quotes get converted to fancy quotes and then have to be converted back to regular quotes by the person trying to help you).", "username": "Doug_Duncan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to search similar in both parent collection and its child relation collection
2022-08-14T08:56:57.130Z
How to search similar in both parent collection and its child relation collection
888
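Since the thread above closes without a pipeline, here is a generic sketch of what searching a product name and its category name for similar text can look like, using $lookup plus a case-insensitive $regex. The layout (products holding a categoryId that references categories) is an assumption for illustration, not taken from the question.

```js
// Find products whose own name OR whose category's name matches a search term
const term = "chair";

db.products.aggregate([
  {
    $lookup: {
      from: "categories",
      localField: "categoryId",
      foreignField: "_id",
      as: "category"
    }
  },
  { $unwind: { path: "$category", preserveNullAndEmptyArrays: true } },
  {
    $match: {
      $or: [
        { name: { $regex: term, $options: "i" } },
        { "category.name": { $regex: term, $options: "i" } }
      ]
    }
  }
]);
```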
null
[ "swift" ]
[ { "code": "", "text": "Hi,My Realm model is also codable, and theses properties can be null, but when I get the JSON from the serve and decode it. It’s not working because he doesn’t know map the optional valueI need to implement the decoder method to solve this. I don’t want to implement decoder to all my realm modelThere is an optional persisted propertyWrapper @OptionalPersisted something like this?", "username": "Mickael_Belhassen" }, { "code": "", "text": "and theses properties can be nullWhich properties? Can you include the Realm Model you’re referring to and perhaps a brief segment of the JSON you’re working with? Also, a snippet of the code you’ve attempted would be very helpful in clarifying the question", "username": "Jay" }, { "code": "@objcMembers\nclass Supplier: Object, Coddle {\n @Persisted(primaryKey: true) var id: UUID = UUID()\n @Persisted var name: String = \"\"\n @Persisted var phoneNB: String?\n @Persisted var address: String?\n @Persisted var email: String?\n @Persisted var website: String?\n @Persisted var createdAt: Date = Date()\n\n convenience init(name: String, phoneNB: String?, address: String?, email: String?, website: String?) {\n self.init()\n self.name = name\n self.phoneNB = phoneNB\n self.address = address\n self.email = email\n self.website = website\n }\n}\n{\n \"message\":\"OK\",\n \"data\":[\n {\n \"phoneNB\":null,\n \"faxNB\":null,\n \"address\":null,\n \"id\":\"477076BB-3114-4750-8E4F-042EF65B31E0\",\n \"website\":null,\n \"email\":null,\n \"createdAt\":\"2022-08-12T00:00:00Z\",\n \"name\":\"sup\",\n \"user\":{\n \"id\":\"661FC780-B01B-42E4-875A-C60A0B30EE34\"\n }\n }\n ],\n \"code\":200\n}\nDecoded JSON error: valueNotFound(Swift.Optional<Swift.String>, Swift.DecodingError.Context(codingPath: [CodingKeys(stringValue: \"data\", intValue: nil), _JSONKey(stringValue: \"Index 0\", intValue: 0), CodingKeys(stringValue: \"phoneNB\", intValue: nil)], debugDescription: \"Expected Optional<String> but found null value instead.\", underlyingError: nil))?", "text": "My model:The JSON that I’m trying to parseThe error that I get\nDecoded JSON error: valueNotFound(Swift.Optional<Swift.String>, Swift.DecodingError.Context(codingPath: [CodingKeys(stringValue: \"data\", intValue: nil), _JSONKey(stringValue: \"Index 0\", intValue: 0), CodingKeys(stringValue: \"phoneNB\", intValue: nil)], debugDescription: \"Expected Optional<String> but found null value instead.\", underlyingError: nil))It’s fail on phoneNB field. 
In my Realm model I marked it optional ?", "username": "Mickael_Belhassen" }, { "code": " convenience required init(from decoder: Decoder) throws {\n self.init()\n let container = try decoder.container(keyedBy: CodingKeys.self)\n id = try container.decode(UUID.self, forKey: .id)\n name = try container.decode(String.self, forKey: .name)\n phoneNB = try container.decodeIfPresent(String.self, forKey: .phoneNB)\n address = try container.decodeIfPresent(String.self, forKey: .address)\n email = try container.decodeIfPresent(String.self, forKey: .email)\n website = try container.decodeIfPresent(String.self, forKey: .website)\n createdAt = try container.decode(Date.self, forKey: .createdAt)\n }\n", "text": "To fix the error I need to implement manually the decoder methodI don’t want to add the method manually, I want to let the compier to synthesize it", "username": "Mickael_Belhassen" }, { "code": "Supplierdatalet supplierJSONString = \n\n {\n \"phoneNB\":null,\n \"id\":\"477076BB-3114-4750-8E4F-042EF65B31E0\",\n \"name\":\"sup\"\n }\nclass Supplier: Object, Codable {\n @Persisted(primaryKey: true) var id: UUID = UUID()\n @Persisted var name: String = \"\"\n @Persisted var phoneNB: String?\n}\n let jsonData = self.supplierJSONString.data(using: .utf8)!\n let response = try! JSONDecoder().decode(Supplier.self, from: jsonData)\n \n print(response)\nSupplier {\n\tid = 477076BB-3114-4750-8E4F-042EF65B31E0;\n\tname = sup;\n\tphoneNB = (null);\n}\n", "text": "Forgive the question but at first glance, the Realm object model Supplier doesn’t appear to match the presented JSON - the JSON data is a tier lower within a data child node. Perhaps there’s a third party library installed or more to it?But the question is about the optional value so I simplified your json data just for testing with optionals to this:and then created a Realm Model to map it toand then some quick code to encode the JSON string and then decode it into the Realm objectand printed to consoleSo it looks like the optional is being set to NULL as it should.Perhaps a bit more clarity in the question and model would help us narrow the issue?", "username": "Jay" } ]
There is an Optional Persisted propertyWrapper?
2022-08-12T08:08:51.014Z
There is an Optional Persisted propertyWrapper?
2,153
null
[ "atlas-cluster" ]
[ { "code": "com.mongodb.MongoSecurityException: Exception authenticating MongoCredential{mechanism=MONGODB-AWS, userName='BLABLABLA', source='$external', password=<hidden>, mechanismProperties=<hidden>}", "text": "Hi,I’ve successfully connected my application to MongoDB Atlas cluster from AWS-EKS using method described here: https://www.mongodb.com/docs/atlas/security/passwordless-authentication/#aws-eksAfter some time the application fails with com.mongodb.MongoSecurityException: Exception authenticating MongoCredential{mechanism=MONGODB-AWS, userName='BLABLABLA', source='$external', password=<hidden>, mechanismProperties=<hidden>} when doing operations against Mongo.Probably because it’s only a temporary client session. The IAM role has a maximum session duration of 1 hour. It seems it’s a bit random how long it takes before this MongoSecurityException kicks in.Anyone has any experience with this and a recommended approach to solve this…? Haven’t found anything regarding this yet.", "username": "Kristoffer_Almas" }, { "code": "", "text": "Don’t quite unterstand how this works…Just testet now with the k8s pod which has been running ~2 days without interruption (and no activity towards Mongo). I would expect it to get an MongoSecurityException when tries to do a operation towards Mongo. But everything worked perfectly.Found this post which is related: Re-establish AWS Assumerole connection - but still unsure about a solution here.", "username": "Kristoffer_Almas" } ]
MongoSecurityException - AWS-EKS - How to keep session alive
2022-08-11T17:44:14.456Z
MongoSecurityException - AWS-EKS - How to keep session alive
1,638
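For reference, the passwordless EKS setup discussed above reduces to a connection string like the sketch below; there is no username or password in the URI, and the temporary credentials are resolved from the pod's IAM role when the driver authenticates. The hostname is a placeholder, and depending on driver version and credential source an extra AWS credential-provider package may be needed, so treat this as an outline rather than a drop-in fix for the session-expiry question.

```js
// MONGODB-AWS authentication: credentials come from the environment (here, the
// EKS pod's IAM role for the service account), not from the connection string.
const { MongoClient } = require("mongodb");

const uri =
  "mongodb+srv://cluster0.example.mongodb.net/?authSource=%24external&authMechanism=MONGODB-AWS";

const client = new MongoClient(uri);

async function ping() {
  await client.connect();
  await client.db("admin").command({ ping: 1 });
  console.log("authenticated via MONGODB-AWS");
}

ping()
  .catch(console.error)
  .finally(() => client.close());
```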
null
[ "graphql", "realm-web" ]
[ { "code": "{\n \"query\": {\n \"_id\": \"611e4c5a290dbc5e94ec9bad\"\n }\n}\n\nquery GetProductQuery($query: ProductQueryInput) {\n product(query: $query) {\n __typename\n _id\n associatedProducts {\n __typename\n image\n title\n url\n }\n description\n externalId\n gender\n image\n price\n provider {\n __typename\n logo\n name\n }\n status\n tags\n title\n url\n }\n}\n\nreason=\"could not validate document: \\n\\tcreatedAt: Invalid type. Expected: type: undefined, bsonType: date, given: [string mixed]\\n\\texternalId: Invalid type. Expected: type: undefined, bsonType: string, given: [integer int long number mixed]\"; code=\"SchemaValidationFailedRead\"; untrusted=\"read not permitted\"; details=map[]\n{\n \"_id\": {\n \"$oid\": \"611e4c5a290dbc5e94ec9bad\"\n },\n \"title\": \"Bar & Cocoa Chocolate Gift\",\n \"createdAt\": {\n \"$date\": {\n \"$numberLong\": \"1629375480456\"\n }\n },\n \"updatedAt\": {\n \"$date\": {\n \"$numberLong\": \"1654656290084\"\n }\n },\n \"status\": \"live\",\n \"externalId\": \"878138011\",\n \"metaData\": {},\n \"provider\": {\n \"$oid\": \"610454bf3abf792aeb29b38f\"\n },\n}\n", "text": "I have a realm graphql query that is returning an error in the React client I have, but when I try it out in the App Services admin UI, it works fine.and the error is:but the document looks like this(I’ve removed some of the irrelevant fields):So what the heck is up with that error that seems to be a lie?", "username": "Lukas_deConantseszn1" }, { "code": "", "text": "I figured it out. It was a problem with the joined/lookedup document “provider”.", "username": "Lukas_deConantseszn1" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Realm GraphQL Query returning error but for no apparent reason
2022-08-13T15:46:58.793Z
Realm GraphQL Query returning error but for no apparent reason
2,787
null
[ "aggregation", "java" ]
[ { "code": "mongodb-driver-sync:4.4.1BasicDBObjectDocumentBson groupStage = group(id(\"$item\", \"$producer\"), sum(\"totalQuantity\", \"$quantity\"), avg(\"averageQuantity\", \"$quantity\"));\n", "text": "I am using mongodb-driver-sync:4.4.1 driver. I want to do a grouping with multiple fields and I want to it to be done using only the new Aggregates Builders. The other answers on this site use BasicDBObject or Document but I don’t want that.Something like the code below:", "username": "khat33b" }, { "code": "", "text": "Didd you find an answer here?", "username": "Rahma_Javed" }, { "code": "/*\n * Requires the MongoDB Java Driver.\n * https://mongodb.github.io/mongo-java-driver\n */\n\nMongoClient mongoClient = new MongoClient(\n new MongoClientURI(\n \"mongodb://127.0.0.1:27017/?readPreference=primary&appname=MongoDB+Compass&directConnection=true&ssl=false\"\n )\n);\nMongoDatabase database = mongoClient.getDatabase(\"db\");\nMongoCollection<Document> collection = database.getCollection(\"coll\");\n\nFindIterable<Document> result = collection.aggregate(Arrays.asList(new Document(\"$group\", \n new Document(\"_id\", \n new Document(\"item\", \"$item\")\n .append(\"producer\", \"$producer\"))\n .append(\"totalQuantity\", \n new Document(\"$sum\", \"$quantity\"))\n .append(\"averageQuantity\", \n new Document(\"$avg\", \"$quantity\")))));\n", "text": "Hi @khat33b ,Are you reffering to something like below:You can use compass aggregation builder to generate the code of your favorite language.Thanks,\nDarshan", "username": "DarshanJayarama" } ]
How to groupby multiple fields using the Java MongoDB driver?
2022-01-27T12:25:34.047Z
How to groupby multiple fields using the Java MongoDB driver?
5,114
null
[ "connecting", "security" ]
[ { "code": "mongodb+srv://<username>:<password>@<my-cluster>-pri.rthhs.mongodb.net/<dbname>?retryWrites=true&w=majority\n", "text": "Hey!I’ve set up the peering connection between MongoDB Atlas and Googles “default” VPC and the connection is labeled as “active” on both ends.The ip range of the vpc is whitelisted in MongoDB Atlas.But my node hosted in google-app-engine still gets timed out when accessing the MongoDB.I use the connection url of mongodb atlas for peered connection in the form of (notice the “-pri”):Which part am i missing to establich the connection? I’ve even set up a google vpc connector but the problem persists.As soon as i delete my “allow all ip adresses” whitelist and only have the one for my peering CIDR the backend can’t connect to mongo atlas anymore.Thanks for any help!", "username": "Maximilian_Korner" }, { "code": "", "text": "The following helped me : explored “VPC network peering” -> , clicked on “Exported routes” and chose a “destination ip addresses” for the region, where both my appengine and mongodb atlas reside.\nIf this does not hep you - try to review “View Database Access History” in your MongoDB Atlas project and cluster. It should give you an idea what IP addresses accesses MongoDB, while whitelist for 0.0.0.0/0 is enacted.", "username": "Oleksandr_Iegorov" }, { "code": "", "text": "I’m struggling with this as well. And I’ve managed to lock app engine out entirely, regardless of whether or not I’ve whitelisted 0.0.0.0/0. Similar to yourself, both sides according to their respective IDEs are active. I don’t suppose you figured out what the issue was since then?", "username": "Stephen_Reilly" }, { "code": "", "text": ".It turns out that this isn’t available with Google’s app engine hosted in the standard environment. It only works with flex. 
It does work now though ", "username": "Stephen_Reilly" }, { "code": "vpc_access_connector:\n name: projects/PROJECT_ID/locations/us-central1/connectors/mongo-connector\n", "text": "For anyone who stumbles across this post trying to find the answer, here it is:For the App Engine Standard environment, follow this guide: cloud.google .com/community/tutorials/serverless-vpc-access-private-mongodb-atlas\nThen, in your app.yaml, specify the vpc access connector you created (the region of the connector must match the region of your App Engine, either us-central1 or europe-west1)cloud.google .com/vpc/docs/configure-serverless-vpc-access#all-other-runtimesFor the App Engine Flexible environment:\nYou still need the vpc network peering, but you don’t need serverless vpc access.\nFollow this guide: cloud.google .com/community/tutorials/serverless-vpc-access-private-mongodb-atlas\nONLY the “Configure MongoDB Atlas VPC peering”, \" Configure VPC Network Peering on Google Cloud\", and “Retrieve the connection string for your cluster” sections.\nThen, on the Network Access page of MongoDB, add the ip range “10.0.0.0/8” - this is the entire private ip range and includes connections from App Engine\nYou don’t need to change anything in app.yaml as long as you are using the default google cloud vpc network.", "username": "terren" }, { "code": "vpc_access_connector:\n name: projects/PROJECT_ID/locations/us-central1/connectors/mongo-connector\n", "text": "For anyone who stumbles across this post trying to find the answer, here it is:For the App Engine Standard environmentFollow this guide: Configure private access to MongoDB Atlas with Serverless VPC Access  |  Google Cloud Platform CommunityThen, in your app.yaml, specify the vpc access connector you created (the region of the connector must match the region of your App Engine, either us-central1 or europe-west1)Source: Configure Serverless VPC Access  |  Google CloudFor the App Engine Flexible environmentYou still need the vpc network peering, but you don’t need serverless vpc access.Follow this guide: Configure private access to MongoDB Atlas with Serverless VPC Access  |  Google Cloud Platform Community\nBut ONLY the “Configure MongoDB Atlas VPC peering”, \" Configure VPC Network Peering on Google Cloud\", and “Retrieve the connection string for your cluster” sections.Then, on the Network Access page of MongoDB, add the ip range “10.0.0.0/8” - this is the entire private ip range and includes connections from App Engine.You don’t need to change anything in app.yaml as long as you are using the default google cloud vpc network.", "username": "terren" }, { "code": "", "text": "Something that’s not clear in the guide is what projects are being referred to in the ’ Configure MongoDB Atlas VPC peering’ and the ’ Configure VPC Network Peering on Google Cloud’ sections.In the Configure VPC Network Peering on Google Cloud section it says:But I used the same project at the end of the previous step. If I’m not supposed to use the same project, what project am I supposed to use and where in the guide was this created?", "username": "Slope_Health" }, { "code": "", "text": "When you enable VPC peering in the mongoDB Atlas console, you are provided with a GCP project ID and VPC name that are generated by mongoDB. You can find them under the Peering tab of the Network Access section of the mongoDB Atlas console. Please let me know if this helps!", "username": "terren" }, { "code": "", "text": "Thanks for your quick reply @terren. 
That’s exactly the help i needed!", "username": "Slope_Health" } ]
Can't establish peering connection to Google App Engine
2020-12-14T17:56:04.008Z
Can&rsquo;t establish peering connection to Google App Engine
6,770
null
[ "kafka-connector" ]
[ { "code": "com.mongodb.kafka.connect.MongoSinkConnectorrecord MyObjectType {\n string id;\n string userId;\n}\nuserId_id{\n \"id\" : \"60a50547e578c87ac72f1042\",\n \"userId\" : \"60a50547e578c87ac72f1041\"\n}\n{\n \"_id\" : ObjectId(\"60a50547e578c87ac72f1041\")\n \"id\" : \"60a50547e578c87ac72f1042\",\n \"userId\" : \"60a50547e578c87ac72f1041\"\n}\n{ _id : {$oid: \"60a50547e578c87ac72f1041\"} }\"value.converter\": \"io.confluent.connect.avro.AvroConverter\",\n\"value.converter.schema.registry.url\":\"http://local-kafka-schema-registry-cp-schema-registry:8081\",\n\"value.converter.schemas.enable\": true,\n\"document.id.strategy\": \"com.mongodb.kafka.connect.sink.processor.id.strategy.ProvidedInKeyStrategy\",\n\"transforms\":\"createKey,RenameField_userId,HoistField_id\",\n\"transforms.createKey.type\":\"org.apache.kafka.connect.transforms.ValueToKey\",\n\"transforms.createKey.fields\":\"userId\",\n\"transforms.RenameField_userId.type\": \"org.apache.kafka.connect.transforms.ReplaceField$Key\",\n\"transforms.RenameField_userId.renames\": \"userId:oid\",\n\"transforms.HoistField_id.type\": \"org.apache.kafka.connect.transforms.HoistField$Key\",\n\"transforms.HoistField_id.field\": \"_id\"\n{\n \"_id\" : {\n \"oid\" : \"60a50547e578c87ac72f1041\"\n },\n \"id\" : \"60a50547e578c87ac72f1042\",\n \"userId\" : \"60a50547e578c87ac72f1041\"\n}\n", "text": "Hi,I’m trying to sink a Kafka topic using com.mongodb.kafka.connect.MongoSinkConnector and the messages follow this avro definition:What I want to do is to make the sink connector treat the userId attribute as the _id (of type ObjectId) in the upserted MongoDB document.i.e. given avro message:I want the below MongoDB documentI have made various attempts, e.g. to use SMTs to alter the incoming avro message to be similar to extended json format { _id : {$oid: \"60a50547e578c87ac72f1041\"} }The closest I’ve got is using the below connector configurationThen I get this document inserted into MongoDBSome notes:Any suggestions to accomplish this would be greatly appreciated!", "username": "Marcus_Wallin" }, { "code": "", "text": "You need to use DocumentIdAddr post processor. Looks like you are on the right track above. Not sure you need all those transforms to make it happen.", "username": "Robert_Walters" }, { "code": "", "text": "I have the some problem, and i cannot find a solution in Post-Processors, you can help me ?", "username": "Bob_La_tolda" } ]
How to convert a String field to ObjectId in MongoSinkConnector
2021-05-24T09:45:58.254Z
How to convert a String field to ObjectId in MongoSinkConnector
7,622
null
[ "queries", "dot-net", "compass" ]
[ { "code": "var sendDateFrom = DateTime.ParseExact(DateFrom, \"yyyy-MM-ddTHH:mm:ss.fff\",CultureInfo.InvariantCulture);\nfilterDateFrom = Builders<Account>.Filter.Gte(x => x.SubscriptionDate, sendDateFrom);\nvar sendDateTo = DateTime.ParseExact(DateTo, \"yyyy-MM-ddTHH:mm:ss.fff\", CultureInfo.InvariantCulture);\nfilterDateTo = filterDateFrom & Builders<Account>.Filter.Lte(x => x.SubscriptionDate, DateTo);\n", "text": "Hello,Im using mongoDb 1.32.6 and i had build a c# app in .net 6.0 to retrieve documents from MongoD.I particular i have a simply mask for ui and i receive startDate and endDate in my api.My schema isAccount:\npublic int Id { get; set; }\npublic string Name { get; set; }\npublic string Email { get; set; }\npublic DateTime SubscriptionDate { get; set; }in my db i have an account with:SubscriptionDate : “2022-01-15T00:00:00.000”Searching this item using filter mask from Compass returning me correctly this one.but if i create a filter with StartDate and EndDate passed from ui , i dont have anything returned.my c# code is:items = await AccountCollection.Find(filterDateTo.ToList();I send from ui :DateFrom=2022-01-01T00:00:00.000DateTo=2022-01-31T00:00:00.000But item is never returned.Thaks for support.", "username": "M_P1" }, { "code": "SubscriptionDate : “2022-01-15T00:00:00.000”\ndatetimeDateTimedatetimedatetimemongoshdb.coll.insertOne({SubscriptionDate: ISODate(\"2022-01-15T00:00:00.000Z\"\")})\nDateTimedatetime", "text": "Hi, @M_P1,Welcome to the MongoDB Community Forums. I understand that you’re having trouble filtering by dates with the .NET/C# Driver. You mention that you have an account with:This data indicates that the date is stored as a string and not a BSON datetime type. Since you passed in .NET DateTime structs to your query, these would be rendered in the MQL as BSON datetime types, not strings.If you have the flexibility to change your schema I would recommend storing the dates as BSON datetime. This can be accomplished using an insert such as the following in the mongosh shell:Alternatively you can store the data using your C# application and the object model that you’ve defined. As long as your property is of type DateTime, it will be stored as a BSON datetime in the database.There are more advanced options such as using $dateFromString in an aggregation pipeline to transform and query dates stored as strings.Hopefully this resolves your issue.Sincerely,\nJames", "username": "James_Kovacs" } ]
Filter between two dates
2022-08-11T15:35:08.955Z
Filter between two dates
4,613
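Two things are worth flagging for anyone replaying this thread. First, in the posted C# snippet the upper-bound filter passes the raw string DateTo instead of the parsed sendDateTo, so even with a corrected schema that comparison would still be string-versus-date. Second, once SubscriptionDate is stored as a BSON date rather than a string, the equivalent range query in mongosh looks like the sketch below (collection name assumed).

```js
// Range filter over a BSON date field: both bounds must be date values, not strings
db.accounts.find({
  SubscriptionDate: {
    $gte: ISODate("2022-01-01T00:00:00Z"),
    $lte: ISODate("2022-01-31T00:00:00Z")
  }
});
```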
null
[ "aggregation", "queries", "atlas-search" ]
[ { "code": "{\n \"_id\": ...,\n \"otherFields\": ...,\n \"positions\": [\n {\n \"x\": 1,\n \"y\": 15\n },\n {\n \"x\": 3,\n \"y\": 15\n },\n {\n \"x\": 3,\n \"y\": 9\n }\n ]\n}\n$matchpositionsx3{\n $match: {\n positions: {\n $elemMatch: {\"x\": 3}\n }\n }\n}\n$searchpositions$search{\n $search: {\n range: {\n path: 'x',\n gte: 3,\n lte: 3\n }\n }\n}\n$search$unwind$match$search", "text": "In native queries it is possible to query a document based on the contents of an array field. Take a document that has a field likeIn native queries you could do a $match like the following to retrieve all documents where an entry in positions has x of 3:Is a similar per-array-element query possible with Atlas $search syntax?If the positions sub-documents were in their own collection I know you could do a $search likebut this query doesn’t work on nested fields of an array (and I’m not here to ask about re-structuring the data). Also, due to the nature of $search I can’t first do an $unwind, and doing a $match after the $search would be a major hit to query speed.", "username": "Lucas_Burns" }, { "code": "$unwindelemMatch", "text": "So, the best option today is to have a new collection where you can $unwind the array that is used only for the search index.In the near future, we will release support for elemMatch style queries. If you would like to be notified of the feature’s availability, please vote on the issue here. Even though it is on the way, the best way to model data for most search use cases is to flatten it. What do the coordinates in position represent? Are they ordered? Could the be broken into sub-documents of the top-level document? These are the sorts of questions I ask myself as well.It appears that Atlas Search does not yet support nested documents in arrays like MongoDB\n\nhttps://docs.mongodb.com/v4.2/reference/operator/query/elemMatch/", "username": "Marcus" }, { "code": "elemMatchposition$search$match", "text": "Hey nice to hear that an elemMatch equivalent is on the way.The position documents here are just a simplified version of some data I need to access but don’t have control over the shape of. As for reshaping the data, I’m aware of many of the best practices for making this play nicer with queries but it’s unfortunately not something I have control over.Sounds like for now my best option is to take the hit when necessary and do a $search and then a $match", "username": "Lucas_Burns" }, { "code": "elemMatch", "text": "I’ll update this ticket once we release the equivalent of elemMatch", "username": "Marcus" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" }, { "code": "", "text": "Here is the feature: https://www.mongodb.com/docs/atlas/atlas-search/embedded-document/", "username": "Elle_Shwer" } ]
$search aggragation $elemMatches equivalent
2022-03-08T19:55:34.399Z
$search aggragation $elemMatches equivalent
3,389
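With the feature linked in the last reply, the per-element query from the question can now be written directly, roughly as below. It assumes the search index maps positions with the embeddedDocuments field type, which the operator requires; treat the snippet as a sketch against that assumption.

```js
// Atlas Search equivalent of $elemMatch on positions.x, using the embeddedDocument operator.
// Assumed index mapping:
//   { "mappings": { "dynamic": false, "fields": {
//       "positions": { "type": "embeddedDocuments", "dynamic": true } } } }
db.collection.aggregate([
  {
    $search: {
      embeddedDocument: {
        path: "positions",
        operator: {
          range: { path: "positions.x", gte: 3, lte: 3 }
        }
      }
    }
  }
]);
```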
null
[ "aggregation", "queries" ]
[ { "code": "gender: 1, email: 1, location: 1, fullName: {\n\n $concat: [{ $toUpper: { substrCP: [\"$name.first\", 0, 1] } },\n\n { $substrCP: [\"name.first\", 1, { $subtract: [{ $strLenCP: \"$name.first\" }, 1] }] },\n\n \" \", { $toUpper: { substrCP: [\"$name.last\", 0, 1] } },\n\n { $substrCP: [\"name.last\", 1, { $subtract: [{ $strLenCP: \"$name.last\" }, 1] }] }]\n\n}\n", "text": "db.persons.aggregate([{ $project: { _id: 0, name: 1, email: 1, location: { type: “Point”, coordinates: [\"$location.coordinates.longitude\", “$location.coordinates.latitude”] } } },{$project: {}}]).pretty();…\n…\n…\noutput\nplanexecutor error during aggregation :: caused by :: can’t convert from bson type object to string\n.\n.\nplease Give a solution", "username": "Puneeth_Reddy" }, { "code": "$concat: [{ $toUpper: { substrCP: [\"$name.first\", 0, 1] } },$substrCP", "text": "$concat: [{ $toUpper: { substrCP: [\"$name.first\", 0, 1] } },It looks like you’re missing the $ in front of substrCP. This is missing in both the first and last name sections. Try changing that and let us know if you have any other issues.", "username": "Doug_Duncan" }, { "code": "", "text": "thank you so much sir…im a beginner at this so I didn’t know the perfect reason for the error it got solved ", "username": "Puneeth_Reddy" }, { "code": "", "text": "Glad that worked out for you Puneeth. Keep asking questions as you progress on your journey with MongoDB, and soon you’ll be helping those who are new on the path.", "username": "Doug_Duncan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Planexecutor error during aggregation :: caused by :: can't convert from bson type object to string
2022-08-12T12:12:53.123Z
Planexecutor error during aggregation :: caused by :: can’t convert from bson type object to string
5,235
null
[ "node-js", "mongoose-odm", "performance" ]
[ { "code": "", "text": "Hi there,We have a nodejs Api running with Express and connecting to MongoDb Atlas via Mongoose.So when the app starts up we create the db connection and we reuse that connection for all requests coming in on express routes.So for some reason if we start getting a lot of requests, meaning the more requests comes in the slower the db response time becomes.This is for all operations, Queries, Inserts and Updated all gets slow after a bunch of requests are completed.Could this be something we need to adjust in MongoDb Atlas, Or code issue?From What I read Pooling connections is the way to go. I think the problem is, all these requests to the db with the same connection, seem to be slowing down the requests.Please can someone help ?", "username": "Heinrich_Coetzee" }, { "code": "", "text": "Hi, were you able to fix this problem?", "username": "Tobenna_Kelvin_Abanofor" } ]
Nodejs, Mongoose reusing connection slows db down
2021-02-03T18:29:41.685Z
Nodejs, Mongoose reusing connection slows db down
3,850
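One concrete thing to check for the slowdown described above is the connection pool: a single shared Mongoose connection multiplexes every request over one pool, and if the pool is small, concurrent requests queue behind each other. A hedged sketch of raising it is below; the value 50 is arbitrary, not a recommendation, and on shared Atlas tiers the cluster's own connection and throughput limits can be the real bottleneck.

```js
// Reuse one Mongoose connection, but give the underlying driver a larger pool.
const mongoose = require("mongoose");

mongoose.connect("mongodb+srv://<username>:<password>@cluster0.example.mongodb.net/mydb", {
  maxPoolSize: 50, // allow up to 50 concurrent sockets to the cluster
  serverSelectionTimeoutMS: 5000 // fail fast instead of hanging when the cluster is unreachable
});

// Express route handlers keep using the shared models as before, e.g.:
// const doc = await MyModel.findById(id);
```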
https://www.mongodb.com/…0_2_1024x611.png
[ "queries" ]
[ { "code": "", "text": "Hi,While inserting records with backslash(\"\"), I am getting “Unable to parse JSON: Expecting 4 hex digits, at (2,33) from Robo 3T -1.4 IDE”Is it a limitation? If yes then any workaround for inserting record with a backslash\nMongo_Insert_SQL1608×961 38.5 KB\nThanks & Regards,\nSaravanan A.G", "username": "Saravanan_Alagumalai_Ganesan" }, { "code": "\\\\u{\n \"notesJsonErrorFilePath\": \"abc\\\\uvw\"\n}\nfind()\\t\\n\\r\\\"", "text": "Hi @Saravanan_Alagumalai_Ganesan and welcome to the MongoDB community forums.The problem is not with 3T, but with Javascript and string interpretation. The backslash character (\\) starts an escape sequence. Because of this, the MongoDB engine thinks you’re try to store the character \\u which it is having problems with. What you want to do, if you’re trying to store a backslash in the value is to actually put two backslashes in a row as follows:This will store the correct value, although it might not look like it when you return the data with a find() call:\nimage764×510 48.3 KB\nStoring escape sequences does have it’s advantages as you can store tabs (\\t), new lines (\\n), carriage returns (\\r) and embed quotes (\\\") in a string.Hopefully this helps you out.", "username": "Doug_Duncan" }, { "code": "", "text": "Thanks @Doug_Duncan.If I try to insert the below record\n{\n“notesJsonErrorFilePath” : “abc\\cba”\n}then the value gets stored without backslash as below\n{\n“_id” : ObjectId(“62f63089c3620c7a68ab811d”),\n“notesJsonErrorFilePath” : “abccba”\n}and the global search also works irrespective of backslash. Any suggestionsSearch Query\ndb.getCollection(‘MyCollection’).find({ $text: { $search: ““ab\\ccba”” } })\ndb.getCollection(‘MyCollection’).find({ $text: { $search: ““abc\\cba”” } })\ndb.getCollection(‘MyCollection’).find({ $text: { $search: ““abccba”” } })Result\n/* 1 */\n{\n“_id” : ObjectId(“62f631c1c3620c7a68ab834c”),\n“notesJsonErrorFilePath” : “abccba”\n}Thanks & Regards,\nSaravanan A.G", "username": "Saravanan_Alagumalai_Ganesan" }, { "code": "\\c\\c\\t\\t\\\\\\\\u\\x", "text": "Since \\c isn’t a recognized escape sequence in Javascript, the \\ is dropped and the c is stored as normal. This will not happen with all characters however. For example, \\t will be stored as \\t and you will have an embedded tab in the string. As I stated before, if your value contains a \\ character, you will want to store the value with \\\\.As for the \\u character, this signifies that you’re typing a unicode character and expects a four digit hex value. The \\x character signifies a two digit hex value. Sorry for not calling that out in my earlier post.You can read more on javascript escape sequences if your interested.", "username": "Doug_Duncan" } ]
Inserting a record with backslash is throwing exception from Robo3T
2022-08-11T09:35:54.159Z
Inserting a record with backslash is throwing exception from Robo3T
2,881
null
[ "data-modeling" ]
[ { "code": "", "text": "Hi ,\nI need to create database for my school project application, but data I need to store in DB are referring to each other so I would really appreciate some help with modeling this DB.DB Connections:Thank you for your help.", "username": "Chickenbiscuito_Dev" }, { "code": "", "text": "Here are some resourcesA summary of all the patterns we've looked at in this seriesGet a summary of the six MongoDB Schema Design Anti-Patterns. Plus, learn how MongoDB Atlas can help you spot the anti-patterns in your databases.AndDiscover our MongoDB Database Management courses and begin improving your CV with MongoDB certificates. Start training with MongoDB University for free today.", "username": "steevej" } ]
What is the best way to model this database?
2022-08-11T11:29:59.235Z
What is the best way to model this database?
1,266
null
[ "serverless" ]
[ { "code": "", "text": "Hi, i have similar trouble like this:When the serverless database had more than 5 minutes idled, the connection lost and reconnect, and take’s 1 minute delay to start again.At free cluster I didn’t have this situation.Thank you for your help", "username": "ZoomPixels_SAS" }, { "code": "", "text": "This is definitely not expected… in fact one of the key architectural advantages of how Atlas serverless instances are built is to not have to deal with cold start issues. Would you mind opening a support case?", "username": "Andrew_Davidson" } ]
Timeout at Serverless cluster
2022-08-10T00:33:43.026Z
Timeout at Serverless cluster
2,824
null
[ "queries", "indexes" ]
[ { "code": "\ndb.collection.find({\"$and\": [\n {\n \"Obj.status\": {\n \"$ne\": \"REJECTED\"\n }\n },\n {\n \"show\": {\n \"$ne\": false\n }\n },\n {\n \"priority\": {\n \"$ne\": true\n }\n },\n {\n \"$or\": [\n {\n \"pc\": {\n \"$exists\": false\n }\n },\n {\n \"pc\": {\n \"$in\": [\n \"VALUE 1\",\n \"VALUE 2\",\n \"VALUE 3\"\n ]\n }\n }\n ]\n },\n {\n \"$or\": [\n {\n \"pcv\": {\n \"$exists\": false\n }\n },\n {\n \"pcv\": \"VALUE 4\"\n },\n {\n \"pcv\": \"VALUE 5\",\n \"pcvs\": {\n \"$ne\": \"VALUE 6\"\n }\n },\n {\n \"pcv\": \"VALUE 7\"\n }\n ]\n },\n {\n \"$or\": [\n {\n \"uo\": {\n \"$exists\": false\n }\n },\n {\n \"uo\": {\n \"$elemMatch\": {\n \"status\": \"COMPLETED\"\n }\n }\n },\n {\n \"isLS\": true,\n \"isLSO\": true,\n \"isDeleted\": {\n \"$ne\": true\n }\n }\n ]\n },\n {\n \"category\": {\n \"$exists\": false\n }\n },\n {\n \"type\": {\n \"$in\": [\n \"V\",\n \"I\"\n ]\n }\n }\n]\n})\n", "text": "Hello Community,I have one query which is the combination of $and and $or.e.g.Above query is just a sample, actually I have 7 $or sections inside the $and.I saw that there is a document available for $or query, and it’s recommended to have separate index on each of the columns used in $or.\nRef: https://www.mongodb.com/docs/manual/reference/operator/query/or/#or-clauses-and-indexesI have few questions here:Any help/tip to improve and index creation would be highly appreciated.Thank you,\nFaiz", "username": "Faiz_ul_haque" }, { "code": "", "text": "Any help or suggestion?", "username": "Faiz_ul_haque" }, { "code": "{$or: [ \n { something }\n { something-else }\n]]\n$or$and$or", "text": "Are any of these filters selective? If not then I’m not sure you’ll be able to create a good index.The docs are referring to top level $or clause where if your query isthen having separate indexes on something and something-else can both be used since $or is a union of those two results sets. What you have is a complex $and with some of the branches being $or groups. The best thing you can do is figure out which of the top level conditions are most selective and index those. Unfortunately they all seem to be range queries - if some of them are point queries then you can more easily combine them into a compound index.Asya", "username": "Asya_Kamsky" }, { "code": "", "text": "Thank you @Asya_KamskyCan you please help me understand what do you mean by “selective filters”?All of the $or groups always present in the query. Some of them have $in query where value can be changed. But attribute will be there.", "username": "Faiz_ul_haque" }, { "code": "", "text": "When you have a large number of documents a selective filter would narrow them down to very few, a less selective filter might match a majority of them.", "username": "Asya_Kamsky" }, { "code": "", "text": "Got your point. Thanks @Asya_KamskyUnfortunately, I don’t have such attrs/filter in the that query.", "username": "Faiz_ul_haque" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How indexing works for the combination of $and and $or
2022-08-08T18:53:45.639Z
How indexing works for the combination of $and and $or
2,869
null
[ "time-series" ]
[ { "code": "unique", "text": "Sensors check-in periodically, but network connectivity issues may cause them to check-in with the same data more than once.MongoDB does not allow the unique property on secondary indexes for timeseries collections (MongoDB 5.0). Timeseries LimitationsIn addition, calculations need to be done on the data (preferably using aggregations) that involve counting the number of entries, which will be inaccurate if there are duplicates. Not to mention it bloats the database and is just messy.Is there any way to prevent duplicate entries in a MongoDB Timeseries collection?", "username": "Jordan_Baczuk" }, { "code": "", "text": "I’m wondering the same thing, has anyone found a solution?", "username": "Darren_Haba" }, { "code": "// Usual inserted document that produces duplicates\ninsert = { \"sensor\":1 ,\n \"timestamp\":2 ,\n \"mesure\":\"pressure\" ,\n \"value\":10 }\n\n// Modified into a query that would define you uniqueness\nquery = { \"sensor\" : 1 , \"timestamp\" : 2 , \"mesure\" : \"pressure\" }\n\n// With the value\nvalue = { \"value\":10 }\n\n// Also replace the following insert that produces a new duplicate\n// every time it is called\nc.insert( insert ) ;\n\n// into an update with upsert:true that will insert only once\n// no matter how often it is called.\nc.update( query , value , { upsert:true } )\n", "text": "Can’t you use an update with upsert:true?Something along the way:I am not yet familiar with the new time series collections. I do not know if it is applicable or not. But I feel it could.", "username": "steevej" }, { "code": "", "text": "I’m having the same issue here. Any solutions other than upsert?", "username": "Gorkem_Erdogan" }, { "code": "", "text": "Any solutions other than upsert?What is wrong with upsert?", "username": "steevej" }, { "code": "", "text": "Hello man,Well I dont think time series collections allow us to use update operations. It is append only.", "username": "Gorkem_Erdogan" }, { "code": "", "text": "Insert only would make sense in order to improve performance of the main use‑case for time series.But it would prevent some others where you would want to update your time series entries with some post inserting data.This simply confirms thatI am not yet familiar with the new time series collections.and that I must start learning more.", "username": "steevej" }, { "code": "some_timeseriesdb.some_timeseries.aggregate([\n // ...\n {\n $group: {\n _id: {\n username: '$metadata.username',\n media_id: '$metadata.media_id',\n timestamp: {\n $dateTrunc: {\n date: '$timestamp',\n unit: 'day',\n startOfWeek: 'monday',\n binSize: 1,\n },\n },\n },\n // ...\n },\n },\n ///\n])\n", "text": "While you cannot prevent entries with duplicated data. You can query/aggregate the data in such a way that replicated data is ignored.Below is some aggregation pipeline on the some_timeseries Time Series collection", "username": "sergeimeza" }, { "code": "", "text": "On the same $group stage you could pick up the max, min, avg etc for a given entry.", "username": "sergeimeza" }, { "code": "", "text": "Well I dont think time series collections allow us to use update operations.You are absolutely right. I should have followed the link provided in the first post (https://docs.mongodb.com/manual/core/timeseries/timeseries-limitations/)\nand I would have read:Time series collections only support insert operations and read queries. 
Updates and manual delete operations result in an error.", "username": "steevej" }, { "code": "", "text": "It appears that the limitations imposed on time-series collections are temporary because in the upcoming version MongoDB 5.1, we can perform some delete and update operations. I’m guessing time-series might support upsert and the unique property on secondary indexes in future releases.I came up with my own method to ensure data is never retrieved from an API and accidentally inserted twice, which was my original problem.I’m still learning the aggregation pipeline, but @sergeimeza way of creating a query that ignores duplicates is probably the best solution for now.Thanks for all the help.", "username": "Darren_Haba" }, { "code": "$group", "text": "Hi there,I was hoping to see the support if the Unique indexes in time series but apparently it is not there yet: https://www.mongodb.com/docs/manual/core/timeseries/timeseries-limitations/#std-label-timeseries-limitations-secondary-indexesThe $group solution looks like a workaround, it doesn’t really avoid duplicates and adds constraints to the clients who acess the collection.Anyone has any other suggestion?", "username": "Hector_Valverde" } ]
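A condensed version of the read-side deduplication suggested above: group on the metadata plus a truncated timestamp so repeated check-ins collapse into one bucket. The field names are illustrative only, and $dateTrunc needs MongoDB 5.0 or later:

db.sensorReadings.aggregate([
  {
    $group: {
      _id: {
        sensor: "$metadata.sensorId",
        minute: { $dateTrunc: { date: "$timestamp", unit: "minute" } }
      },
      value: { $first: "$value" },   // or $avg / $max, depending on what a duplicate means here
      samples: { $sum: 1 }           // how many raw (possibly duplicated) entries were grouped
    }
  }
]);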
How to prevent duplicates in mongodb timeseries collection
2021-10-13T21:56:45.598Z
How to prevent duplicates in mongodb timeseries collection
17,001
null
[ "queries", "java", "spring-data-odm" ]
[ { "code": "public class SessionGroupEntity {\n private ObjectId id;\n private String tenantId;\n private List<SessionEntity> sessions;\n}\n\npublic class SessionEntity {\n String sessionId;\n private Instant creationTime;\n private SessionStatus status;\n private List<BookmarkEntity> bookmarks;\n}\n\npublic class BookmarkEntity {\n private String bookmarkId;\n private Instant timestamp;\n private String title;\n private String description;\n}\n public boolean addBookmark(String tenantId, String sessionId, String sessionGroupId, BookmarkEntity entity) {\n\n Update update = new Update()\n .push(\"sessions.$[session].bookmarks\", entity)\n .filterArray(Criteria.where(\"sessions.sessionId\").is(sessionId));\n\n Query query = queryByTenantIdSessionIdAndSessionGroupId(tenantId, sessionId, sessionGroupId);\n UpdateResult status = mongoTemplate.updateFirst(query, update, SessionGroupEntity.class);\n return status.getModifiedCount() > 0;\n }\n", "text": "Hi,I have a MongoDB collection containing entities of type SessionGroupEntity.Each sessionGroup contains Sessions,\nand each session can contain Bookmarks.\nNote that sessionGroup ids are unique across the entire collection.\nSessionIds are unique across all sessionGroups.Following is a pseudo model:Initially we populate the collection with a SessionGroup with one or more Sessions in it,\nyet without any Bookmarks in any of the sessions.Later we would like to add bookmarks into a specific session within a specific sessionGroup.We are using Spring data MongoTemplate, and MongoDB 4.0.2\nFollowing is the code I run:Result:The update fails with an exception:com.mongodb.MongoWriteException: No array filter found for identifier ‘session’ in path ‘sessions.$[session].bookmarks’\"}Can somebody point out what I am doing wrong?Thanks in advance!", "username": "Tsiyona_Dershowitz" }, { "code": " private Query queryByTenantIdSessionIdAndSessionGroupId(String tenantId, String sessionId, String sessionGroupId) {\n return Query.query(\n Criteria.where(\"_id\").is(new ObjectId(sessionGroupId))\n .and(\"sessions.sessionId\").is(sessionId)\n .and(\"tenantId\").is(tenantId));\n }\n", "text": "I forgot to add the query code:", "username": "Tsiyona_Dershowitz" }, { "code": "Update update = new Update()\n .push(\"sessions.$[session].bookmarks\", entity)\n .filterArray(Criteria.where(\"session.sessionId\").is(sessionId));```", "text": "The issue is resolved. There was a typo. The “$push” used “$[session]” while the arrayFilter used “sessions”.\nA fix:", "username": "Tsiyona_Dershowitz" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Inserting an element into an Array within an Array in a MongoDB Collection
2022-08-11T10:02:43.349Z
Inserting an element into an Array within an Array in a MongoDB Collection
4,218