Dataset columns:
  image_url: string (length 113 to 131) or null
  tags: sequence
  discussion: list
  title: string (length 8 to 254)
  created_at: string (length 24)
  fancy_title: string (length 8 to 396)
  views: int64 (73 to 422k)
null
[ "java", "connecting", "atlas" ]
[ { "code": "", "text": "Hello,I am using MongoDB Atlas with my Java application. Currently, I am using MongoDB driver version 4.2.0 and whenever I try to insert a document into my collection I receive this exception:com.mongodb.MongoTimeoutException: Timed out after 30000 ms while waiting for a server that matches com.mongodb.client.internal.MongoClientDelegate$1@4e41089d. Client view of cluster state is {type=REPLICA_SET, servers=[{address:27017=cluster0-shard-00-01.rnnax.mongodb.net, type=UNKNOWN, state=CONNECTING, exception={com.mongodb.MongoSocketWriteException: Exception sending message}, caused by {javax.net.ssl.SSLHandshakeException: extension (5) should not be presented in certificate_request}}, {address:27017=cluster0-shard-00-00.rnnax.mongodb.net, type=UNKNOWN, state=CONNECTING, exception={com.mongodb.MongoSocketWriteException: Exception sending message}, caused by {javax.net.ssl.SSLHandshakeException: extension (5) should not be presented in certificate_request}}, {address:27017=cluster0-shard-00-02.rnnax.mongodb.net, type=UNKNOWN, state=CONNECTING, exception={com.mongodb.MongoSocketWriteException: Exception sending message}, caused by {javax.net.ssl.SSLHandshakeException: extension (5) should not be presented in certificate_request}}]\nat com.mongodb.internal.connection.BaseCluster.createTimeoutException(BaseCluster.java:404)\nat com.mongodb.internal.connection.BaseCluster.selectServer(BaseCluster.java:123)\nat com.mongodb.internal.connection.AbstractMultiServerCluster.selectServer(AbstractMultiServerCluster.java:54)\nat com.mongodb.client.internal.MongoClientDelegate.getConnectedClusterDescription(MongoClientDelegate.java:147)\nat com.mongodb.client.internal.MongoClientDelegate.createClientSession(MongoClientDelegate.java:100)\nat com.mongodb.client.internal.MongoClientDelegate$DelegateOperationExecutor.getClientSession(MongoClientDelegate.java:277)\nat com.mongodb.client.internal.MongoClientDelegate$DelegateOperationExecutor.execute(MongoClientDelegate.java:201)\nat com.mongodb.client.internal.MongoCollectionImpl.executeSingleWriteRequest(MongoCollectionImpl.java:1048)\nat com.mongodb.client.internal.MongoCollectionImpl.executeInsertOne(MongoCollectionImpl.java:498)\nat com.mongodb.client.internal.MongoCollectionImpl.insertOne(MongoCollectionImpl.java:482)\nat com.mongodb.client.internal.MongoCollectionImpl.insertOne(MongoCollectionImpl.java:476)\nat me.skhanal.StockDweebs.MongoDB.main(MongoDB.java:24)I have ensured that the credentials for the MongoClient is correct and that I am using the correct driver version. Does anyone know what is causing this issue?", "username": "Subodh_Khanal" }, { "code": "", "text": "Did you whitelist your client IP addr on Atlas via the “Network Access” settings?", "username": "Jack_Woehr" }, { "code": "", "text": "Yes my client IP address is currently whitelisted", "username": "Subodh_Khanal" }, { "code": "", "text": "Sorry to hear that you’re having trouble. What is the exact version of the JDK you are using? 
I wonder if you’re running into a bug like https://bugs.java.com/bugdatabase/view_bug.do?bug_id=JDK-8236039?Regards,\nJeff", "username": "Jeffrey_Yemin" }, { "code": "", "text": "I am currently using version 13.0.2.8", "username": "Subodh_Khanal" }, { "code": "", "text": "Please try with 13.0.3, as the JDK bug I linked to has been fixed in that patch.", "username": "Jeffrey_Yemin" }, { "code": "", "text": "Sorry for the late response, this fixed the issue!", "username": "Subodh_Khanal" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MongoTimeoutException
2020-12-13T22:20:45.816Z
MongoTimeoutException
16,808
null
[ "aggregation" ]
[ { "code": "db.sales.aggregate(\n [\n { $sort: { item: 1, date: 1 } },\n {\n $group:\n {\n _id: \"$item\",\n firstSalesDate: { $first: \"$date\" }\n }\n }\n ]\n)\n", "text": "Hi,\nI have the following pipeline as in the example at: https://docs.mongodb.com/manual/reference/operator/aggregation/first/But now, I want to return for each “item” the whole document that associates with the group resultsThanks,\nItzhak", "username": "Itzhak_Kagan" }, { "code": "$$ROOT{\n $group: {\n _id: \"$item\",\n firstItem: { $first: \"$$ROOT\" }\n }\n}\n$push$$ROOT{\n $group: {\n _id: \"$item\",\n allItems: { $push: \"$$ROOT\" }\n }\n}\n", "text": "Hello @Itzhak_Kagan Welcome back,You can get a document using $$ROOT,For array of documents using $push and $$ROOT,For more information you can checkout $gruop docs", "username": "turivishal" }, { "code": "", "text": "Thanks @Vishal Turi\nIt was a very fast and helpful reply.\nYou have a great knowledge.Thanks a lot,\nItzhak", "username": "Itzhak_Kagan" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to returns first sorted group document
2020-12-19T15:33:33.468Z
How to returns first sorted group document
2,774
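A minimal sketch of the full pipeline discussed in this thread, assuming the db.sales collection and the item/date fields from the linked example; the final $replaceRoot stage is optional and simply promotes the stored document back to the top level:

```js
db.sales.aggregate([
  // sort so that $first picks the earliest sale per item
  { $sort: { item: 1, date: 1 } },
  // keep the whole first document of each group via $$ROOT
  { $group: { _id: "$item", firstSale: { $first: "$$ROOT" } } },
  // optional: return each result in the original document shape
  { $replaceRoot: { newRoot: "$firstSale" } }
])
```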
null
[ "legacy-realm-cloud" ]
[ { "code": "[31791]: Error on connection to sync server: connect EHOSTUNREACH 10.130.28.70:7800\n[31791]: Connection to sync server closed. \n", "text": "Hi,Nothing is syncing, nor are any realms loading. This is for realm cloud / legacy.Realm.io pointed me here. Hoping I can get some support help.This error appeared on our Realm Cloud instance today:Seems that a realm cloud server is down.Help would be greatly appreciated. Our production apps are down as a result.Thanks.", "username": "Carter_Sprigings" }, { "code": "", "text": "Hi Carter,Thank you for posting your issue here and apologies for the inconvenience.Unfortunately some basic instances in our Legacy Realm Cloud platform experienced an issue with the shared sync worker stack. Your instance was one of them and with no sync worker available, it was no operational.Our SRE Team has been investigating this issue and we can confirm that this should be addressed and resolved now. We can confirm that your instance is up and running and fully accessible.Apologies again for the inconvenience.Kind Regards,\nMarco", "username": "Marco_Bonezzi" } ]
Realm Cloud (Legacy) Error - Error on connection to sync server: connect EHOSTUNREACH
2020-12-19T04:15:52.122Z
Realm Cloud (Legacy) Error - Error on connection to sync server: connect EHOSTUNREACH
4,227
null
[]
[ { "code": "", "text": "Hi there. So I am evaluating Realm for a new project. Right now I am testing its write speed on a mobile device. I am having trouble with it however. The issue is that after I write to disk and then fetch all the items I just wrote, the count is mismatched. I am writing 10k objects to the realm. After the write I am getting all the items in said realm and getting the count. The count always comes back as 1k instead of 10k. I think this is because of how I am getting the data. I am making an API call to our backend to get 1k objects. I then multiply thats items by 10 and store that in another collection. I iterate through all 10k items and change their ID to a random GUID so that each item in the 10k is unique. I then write all 10k items to disk and then get all the items back. But the count is always off. Can anyone help me understand why this is?", "username": "Sevren_Brewer" }, { "code": "", "text": "Which SDK are you using? And if you can post how you write and read the data it would be easier to figure out what could be wrong.", "username": "ChristanMelchior" }, { "code": "", "text": "Hey Christan, I actually figured out the cause of the problem right after posting. The problem was that I was copying the 1k objects into the larger collection, then when changing the ID to a GUID, it did it for all 10 copies of that object in the large collection. To fix the problem I preform a Clone on each object so that I am not referencing the same object multiple times. This can be closed out, thanks.", "username": "Sevren_Brewer" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Evaluating Realm DB, need help
2020-12-18T18:38:20.963Z
Evaluating Realm DB, need help
1,650
null
[ "python" ]
[ { "code": " Traceback (most recent call last):\n File \"manage.py\", line 21, in <module>\n main()\n File \"manage.py\", line 17, in main\n execute_from_command_line(sys.argv)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\n utility.execute()\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/core/management/__init__.py\", line 395, in execute\n self.fetch_command(subcommand).run_from_argv(self.argv)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/core/management/base.py\", line 328, in run_from_argv\n self.execute(*args, **cmd_options)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/core/management/base.py\", line 369, in execute\n output = self.handle(*args, **options)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/core/management/base.py\", line 83, in wrapped\n res = handle_func(*args, **kwargs)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/core/management/commands/migrate.py\", line 86, in handle\n executor = MigrationExecutor(connection, self.migration_progress_callback)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/db/migrations/executor.py\", line 18, in __init__\n self.loader = MigrationLoader(self.connection)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/db/migrations/loader.py\", line 49, in __init__\n self.build_graph()\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/db/migrations/loader.py\", line 212, in build_graph\n self.applied_migrations = recorder.applied_migrations()\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/db/migrations/recorder.py\", line 76, in applied_migrations\n if self.has_table():\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/db/migrations/recorder.py\", line 56, in has_table\n return self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor())\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/db/backends/base/introspection.py\", line 48, in table_names\n return get_names(cursor)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/django/db/backends/base/introspection.py\", line 43, in get_names\n return sorted(ti.name for ti in self.get_table_list(cursor)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/djongo/introspection.py\", line 47, in get_table_list\n for c in cursor.db_conn.list_collection_names()\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/pymongo/database.py\", line 863, in list_collection_names\n for result in self.list_collections(session=session, **kwargs)]\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/pymongo/database.py\", line 826, in list_collections\n _cmd, read_pref, session)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/pymongo/mongo_client.py\", line 1462, in _retryable_read\n read_pref, session, address=address)\n File 
\"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/pymongo/mongo_client.py\", line 1279, in _select_server\n server = topology.select_server(server_selector)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/pymongo/topology.py\", line 243, in select_server\n address))\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/pymongo/topology.py\", line 200, in select_servers\n selector, server_timeout, address)\n File \"/home/bluethink/Desktop/Abdulla/MongoDBProject/env/lib/python3.6/site-packages/pymongo/topology.py\", line 217, in _select_servers_loop\n (self._error_message(selector), timeout, self.description))\npymongo.errors.ServerSelectionTimeoutError: localhost:27017: timed out, Timeout: 30s, Topology Description: <TopologyDescription id: 5fdc9bf1e88fc8cd3e5e3310, topology_type: Single, servers: [<ServerDescription ('localhost', 27017) server_type: Unknown, rtt: None, error=NetworkTimeout('localhost:27017: timed out',)>]>\nDATABASES = {\n'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n},\n'myDB': {\n 'ENGINE': 'djongo',\n 'NAME': 'myDB',\n 'HOST': 'mongodb+srv://TrupinionSurvey:<password>@test-db-shard-00-01.jtj0h.mongodb.net/myDB?retryWrites=true&w=majority',\n 'USER': 'TrupinionSurvey',\n 'PASSWORD': 'TrupinionSurvey',\n}\n", "text": "Hello, Can Anyone Help me with this error,\nI am using Django to connect with MongoDB using the Djongo library and dnspythonBut I am getting this errorHere is my DB settings}When I try the command python manage.py migrate --database=myDB\nI get this error. I use --database=myDB because I am using two database.Also I tried this method but After using this method I got connectionTimeout Error after 21 second with dnspythonIf anybody can solve this thanks in advance", "username": "Abdulla_Ansari" }, { "code": "pymongo.errors.ServerSelectionTimeoutError: localhost:27017: timed out, Timeout: 30s, Topology Description: <TopologyDescription id: 5fdc9bf1e88fc8cd3e5e3310, topology_type: Single, servers: [<ServerDescription ('localhost', 27017) server_type: Unknown, rtt: None, error=NetworkTimeout('localhost:27017: timed out',)>]>", "text": "Hi @Abdulla_AnsariAre you sure you are loading your settings correctly? It says right here it is connecting to localhost:27017pymongo.errors.ServerSelectionTimeoutError: localhost:27017: timed out, Timeout: 30s, Topology Description: <TopologyDescription id: 5fdc9bf1e88fc8cd3e5e3310, topology_type: Single, servers: [<ServerDescription ('localhost', 27017) server_type: Unknown, rtt: None, error=NetworkTimeout('localhost:27017: timed out',)>]>", "username": "chris" }, { "code": "", "text": "Hi @Abdulla_Ansari,Have a closer look at the djongo README . It looks to me like the MongoDB URL should be provided as a ‘host’ value inside a ‘CLIENT’ dict, instead of a ‘HOST’ value at the top level. If that’s it, then I totally see why you made the mistake!Let me know if this fixes your problem.Mark", "username": "Mark_Smith" }, { "code": "", "text": "Some more documentation here which might help.", "username": "Mark_Smith" } ]
Getting Timeout Error While connecting to MongoDB atlas URI in Django Project
2020-12-18T13:14:55.462Z
Getting Timeout Error While connecting to MongoDB atlas URI in Django Project
14,836
null
[ "sharding", "indexes" ]
[ { "code": "", "text": "i need help on choosing a shard key in my mongodb sharded cluster.\nScenario My application is built on .net core 2.1. What it does is actually read websites and update details in the database. I’ve list of around 1 million websites which need to be crawled. The application just finds new pages which are not already in my database and saves them to database.Cluster and Server Details I have 3 shards (one primary and 2 secondary each) on dell r820 machines. Each machine having 512gb of RAM. And i run my application on 4 dell r620 machines, its mutithreadrd application.Database Structure: I have 2 databases mainly, one for all the home pages list and one for Pages.HomePages:_idURL (shard key)Pages:_idURL (shard key and unique indexed to avoid duplicate entries in collection)HomePageURLAlreadyRead (indexed field)So the application reads home pages and saves the inner pages from home page in Pages database. And the other part of application gets Pages from Pages database where AlreadyRead is 0, updates it to 1 and crawls it to save other pages found on that page in the database. But this part takes time as the data size grows, which i think is because of wrong shard key as it is set to URL field, and the command goes on all shards (i am assuming). I am saving URL without http or www. And if i set the HomePageURL as the shard key, it unevenly distributes the data across clusters ( which i already experienced, it was having 92% of data on one cluster).Cutting the long story short, cosidering the above scenario, what could be the best shard key? Or do i have to choose compound shard key?", "username": "Meva159" }, { "code": "HomePageURL : 1 ,\nURL : 1,\nAlreadyRead : 1\n", "text": "Hi @Meva159,Welcome to MongoDB community!It sounds like your queries are either on HomePageURL or on HomePageURL , URL, AlreadyRead .Based on this I believe you should consider having a shard key on HomePageURL, URL . while the index that the shardkey is using can potentially be a unique one based on:Indexes can be covering shard keys prefixes to be a key index. This could be unique.Please note that re sharding a collection is a painful task. 4.4 only allows refine a shardkey by adding fields but it can be done only forward.To fully reshard a collection you will need to:Best", "username": "Pavel_Duchovny" }, { "code": "", "text": "HomePageURL, URL@Pavel_Duchovny Thanks for replying with your valuable feedback. basically my queries is based on AlreadyRead. I pick up the URL from PagesDB where AlreadyRead is 0, update it to 1 and read its HTML. And save the new pages found from that page in PagesDB with AlreadyRead set as 0. I’ve a unique index on URL field to avoid duplicate entries in the PagesDB.\nWhat I’ve read in mongod documentation, it seems I should have a compound shard key on HomePageURL, URL and possibly some other filed maybe AppliationNumber, as my application runs or multiple servers. Do you think choosing such shard key and applying unique index on URL field will solve my problem ?", "username": "Meva159" }, { "code": "", "text": "Hi @Meva159,So if your queries on AlreadyRead : 0 perhaps you can index it seperately from your shard key as it will be executed as a scattered query anyhow. 
This index might be partial I think if the shard key cannot answer your queries it should be designed to allow the best equal distribution and avoid hot sharding for your writes therefore it sounds like the compound key might be right.Uniqueness can be enforced across shards only when the shard key index is a unique one.Perhaps you can have a unique shardkey on just url but use a hash function on it.Best\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "@Pavel_Duchovny ,\nThanks once again for taking time out to write with your valuable feedback.\nI’ve already Indexed AlreadyRead. what I am asking is, actually when the application saves new pages in PagesDB, it gets slow as the data size grows, as the shard key is only on URL field. and it seems, the query hops on multiple shards to write the data, making it slower.\nso will it fix this problem if I make the shard key as follows\nURL:1, HomePageURL: 1, ApplicationNumber: 1\nas my application runs on difference servers and I can include it in my shard key.\nNow the queries which will run will be as follows\nselect URL from PagesDB where AlreadyRead is 0 and ApplicationNumber is 1\nAnd while saving the new pages in PagesDB, the application will pass URL,HomePageURL and ApplicationNumber in order to target the specific shard.This is what I am thinking right now, your feedback on this will be much appreciated.", "username": "Meva159" }, { "code": "", "text": "Hi @Meva159,To optimise write workload you should consider adding HomePageURL and ApplicationNumber , but this is possible only on 4.4 server with FCV 4.4.I guess that if writes will be evenly distributed it will make queries eventually run faster as more resources will be available.Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "@Pavel_Duchovny\nWouldn’t that unevenly distribute the data across the shards? I previously created ShardKey on HomePageURL, but it created hot shards with some shards having more than 90% of the data. As my application is designed such that a specific website is bind to specific Application. Means if an application picks up a HomePage (say abc.com) from HomePagesDB, that particular application will read all the Pages of that specific website (abc.com/about , abc.com/contact, abc.com/careers and so on… ).\nAlso I am using Mongodb V4.2.9 right now, but I can upgrade to 4.4.\nI am actually new to MongoDB, still learning its basics. so if I upgrade to 4.4, and chose to make a compound shard key, going through the documentation, it seems that I first need to create a compound index, and then shard the collection based on the compound index name. am I correct in interpreting it ?", "username": "Meva159" }, { "code": "", "text": "Hi @Meva159,Ok so having just HomePageURL does not make sense as it will create a hot shard.However, adding ApplicationNumber and PageHomeURL to URL should create more split points and randomise the write access to different shards.Refining shard keys are only available in 4.4 which means adding shard key fields.You currently do not have an ability to completely reshard yhe collection and this can be done only by exporting recreating and importing. 
Which is a painful process that we are working in improving.To avoid hot shards just by using HomePageURL is by defining it as hashed shard key , but this still means resharding the whole collection .Let me know if this clear things up.Read more here:Best\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "@Pavel_Duchovny\nOnce again many thanks for your feedback. so I got your point and as I said earlier, I can upgrade to 4.4 and open to reshard my collection.\nLast thing, does it make sense if I create a compound key on HomePageURL as hashed with URL and ApplicationNumber?", "username": "Meva159" }, { "code": "", "text": "Hi @Meva159,This only make sense if you this combination is monotonically increasing:\nhttps://docs.mongodb.com/manual/core/sharding-shard-key/#monotonically-changing-shard-keysIf not then I don’t see a point in creating a compound hash key. Also this is available in 4.4 as well.Best\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "thanks. creating a compound shard key solved the problem \none more thing, is it necessary to create indexes separately on the fields used in the compound shard key ?", "username": "Meva159" }, { "code": "", "text": "Hi @Meva159,The index will be created if the collection is empty. Otherwise I suggest to build indexes on a rolling or background methods.Best\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Suggestions Choosing Shard Key
2020-12-13T19:20:28.460Z
Suggestions Choosing Shard Key
2,935
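For reference, a sketch of how the compound shard key agreed on in this thread could be declared; the "crawler" database and "Pages" collection names are placeholders, and the supporting index must exist before sharding a non-empty collection:

```js
// placeholder names; adjust "crawler" and "Pages" to the real database/collection
const crawlerDb = db.getSiblingDB("crawler")
sh.enableSharding("crawler")

// the supporting index has to exist before sharding an existing collection
crawlerDb.Pages.createIndex({ HomePageURL: 1, ApplicationNumber: 1, URL: 1 })

sh.shardCollection("crawler.Pages", { HomePageURL: 1, ApplicationNumber: 1, URL: 1 })
```

As noted above, a unique index on URL alone cannot be enforced on the sharded collection; any unique index there must include the full shard key as a prefix.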
null
[ "app-services-user-auth" ]
[ { "code": "", "text": "Hi everybody,I am trying to figure out how to do a simple password change, which I thought should be a pretty basic task. However, I can’t find any documentation (imho realm really lacks useful docu) on how to do this. Custom function was the way to go I thought as I needed a custom mail for the password reset anyway. Inside the function I have username, password and the token stuff, which should be everything I need but I can’t find any source on how to call the required password change/reset function from within the custom function.\nI hope you can help me out.Best regards and stay healthy!", "username": "Daniel_Rollenmiller" }, { "code": "passwordpasswordcallResetPasswordFunction", "text": "Hey Daniel - you should be able to call the reset password function with password as the parameter. By returning either “pending” or “success” as return string, the password will automatically be reset to the string passed in the password parameter of callResetPasswordFunction.", "username": "Sumedha_Mehta1" }, { "code": "", "text": "But the questions for me is, how do I call the password reset function within a custom function in Realm?\nThe use case I want to implement is: Customer enters old password as well as new password in the web application, then everything needed is send to the backend and probably validated there as well and then the new password is set for the user. Basic password change.", "username": "Daniel_Rollenmiller" }, { "code": "await app.emailPasswordAuth.callResetPasswordFunction(email, password, args);", "text": "Daniel - there is no way to call a password reset function within a custom function in Realm. You must call the password reset function directly, like so:await app.emailPasswordAuth.callResetPasswordFunction(email, password, args); if the call to this function returns “success” or “pending”, the password will reset automatically based on the value you passed in.We don’t currently have a way to verify a user’s old password within this function but it is something we are looking into doing in the near term.", "username": "Sumedha_Mehta1" }, { "code": "", "text": "Thanks for your reply.\nNow I understand and returning success seems to work for password change!\nHowever, now I am unsure about “pending”, because that’s what we return in case we send an email for a password reset, are you sure that this also sets the password?", "username": "Daniel_Rollenmiller" }, { "code": "resetPasswordawait app.emailPasswordAuth.resetPassword(token, tokenId, \"newPassw0rd\");resetPassword", "text": "Hey Daniel - you’re right, that’s my mistake. You have to then call resetPassword from the SDK with the token and tokenIdlike so - await app.emailPasswordAuth.resetPassword(token, tokenId, \"newPassw0rd\");You can find the more detail about resetPassword here - https://docs.mongodb.com/realm/web/manage-email-password-users#complete-a-password-reset", "username": "Sumedha_Mehta1" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Password change with realm
2020-12-11T08:51:38.192Z
Password change with realm
4,926
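A short realm-web style sketch of the two calls quoted in this thread; the app id, email, password, token and tokenId values are all placeholders:

```js
import * as Realm from "realm-web";

const app = new Realm.App({ id: "<your-realm-app-id>" });

async function changePassword(email, newPassword, token, tokenId) {
  // runs the custom password reset function; if it returns "success",
  // the password is reset immediately to newPassword
  await app.emailPasswordAuth.callResetPasswordFunction(email, newPassword);

  // only needed when the function returned "pending" and sent an e-mail:
  // finish the reset with the token/tokenId from that e-mail's link
  if (token && tokenId) {
    await app.emailPasswordAuth.resetPassword(token, tokenId, newPassword);
  }
}
```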
null
[ "aggregation" ]
[ { "code": "{\n $cond: {\n if: { ‘notes.title’: { $exists: true } },\n then: {\n $unset: {\n ‘notes.highlights’: true,\n }\n },\n else: {\n $unset: {\n notes: true \n }\n },\n }\n}\n{\n $unset: {\n $cond: {\n if: { ‘notes.title’: { $exists: true } },\n then: { ‘notes.highlights’: true },\n else: { notes: true },\n }\n }\n}\n", "text": "Hi!I am new to $cond and I just can’t get things to work or know if what I want to do is possible. I would like to do one of two things:Thank you ", "username": "Scott_Wager" }, { "code": "$unsetnotesnotes: {\n title: \"some text\",\n highlights: \"some value\"\n}\nif the field `notes.title` exists, \nthen 'notes.highlights' = true // setting a boolean true (or is it remove this field)\nelse notes = true // setting a boolean true (or is it remove this field)\n", "text": "Hello @Scott_Wager, the $unset update pipeline stage is used to remove (or exclude) field(s) from the document in an update operation. I’d like some clarification about what you had posted.Assuming the field notes is:The pseudo-code I am reading is:setting a boolean true (or is it remove this field) - what you are trying to say is not clear. Can you post a sample document with the two possible outcomes?", "username": "Prasad_Saya" }, { "code": "{\n $unset: { 'notes.highlights': true },\n}\n{\n $unset: { 'notes': true },\n}\n", "text": "Hi, thank you for the quick reply! I apologise for the confusion, I am removing fields.This operation removes the ‘highlights’ key from the notes objectWhile this operation removes the ‘notes’ key from the documentIf a ‘title’ key exists then I’d like the first operation to run, else, I’d like the second operation to run.What I am trying to avoid: If ‘highlights’ is the only key in ‘notes’, when it is removed, an empty object is left behind.", "username": "Scott_Wager" }, { "code": "{\n \"_id\" : 1,\n \"fld\" : 111,\n \"notes\" : {\n \"title\" : \"some_title_1\",\n \"highlights\" : \"abc 123\"\n }\n}\n{ \"_id\" : 2, \"fld\" : 222, \"notes\" : { \"highlights\" : \"abc 999\" } }\ndb.collection.updateMany(\n {},\n [\n { \n $set: { \n \"notes.highlights\": { $cond: { if: { $eq: [ { $type: \"$notes.title\" }, \"string\" ] },\n then: \"$$REMOVE\",\n else: \"$notes.highlights\"\n } }\n }\n },\n { \n $set: { \n notes: { $cond: { if: { $eq: [ { $type: \"$notes.title\" }, \"string\" ] },\n then: \"$notes\",\n else: \"$$REMOVE\"\n } } \n }\n }\n])\n{ \"_id\" : 1, \"fld\" : 111, \"notes\" : { \"title\" : \"some_title_1\" } }\n{ \"_id\" : 2, \"fld\" : 222 }", "text": "@Scott_Wager, I have this solution (and it works fine). I suspect there might be “better” way of doing this. The syntax needs to be same for this to work.Take these two sample documents:The update operation:The result:", "username": "Prasad_Saya" }, { "code": "{ $not: [\"$notes.highlights\"] }", "text": "This worked perfectly, thank you so much!!I also used ‘$not’ instead of ‘$eq’:\n{ $not: [\"$notes.highlights\"] }", "username": "Scott_Wager" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Update pipeline: conditioning $unset
2020-12-17T11:49:23.112Z
Update pipeline: conditioning $unset
5,703
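Combining the accepted answer with the $not variant from the last reply, the whole update could be sketched as below; $not treats a missing, null or false notes.title as "no title", which matches the intent described in the thread:

```js
db.collection.updateMany({}, [
  {
    $set: {
      // when a title exists, drop only the highlights field
      "notes.highlights": {
        $cond: {
          if: { $not: ["$notes.title"] },
          then: "$notes.highlights",
          else: "$$REMOVE"
        }
      }
    }
  },
  {
    $set: {
      // when no title exists, drop the whole notes object
      notes: {
        $cond: {
          if: { $not: ["$notes.title"] },
          then: "$$REMOVE",
          else: "$notes"
        }
      }
    }
  }
])
```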
null
[]
[ { "code": "", "text": "Hello,I had a question that can we set up sync or replication between my on-prem MongoDB server and my azure VM MongoDB server.Thanks in Advance.", "username": "Nimai_Ahluwalia" }, { "code": "", "text": "Hi @Nimai_AhluwaliaThere are a few methods for live migration. See https://docs.atlas.mongodb.com/importLive Migration and mongo-mirror will keep Atlas in sync until you are ready to cut over to Atlas.", "username": "chris" }, { "code": "", "text": "Welcome to the MongoDB community @Nimai_Ahluwalia!In order to suggest relevant migration options, can you please confirm:The methods @chris has suggested are specifically for migration to MongoDB Atlas on Azure.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Yup I did that thing and read Azure as Atlas. SMH.", "username": "chris" } ]
On-Premise Mongo DB to Azure VM Mongo DB Migration.
2020-12-16T16:12:05.556Z
On-Premise Mongo DB to Azure VM Mongo DB Migration.
2,427
null
[ "java" ]
[ { "code": "Extended JSONUUIDLUUIDimport com.mongodb.MongoClientSettings\nimport com.mongodb.MongoCredential\nimport com.mongodb.ServerAddress\nimport com.mongodb.client.MongoClients\nimport com.mongodb.client.MongoCollection\nimport com.mongodb.connection.ClusterSettings\nimport org.bson.Document\nimport org.bson.UuidRepresentation\nimport org.bson.codecs.DocumentCodec\nimport org.bson.codecs.UuidCodec\nimport org.bson.codecs.configuration.CodecRegistries\n\nfun connect(): MongoCollection<Document> =\n MongoClients.create(\n MongoClientSettings.builder()\n .credential(MongoCredential.createCredential(\"mongo\", \"admin\", \"mongo\".toCharArray()))\n .applyToClusterSettings { builder: ClusterSettings.Builder ->\n builder.hosts(listOf(ServerAddress(\"localhost\", 27017)))\n }\n .uuidRepresentation(UuidRepresentation.STANDARD)\n .build())\n .getDatabase(\"uuid-demo\")\n .getCollection(\"collection\")\n\nfun parseDocument(json: String): Document =\n Document.parse(\n json,\n DocumentCodec(\n CodecRegistries.fromRegistries(\n CodecRegistries.fromCodecs(UuidCodec(UuidRepresentation.STANDARD)),\n MongoClientSettings.getDefaultCodecRegistry()\n )\n )\n )\n\nfun main(args: Array<String>) {\n val document = parseDocument(\n \"\"\"\n {\n \"someId\" : UUID(\"cfbca728-4e39-4613-96bc-f920b5c37e16\")\n } \n \"\"\".trimIndent()\n )\n val collection = connect()\n\n collection.drop()\n collection.insertOne(document)\n val inserted = collection.find().first()!!\n\n println(inserted.toJson())\n}\nLUUID{\"_id\": {\"$oid\": \"5fdb73cc7dab4766e448f2be\"}, \"someId\": {\"$binary\": {\"base64\": \"z7ynKE45RhOWvPkgtcN+Fg==\", \"subType\": \"03\"}}}", "text": "I am trying to insert an Extended JSON document into a MongoDB collection. This document contains a standard UUID . My problem is that after inserting, the UUID is saved in the legacy LUUID format.The following (Kotlin) program illustrates my problem:However the UUID is stored using the LUUID type with binary subtype 0x03:\n{\"_id\": {\"$oid\": \"5fdb73cc7dab4766e448f2be\"}, \"someId\": {\"$binary\": {\"base64\": \"z7ynKE45RhOWvPkgtcN+Fg==\", \"subType\": \"03\"}}}What I am doing wrong here?", "username": "Joh_Bar" }, { "code": "MongoDB Enterprise > db.collection.find()\n{ \"_id\" : ObjectId(\"5fdc1d15944a7e6e66a2a541\"), \"someId\" : UUID(\"cfbca728-4e39-4613-96bc-f920b5c37e16\") }\nfun main(args: Array<String>) {\n val collection = connect()\n\n val document = Document.parse(\"\"\"\n {\n \"someId\" : UUID(\"cfbca728-4e39-4613-96bc-f920b5c37e16\")\n }\"\"\",\n collection.codecRegistry.get(Document::class.java)\n )\n\n collection.drop()\n collection.insertOne(document)\n val inserted = collection.find().first()!!\n\n println(inserted.toJson(collection.codecRegistry.get(Document::class.java)))\n}\n{\n \"_id\": {\"$oid\": \"5fdc1d15944a7e6e66a2a541\"}, \n \"someId\": {\"$binary\": {\"base64\": \"z7ynKE45RhOWvPkgtcN+Fg==\", \"subType\": \"04\"}}}\nCodec<Document>UuidRepresentation", "text": "It’s being inserted correctly, as you can see in the shell:The problem is that you need to use the correct codec in your call to Document.toJson. 
Try this (simplified) program:which prints:The trick is to use the Codec<Document> from the collection, which has been configured from the UuidRepresentation from the settings.Hope this helps.Regards,\nJeff", "username": "Jeffrey_Yemin" }, { "code": "\"subType\": \"03\"> db.collection.find()\n{ \"_id\" : ObjectId(\"5fdc8235119b8e3f3fd06c72\"), \"someId\" : BinData(3,\"z7ynKE45RhOWvPkgtcN+Fg==\") }\nprintln(\n Document.parse(\n \"\"\"{\"someId\" : UUID(\"cfbca728-4e39-4613-96bc-f920b5c37e16\")}\"\"\".trimIndent(),\n collection.codecRegistry.get(Document::class.java)\n ).toJson(collection.codecRegistry.get(Document::class.java))\n)\n\"subType\": \"03\"", "text": "Hi,Thank you very much for your help. Unfortunately, it doesn’t quite work. It is still outputting \"subType\": \"03\".I also don’t think that it is inserted correctly. When I query the shell I get:Even\"subType\": \"03\".MongoDB server version: 4.4.2\nMongoDB shell version v4.2.0\nMongoDB Java driver version: 4.0.5", "username": "Joh_Bar" }, { "code": "4.1.1", "text": "Ok. It works after I upgraded the version of the driver to 4.1.1. Thanks for your help.", "username": "Joh_Bar" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to store a UUID with binary subtype 0x04 using the MongoDB Java Driver
2020-12-17T18:18:58.621Z
How to store a UUID with binary subtype 0x04 using the MongoDB Java Driver
11,128
null
[ "queries" ]
[ { "code": "", "text": "I have a 10 year history of the Dow Jones index. Is there a way I can retrieve every, say, 5th document? I want to do this to speed up graphing Closing data. So, instead of plotting over 1800 points, only 360+ points need to be plotted. Thank you.", "username": "David_Robinson" }, { "code": "{ plotDoc: { $last : \"$$CURRENT\" } }\n", "text": "Hi @David_Robinson,Welcome to MongoDB community.There are a few ways to work around it. You can add an aggregation stage with $sample to get random few documents :Or you can rank each document and query the ones mod with 5 is 0:Likes technical puzzles, world travel and fancy foodYou can group the data by lets say a day or a week and use $last to project one document of each group:Thanks\nPavel", "username": "Pavel_Duchovny" } ]
Retrieve every nth document in a collection
2020-12-17T19:09:09.345Z
Retrieve every nth document in a collection
3,835
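For the "rank each document and keep the ones where rank mod 5 is 0" option, a sketch on MongoDB 5.0 or later (newer than this thread) could look like the following; the dowjones collection and date field names are assumptions:

```js
db.dowjones.aggregate([
  {
    // number the documents in date order (requires MongoDB 5.0+)
    $setWindowFields: {
      sortBy: { date: 1 },
      output: { n: { $documentNumber: {} } }
    }
  },
  // keep every 5th document
  { $match: { $expr: { $eq: [{ $mod: ["$n", 5] }, 0] } } },
  // drop the helper field before plotting
  { $unset: "n" }
])
```

On 4.x, grouping by a date bucket and taking $last per bucket, as suggested above, achieves a similar thinning of the data.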
https://www.mongodb.com/…b_2_1024x143.png
[ "aggregation" ]
[ { "code": "{\n\"_id\" : \"20110418123336260\",\n\"radar\" : {\n \"mileage\" : \"15\",\n \"direction\" : \"ascending\",\n \"speed limit\" : \"180\"\n },\n\"Record\" : {\n \"date\" : \"18/04/2007\",\n \"time\" : \"12:33:36.260\", //Hour<24 and .260 are ms\n \"speed\" : \"193\"\n }\n}\ndb.traffic.aggregate([{$project: {\n \"_id\": \"$radar\",\n array :{\n $split:[\"$Record.date\",\"/\"]\n } \n}}, {$group: {\n _id: {radar: \"$_id\", month: {$arrayElemAt: [\"$array\",1]}},\n count: {\"$sum\": 1 }\n\n }}, {$sort: {\n \"_id.radar\": 1,\n \"_id.month\": 1,\n \"count\": -1\n}}]);\n avg : {\"$avg\" : \"$_id.Record.speed\"}\n", "text": "Hello everyone,I’m learning MongoDB and I’m practicing with a traffic database, and I’d like to know how to get the most popular radar per month (this means that for each month, the query has to show the radar with the highest number of records on it).This is how a document looks like in the database:Thanks to the help of @Pavel_Duchovny in a similar example in my last post, I got really close to the final result, but in the end I got stucked in this query, which finds for each radar his maximum number of appeareances per month:This is how the data looks like after this query (and as I said, it continues with the rest of the months for this radar until 12 documents (12 months), when next radar is showed)\nCaptura1092×153 3.29 KBCould please someone help me? I tried to group in the next stage and I tried to use first, $ROOT,etc properly but with no luck.PD: Would it be possible to show also an average of the speed of the radar fines? If so, how would it be done? I imagine it would be necessary to project the Record.speed and in some group/project stage do this, but when?:Thank you in advance!!", "username": "ReyBinario" }, { "code": "", "text": "Hi @ReyBinario,It seems that the there is one document per radar? You use _id as the radar Id.But in your data example the Record field is not an array, this is where I am confused.I can help you if you clarify the exact document structure and and output example.Best regards,\nPavel", "username": "Pavel_Duchovny" }, { "code": "db.trafico.find({},{\"radar\":1,\"Record\":1,_id:0}).pretty();\n{\n \"radar\" : {\n \"mileage\" : 15,\n \"direction\" : \"ascending\",\n \"speed limit\" : 180\n },\n \"Record\" : {\n \"date\" : \"18/04/2007\",\n \"time\" : \"12:33:36.260\",\n \"speed\" : 193\n }\n}\n", "text": "Hi @Pavel_Duchovny,Yes, there is one document per radar (the real document has a lot of information, but I filtered it to simplify the example).\nEach document has the object “radar” and the object “Record” , and in order to get only the month I used “split” in Record.date (String) and created an array. In this array, the month (string) is storaged in position “1”.\nI hope I clarified the structure.Anyway, here is an image of the data of this query:and the output (one doccument):I hope I have solved your doubts.\nIf you need anything else please let me know.\nThank you very much for your time, best regards,\nRB.", "username": "ReyBinario" }, { "code": "\"radar\" : {\n \"mileage\" : 15,\n \"direction\" : \"ascending\",\n \"speed limit\" : 180\n },\n", "text": "Hi @ReyBinario,So if each radar has only one document why do you group and count them? It sounds like it will always be 1.Or do you have a document for each recording? How do you identify a radar? By:Thanks,\nPavel", "username": "Pavel_Duchovny" } ]
How to count maximun cardinality per month?
2020-12-17T01:42:30.395Z
How to count maximun cardinality per month?
2,501
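In case it helps, one possible shape for the rest of the pipeline described in this thread, assuming one document per recording and the radar/Record fields from the sample (speed is converted in case it is stored as a string):

```js
db.traffic.aggregate([
  {
    $group: {
      _id: {
        radar: "$radar",
        month: { $arrayElemAt: [{ $split: ["$Record.date", "/"] }, 1] }
      },
      count: { $sum: 1 },
      avgSpeed: { $avg: { $toDouble: "$Record.speed" } }
    }
  },
  // busiest radar of each month first
  { $sort: { "_id.month": 1, count: -1 } },
  {
    $group: {
      _id: "$_id.month",
      radar: { $first: "$_id.radar" },
      records: { $first: "$count" },
      avgSpeed: { $first: "$avgSpeed" }
    }
  },
  { $sort: { _id: 1 } }
])
```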
null
[ "data-modeling" ]
[ { "code": "group_id: \"group_1\"\nmembers:\n 0:\n user_id: \"test_user0\"\n role: \"ADMIN\"\n tag: \"tag_1\"\n 1:\n user_id: \"test_user1\"\n role: \"MOD\"\n tag: \"tag_1\"", "text": "Hey, I couldn’t make a schema for my project due to some fields inside the schema.\nBasically, the schema has only two fields which are “group_id” and “members”. The problem is members field can has 10,000 elements inside but not every field. The document isn’t exceed the limit of 16MB but I am worried about the performance.What do you think about it? Should I keep continue with current schema?", "username": "Duck" }, { "code": "members", "text": "Hello @Duck, welcome to the MongoDB Community forum.…worried about the performance.The question I see is why are you worried about performance? What is it you are going to (or trying) do with the data in the members array.You can insert new elements, update or delete elements in the array. And query too. Array fields can have indexes - and these are called Multikey Indexes. The queries on array fields can benefit, in terms of performance, from Multikey indexes. You can study the query performance by generating query plans.As such your current model might work fine as it is. The kind of queries (the important ones and includes CRUD) you are going to have in your application can determine the design of the data model. So, what are these queries?", "username": "Prasad_Saya" }, { "code": "", "text": "It often uses “get”, “$push” and “$pull” queries.I’ve indexes for in array objects but I am worried about size of the array how it is going to affects to queries.", "username": "Duck" }, { "code": "", "text": "If you know, the size of array is going to be finite (e.g., it can be between 1 and 10,000 only - and never more than that) and the size of the document is within limits - I think it will workout fine.The find and update (push and pull operations on the array) queries use the index when the query condition has the indexed field(s).…I am worried about…I suggest you create some test data and run the important queries. Use the explain() method on the queries and study the plan output statistics.Then, there is always the option of re-consider the present design.", "username": "Prasad_Saya" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Optimized Schema with Array
2020-12-17T12:39:39.946Z
Optimized Schema with Array
2,450
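A small sketch of the operations discussed in this thread (the groups collection name is an assumption), which is also what an explain() check would exercise:

```js
// multikey index on the embedded member ids
db.groups.createIndex({ "members.user_id": 1 })

// add a member
db.groups.updateOne(
  { group_id: "group_1" },
  { $push: { members: { user_id: "test_user2", role: "MOD", tag: "tag_1" } } }
)

// remove a member
db.groups.updateOne(
  { group_id: "group_1" },
  { $pull: { members: { user_id: "test_user2" } } }
)

// confirm the read path uses the multikey index
db.groups.find({ "members.user_id": "test_user0" }).explain("executionStats")
```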
https://www.mongodb.com/…5519513f5bfc.png
[]
[ { "code": "", "text": "", "username": "Jack_Woehr" }, { "code": "", "text": "Hi @Jack_Woehr,Thanks for reporting this. Translations are part of the default Discourse configuration, but we haven’t actively checked all of the site messaging in available languages yet. The selection of locales with default translation will give us some early insight into community interest, and hopefully having translation of some core actions and messaging is better than no translation.We would love to see some community involvement in creating and updating translations, but we aren’t ready to get those projects started yet. We are planning for more international language resources and discussions in the new year, including language-based user groups. For example, there is a French speaking MongoDB User Group hosting a first event in January.If anyone is interested in potentially speaking or co-organising a language-based user group (or any user group), we have a signup form to become a community leader and a User Groups forum category for discussion.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Hi Stennie –Good to chat with you, thanks for all the help as I learn MongoDB.I am “fluent but not native” in French and Spanish so I might attend such (virtual) events though I probably would not be a suitable speaker. I’ll check that out.Regards,\nJack", "username": "Jack_Woehr" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Spanish -- untranslated message
2020-12-05T15:29:20.808Z
Spanish &ndash; untranslated message
3,277
null
[ "atlas-device-sync" ]
[ { "code": "", "text": "I have been using MongoDB Realm now since June 2020. One thing that I have noticed is that after one creates a new cluster, initializes a new Realm application, the very first time the Realm application writes to the database, the actual time for the data to show up in Atlas can take several minutes. We joke around here that it’s like warming up a lawn mower. Every subsequent write after that is pretty instantaneous. I was just wondering what is going on underneath the hood.Richard Krueger", "username": "Richard_Krueger" }, { "code": "", "text": "@Richard_Krueger We actually have just merged our Realm history compaction feature which will improve the situation you are describing. We are slowing rolling it out - I can turn it on for your cluster if you send me your Realm app URL but keep in mind it is beta and might have unexpected behavior", "username": "Ian_Ward" }, { "code": "", "text": "Ian, thanks for the quick reply. As I said, the problem goes away after the first write, which can take several minutes. When I create a new cluster I will send you the Realm app URL to enable the new feature. But good to know that this issue is being addressed.", "username": "Richard_Krueger" } ]
Slow first sync for MongoDB Realm
2020-12-16T19:23:59.483Z
Slow first sync for MongoDB Realm
1,932
https://www.mongodb.com/…bb80d058411e.png
[ "indexes" ]
[ { "code": "{\"deletedAt\": null, \"userIds\": ObjectId(\"5fda25bc24d241000d01e775\")}\n{ userIds: 1}\n", "text": "Hello,I have a pretty simple query in Atlas which seems to be largely ignoring my defined index. I’m confident that I’m ignorant of some detail of how the index should work. However when I run what looks to me to be the same query in compass with explain the index is used.The collection is called cars and has a few fields on it we’re interested in. The first is the deletedAt field a date and the second userIds an array of ObjectIds. The query I’m running isAnd I have an index on userIdsIn compass a single key is examined while in my mongoose connection the same query seems to examine every document in the collection.Compass\ncompass1005×277 19.3 KBAtlas\natlas504×1051 27.1 KBI didn’t see anything in the plan cache that would explain this behaviour and I added that index quite a while back so I assume that somewhere along the line that plan would have been recalculated. I’m afraid I’m not very familiar with mongo so chances are this is something simple. Please feel free to just point me at the appropriate docs.Simon", "username": "Simon_Timms" }, { "code": "userIdscreatedAt{\"userIds\": 1, \"deletedAt\": 1, \"createdAt\": 1}\n", "text": "Hi @Simon_Timms and welcome in the MongoDB Community !From what I see, you are running a different query in Atlas because you have also a sort and a projection. And because it says “In Memory Sort” = “No”, I have to imagine it’s not using the userIds index but rather a createdAt index which probably also exists.\nSo in the second query with the sort and the projection, MongoDB chooses to avoid the in memory sort. It could use the other index though and it would be more efficient - but it cannot guess that.A better index for your query though would be:With this compound index, you have a nice selectivity (very little keys needs to be scanned if you don’t have the same objectId in many documents) and also the sort is included so no inefficient in memory sort.I hope it helps .Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "Thanks, @MaBeuLux88!That’s interesting. There is indeed a createdAt index which is perhaps not the optimal index to use. I added the suggested index and I’ll see if that improves things. For my future reference is there a way to provide index utilization hints to the engine? It seems like in this case it selected a highly sub-optimal index especially considering that I wasn’t even filtering by the created date but only using it in sorting.", "username": "Simon_Timms" }, { "code": "", "text": "Yes, you can hint but in 99.9% of the cases, that’s usually a bad idea.In your present case, an in-memory sort is always bad compared to a free sort (which you get because the index is already sorted and you are retrieving the docs in the same order). So in your case, MongoDB had to choose between a bad index and a poor index. It chose the best it could use, but the correct solution here is not to hint, it’s to create a good index. The one I proposed was definitely better than the 2 others.Please make sure to re-run your query with an explain(true) to double check that your query now executes efficiently (== not scanning a lot of entries in the index and no in-memory sort).Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Inefficient query seems to be ignoring index
2020-12-16T23:01:21.268Z
Inefficient query seems to be ignoring index
3,841
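For reference, a shell sketch of the suggested compound index plus the explain check; the cars collection name comes from the thread and the descending sort on createdAt is an assumption:

```js
db.cars.createIndex({ userIds: 1, deletedAt: 1, createdAt: 1 })

db.cars.find({ deletedAt: null, userIds: ObjectId("5fda25bc24d241000d01e775") })
  .sort({ createdAt: -1 })
  .explain("executionStats")
// expect totalKeysExamined close to nReturned and no in-memory SORT stage
```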
null
[ "golang" ]
[ { "code": "func IsDuplicateKeyError(err error) bool {\n\tswitch e := err.(type) {\n\tcase mongo.CommandError:\n\t\treturn e.Code == 11000 || e.Code == 11001 || e.Code == 12582 || \n(e.Code == 16460 && strings.Contains(e.Error(), \" E11000 \"))\n\tcase mongo.WriteException:\n\t\tif len(e.WriteErrors) > 0 {\n\t\t\tfor _, we := range e.WriteErrors {\n\t\t\t\tif we.Code == 11000 || we.Code == 11001 || we.Code == 12582 || \n(we.Code == 16460 && strings.Contains(we.Error(), \" E11000 \")) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase mongo.BulkWriteException:\n\t\tif len(e.WriteErrors) > 0 {\n\t\t\tfor _, we := range e.WriteErrors {\n\t\t\t\tif we.Code == 11000 || we.Code == 11001 || we.Code == 12582 || \n(we.Code == 16460 && strings.Contains(we.Error(), \" E11000 \")) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tdefault:\n\t\treturn false\n\t}\n}\ntype ServerError interface {\n error\n HasErrorCode(int) bool\n HasErrorLabel(string) bool\n Unwrap() error\n}\nfunc IsDuplicateKeyError(err error) bool {\n\tif e, ok := err.(mongo.ServerError); ok {\n\t\treturn e.HasErrorCode(11000) || e.HasErrorCode(11001) || e.HasErrorCode(12582) || \n\t\t\t(e.HasErrorCode(16460) && strings.Contains(e.Error(), \" E11000 \"))\n\t}\n\treturn false\n}\nIsTimeout(error) bool\nIsDuplicateKeyError(error) bool\nVar err error\nif mongo.IsTimeout(err) {\n\t // handle the error\n}\n", "text": "Hi, the team would like user perspective on one of the things we’re working on: error improvements. Currently, writing mgo’s IsDup helper with the driver would look something like this:Tentatively, the plan is to add an interface with some helpers that CommandError, WriteException, and BulkWriteException will implement:Which would turn that DuplicateKey helper function into this:IsDuplicateKeyError would also be one of the standalone helpers added for commonly checked errors:ex.Would this solution serve your needs and are there other helpers that would be nice to have?", "username": "Isabella_Siu" }, { "code": "return e.HasErrorCode(11000) || e.HasErrorCode(11001) || e.HasErrorCode(12582) || \n\t\t\t(e.HasErrorCode(16460) && strings.Contains(e.Error(), \" E11000 \"))\niotatype ErrorCode uint8\nconst (\n ErrorCodeType1 ErrorCode = iota\n ErrorCodeType2 ErrorCode = iota\n)\ntype ServerError interface {\n error\n HasErrorCode(ErrorCode) bool\n HasErrorLabel(string) bool\n Unwrap() error\n}\niotatype ErrorLabel string\n\nvar (\n ErrorLabelType1 ErrorLabel = ErrorLabel(\"just an example type\")\n ErrorLabelType2 ErrorLabel = ErrorLabel(\"just another example type\")\n)\n", "text": "Hello @Isabella_Siu and thanks for your work on the golang driverMy first feedback would be: if you plan to use error codes in this way, probably is better to enumerate them uysing golang iota featureand change the interface to the followingsame reasoning can also be applied to the ErrorLabels, if they can be enumerated in some way, but without iotahope it helpsThere is a whole literature in how to do the IsError “pattern” in golang and usually it’s similar to the way you are proposing to implement, so I have no more feedback.Just do it for all errors (including for example ErrNoDocuments) to be consistent with current codebaseAgain, thanks for your work", "username": "Alessandro_Sanino" }, { "code": "(e.HasErrorCode(16460) && strings.Contains(e.Error(), \" E11000 \"))ErrorCategory()DuplicateKeytype ErrorCategory int {\n ErrDuplicateKey iota\n ErrDuplicateIndex\n}\nerrCategoryMap := map[int]int{\n 11000: ErrDuplicateKey,\n 11001: 
ErrDuplicateKey,\n ...\n}\n", "text": "@Isabella_Siu -\nThis is coming along nicely - thank you so much for sharing the proposal with us.\nI love the idea of extending the error types to have .HasErrorCode() and .HasErrorLabel(), but I personally am with @Alessandro_Sanino in that I’d also like iota enumerated types so that performing error checking is really light and fast.Taking a look at the new IsDup error function, I can see that while this proposal simplifies some logic, at the end of the day, it doesn’t seem to solve the fundamental issue. This line in particular is what’s concerning me (e.HasErrorCode(16460) && strings.Contains(e.Error(), \" E11000 \")). If there is one error code in the error type and another code in the string, then it doesn’t seem like it’s simple to determine what the actual error is.Maybe something that might help is to create an internal map in mongo-go-driver. Extrapolate certain error codes to categories. So rather than having an IsDup function, the error could contain an additional field/accessor, ErrorCategory(), in this case it would be set to an iota int const of DuplicateKey, which is set by the driver when the result comes back from the database (as opposed to having to call it after every insert). I would argue that there are quite a few errors that could be categorized.Having fine-grained access to the specific code/error is nice, but since there’s no documentation on what the individual error codes mean outside of the golang driver (e.g. DuckDuckGo & Google). Having the low level error code just typically isn’t directly helpful.Possible map", "username": "TopherGopher" }, { "code": "", "text": "Hi, @Alessandro_Sanino and @TopherGopher thanks for your feedback!The reason that the error code and labels aren’t iota enumerated is that they’re parsed from the server errors, which the driver doesn’t have control over, and this allows us the flexibility we need to handle changes in that. The driver API has stricter constraints than the server error codes, for example, the server can stop including an error code fairly easily, but we would be stuck with the iota value until the next major release. If you want to take a look, the server error codes are listed here.This is why IsDuplicateKeyError ends up having complex logic. Between major server versions the way the server changed how it signals that, so the function needs to catch all the different ways different versions return it. We’re currently only planning to add standalone helpers for error checking that requires more complex logic and not things that can be checked with equality or by calling the HasErrorCode() or HasErrorLabel() functions.Specifically in regards to the idea of error categories, our feeling is that any category that could be handled that way either is already handled by the server labels and HasErrorLabel() or could be handled with a standalone helper. 
If you have ideas for any additional error helpers, we’d be happy to consider them.", "username": "Isabella_Siu" }, { "code": "16460E11000164601100112582- ErrCategoryBadInputParameter:\n - InvalidLength\n - InvalidBSON\n - InvalidPath\n - InvalidRoleModification\n - InvalidIdField\n - InvalidDBRef\n - InvalidOptions\n - InvalidNamespace\n - InvalidReplicaSetConfig\n - InvalidSyncSource\n - InvalidSSLConfiguration\n - InvalidPipelineOperator\n - InvalidViewDefinition\n - InvalidIndexSpecificationOption\n - InvalidUUID\n - InvalidResumeToken\n - InvalidSeedList\n - InvalidTopologyType\n - InvalidHeartBeatFrequency\n - InvalidServerType\n\n- ErrCategoryConflict:\n - WriteConflict\n - ConflictingOperationInProgress\n - ObjectIsBusy\n - TenantMigrationConflict\n - PreparedTransactionInProgress\n - DuplicateKey\n\n- ErrCategorySpecification:\n - DatabaseDifferCase\n - KeyNotFound\n - IndexOptionsConflict\n - IndexKeySpecsConflict\n - IndexAlreadyExists\n - IndexNotFound\n - NamespaceNotFound\n\n- ErrCategoryTooLarge:\n - ExceededMemoryLimit\n - QueryExceededMemoryLimitNoDiskUseAllowed\n - OutOfDiskSpace\n - BSONObjectTooLarge\n - OBSOLETE_KeyTooLong\n\nErrCategoryBadConnection:\n- DNSHostNotFound\n- DNSProtocolError\n- NodeNotFound\n- ReplicaSetNotFound\n- NetworkTimeout\n\nAborted:\n- TransactionExceededLifetimeLimitSeconds\n- PeriodicJobIsStopped\n", "text": "The problem comes down to lack of knowledge of the error codes themselves. I googled around for a long time trying to find the reference. I appreciate you linking it.\nNow if each error in that document had a corresponding detail - or there was a table on the mongo doc site - it might be more feasible for me to know which all errors to check for given each server version in order to maintain compatibility in a library for both old and new versions of mongo.Let me give you an example:\nHow am I as an end user supposed to know things like 16460 isn’t a duplicate key error unless it has E11000 in the string for mongo versions X-Y? 16460 isn’t listed in the document linked (nor is 11001/12582), so what does it mean when I get that code and E11000 isn’t in the string? The IsDup helper helps with this case, but are there other errors where similar operations has to be done?I see in the linked dictionary table, that there is already the beginnings of a notion of categories. Couldn’t we expand on that? So when an error comes back from the API, that mapping would come into play.Commonly, errors are used for two purposes - either to make some sort of informed/automatic programmatic decision or to provide a message to a user. 
Having to check for multiple codes for various common use cases gets cumbersome with larger code bases.\nBy setting this category using a map on the way in, that avoids all the look-up logic later.Just to run through a few to give you an idea of what I would love to check for (these names are totally negotiable):", "username": "TopherGopher" }, { "code": "$pushmultiple write errors: [{write errors: [{The field 'myArray' must be an array but is of type null in document {_id: ObjectId('5fc80a1650cd0c9ecb7cc8fb')}}]}, {<nil>}]\n$set$push\t// Attempt a $push query into a null array - error occurs\n\tmErr := mongo.WriteException{}\n\tif isMongoErr := errors.As(err, &mErr); isMongoErr {\n\t\tfor _, wErr := range mErr.WriteErrors {\n\t\t\tif wErr.Code == 2 && strings.Contains(wErr.Message, \"must be an array but is of type null\") {\n\t\t\t\t// Perform a $set rather than $push\n\t\t\t}\n\t\t}\n\t}\n", "text": "I think a use-case might be helpful.\nSay I’m getting this error back from the DB (The Code is 2) when I attempt to run a $push using UpdateOne():In this case, I want to programmatically retry using a $set rather than $push and the operation should succeed.\nThe best way I know how to check for this:Does this new error proposal simplify that logic at all? That’s one of the most common use-cases in our code-base.\nI realize I could resolve that particular issue using custom registries instead, but if we use a new registry, there’s a LOT of testing we would need to do, thus this logic instead.", "username": "TopherGopher" }, { "code": " if mErr, ok := err.(mongo.ServerError); ok {\n \tIf mErr.HasErrorCodeWithMessage(2, \"must be an array but is of type null\") {\n \t\t// Perform a $set rather than $push\n \t}\n }\n var pipeline = mongo.Pipeline{\n bson.D{\n \t\t{\"$set\", bson.D{\n \t\t\t{\"x\", bson.D{\n \t \t\t{\"$concatArrays\", bson.A{\n \t \t bson.D{{\"$ifNull\", bson.A{\"$x\", bson.A{}}}},\n \t \t bson.A{3},\n \t }},\n \t }},\n \t }},\n \t },\n }\n res, err := coll.UpdateMany(context.Background(), bson.D{}, pipeline)", "text": "Hi @TopherGopher!Three more things that are being added to this API are ServerError.HasMessage(string) to check for a message substring, ServerError.HasErrorCodeWithMessage(int, string) to check for a message substring on errors with a specific code, and a standalone IsNetworkError(error) to check if errors are network errors.The goal of the standalone helpers is to handle all cases where multiple error representations benefit from being linked, so a user wouldn’t be expected to know all the historical ways IsDuplicateKey error could be represented. It’s only Duplicate Key errors right now, but any errors in the future require more complex logic, they would also get helpers.We don’t want to set up our own go driver error categories because setting up our own categories requires us to maintain them on fairly unstable server error codes. While we’re happy to add standalone helpers for checking for useful groups of errors, I’m not sure of the benefit of these specific categories. Many of the errors in each category wouldn’t occur together, and wouldn’t necessarily want to be handled the same way. I’m also not sure of the benefit of knowing that an error is, for example, a bad input parameter without knowing what’s wrong with which parameter. 
Do you have a specific use case you’re thinking of for these?As an additional note, there is a ticket to add the error codes to the documentation, which should make them easier to find, but as of right now, that server file is the best source.For the case that you’re describing, you could instead do:For your specific use case, the issue is that nil slices get marshaled to BSON null values, so myArray in your collection may either be an array or null. You could avoid mixing null and array by using a custom registry or aggregation updates. To work around this with a custom registry, you would use a registry with a SliceCodec with SetEncodeNilAsArray(true). Alternatively, to work around this with aggregation updates, you would use aggregation to conditionally append or set.For example, on a collection that contains {x: null} and {x: [1,2]}, you could append or set x to [3] with the following:", "username": "Isabella_Siu" }, { "code": "HasErrorCodeWithMessageErrCategoryBadInputParameterErrCategoryConflictErrCategorySpecificationErrCategoryBadConnection", "text": "Heya back @Isabella_Siu -\nYou’re right, HasErrorCodeWithMessage definitely simplifies this logic, but question - will the code always be 2 across all mongo server versions for a nil insert? Is it a different code say if I try to $push to a datetime for example? How can I as a developer go about figuring that out? If I could answer the question of “have I handled all possible incarnations of this error?” then I would have more confidence in the error handling.Those were just example groupings and could easily be changed, but let me walk you through my thoughts on general handling of a few of those categories and maybe you’ll follow where I’m coming from:Maybe I don’t understand the meaning behind the errors themselves, which is possibly why I’m grouping them together incorrectly, but maybe that just demonstrates that it’s hard to find documentation and understand what these errors mean, and therefore, how to handle them.Ahhh - I didn’t know that nil helper made it into master yet - I’ve been using the old godoc site rather than gopkg, which doesn’t pick up that package, but I found it now on the new doc site. I see the SliceCodec you’re talking about and the Array option - very cool to know that’s available now Unfortunately, if we update the registry, then that change will affect literally hundreds of queries, thus the reason we’re gun-shy to use a registry. Makes us nervous to do something widespread in our library without a lot of testing and we have not had time for that lately.That aggregation query is really nifty and exactly what we’re after. I’ll have to give it a go. I truly appreciate you crafting that because it’s something we’ve been struggling with for a while.", "username": "TopherGopher" }, { "code": "", "text": "Hi @TopherGopher !To clarify, it’s not common for error codes to change between server versions, and if it happens with any more error codes, we will add helpers for them. We do agree that it should be easier to check what error codes correspond to what behavior, and you can follow that ticket here. For better or worse, testing a scenario with a live server is the most accurate way to identify error codes. The server, not the driver, controls what error codes are returned for what operations.Overall, we, as the driver, can’t provide error categories for all the errors because we don’t necessarily know all of them. 
Having the groupings would somewhat imply that users would sometimes want to handle all the errors in a grouping the same, and as a lot of these errors wouldn’t be handled programmatically, that isn’t true for many of them. For example the things in BadInputParameter and Specification, would be fixed by changing the actual code, where knowing what category of error it is doesn’t provide any additional benefit. Errors that are because of bad connections will be grouped together with IsNetworkError helper, so those should be handled. Though it should be noted that the driver will retry on its own for certain errors that we know benefit from being retried, including network errors.And you’re welcome! I’m glad that that was helpful.", "username": "Isabella_Siu" }, { "code": "IsTimeout()IsNetworkError()IsDuplicateKeyError()mongo.ServerErrorHasErrorLabelHasErrorCode", "text": "After chatting it out with you and given that ticket ^ for making errors easier, all of the helpers your team has proposed will be perfect (imho).\nIsTimeout(), IsNetworkError() and IsDuplicateKeyError() are all awesome sounding.The idea that all the mongo errors will conform to a common mongo.ServerError interface rocks. I think HasErrorLabel and HasErrorCode paired with that common interface will radically simplify our error checking hunks. Thank you for this thoughtful design and help in understanding it.", "username": "TopherGopher" }, { "code": "type ServerError interface {\n error\n ErrorCode() int\n ErrorLabel() string\n Unwrap() error\n}\n", "text": "This all looks great. Thanks for all the hardwork! The one improvement I’d suggest is producing the error code and label from the interface.The helper methods will be what most people use. But in special situations, no one is going to loop through all the possible error codes/labels to see which one the error “has”, they will end up parsing the string, which is a no-no. Better to just produce the error and label.", "username": "Dustin_Currie" }, { "code": "", "text": "Also, Dave Cheney would suggest making this interface private and just having public helper methods.https://dave.cheney.net/2016/04/27/dont-just-check-errors-handle-them-gracefullyBut I do remember that the reasons for this were a bit arcane.", "username": "Dustin_Currie" }, { "code": "", "text": "Hi @Dustin_Currie!The reason we didn’t decide to do return functions is that the errors that ServerError wraps often have multiple labels and multiple codes, for example, a WriteException can contain multiple write errors, each with their own code. When users are trying to programmatically handle errors, they usually have something specific in mind, and the “has” functions allow us to abstract away checking the internals. If you just want to see what the error is, it’s best to print it.While the idea of just having public helpers is interesting, the current interface design is set up to echo the existing design choices in the go library. Though I agree, users will likely use the standalone helpers much more than the interface.", "username": "Isabella_Siu" }, { "code": "", "text": "That makes sense. The current strategy looks good to me. After reminding myself of the suggestions in that Dave Cheney post, I think the way you guys have done it is far more clear. Thanks!", "username": "Dustin_Currie" } ]
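For readers who want to see the proposed API in context, here is a minimal sketch of how the pieces discussed in this thread can be combined in application code. It assumes a driver release that ships mongo.ServerError and the standalone helpers (1.5 or later); the code-2 "must be an array but is of type null" check is the example from earlier in the thread, and everything else is placeholder logic.

package mongoerrors

import (
	"errors"

	"go.mongodb.org/mongo-driver/mongo"
)

// classify sketches the checks discussed above: standalone helpers first,
// then the ServerError interface for anything more specific.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case mongo.IsDuplicateKeyError(err):
		return "duplicate key" // covers E11000 plus the older code/message variants
	case mongo.IsTimeout(err):
		return "timeout" // usually safe to retry
	case mongo.IsNetworkError(err):
		return "network error" // usually safe to retry
	}
	var srvErr mongo.ServerError
	if errors.As(err, &srvErr) {
		// The $push-into-null case from the thread: retry the update with $set.
		if srvErr.HasErrorCodeWithMessage(2, "must be an array but is of type null") {
			return "field is null, not an array"
		}
		if srvErr.HasErrorLabel("TransientTransactionError") {
			return "transient transaction error"
		}
	}
	return "unhandled server error"
}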
Seeking Developer Feedback: Go Driver Error Improvements
2020-11-18T21:07:44.548Z
Seeking Developer Feedback: Go Driver Error Improvements
4,816
null
[ "aggregation", "realm-web", "react-js" ]
[ { "code": "[\n {\n '$match': {\n 'email': '[email protected]'\n }\n }, {\n '$facet': {\n 'attempt': [\n {\n '$count': 'count'\n }\n ]\n }\n }\n ]", "text": "Hi,I’m using “realm-web”: “^1.1.0” on ReactJS. I’m trying to convert my queries to use $facet to reduce my queries. The results that I’m getting from $facet queries are nonexistent or a lot lower than running the queries by themselves. I ran a basic aggregate in the Atlas GUI and with the Realm SDK. I get 291 from the GUI vs 115 in SDK. What’s causing the inconsistency? I generated the aggregate in the GUI and copy/pasted into my SDK.", "username": "Winston_Zhao" }, { "code": "", "text": "There are some differences but I can say if they can explain what you observe from the information given.", "username": "Kenneth_Geisshirt" }, { "code": "", "text": "Hi Winston – One way to explain the difference may be Rules. For Web requests Realm will use Rules to figure out which documents can/cannot be seen by a user and this can impact aggregation results unless the code is within a Function run as a System Function (which explicitly skips rules).", "username": "Drew_DiPalma" }, { "code": "", "text": "Thanks for pointing me in the right direction.", "username": "Winston_Zhao" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Facet Count Incorrect
2020-12-16T05:38:27.328Z
Facet Count Incorrect
3,548
null
[ "atlas-search" ]
[ { "code": "", "text": "Not clear how to do. https://docs.atlas.mongodb.com/reference/atlas-search/regex/", "username": "Fred_Kufner" }, { "code": "", "text": "Hi @Fred_Kufner,I suggest to read the following README developed by one of our engineers regarding atlas search case insensitive queries:\nGitHub - MarcusSorealheis/Atlas-Search-Python: This is a Python Flask and MongoDB Atlas Search tutorial. Let me know if you have any questions at @marcusforpeace on twitter.I think by design many analyzers are case agnostic and if you.cant achieve it look for a custom lowercase tokenizerBest\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Got it. I went in another direction. Thanks!", "username": "Fred_Kufner" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Atlas Search regex - how to specify case insensitive?
2020-12-03T19:57:08.739Z
Atlas Search regex - how to specify case insensitive?
4,737
null
[ "golang", "field-encryption" ]
[ { "code": "", "text": "I’m trying to implement the MongoDB client-side field-level encryption in Go. When I try to run the project, I’m getting the below error.sudo go build -tags cse main.go# go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt …/…/go/pkg/mod/go.mongodb.org/[email protected]/x/mongo/driver/mongocrypt/binary.go:11:11: fatal error: mongocrypt.h: No such file or directory // #include <mongocrypt.h> ^~~~~~~~~~~~~~ compilation terminated.I followed the steps mentioned in this tutorial. Installed libmongocryptMongoDB Version used:4.4 enterprise edition.\nOS: Ubuntu 18.4/Windows\ngo version:1.15\nPlease help me to resolve this issue.", "username": "Viveka_BC" }, { "code": "pkg-config --cflags --libs libmongocrypt\n", "text": "Hi @Viveka_BC,To verify that libmongocrypt was successfully installed, can you run the following command and post the output in a comment:This will help us ensure that the installation worked and will also let us see where the required files are installed. Based on this information, we can provide follow-up debugging steps.– Divjot", "username": "Divjot_Arora" }, { "code": "", "text": "Hi @Divjot_Arora ,\nThis is the output I got:\n-DBSON_STATIC -I/usr/local/include/mongocrypt -I/usr/local/include/libbson-1.0 -L/usr/local/lib -lmongocrypt -lbson-static-1.0 /usr/lib/x86_64-linux-gnu/librt.so /usr/lib/x86_64-linux-gnu/libm.so -pthread", "username": "Viveka_BC" }, { "code": "/usr/localpkg-config --debug --cflags --libs libmongocrypt\n--debug", "text": "@Viveka_BC the output you provided is not consistent with the instructions you claim to have followed. It appears that you might have another libmongocrypt installation in /usr/local and that it may have been only partially uninstalled. Could you provide the complete output of the following command:(Note the addition of the --debug option).", "username": "Roberto_Sanchez" }, { "code": "pkg-config --debug --cflags --libs libmongocrypt\nError printing enabled by default due to use of output options besides --exists, --atleast/exact/max-version or --list-all. 
Value of --silence-errors: 0\nError printing enabled\nAdding virtual 'pkg-config' package to list of known packages\nCannot open directory #1 '/usr/local/lib/x86_64-linux-gnu/pkgconfig' in package search path: No such file or directory\nScanning directory #2 '/usr/local/lib/pkgconfig'\nFile 'libbson-1.0.pc' appears to be a .pc file\nWill find package 'libbson-1.0' in file '/usr/local/lib/pkgconfig/libbson-1.0.pc'\nFile 'libbson-static-1.0.pc' appears to be a .pc file\nWill find package 'libbson-static-1.0' in file '/usr/local/lib/pkgconfig/libbson-static-1.0.pc'\nFile 'libmongocrypt.pc' appears to be a .pc file\nWill find package 'libmongocrypt' in file '/usr/local/lib/pkgconfig/libmongocrypt.pc'\nCannot open directory #3 '/usr/local/share/pkgconfig' in package search path: No such file or directory\nScanning directory #4 '/usr/lib/x86_64-linux-gnu/pkgconfig'\nFile 'libkms_message.pc' appears to be a .pc file\nWill find package 'libkms_message' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/libkms_message.pc'\nFile 'openssl.pc' appears to be a .pc file\nWill find package 'openssl' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/openssl.pc'\nFile 'libbson-1.0.pc' appears to be a .pc file\nFile 'libbson-1.0.pc' ignored, we already know about package 'libbson-1.0'\nFile 'snappy.pc' appears to be a .pc file\nWill find package 'snappy' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/snappy.pc'\nFile 'ruby-2.5.pc' appears to be a .pc file\nWill find package 'ruby-2.5' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/ruby-2.5.pc'\nFile 'libmongocrypt.pc' appears to be a .pc file\nFile 'libmongocrypt.pc' ignored, we already know about package 'libmongocrypt'\nFile 'libmongoc-1.0.pc' appears to be a .pc file\nWill find package 'libmongoc-1.0' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/libmongoc-1.0.pc'\nFile 'geoclue-2.0.pc' appears to be a .pc file\nWill find package 'geoclue-2.0' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/geoclue-2.0.pc'\nFile 'libmongoc-ssl-1.0.pc' appears to be a .pc file\nWill find package 'libmongoc-ssl-1.0' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/libmongoc-ssl-1.0.pc'\nFile 'zlib.pc' appears to be a .pc file\nWill find package 'zlib' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/zlib.pc'\nFile 'libcrypto.pc' appears to be a .pc file\nWill find package 'libcrypto' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/libcrypto.pc'\nFile 'xorg-wacom.pc' appears to be a .pc file\nWill find package 'xorg-wacom' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/xorg-wacom.pc'\nFile 'ruby.pc' appears to be a .pc file\nWill find package 'ruby' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/ruby.pc'\nFile 'libmongocrypt-static.pc' appears to be a .pc file\nWill find package 'libmongocrypt-static' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/libmongocrypt-static.pc'\nFile 'libsasl2.pc' appears to be a .pc file\nWill find package 'libsasl2' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/libsasl2.pc'\nFile 'libssl.pc' appears to be a .pc file\nWill find package 'libssl' in file '/usr/lib/x86_64-linux-gnu/pkgconfig/libssl.pc'\nScanning directory #5 '/usr/lib/pkgconfig'\nFile 'ibus-table.pc' appears to be a .pc file\nWill find package 'ibus-table' in file '/usr/lib/pkgconfig/ibus-table.pc'\nScanning directory #6 '/usr/share/pkgconfig'\nFile 'yelp-xsl.pc' appears to be a .pc file\nWill find package 'yelp-xsl' in file '/usr/share/pkgconfig/yelp-xsl.pc'\nFile 'udev.pc' appears to be a .pc file\nWill find package 'udev' in file '/usr/share/pkgconfig/udev.pc'\nFile 'm17n-db.pc' appears to be a .pc file\nWill find 
package 'm17n-db' in file '/usr/share/pkgconfig/m17n-db.pc'\nFile 'poppler-data.pc' appears to be a .pc file\nWill find package 'poppler-data' in file '/usr/share/pkgconfig/poppler-data.pc'\nFile 'mobile-broadband-provider-info.pc' appears to be a .pc file\nWill find package 'mobile-broadband-provider-info' in file '/usr/share/pkgconfig/mobile-broadband-provider-info.pc'\nFile 'fontutil.pc' appears to be a .pc file\nWill find package 'fontutil' in file '/usr/share/pkgconfig/fontutil.pc'\nFile 'iso-codes.pc' appears to be a .pc file\nWill find package 'iso-codes' in file '/usr/share/pkgconfig/iso-codes.pc'\nFile 'adwaita-icon-theme.pc' appears to be a .pc file\nWill find package 'adwaita-icon-theme' in file '/usr/share/pkgconfig/adwaita-icon-theme.pc'\nFile 'xkeyboard-config.pc' appears to be a .pc file\nWill find package 'xkeyboard-config' in file '/usr/share/pkgconfig/xkeyboard-config.pc'\nFile 'systemd.pc' appears to be a .pc file\nWill find package 'systemd' in file '/usr/share/pkgconfig/systemd.pc'\nFile 'shared-mime-info.pc' appears to be a .pc file\nWill find package 'shared-mime-info' in file '/usr/share/pkgconfig/shared-mime-info.pc'\nFile 'usbutils.pc' appears to be a .pc file\nWill find package 'usbutils' in file '/usr/share/pkgconfig/usbutils.pc'\nFile 'bash-completion.pc' appears to be a .pc file\nWill find package 'bash-completion' in file '/usr/share/pkgconfig/bash-completion.pc'\nFile 'xkbcomp.pc' appears to be a .pc file\nWill find package 'xkbcomp' in file '/usr/share/pkgconfig/xkbcomp.pc'\nFile 'xbitmaps.pc' appears to be a .pc file\nWill find package 'xbitmaps' in file '/usr/share/pkgconfig/xbitmaps.pc'\nLooking for package 'libmongocrypt'\nLooking for package 'libmongocrypt-uninstalled'\nReading 'libmongocrypt' from file '/usr/local/lib/pkgconfig/libmongocrypt.pc'\nParsing package file '/usr/local/lib/pkgconfig/libmongocrypt.pc'\n line>Name: mongocrypt\n line>Description: The libmongocrypt client-side field level encryption library.\n line>Version: 1.1.0-pre2+20201126gitbe0a08387e\n line>Requires: libbson-static-1.0\n line>Requires.private: \n line>prefix=/usr/local\n Variable declaration, 'prefix' has value '/usr/local'\n line>includedir=${prefix}/include/mongocrypt\n Variable declaration, 'includedir' has value '/usr/local/include/mongocrypt'\n line>libdir=${prefix}/lib\n Variable declaration, 'libdir' has value '/usr/local/lib'\n line>Libs: -L${libdir} -lmongocrypt\n line>Cflags: -I${includedir}\nPath position of 'libmongocrypt' is 2\nAdding 'libmongocrypt' to list of known packages\nSearching for 'libmongocrypt' requirement 'libbson-static-1.0'\nLooking for package 'libbson-static-1.0'\nLooking for package 'libbson-static-1.0-uninstalled'\nReading 'libbson-static-1.0' from file '/usr/local/lib/pkgconfig/libbson-static-1.0.pc'\nParsing package file '/usr/local/lib/pkgconfig/libbson-static-1.0.pc'\n line>prefix=/usr/local\n Variable declaration, 'prefix' has value '/usr/local'\n line>exec_prefix=${prefix}\n Variable declaration, 'exec_prefix' has value '/usr/local'\n line>libdir=${prefix}/lib\n Variable declaration, 'libdir' has value '/usr/local/lib'\n line>includedir=${exec_prefix}/include\n Variable declaration, 'includedir' has value '/usr/local/include'\n line>\n line>Name: libbson static archive\n line>Description: The libbson BSON serialization library.\n line>Version: 1.18.0-pre\n line>Libs: -L${libdir} -lbson-static-1.0 /usr/lib/x86_64-linux-gnu/librt.so /usr/lib/x86_64-linux-gnu/libm.so -pthread\n line>Cflags: -I${includedir}/libbson-1.0 -DBSON_STATIC\nPath 
position of 'libbson-static-1.0' is 2\nAdding 'libbson-static-1.0' to list of known packages\n pre-remove: libmongocrypt libbson-static-1.0\n post-remove: libmongocrypt libbson-static-1.0\nadding CFLAGS_OTHER string \"-DBSON_STATIC \"\n pre-remove: libmongocrypt libbson-static-1.0\n post-remove: libmongocrypt libbson-static-1.0\n original: libmongocrypt libbson-static-1.0\n sorted: libmongocrypt libbson-static-1.0\n removing duplicate \"-I/usr/local/include/libbson-1.0\"\nadding CFLAGS_I string \"-I/usr/local/include/mongocrypt -I/usr/local/include/libbson-1.0 \"\n pre-remove: libmongocrypt libbson-static-1.0\n post-remove: libmongocrypt libbson-static-1.0\n original: libmongocrypt libbson-static-1.0\n sorted: libmongocrypt libbson-static-1.0\n removing duplicate \"-L/usr/local/lib\"\n removing duplicate \"-L/usr/local/lib\"\nadding LIBS_L string \"-L/usr/local/lib \"\n pre-remove: libmongocrypt libbson-static-1.0\n post-remove: libmongocrypt libbson-static-1.0\n removing duplicate \"-lbson-static-1.0\"\n removing duplicate \"/usr/lib/x86_64-linux-gnu/librt.so\"\n removing duplicate \"/usr/lib/x86_64-linux-gnu/libm.so\"\n removing duplicate \"-pthread\"\nadding LIBS_OTHER | LIBS_l string \"-lmongocrypt -lbson-static-1.0 /usr/lib/x86_64-linux-gnu/librt.so /usr/lib/x86_64-linux-gnu/libm.so -pthread \"\nreturning flags string \"-DBSON_STATIC -I/usr/local/include/mongocrypt -I/usr/local/include/libbson-1.0 -L/usr/local/lib -lmongocrypt -lbson-static-1.0 /usr/lib/x86_64-linux-gnu/librt.so /usr/lib/x86_64-linux-gnu/libm.so -pthread\"\n-DBSON_STATIC -I/usr/local/include/mongocrypt -I/usr/local/include/libbson-1.0 -L/usr/local/lib -lmongocrypt -lbson-static-1.0 /usr/lib/x86_64-linux-gnu/librt.so /usr/lib/x86_64-linux-gnu/libm.so -pthread\n", "text": "Please find the details below:", "username": "Viveka_BC" }, { "code": "/usr/local/usr/local", "text": "@Viveka_BC, as I suspected, you have an installation of the C driver and libmongocrypt under /usr/local. The pkg-config files are being found there (at lines 7-11 of the output you provided). You should either remove the C driver and libmongocrypt installations located under /usr/local or remove the distribution packages you installed. It is best to only have one or the other present on your system in order to avoid issues like what you have encountered.", "username": "Roberto_Sanchez" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
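For anyone hitting the same build failure, here is a condensed checklist based on the resolution above; the /usr/local paths assume the default install location from the libmongocrypt build instructions, so adjust them if yours lives elsewhere, and keep only one installation on the machine.

# 1. Confirm pkg-config resolves exactly one libmongocrypt installation:
pkg-config --cflags --libs libmongocrypt

# 2. If the .pc files are in a non-default location, point pkg-config at them:
export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:$PKG_CONFIG_PATH

# 3. Make sure the runtime linker can find the shared library:
export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH

# 4. Build with the cse tag so the Go driver compiles its libmongocrypt bindings:
go build -tags cse ./...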
MongoDB Client-Side Field Level Encryption (CSFLE): fatal error: mongocrypt.h not found
2020-12-14T09:28:17.738Z
MongoDB Client-Side Field Level Encryption (CSFLE): fatal error: mongocrypt.h not found
6,960
null
[ "atlas-device-sync" ]
[ { "code": "BadChangeset Error: failed to validate upload changesets: failed to validate Set instruction: cannot have index (0) greater than or equal to prior-size (0) (ProtocolErrorCode=212)", "text": "After migrating legacy Realm Sync data to Atlas, I keep running into this error in my MongoDB Realm logs:BadChangeset Error: failed to validate upload changesets: failed to validate Set instruction: cannot have index (0) greater than or equal to prior-size (0) (ProtocolErrorCode=212)I get this error after doing the following:Migrate data to MongoDB Realm using the steps in this guide: https://docs.realm.io/realm-legacy-migration-guide/.Login user and read data from MongoDB Realm.Perform updates on the data that was migrated over.I’ve searched around, but haven’t seen any discussion around this particular error message. I’m not sure if it has to do with a schema mismatch between the legacy JSON I inserted into Atlas and my client-side Realm Objects or if it’s something else.Additive and destructive changes don’t seem to be an issue, but updates throw this error.I’m using RealmSwift 10.4.0 and RealmJS 6.1.4 to migrate over legacy data.Any help is greatly appreciated!", "username": "Obi_Anachebe" }, { "code": "", "text": "After updating to RealmSwift 10.5.0, I don’t seem to run into this issue anymore", "username": "Obi_Anachebe" }, { "code": "", "text": "Yes RealmSwift 10.4+ has a fix for that error", "username": "Ian_Ward" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
BadChangeset Error after migrating from legacy Realm Sync
2020-12-16T07:31:46.168Z
BadChangeset Error after migrating from legacy Realm Sync
1,473
null
[ "queries" ]
[ { "code": "", "text": "Hello Everyone,I’m trying to compare the data between two similar collections,Example:\nCollection1 : Employee\nEmp_id, First_Name,Last_Name,Phone,Address,e-mailCollection2 : Employee_Test\nEmp_id, First_Name,Last_Name,Phone,Address,e-mailI would need to write the mongo query to compare the data between these two to findAny suggestions or guidance would be highly appreciated.Thanks in advance,\nBalaji", "username": "Balaji_L" }, { "code": "", "text": "My first step would be to use $lookup on Emp_id. Then using $cond to compared the fields.", "username": "steevej" } ]
Compare data between collections
2020-12-17T12:39:24.625Z
Compare data between collections
4,184
null
[ "indexes" ]
[ { "code": "", "text": "Hi,\nI am trying to implement Mongodb TTL feature into my project. Here I have one scenario, i need to update ttl field date value later.\nIn this case, I insert record with “expiryAt = null”, but later i will update date on “expiryAt” variable for expire this record by updating.\nAbove case looks not working.\nPlease assist me on this, what should i do for this above case?", "username": "Visva_Ram" }, { "code": "expireAfterSecondsdb.runCommand({\n collMod: \"user_log\",\n index: {\n keyPattern: { lastAccess: 1 },\n expireAfterSeconds: 3600\n }\n})\n", "text": "Hi @Visva_Ram,Welcome to MongoDB community!Its possible to do that via collMod command:https://docs.mongodb.com/manual/reference/command/collMod/#change-expiration-value-for-indexesIn this command you will need to specify the collection, the index and the new expireAfterSeconds expression.Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Thanks Much Pavel.In my case expireAfterSeconds is always zero in index. I am trying to control this by having date attribute from the document.", "username": "Visva_Ram" }, { "code": "expireAfterSecondsrs0:PRIMARY> db.deleteme.createIndex({expireAt:1},{expireAfterSeconds:0})\n{\n\t\"createdCollectionAutomatically\" : false,\n\t\"numIndexesBefore\" : 1,\n\t\"numIndexesAfter\" : 2,\n\t\"commitQuorum\" : \"votingMembers\",\n\t\"ok\" : 1,\n\t\"$clusterTime\" : {\n\t\t\"clusterTime\" : Timestamp(1608036401, 1),\n\t\t\"signature\" : {\n\t\t\t\"hash\" : BinData(0,\"AAAAAAAAAAAAAAAAAAAAAAAAAAA=\"),\n\t\t\t\"keyId\" : NumberLong(0)\n\t\t}\n\t},\n\t\"operationTime\" : Timestamp(1608036401, 1)\n}\nrs0:PRIMARY> ISODate()\nISODate(\"2020-12-15T12:53:50.780Z\")\nrs0:PRIMARY> db.deleteme.insertMany(\n [ \n {_id:1,expireAt:ISODate(\"2020-12-15T12:59\")}, \n {_id:2,expireAt:null},\n {_id:3,expireAt:ISODate(\"2020-12-15T12:58\")}\n ]\n)\n{ \"acknowledged\" : true, \"insertedIds\" : [ 1, 2, 3 ] }\nrs0:PRIMARY> db.deleteme.find()\n{ \"_id\" : 1, \"expireAt\" : ISODate(\"2020-12-15T12:59:00Z\") }\n{ \"_id\" : 2, \"expireAt\" : null }\n{ \"_id\" : 3, \"expireAt\" : ISODate(\"2020-12-15T12:58:00Z\") }\n\nrs0:PRIMARY> ISODate()\nISODate(\"2020-12-15T12:59:54.106Z\")\nrs0:PRIMARY> db.deleteme.find()\n{ \"_id\" : 2, \"expireAt\" : null }\nrs0:PRIMARY> db.deleteme.update({_id:2},{expireAt:ISODate('2020-12-15T13:03')})\nWriteResult({ \"nMatched\" : 1, \"nUpserted\" : 0, \"nModified\" : 1 })\nrs0:PRIMARY> db.deleteme.find()\n{ \"_id\" : 2, \"expireAt\" : ISODate(\"2020-12-15T13:03:00Z\") }\nrs0:PRIMARY> ISODate()\nISODate(\"2020-12-15T13:03:44.895Z\")\nrs0:PRIMARY> db.deleteme.find()\n", "text": "@Visva_RamI think I understand what you are attempting to achieve. As you may already know the expireAfterSeconds should be 0 to use the timestamp as the expiry time.I have just recreated what I think you are doing below and it works fine. The field you are using for TTL must be a date type or array of date values.There is no real point in using a field with null, I would recommend just not inserting with that field, and add it when you do the update.Wait…Wait…Profit!", "username": "chris" }, { "code": "Nested Object by extending abstract:", "text": "Thanks Much ChrisYes, It is working fine for me.One problem i am facing with indexing field for TTL from Java which we are using Spring Data Mongodb Framework.In that below case for nested object spring data mongodb creating multiple TTL index for Class(ClassA). 
So finally multiple TTL index there in mongodb.Nested Object by extending abstract:@Document\n@CompoundIndexes({\n@CompoundIndex(name = “var1_IX”, def = “{‘classBVar.var1’: 1}”),\n}\npublic class ClassA extends MyDBObjectAbstract {\nprivate ClassB classBVar = new ClassB() ;\n}Noraml Object by extending abstract :@Document\n@CompoundIndexes({\n@CompoundIndex(name = “var1_InnerIX”, def = “{‘var1’: 1}”),\n}\npublic class ClassB extends MyDBObjectAbstract {\nprivate String var1 =“Welcome”\n}Abstract class with TTL Index Annotation :public abstract class FMObjectAbstract {\n@Indexed(name = “expireRecordAt_index”, expireAfterSeconds = 0)\nprivate Date expireRecordAt=null;\n}Multiple Index created for Class A for TTL:{\n“v” : 2,\n“key” : {\n“classBVar.expireRecordAt” : 1\n},\n“name” : “classBVar.expireRecordAt_index”,\n“ns” : “MYDB.ClassA”,\n“expireAfterSeconds” : NumberLong(0)\n},\n{\n“v” : 2,\n“key” : {\n“expireRecordAt” : 1\n},\n“name” : “expireRecordAt_index”,\n“ns” : “MYDB.ClassA”,\n“expireAfterSeconds” : NumberLong(0)\n}", "username": "Visva_Ram" } ]
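One possible workaround for the duplicated TTL index shown above (not something confirmed in the thread): since the extra index comes from the @Indexed field also being mapped on the embedded ClassB path, the unwanted nested index can simply be dropped from the shell, or automatic index creation can be disabled so indexes are managed explicitly. A sketch using the names from the getIndexes output above, run against the MYDB database:

// Drop the TTL index that was generated for the embedded field path,
// keeping the top-level expireRecordAt_index.
db.ClassA.dropIndex("classBVar.expireRecordAt_index")

// Confirm only the intended TTL index remains:
db.ClassA.getIndexes()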
Update TTL Indexed Field expiry date later
2020-12-11T08:51:49.814Z
Update TTL Indexed Field expiry date later
8,182
null
[ "containers", "installation" ]
[ { "code": "FROM ubuntu:20.04\n\nRUN apt update\nRUN apt-get install -y wget\nRUN apt-get install -y software-properties-common\n\nRUN wget -qO - https://www.mongodb.org/static/pgp/server-4.2.asc | apt-key add -\nRUN echo \"deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu bionic/mongodb-org/4.2 multiverse\" | tee /etc/apt/sources.list.d/mongodb-org-4.2.list\nRUN apt update\nRUN apt-get install -y mongodb-org\n\nEXPOSE 27017\n\nCMD /usr/bin/mongod \n(...)\nSetting up mongodb-org-server (4.2.11) ...\nAdding system user `mongodb' (UID 105) ...\nAdding new user `mongodb' (UID 105) with group `nogroup' ...\nNot creating home directory `/home/mongodb'.\nAdding group `mongodb' (GID 106) ...\nDone.\nAdding user `mongodb' to group `mongodb' ...\nAdding user mongodb to group mongodb\nDone.\nSystem has not been booted with systemd as init system (PID 1). Can't operate.\nFailed to connect to bus: Host is down\ndpkg: error processing package mongodb-org-server (--configure):\n installed mongodb-org-server package post-installation script subprocess returned error exit status 1\nSetting up mongodb-org-shell (4.2.11) ...\nSetting up mongodb-org-mongos (4.2.11) ...\ndpkg: dependency problems prevent configuration of mongodb-org:\n mongodb-org depends on mongodb-org-server; however:\n Package mongodb-org-server is not configured yet.\n\ndpkg: error processing package mongodb-org (--configure):\n dependency problems - leaving unconfigured\nProcessing triggers for libc-bin (2.31-0ubuntu9.1) ...\nErrors were encountered while processing:\n mongodb-org-server\n mongodb-org\nE: Sub-process /usr/bin/dpkg returned an error code (1)\nThe command '/bin/sh -c apt-get install -y mongodb-org' returned a non-zero code: 100\nWarning: apt-key output should not be parsed (stdout is not a terminal)\n\nWARNING: apt does not have a stable CLI interface. Use with caution in scripts.\n\ndebconf: delaying package configuration, since apt-utils is not installed\nRUN apt-get install -y mongodb-org-server\nSetting up mongodb-org-server (4.2.11) ...\nAdding system user `mongodb' (UID 105) ...\nAdding new user `mongodb' (UID 105) with group `nogroup' ...\nNot creating home directory `/home/mongodb'.\nAdding group `mongodb' (GID 106) ...\nDone.\nAdding user `mongodb' to group `mongodb' ...\nAdding user mongodb to group mongodb\nDone.\nSystem has not been booted with systemd as init system (PID 1). 
Can't operate.\nFailed to connect to bus: Host is down\ndpkg: error processing package mongodb-org-server (--configure):\n installed mongodb-org-server package post-installation script subprocess returned error exit status 1\nProcessing triggers for libc-bin (2.31-0ubuntu9.1) ...\nErrors were encountered while processing:\n mongodb-org-server\nE: Sub-process /usr/bin/dpkg returned an error code (1)\nThe command '/bin/sh -c apt-get install -y mongodb-org-server' returned a non-zero code: 100\n", "text": "So, I have a Dockerfile for a container containing MongoDB:The problem is that when trying to build, I get the errorI recently had a somewhat similar problem asked in Problems installing MongoDB in a container regarding something similar with Singularity.\nIn there, the solution was to upgrade to Ubuntu 20.04, however in this case this is already done.Note that I also get three other errors/warnings along the way:But as far as I read those are harmless, including the last one (see docker - Avoid apt-utils warning in Debian Dockerfile - Server Fault).When I separately install the server part itself usingI get", "username": "Ksortakh_Kraxthar" }, { "code": "", "text": "Hi @Ksortakh_KraxtharIn the main you have already been answered on this topic Connection error when running MongoDB inside a container: connection refused - #2 by chrisYou seem intent on rolling your own, look at the dockerfile linked in that post for inspiration.Your next issue beyond that is one of platform compatibility. Look here for mongodb 4.2", "username": "chris" }, { "code": "Step 9/22 : RUN set -ex; \texport GNUPGHOME=\"$(mktemp -d)\"; \tfor key in $GPG_KEYS; do \t\tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \"$key\"; \tdone; \tgpg --batch --export $GPG_KEYS > /etc/apt/trusted.gpg.d/mongodb.gpg; \tcommand -v gpgconf && gpgconf --kill all || :; \trm -r \"$GNUPGHOME\"; \tapt-key list\n ---> Running in 47432bab1570\n+ mktemp -d\n+ export GNUPGHOME=/tmp/tmp.QuN2oscQ9m\n+ gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys 20691EEC35216C63CAF66CE1656408E390CFB1F5\ngpg: keybox '/tmp/tmp.QuN2oscQ9m/pubring.kbx' created\ngpg: keyserver receive failed: Cannot assign requested address\nThe command '/bin/sh -c set -ex; \texport GNUPGHOME=\"$(mktemp -d)\"; \tfor key in $GPG_KEYS; do \t\tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \"$key\"; \tdone; \tgpg --batch --export $GPG_KEYS > /etc/apt/trusted.gpg.d/mongodb.gpg; \tcommand -v gpgconf && gpgconf --kill all || :; \trm -r \"$GNUPGHOME\"; \tapt-key list' returned a non-zero code: 2\n", "text": "Main reason I use my own is that it’s kind of a MCVE. I tend to switch working environment a lot. 
But using the code in there is fine with me.So I took the full code mongo/Dockerfile at master · docker-library/mongo · GitHub and tried to get it to work (without any alteration), sadly what I get is", "username": "Ksortakh_Kraxthar" }, { "code": "gpg: keyserver receive failed: Cannot assign requested address--- Dockerfile.orig\t2020-12-15 09:49:49.793284295 -0500\n+++ Dockerfile\t2020-12-15 09:47:11.308997656 -0500\n@@ -62,6 +62,8 @@\n ENV GPG_KEYS 20691EEC35216C63CAF66CE1656408E390CFB1F5\n RUN set -ex; \\\n \texport GNUPGHOME=\"$(mktemp -d)\"; \\\n+ mkdir ~/.gnupg; \\\n+ echo \"disable-ipv6\" >> ~/.gnupg/dirmngr.conf; \\\n \tfor key in $GPG_KEYS; do \\\n \t\tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \"$key\"; \\\n \tdone; \\\n@@ -109,3 +111,4 @@\n \n EXPOSE 27017\n CMD [\"mongod\"]\n", "text": "Main reason I use my own is that it’s kind of a MCVE. I tend to switch working environment a lot. But using the code in there is fine with me.I still don’t understand how the official image does not suffice, even as a base. gpg: keyserver receive failed: Cannot assign requested addressThat is a gpg ipv6 issue.", "username": "chris" }, { "code": "Step 9/22 : RUN set -ex; \texport GNUPGHOME=\"$(mktemp -d)\"; \tmkdir ~/.gnupg; echo \"disable-ipv6\" >> ~/.gnupg/dirmngr.conf; \tfor key in $GPG_KEYS; do \t\tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \"$key\"; \tdone; \tgpg --batch --export $GPG_KEYS > /etc/apt/trusted.gpg.d/mongodb.gpg; \tcommand -v gpgconf && gpgconf --kill all || :; \trm -r \"$GNUPGHOME\"; \tapt-key list\n ---> Running in f2805787a89a\n+ mktemp -d\n+ export GNUPGHOME=/tmp/tmp.OC79cNnf7R\n+ mkdir /root/.gnupg\n+ echo disable-ipv6\n+ gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys 20691EEC35216C63CAF66CE1656408E390CFB1F5\ngpg: keybox '/tmp/tmp.OC79cNnf7R/pubring.kbx' created\ngpg: keyserver receive failed: Cannot assign requested address\nThe command '/bin/sh -c set -ex; \texport GNUPGHOME=\"$(mktemp -d)\"; \tmkdir ~/.gnupg; echo \"disable-ipv6\" >> ~/.gnupg/dirmngr.conf; \tfor key in $GPG_KEYS; do \t\tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \"$key\"; \tdone; \tgpg --batch --export $GPG_KEYS > /etc/apt/trusted.gpg.d/mongodb.gpg; \tcommand -v gpgconf && gpgconf --kill all || :; \trm -r \"$GNUPGHOME\"; \tapt-key list' returned a non-zero code: 2\n", "text": "It suffices, just saying why I didn’t do it at first.Added your lines, still the same error:", "username": "Ksortakh_Kraxthar" }, { "code": "RUN wget -qO - https://www.mongodb.org/static/pgp/server-4.4.asc | apt-key add -", "text": "I’m also reading that gpg keyservers are flaky, yes they are. So some retries might be needed.Or do the “bad” thing and do what you had for your previous Dockerfile:\nRUN wget -qO - https://www.mongodb.org/static/pgp/server-4.4.asc | apt-key add -", "username": "chris" }, { "code": "bitnami/mongodb:latest", "text": "Couldn’t get it to work in any configuration.\nTested out some other official dockerfiles from the github, couldn’t build any of them.\nInstead I switched to using bitnami/mongodb:latest directly, that works.", "username": "Ksortakh_Kraxthar" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
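If the goal is simply a containerized MongoDB rather than a hand-built image, basing the Dockerfile on the published image sidesteps both the systemd post-install step and the gpg keyserver fetch that failed above. A minimal sketch; the COPY lines are optional placeholders.

FROM mongo:4.2

# Optional: bake in a config file or init scripts if needed.
# COPY mongod.conf /etc/mongod.conf
# COPY init.js /docker-entrypoint-initdb.d/

# Build and run:
#   docker build -t my-mongo .
#   docker run -d -p 27017:27017 my-mongo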
Problems installing MongoDB in a docker container
2020-12-15T13:06:33.771Z
Problems installing MongoDB in a docker container
16,421
null
[ "replication", "performance" ]
[ { "code": "Thread 1 (process 3402):\n#0 0x0000558dd3fc1758 in __tree_walk_internal ()\n#1 0x0000558dd3ee2ff7 in __wt_evict_thread_run ()\n#2 0x0000558dd3f45e89 in __thread_run ()\n#3 0x00007f6436de1ea5 in start_thread () from /lib64/libpthread.so.0\n#4 0x00007f6436b0a8dd in clone () from /lib64/libc.so.6\n 31.23% mongod [.] __tree_walk_internal\n 4.62% mongod [.] __wt_row_search\n 3.34% mongod [.] snappy::internal::CompressFragment\n 2.89% libc-2.17.so [.] __memcpy_ssse3_back\n 2.81% mongod [.] tc_deletearray_aligned_nothrow\n 2.10% mongod [.] __wt_page_in_func\n 1.87% mongod [.] snappy::InternalUncompress<snappy::SnappyArrayWriter>\n 1.58% mongod [.] __wt_rec_row_leaf\n 1.28% mongod [.] __wt_row_leaf_key_work\n 1.20% mongod [.] __wt_search_insert\n 1.15% mongod [.] __wt_evict_thread_run\n 1.01% mongod [.] __wt_row_modify\n 1.01% libc-2.17.so [.] __strlen_sse2_pminub\n", "text": "Hi,We using Mongo 4.4.2. The hidden replica has lag (10-180 minutes) during working hours.One linux process at this time is always 100% busy on one processor core with. The stack trace of this process:The perf top showsThere is correlation: the metric “eviction empty score” is more than 95 if there is a lag.What could be the performance issue?", "username": "Konstantin" }, { "code": "", "text": "Hey Konstantin,There are many factors that could cause a replication lag.Can you provide more info about the topology of your replica set?\nWhat is the RTT between the hidden member and its sync source?\nDoes the hidden member have the same CPU/MEM/Network/Disk config as others in the rs?All the best,– Rodrigo", "username": "logwriter" }, { "code": "rs.status()rs.conf()", "text": "Hi @Konstantin welcome to the community!Along with what @logwriter have asked, I would also ask for a couple of additional info:Best regards,\nKevin", "username": "kevinadi" }, { "code": "", "text": "Hi Rodrigo,Can you provide more info about the topology of your replica set?I will provide the configuration in the next post.What is the RTT between the hidden member and its sync source?How can I measure RTT?Does the hidden member have the same CPU/MEM/Network/Disk config as others in the rs?Yes.Best regards,\nKonstantin", "username": "Konstantin" }, { "code": "{\n \"_id\" : \"testing\",\n \"version\" : 86588,\n \"term\" : 155,\n \"protocolVersion\" : NumberLong(1),\n \"writeConcernMajorityJournalDefault\" : true,\n \"members\" : [\n {\n \"_id\" : 5,\n \"host\" : \"mongo04:27017\",\n \"arbiterOnly\" : false,\n \"buildIndexes\" : true,\n \"hidden\" : true,\n \"priority\" : 0,\n \"tags\" : {\n\n },\n \"slaveDelay\" : NumberLong(0),\n \"votes\" : 0\n },\n {\n \"_id\" : 6,\n \"host\" : \"mongo15:27017\",\n \"arbiterOnly\" : false,\n \"buildIndexes\" : true,\n \"hidden\" : false,\n \"priority\" : 10,\n \"tags\" : {\n\n },\n \"slaveDelay\" : NumberLong(0),\n \"votes\" : 1\n },\n {\n \"_id\" : 7,\n \"host\" : \"mongo16:27017\",\n \"arbiterOnly\" : false,\n \"buildIndexes\" : true,\n \"hidden\" : false,\n \"priority\" : 15,\n \"tags\" : {\n\n },\n \"slaveDelay\" : NumberLong(0),\n \"votes\" : 1\n },\n {\n \"_id\" : 8,\n \"host\" : \"mongo14:27017\",\n \"arbiterOnly\" : false,\n \"buildIndexes\" : true,\n \"hidden\" : false,\n \"priority\" : 3,\n \"tags\" : {\n\n },\n \"slaveDelay\" : NumberLong(0),\n \"votes\" : 1\n },\n {\n \"_id\" : 9,\n \"host\" : \"mongo25:27017\",\n \"arbiterOnly\" : false,\n \"buildIndexes\" : true,\n \"hidden\" : false,\n \"priority\" : 5,\n \"tags\" : {\n\n },\n \"slaveDelay\" : NumberLong(0),\n \"votes\" : 1\n 
}\n],\n\"settings\" : {\n \"chainingAllowed\" : true,\n \"heartbeatIntervalMillis\" : 2000,\n \"heartbeatTimeoutSecs\" : 10,\n \"electionTimeoutMillis\" : 10000,\n \"catchUpTimeoutMillis\" : 60000,\n \"catchUpTakeoverDelayMillis\" : 30000,\n \"getLastErrorModes\" : {\n\n },\n \"getLastErrorDefaults\" : {\n \"w\" : 1,\n \"wtimeout\" : 0\n },\n \"replicaSetId\" : ObjectId(\"5d431b1658c69cd9030a7267\")\n}\n{\n \"set\" : \"testing\",\n \"date\" : ISODate(\"2020-12-11T10:19:39.417Z\"),\n \"myState\" : 2,\n \"term\" : NumberLong(155),\n \"syncSourceHost\" : \"mongo15:27017\",\n \"syncSourceId\" : 6,\n \"heartbeatIntervalMillis\" : NumberLong(2000),\n \"majorityVoteCount\" : 3,\n \"writeMajorityCount\" : 3,\n \"votingMembersCount\" : 4,\n \"writableVotingMembersCount\" : 4,\n \"optimes\" : {\n \"lastCommittedOpTime\" : {\n \"ts\" : Timestamp(1607681977->2020-12-11 13:19:37, 2547),\n \"t\" : NumberLong(155)\n },\n \"lastCommittedWallTime\" : ISODate(\"2020-12-11T10:19:37.601Z\"),\n \"readConcernMajorityOpTime\" : {\n \"ts\" : Timestamp(1607674381->2020-12-11 11:13:01, 4262),\n \"t\" : NumberLong(155)\n },\n \"readConcernMajorityWallTime\" : ISODate(\"2020-12-11T08:13:01.529Z\"),\n \"appliedOpTime\" : {\n \"ts\" : Timestamp(1607674381->2020-12-11 11:13:01, 4262),\n \"t\" : NumberLong(155)\n },\n \"durableOpTime\" : {\n \"ts\" : Timestamp(1607674381, 4262),\n \"t\" : NumberLong(155)\n },\n \"lastAppliedWallTime\" : ISODate(\"2020-12-11T08:13:01.529Z\"),\n \"lastDurableWallTime\" : ISODate(\"2020-12-11T08:13:01.529Z\")\n },\n \"lastStableRecoveryTimestamp\" : Timestamp(1607674103, 12186),\n \"members\" : [\n {\n \"_id\" : 5,\n \"name\" : \"mongo04:27017\",\n \"health\" : 1,\n \"state\" : 2,\n \"stateStr\" : \"SECONDARY\",\n \"uptime\" : 61502,\n \"optime\" : {\n \"ts\" : Timestamp(1607674381, 4262),\n \"t\" : NumberLong(155)\n },\n \"optimeDate\" : ISODate(\"2020-12-11T08:13:01Z\"),\n \"syncSourceHost\" : \"mongo15:27017\",\n \"syncSourceId\" : 6,\n \"infoMessage\" : \"\",\n \"configVersion\" : 86588,\n \"configTerm\" : 155,\n \"self\" : true,\n \"lastHeartbeatMessage\" : \"\"\n },\n {\n \"_id\" : 6,\n \"name\" : \"mongo15:27017\",\n \"health\" : 1,\n \"state\" : 2,\n \"stateStr\" : \"SECONDARY\",\n \"uptime\" : 61483,\n \"optime\" : {\n \"ts\" : Timestamp(1607681977, 3467),\n \"t\" : NumberLong(155)\n },\n \"optimeDurable\" : {\n \"ts\" : Timestamp(1607681977, 3467),\n \"t\" : NumberLong(155)\n },\n \"optimeDate\" : ISODate(\"2020-12-11T10:19:37Z\"),\n \"optimeDurableDate\" : ISODate(\"2020-12-11T10:19:37Z\"),\n \"lastHeartbeat\" : ISODate(\"2020-12-11T10:19:37.926Z\"),\n \"lastHeartbeatRecv\" : ISODate(\"2020-12-11T10:19:37.466Z\"),\n \"pingMs\" : NumberLong(0),\n \"lastHeartbeatMessage\" : \"\",\n \"syncSourceHost\" : \"mongo16:27017\",\n \"syncSourceId\" : 7,\n \"infoMessage\" : \"\",\n \"configVersion\" : 86588,\n \"configTerm\" : 155\n },\n {\n \"_id\" : 7,\n \"name\" : \"mongo16:27017\",\n \"health\" : 1,\n \"state\" : 1,\n \"stateStr\" : \"PRIMARY\",\n \"uptime\" : 61483,\n \"optime\" : {\n \"ts\" : Timestamp(1607681977, 2830),\n \"t\" : NumberLong(155)\n },\n \"optimeDurable\" : {\n \"ts\" : Timestamp(1607681977, 2448),\n \"t\" : NumberLong(155)\n },\n \"optimeDate\" : ISODate(\"2020-12-11T10:19:37Z\"),\n \"optimeDurableDate\" : ISODate(\"2020-12-11T10:19:37Z\"),\n \"lastHeartbeat\" : ISODate(\"2020-12-11T10:19:37.657Z\"),\n \"lastHeartbeatRecv\" : ISODate(\"2020-12-11T10:19:37.762Z\"),\n \"pingMs\" : NumberLong(0),\n \"lastHeartbeatMessage\" : \"\",\n \"syncSourceHost\" : \"\",\n 
\"syncSourceId\" : -1,\n \"infoMessage\" : \"\",\n \"electionTime\" : Timestamp(1607221044, 12),\n \"electionDate\" : ISODate(\"2020-12-06T02:17:24Z\"),\n \"configVersion\" : 86588,\n \"configTerm\" : 155\n },\n {\n \"_id\" : 8,\n \"name\" : \"mongo14:27017\",\n \"health\" : 1,\n \"state\" : 2,\n \"stateStr\" : \"SECONDARY\",\n \"uptime\" : 61483,\n \"optime\" : {\n \"ts\" : Timestamp(1607678728, 4808),\n \"t\" : NumberLong(155)\n },\n \"optimeDurable\" : {\n \"ts\" : Timestamp(1607678728, 4808),\n \"t\" : NumberLong(155)\n },\n \"optimeDate\" : ISODate(\"2020-12-11T09:25:28Z\"),\n \"optimeDurableDate\" : ISODate(\"2020-12-11T09:25:28Z\"),\n \"lastHeartbeat\" : ISODate(\"2020-12-11T10:19:37.925Z\"),\n \"lastHeartbeatRecv\" : ISODate(\"2020-12-11T10:19:37.760Z\"),\n \"pingMs\" : NumberLong(0),\n \"lastHeartbeatMessage\" : \"\",\n \"syncSourceHost\" : \"mongo16:27017\",\n \"syncSourceId\" : 7,\n \"infoMessage\" : \"\",\n \"configVersion\" : 86588,\n \"configTerm\" : 155\n },\n {\n \"_id\" : 9,\n \"name\" : \"mongo25:27017\",\n \"health\" : 1,\n \"state\" : 2,\n \"stateStr\" : \"SECONDARY\",\n \"uptime\" : 61483,\n \"optime\" : {\n \"ts\" : Timestamp(1607681977, 1618),\n \"t\" : NumberLong(155)\n },\n \"optimeDurable\" : {\n \"ts\" : Timestamp(1607681977, 1618),\n \"t\" : NumberLong(155)\n },\n \"optimeDate\" : ISODate(\"2020-12-11T10:19:37Z\"),\n \"optimeDurableDate\" : ISODate(\"2020-12-11T10:19:37Z\"),\n \"lastHeartbeat\" : ISODate(\"2020-12-11T10:19:37.657Z\"),\n \"lastHeartbeatRecv\" : ISODate(\"2020-12-11T10:19:37.432Z\"),\n \"pingMs\" : NumberLong(0),\n \"lastHeartbeatMessage\" : \"\",\n \"syncSourceHost\" : \"mongo16:27017\",\n \"syncSourceId\" : 7,\n \"infoMessage\" : \"\",\n \"configVersion\" : 86588,\n \"configTerm\" : 155\n }\n ],\n \"ok\" : 1,\n \"$clusterTime\" : {\n \"clusterTime\" : Timestamp(1607681978, 5249),\n \"signature\" : {\n \"hash\" : BinData(0,\"AAAAAAAAAAAAAAAAAAAAAAAAAAA=\"),\n \"keyId\" : NumberLong(0)\n }\n },\n \"operationTime\" : Timestamp(1607674381->2020-12-11 11:13:01, 4262)\n}\n", "text": "Hi @kevinadi,", "username": "Konstantin" }, { "code": "PING 172.16.13.9 (172.16.13.9): 56 data bytes\n64 bytes from 172.16.13.9: icmp_seq=0 ttl=64 time=0.882 ms\n64 bytes from 172.16.13.9: icmp_seq=1 ttl=64 time=0.384 ms\n64 bytes from 172.16.13.9: icmp_seq=2 ttl=64 time=0.570 ms\n64 bytes from 172.16.13.9: icmp_seq=3 ttl=64 time=0.370 ms\n64 bytes from 172.16.13.9: icmp_seq=4 ttl=64 time=0.502 ms\n^C\n--- 172.16.13.9 ping statistics ---\n5 packets transmitted, 5 packets received, 0.0% packet loss\nround-trip min/avg/max/stddev = 0.370/0.542/0.882/0.186 ms\n", "text": "You can measure the network RTT by connecting to your hidden member (mongo04) and sending a ping to its sync source (mongo15).Example:In the example above, the average RTT is 0.542 ms.All the best,– Rodrigo", "username": "logwriter" }, { "code": "8 packets transmitted, 8 received, 0% packet loss, time 7005ms\nrtt min/avg/max/mdev = 0.085/0.122/0.182/0.039 ms\n", "text": "ping mongo15", "username": "Konstantin" }, { "code": "", "text": "Your RTT is awesome . 
With network out of the way, we have to investigate other parts of the stack.Can you share the output of:All the best,– Rodrigo", "username": "logwriter" }, { "code": "insert query update delete getmore command dirty used flushes vsize res qrw arw net_in net_out conn set repl time\n *1071 *0 *11517 *12 0 6|0 20.6% 79.0% 0 237G 213G 0|16 1|0 2.21k 79.4k 10 testing SEC Dec 16 16:38:19.899\n *761 *0 *6264 *14 0 3|0 20.6% 78.9% 0 237G 213G 0|16 1|0 996b 35.7k 10 testing SEC Dec 16 16:38:21.188\n *970 *0 *13334 *33 0 6|0 20.6% 78.9% 0 237G 213G 0|16 1|0 2.13k 76.4k 10 testing SEC Dec 16 16:38:21.791\n *897 *0 *5141 *24 0 3|0 20.6% 78.8% 0 237G 213G 0|16 1|0 998b 35.8k 10 testing SEC Dec 16 16:38:23.078\n *956 *0 *9628 *19 0 5|0 20.6% 78.8% 0 237G 213G 0|15 1|0 1.67k 60.0k 10 testing SEC Dec 16 16:38:23.846\n *540 *0 *3242 *16 0 2|0 20.6% 78.7% 0 237G 213G 0|16 1|0 687b 24.6k 10 testing SEC Dec 16 16:38:25.715\n *529 *0 *3749 *9 0 3|0 20.6% 78.6% 0 237G 213G 0|15 1|0 1.15k 41.8k 10 testing SEC Dec 16 16:38:26.832\n *1074 *0 *4582 *18 0 5|0 20.6% 78.6% 0 237G 213G 0|16 1|0 1.38k 48.9k 10 testing SEC Dec 16 16:38:27.805\n *481 *0 *3802 *9 0 3|0 20.6% 78.5% 0 237G 213G 0|15 1|0 1.01k 36.2k 10 testing SEC Dec 16 16:38:29.078\n *405 *0 *3827 *13 0 5|0 20.6% 78.5% 0 237G 213G 0|16 1|0 1.66k 38.4k 10 testing SEC Dec 16 16:38:30.362\n", "text": "I noticed that if there is a lag, then the “qrw” is equal to “0|16”.", "username": "Konstantin" }, { "code": "", "text": "qrw is the number of clients waiting in a queue to read or write to the instance. The number on the left of the “|” is the number of clients waiting to read data; the number on the right of the “|” is the number of clients waiting to write data.I’m going to look at all these data, and let you know if I find anything interesting.", "username": "logwriter" }, { "code": "", "text": "Hey @Konstantin!What is the replication lag between mongo15 and the primary during working hours?It looks like your workload is predominant queries and updates:At a glance, I noticed mongo15 (sync source for mongo04) has been busy serving queries (~98% of its total opcount), while also having to keep up with a heavy replication update workload (~79% of its opReplCount).Keep in mind, these counters are counting the number of operations since each instance has been started. That was the reason why I asked for a couple of db.serverStatus samples from each node, therefore we could calculate the difference between two positions in time. I only had one sample per server, so … cut me some slack with this ends up not making any sense. First of all, a secondary main role should be to provide data availability in the cluster. With that being said, it’s ok to let the application query from a secondary if the query workload isn’t heavy and it isn’t causing any replication lag.My guess: mongo15 is too busy serving queries and it’s causing a replication lag. Then, mongo04 is also suffering the consequences, because mongo15 is its sync source.Make sense?– Rodrigo", "username": "logwriter" }, { "code": "", "text": "Hi @logwriter,What is the replication lag between mongo15 and the primary during working hours?A few seconds.According to mongostat, the number of clients waiting to write data is maximum (qw=16) when there is the lag. Could this mean that they are waiting for reading from mongo15 and not writing to mongo04?I’ll try to do something with mongo15 and mongo04.Thank you so much for your help!", "username": "Konstantin" }, { "code": "", "text": "", "username": "Stennie_X" } ]
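As a side note for tracking the lag itself, it can be computed from the optimes in rs.status(); a sketch in legacy mongo shell syntax, runnable from any member:

// Sketch only: prints each member's lag (in seconds) behind the primary,
// using the optimeDate values reported by replSetGetStatus.
var status = rs.status();
var primary = status.members.filter(function (m) { return m.stateStr === "PRIMARY"; })[0];
if (!primary) {
  print("no primary visible right now");
} else {
  status.members.forEach(function (m) {
    var lagSeconds = (primary.optimeDate - m.optimeDate) / 1000;
    print(m.name + " (" + m.stateStr + "): " + lagSeconds + "s behind the primary");
  });
}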
Replica performance issues
2020-12-15T11:25:37.311Z
Replica performance issues
4,478
null
[ "realm-web" ]
[ { "code": "try {\n const user = await realmApp.logIn(credentials);\n console.log('signed in', user);\n} catch (e) {\n console.log(e);\n}\nError: Request failed (POST https://stitch.mongodb.com/api/client/v2.0/app/xxxx/auth/providers/local-userpass/login): invalid username/password (status 401)\n at Function.fromRequestAndResponse (bundle.dom.es.js:2696)\n at async Fetcher.fetch (bundle.dom.es.js:2864)\n at async Fetcher.fetchJSON (bundle.dom.es.js:2881)\n at async Authenticator.authenticate (bundle.dom.es.js:2592)\n at async App.logIn (bundle.dom.es.js:3073)\n at async handleSignIn (SignIn.js:37)\n", "text": "Hi guys Igot this code to login, the problem is that there is no way to know the reason why failed, I need to show info to the UI or know what type of issue I’m facing. Doc doesn’t say anything about errors.It print out:Programmatically I’m expecting an object with error_code Luke I see on the network http payload but I’m not able to retrieve it on realm-web and act to different situations like wrong password/username or email confirmation pending for example.", "username": "Juan_Jose_N_A" }, { "code": "", "text": "This is a cross post to a StackOverflow questionIt’s a good idea to post questions in one place to focus possible answers.", "username": "Jay" }, { "code": "", "text": "Yeah. I tried other community because the lack of support in stackoverflow. I don’t think it’s bad.", "username": "Juan_Jose_N_A" }, { "code": "", "text": "Not bad at all! If someone has as answer though, better to focus the attention on that answer instead of searching around for it.", "username": "Jay" }, { "code": "", "text": "@Juan_Jose_N_A This could definitely use improvement. In the web tutorial we parse the info out using a regex but we realize this shouldn’t be necessary. Instead we could throw a custom RealmAuthError that includes the error code and message as separate properties. We have added this to backlog and hope to get to it in the next quarter", "username": "Ian_Ward" }, { "code": "", "text": "I just spoke with @Ian_Ward offline and we actually do support this already in Realm Web.I’ve created a small CodeSandbox demonstrating how to get the individual properties from the error object:realm-web-error-codes by kraenhansen using react, react-dom, react-scripts, realm-web", "username": "kraenhansen" }, { "code": "", "text": "@kraenhansenIs MongoDBRealmError the ‘next generation’ of RealmError where you could compare it with the Enums to handle the error?See my ‘attempt’ at an answer to the same question on SO - if RealmError is gone then I will delete that as it’s no longer applicable.", "username": "Jay" }, { "code": "", "text": "I tested and yes it works already on web. Amazing. But it need to be documented now it is just a secret.", "username": "Juan_Jose_N_A" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to programmatically get error code?
2020-12-13T05:12:40.660Z
How to programmatically get error code?
4,209
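Realm Web surfaces the failure details on the error object mentioned above (MongoDBRealmError), so the UI can branch on them instead of parsing the message string. A hedged sketch; the exact property names (error, errorCode, statusCode) should be verified against the MongoDBRealmError definition in the realm-web version you use, and the app id is a placeholder.

import * as Realm from "realm-web";

const app = new Realm.App({ id: "<your-realm-app-id>" }); // placeholder app id

async function handleSignIn(email, password) {
  try {
    const user = await app.logIn(Realm.Credentials.emailPassword(email, password));
    console.log("signed in", user.id);
  } catch (err) {
    // Property names below follow the MongoDBRealmError shape and may differ by version.
    console.log(err.error);      // e.g. "invalid username/password"
    console.log(err.errorCode);  // e.g. "AuthError"
    console.log(err.statusCode); // e.g. 401
  }
}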
null
[ "aggregation" ]
[ { "code": "", "text": "Hello!I hope I am posting this in the right place.I am learning mongoDB, following the MongoDB Basics course.And here is a problem I have. In Atlas, Collections, Aggregation using sample_training.trips.If I try a $match with :“tripduration”: {$lte:70}then 10 documents are selected.If I try it with :“tripduration”: {$gte:61}\nthen 20 documents are selected.But if I try with :“tripduration”: {$and:[$lte:70,$gte:50]}It does not work and I see:“Stage must be a property formatted document”I wonder what I am missing. Looking at the values for tripduration (61, 70, …) some documents shoud be selected. Or the format I am using is wrong (as the message suggests), but what is wrong?", "username": "Michel_Bouchet" }, { "code": "{ $and: [ { $lte: 70 }, { $gte: 50 } ] }", "text": "Hi @Michel_Bouchet! Welcome to the community!.I believe the issue with your query is a lack of braces.You’ll want to do it this way: { $and: [ { $lte: 70 }, { $gte: 50 } ] }. Here are the docs - there’s an example in there that shows the syntax. Let me know if that doesn’t work .Cheers,\nNaomi", "username": "Naomi_Pentrel" }, { "code": "", "text": "I see. Yes you are right. I also checked the docs.", "username": "Michel_Bouchet" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Atlas, Collections, Aggregation
2020-10-26T12:33:22.717Z
Atlas, Collections, Aggregation
1,396
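For reference, the corrected filter from the thread above can also be written without an explicit $and, because two comparison operators on the same field are combined implicitly; field and collection names follow the sample_training.trips example.

// Trips lasting between 50 and 70 (inclusive):
db.trips.aggregate([
  { $match: { tripduration: { $gte: 50, $lte: 70 } } }
]);

// Equivalent explicit form:
db.trips.aggregate([
  { $match: { $and: [ { tripduration: { $lte: 70 } }, { tripduration: { $gte: 50 } } ] } }
]);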
null
[ "database-tools" ]
[ { "code": "", "text": "I have a difference between Windows10 & Linux doing the exact same operation:\nRestoring a ‘dump’ into a different database.I’m creating a ‘staging’ db from Live, so initially I create the new db using:mongorestore --host= --username --password --authenticationDatabase=admin --nsFrom=db.* --nsTo=new-db.* --db=db --gzip ./backups/2020-12-01/db --verbose** This works just fine on both platforms.For daily updates, I want to update all collections, except 1. Works fine in Windows (dev), but not Linux (production)mongorestore --host= --username --password --authenticationDatabase=admin --nsFrom=db.* --nsTo=new-db.* --db=db --gzip ./backups/2020-12-02/db --nsExclude db.settings --drop --preserveUUID ./backups/2020-12-03/db --verboseI’ve narrowed it down to the ‘–preserveUUID’.The error in Linux I get is:Failed: new-db.test: error creating collection new-db.test: applyOps: (Location40655) Invalid name new-db.test for UUID 1da7ab34-65b9-4a2a-b613-acf7ad2b5b5aI have absolutely no idea what this means, nor have I found any clues extensively searching Google and other forums.My Mongo is 4.2. I upgraded ‘mongorestore’ to 100.2.1 on Linux- no change. Am flying blind.Any ideas that I can pursue?\nThanks", "username": "Peter_van_der_Burg" }, { "code": "--preserveUUID--drop", "text": "Further to this. The documentation for mongorestore --preserveUUID says this:\nRestored collections use the UUID from the restore data instead of creating a new UUID for collections that are dropped and recreated as part of the restore.To use --preserveUUID , you must also include the --drop option.Further testing shows that the original problem (restoring the data from 1 database name to another) does not show up in mongorestore/mongodb 4.2.11 or 100.2.1/4.4.2. This is a good thing.But, the problem arises on these later versions (on Windows & Linux platforms) when I’m restoring the ‘production’ server dump into the ‘staging’ server to the database with the same name:\nmongorestore --gzip --db=db --dir=“\\db” --verbose --host= --drop --preserveUUIDIt only works if:It fails when --preserveUUID is included with --drop.It seems reasonable to me to preserve UUID’s when wanting to create an exact copy of the production server’s Live database on a separate Staging server.Thoughts anyone?\nThanks", "username": "Peter_van_der_Burg" }, { "code": "", "text": "The solution, and in hindsight, obvious, is that you can’t ‘preserveUUID’ in the same instance!UUID = “universal unique ID”- it can only exist once in an instance.The solution was to create a 2nd instance (and in my case, on the same machine) as per the instructions: https://docs.mongodb.com/manual/administration/configuration/#run-multiple-database-instances-on-the-same-systemJoy.", "username": "Peter_van_der_Burg" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Mongorestore preserveUUID
2020-12-04T08:12:51.443Z
Mongorestore preserveUUID
3,726
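A quick way to see the UUID clash described above is to list the collection UUIDs already present on the target instance before restoring; --preserveUUID can only work when the incoming UUID does not already exist anywhere in that mongod instance. The database is whichever one you are currently using; nothing below comes from the thread itself.

// Print the UUID of every collection in the current database.
// Compare these against the UUIDs carried in the dump's metadata files.
db.getCollectionInfos().forEach(c => {
  print(c.name + ": " + c.info.uuid);
});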
null
[ "upgrading" ]
[ { "code": "", "text": "Hi,\nWe have sharded MongoDB 3.2 cluster which was upgraded from 2.4 version about 3 year ago. Our config servers are using replica set and using wired tiger engine\nWe are planning to upgrade to 4.x\nIs there any direct path available from 3.2 to 4.x or we have to go in multiple steps 3.2 to 3.4 to 3.6 to 4.0Thanks\nSanjay", "username": "Sanjay_Gupta" }, { "code": "db.adminCommand( { setFeatureCompatibilityVersion: \"3.4\" } )mongodump", "text": "Hi @Sanjay_Gupta and welcome in the MongoDB Community !MongoDB 3.2 is really old now. It’s not supported anymore since September 2018. It’s a good idea to update it .To avoid a significant downtime, the easiest way is to update from one major version to the next, without skipping one.\nSo 3.2 => 3.4 => 3.6 => 4.0 => 4.2 => 4.4.Read each very carefully because they will require you to run some admin queries like db.adminCommand( { setFeatureCompatibilityVersion: \"3.4\" } ) at least. They might also contain some specific instructions which you might not have in others.Another solution might be do just mongodump the entire thing and restore in on a 4.4 cluster. But there is such a gap in the versions that I’m not even sure the dump would be valide in 4.4. Might be though. But this would require a maintenance window.If you ever get tired of this… I can’t recommend you more MongoDB Atlas which automates all this upgrade process without downtime and will take care of the minor version upgrade while you sleep .If you ever consider the MongoDB Atlas solution - there is actually an awesome trick you can pull off here!According to the Upgrade Path of the Atlas Live Migration documentation, you can directly move from a 3.2 cluster to a 4.4 cluster in Atlas. So with this solution, you just need to spawn a new sharded cluster in Atlas and migrate to it directly from 3.2. The only required downtime is when you will have to reconfigure the clients with the new Atlas connection string during the Cut Over process.Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
MongoDB Upgrade path from 3.2 version
2020-12-16T23:01:08.250Z
MongoDB Upgrade path from 3.2 version
2,395
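Each hop in the upgrade path above is gated by the featureCompatibilityVersion, so it is worth keeping the check/set commands handy at every step; the version string shown is just the example for the 3.4 hop.

// Check the current FCV (run on the mongos for a sharded cluster):
db.adminCommand({ getParameter: 1, featureCompatibilityVersion: 1 });

// Once all binaries run the new version and the cluster is healthy,
// bump the FCV before starting the next major-version upgrade:
db.adminCommand({ setFeatureCompatibilityVersion: "3.4" });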
null
[ "connecting", "containers", "devops" ]
[ { "code": "", "text": "I followed the directions to create a mongoDb in a container here (How to Deploy and Manage MongoDB with Docker)In my code in VSCODE i am able to connect to the mongod in the container and pull and insert data in different dbs and collections; however I deploy this service in a container and and use postman to hit the endpoint in the service. It hits the service but when its doing a db.collection.find() i get the following error:pymongo.errors.ServerSelectionTimeoutError: 0.0.0.0:27017: [Errno 111] Connection refused", "username": "Paul_Truong" }, { "code": "", "text": "Your app and db need to share a docker network. Beyond that I would suggest looking at the docker documentation. Docker compose can help you create application stacks.", "username": "chris" }, { "code": "", "text": "I talked to a few other people and they also suggested the docker network. I tried to ping the the other container’s ip and didn’t get a response back. Based on the IP address structure they look to be on the same subnet.", "username": "Paul_Truong" }, { "code": "$ docker network create mdb\n821e11d5b1d05fd0ece3985e19cb05f98b9fa2c1cc5caaf9f9f8e3f51722a353\n$ docker run -d --network mdb --name mongo mongo:4.4\nbe432cf4c7e1abeed256640aecb3dcfdfa07ef9ddd589bdcbf5acc731803ea20\n$ docker run --rm --network mdb -it mongo:4.4 mongo --quiet mongo/local --eval 'db.startup_log.find({},{startTime:1, \"buildinfo.version\":1})'\n{ \"_id\" : \"be432cf4c7e1-1608161225828\", \"startTime\" : ISODate(\"2020-12-16T23:27:05Z\"), \"buildinfo\" : { \"version\" : \"4.4.1\" } }\n\n", "text": "Based on the IP address structure they look to be on the same subnet.A docker network inspect will show what containers are on the same network.An example in 3 commands:", "username": "chris" } ]
I can connect to my MongoDB in a container, but when I try with my service in another container I get a connection refused
2020-12-14T21:50:22.615Z
I can connect to my MongoDB in a container, but when I try with my service in another container I get a connection refused
12,888
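The root cause in the thread above is the connection target: 0.0.0.0:27017 points at the service's own container, not at the database container. Once both containers share a Docker network, the client should address the database by its container or service name; a hedged example connection string, assuming the MongoDB container is named "mongo" as in the commands above.

// connection string used from inside the app container, resolved via the shared Docker network
"mongodb://mongo:27017"
// not: "mongodb://0.0.0.0:27017" (that only reaches the app container itself)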
https://www.mongodb.com/…262913f923b5.png
[ "graphql", "typescript" ]
[ { "code": "@deprecatedcodegen.ymlschema.graphqlhttps://realm.mongodb.com/api/client/v2.0/app/something/graphqlschema:\n - ${REALM_GRAPHQL_ENDPOINT}:\n headers:\n apiKey: ${REALM_API_KEY}\n@deprecated", "text": "GraphQL Code Generator is a great, widely-used, must-have tool for every developer who uses both TypeScript and GraphQL.\nHowever, there’s a minor problem if you use it in combination with Realm GraphQL.All of the fields of the generated types will be marked with @deprecated directives like this:\nThat will have your editor strikes through the params, which look like this:\nFirst of all, when you use the generator, you will specify the location of the GraphQL schema in its config file (e.g. codegen.yml). And the location can be either a remote GraphQL endpoint or a locally downloaded schema file.So there are two options for Realm GraphQL developers.If you choose #1 option, there’s no problem. But if you choose #2 option, the deprecation will happen.FYI: as you know, you will have to set correct headers to fetch the remote schema. My codegen config file starts with like:And it works without problem, except for the @deprecated thing.If I’m missing something, I’d appreciate your pointing out.", "username": "Toshi" }, { "code": "", "text": "Hey @Toshi - thanks for calling this out, did you see this start happening recently?It seems like GraphQL Codegen adds this flag when we specify a ‘@deprecated’ directive in the GQL schema. However, MongoDB Realm does not use directives at the moment and downloading my schema from the endpoint (e.g. from GraphQL Playground) does not seem to indicate this either.It would make sense to file an issue with GraphQL Codegen and understand whether this is a bug/regression/feature request for them, or at least give us more information to why our service doesn’t work with their tooling.", "username": "Sumedha_Mehta1" }, { "code": "", "text": "Hi Sumedha, thanks for the immediate feedback like always!Until reading your comment, I was thinking Realm guys intentionally marked the schema deprecated for some reason, because I have never experienced this issue when I work with other services/frameworks which exposes graphql endpoints.Now I think, however, it should make sense as you said – the cause may exist on the graphql-codegen side. I’ll go to their repo and find a next step.FYI, as for your question:did you see this start happening recently?No, I have experienced this @deprecated issue since the first time I started to work with Realm GraphQL, which was August. (It’s only August so you can say it’s “recent” )", "username": "Toshi" }, { "code": "apollonpx apollo client:download-schema myschema.graphql \\\n--endpoint=\"https://realm.mongodb.com/api/client/v2.0/app/yourapp/graphql\" \\\n--header=\"apiKey: XXXyyyZZZ\"\napollo schema:download", "text": "I just came up with an idea to give it a shot with another tooling, which is different from GraphQL Code Generator, to see whether the issue will be reproduced. And it seems reproduced. 
Below is what I tried with apollo CLI: GitHub - apollographql/apollo-tooling: ✏️ Tooling for development and production Apollo workflowsAll I did was running a single line command to download the remote schema:Then a myschema.graphql file was downloaded, and it was deprecated like this:\n*Just in case, I tried with another command apollo schema:download to get a .json file instead, the result was the same.\n\nimage357×528 30.4 KB\nWhat do you think?", "username": "Toshi" }, { "code": "", "text": "Thanks for digging deeper into this - we’re investigating where in our remote schema these “deprecated” flags might be coming from and will update this thread as soon as we find out.", "username": "Sumedha_Mehta1" }, { "code": "", "text": "Thanks! Looking forward to it to be solved.", "username": "Toshi" }, { "code": "deprecationReason", "text": "1/15/2020 Update - This should be resolved now, cc: @Toshi@Toshi It looks like the tools started using deprecationReason to assign the @deprecated flag and our GraphQL library was not updated to reflect this change. The fix should be released early next month.", "username": "Sumedha_Mehta1" }, { "code": "", "text": "Very cool! Thank you for updating. I’m looking forward to seeing the fix!", "username": "Toshi" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Using GraphQL Code Generator with Realm Will Produce "@deprecated"
2020-11-13T15:22:19.003Z
Using GraphQL Code Generator with Realm Will Produce “@deprecated”
5,897
null
[ "golang", "beta" ]
[ { "code": "", "text": "The MongoDB Go Driver Team is pleased to announce the release of 1.5.0-beta1 of the MongoDB Go Driver.This release contains driver specific improvements. For more information please see the release notes.You can obtain the driver source from GitHub under the v1.5.0-beta1 tag.General documentation for the MongoDB Go Driver is available on pkg.go.dev and on the MongoDB Documentation site. BSON library documentation is also available on pkg.go.dev. Questions can be asked through the MongoDB Developer Community and bug reports should be filed against the Go project in the MongoDB JIRA. Your feedback on the Go driver is greatly appreciated.Thank you,The Go Driver Team", "username": "Isabella_Siu" }, { "code": "", "text": "", "username": "system" } ]
MongoDB Go Driver 1.5.0-beta1 Released
2020-12-16T22:20:54.686Z
MongoDB Go Driver 1.5.0-beta1 Released
3,253
null
[ "legacy-realm-cloud" ]
[ { "code": "", "text": "I am trying to cancel the legacy $30/month Realm Cloud subscription. I sent a request to [email protected] as suggested in this thread and received an automated reply.Please contact Sales by visiting us at this website: How To Buy | MongoDB.I contacted the MongoDB team through Messenger. The person I spoke to didn’t know anything about the Realm Cloud subscription and suggested me to contact Support Portal. And this site tells me to contact MongoDB. I am in a Kafkaesque infinite loop.", "username": "lonesometraveler" }, { "code": "", "text": "After a few exchanges (and some unnecessary frustration) with MongoDB support via Messenger, I was able to cancel my subscription.", "username": "lonesometraveler" }, { "code": "", "text": "I just emailed support at [email protected]. They were very prompt in closing out my Realm cloud account - can’t complain.Richard Krueger", "username": "Richard_Krueger" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How can we cancel the legacy Realm Cloud subscription?
2020-12-14T12:47:07.332Z
How can we cancel the legacy Realm Cloud subscription?
3,811
https://www.mongodb.com/…4803ec43f753.png
[]
[ { "code": "", "text": "If you’re interested in becoming a conference speaker or just upping your speaker game, check out the new course just released by our University team on Crafting Conference Abstracts:Discover our MongoDB Database Management courses and begin improving your CV with MongoDB certificates. Start training with MongoDB University for free today.Then, submit your abstract to the MongoDB.live 2021 CFP, open now until February 11, 2021! MongoDB.live 2021 - Call For Speakers | MongoDB", "username": "Jamie" }, { "code": "", "text": "", "username": "Stennie_X" } ]
New University Course! "Crafting Conference Abstracts" is now available
2020-12-16T18:57:58.012Z
New University Course! “Crafting Conference Abstracts” is now available
3,104
https://www.mongodb.com/…e868bddb6a59.png
[]
[ { "code": "\"Alergias:\" [\n {\n \"$oid\": \"5fd83705fc9e4a97dc261a68\"\n \"Tipo\": \"Food Allergie\".\n \"Date\": 2020-01-24 ...\n }, \n\n {\n \"$oid\": \"5fd83705fc9e4a98yc261a70\"\n \"Tipo\": \"Another Type\".\n \"Date\": 2020-01-30...\n }\n]\n", "text": "Hi,I am new designing DBs with Mongo.I am creating a Db in Mongo, not sure if my schema is designed correctly, I need some help.I have some arrays inside a document that I need them to reference other documents from the DataBase- I am doing this reference via Object ID. Despite, I would like to know if it is possible to add more element on arrays where object IDs are present.For example:As you may see, “domicilio” has several elements embeded inside of it. I tried to do that on the case of diagnosis, “alergias”, “Citas”. For example something like this:Is it possible to create such an arrangement as the one above? Please help, I tried the method above but I was not successful. Currently using the Compass GUI.Regards,", "username": "Luis_Guzman" }, { "code": "“Alergias” : [\n{\n\"id\" : { “$oid”: “5fd83705fc9e4a97dc261a68”},\n“Tipo”: “Food Allergie”,\n“Date”: 2020-01-24 …\n},\n\n {\n\"id\" : { \"$oid\": \"5fd83705fc9e4a98yc261a70\"},\n \"Tipo\": \"Another Type\",\n\"Date\": 2020-01-30...\n }\n]\n", "text": "Hi @Luis_Guzman,Of course its possible to add or save rich embedded docs in an array.I think your main problem is with the extended json represtation of the objectId ($oid). It cannot be used that way in a complex object as you specify it as a field name rather than a type. Please try:Also try to use compass field editor rather than free text.Best\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Add more elements to an array with an object ID
2020-12-16T07:07:38.243Z
Add more elements to an array with an object ID
11,687
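Following the corrected structure above (where the ObjectId is stored under a normal field name rather than as a "$oid" key), appending another entry to such an array looks like this from mongosh; the collection name and id values are placeholders.

db.pacientes.updateOne(
  { _id: ObjectId("5fd83705fc9e4a97dc261a00") },    // placeholder patient _id
  {
    $push: {
      Alergias: {
        id: ObjectId("5fd83705fc9e4a97dc261a68"),   // reference to the related document
        Tipo: "Another Type",
        Date: new Date("2020-01-30")
      }
    }
  }
);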
null
[ "server", "release-candidate" ]
[ { "code": "", "text": "MongoDB 4.0.22-rc0 is out and is ready for testing. This is a release candidate containing only fixes since 4.0.21. The next stable release 4.0.22 will be a recommended upgrade for all 4.0 users.\nFixed in this release:", "username": "Jon_Streets" }, { "code": "", "text": "", "username": "Stennie_X" } ]
MongoDB 4.0.22-rc0 is released
2020-12-16T18:07:20.361Z
MongoDB 4.0.22-rc0 is released
2,083
null
[ "server", "release-candidate" ]
[ { "code": "", "text": "MongoDB 4.4.3-rc0 is out and is ready for testing. This is a release candidate containing only fixes since 4.4.2. The next stable release 4.4.3 will be a recommended upgrade for all 4.4 users.\nFixed in this release:", "username": "Jon_Streets" }, { "code": "", "text": "", "username": "Stennie_X" } ]
MongoDB 4.4.3-rc0 is released
2020-12-16T18:04:51.499Z
MongoDB 4.4.3-rc0 is released
2,188
https://www.mongodb.com/…d472b14b8e72.png
[ "queries" ]
[ { "code": "", "text": "Hello,\nI have a question about how to query partial matches in an array (new to MongoDB).I have the following structure in my database where each recipe is built up similar to this:\n\nNow I am trying to match a given ingredients list for example: [‘water’, “tomatoes”], but I am stuck on how to properly query this. It should return me each recipe in the database that has one or more partial matching strings in the ingredients array in the database.\nWhat would be the cleanest solution for this problem?Thanks in advance,\nSebastiaan", "username": "Sebastiaan_Vanspauwe" }, { "code": "watingredientsingredientswater", "text": "@Sebastiaan_Vanspauwe do you mean “partial matches” as in “some ingredients from the list” or “partial matches” as in wat matches ingredients when ingredients contains water?", "username": "Jack_Woehr" }, { "code": "", "text": "Hi @Jack_Woehr,\nExactly the last thing you mentioned. With partial match I mean “tomatoes” should match “6 tomatoes” and “wat” should match “water”.", "username": "Sebastiaan_Vanspauwe" }, { "code": "aggregate$unwind$regex", "text": "Sound like your aggregate wants to $unwind your arrays and do a $regex", "username": "Jack_Woehr" } ]
Return partial matches on string in array
2020-12-15T23:02:27.911Z
Return partial matches on string in array
3,723
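Building on the $regex suggestion above: a query condition on an array-of-strings field is applied to each element, so partial matches can be expressed directly in find(), and $unwind is only needed when you want per-ingredient output. Field names follow the recipe example; the case-insensitive option is an assumption.

// Recipes with at least one ingredient partially matching "tomato" or "wat":
db.recipes.find({
  $or: [
    { ingredients: { $regex: "tomato", $options: "i" } },
    { ingredients: { $regex: "wat", $options: "i" } }
  ]
});

// Aggregation variant that also reports which ingredients matched:
db.recipes.aggregate([
  { $unwind: "$ingredients" },
  { $match: { ingredients: { $regex: "tomato|wat", $options: "i" } } },
  { $group: { _id: "$_id", matched: { $push: "$ingredients" } } }
]);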
null
[ "sharding", "performance" ]
[ { "code": "{\"t\":{\"$date\":\"2020-12-10T11:08:27.462+01:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn1217790\",\"msg\":\"Slow query\",\n\"attr\":{\"type\":\"command\",\"ns\":\"config.changelog\",\"appName\":\"MongoDB Shell\",\"command\":{\"aggregate\":\"changelog\",\"pipeline\":[{\"$match\":{\"time\":{\"$gt\":{\"$date\":\"2020-12-09T10:08:26.775Z\"}},\n\"what\":\"moveChunk.from\",\"details.errmsg\":{\"$exists\":false},\"details.note\":\"success\"}},{\"$group\":{\"_id\":{\"msg\":\"$details.errmsg\"},\"count\":{\"$sum\":1.0}}},{\"$project\":{\"_id\":{\"$ifNull\":[\"$_id.msg\",\"Success\"]},\"count\":\"$count\"}}],\"fromMongos\":false,\"cursor\":{\"batchSize\":101},\"readConcern\":{\"provenance\":\"implicitDefault\"},\"writeConcern\":{\"w\":1,\"wtimeout\":0,\"provenance\":\"implicitDefault\"},\"lsid\":{\"id\":{\"$uuid\":\"5bad02ed-7a8d-419e-8b94-9750be30558d\"},\"uid\":{\"$binary\":{\"base64\":\"YtJ8CVGJPpojGlBhlVfpmkB+TWiGCwPUvkGEjp5tty0=\",\"subType\":\"0\"}}},\"$clusterTime\":{\"clusterTime\":{\"$timestamp\":{\"t\":1607594920,\"i\":1}},\"signature\":{\"hash\":{\"$binary\":{\"base64\":\"F/KZAbai8D8Qcs/qQWKjLV0BTvM=\",\"subType\":\"0\"}},\"keyId\":6880911790037794833}},\"$audit\":{\"$impersonatedUsers\":[{\"user\":\"mongo-admin\", \"db\":\"admin\"}],\"$impersonatedRoles\":[{\"role\":\"root\",\"db\":\"admin\"}]},\"$client\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.1\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"18.04\"},\"mongos\":{\"host\":\"mongop_rtr1:27017\",\"client\":\"10.100.22.100:36242\",\"version\":\"4.4.1\"}},\"$configServerState\":{\"opTime\":{\"ts\":{\"$timestamp\":{\"t\":1607594919,\"i\":55}},\"t\":7}},\n\"$db\":\"config\"},\n\"planSummary\":\"COLLSCAN\",\n\"keysExamined\":0,\n\"docsExamined\":473718,\n\"cursorExhausted\":true,\"numYields\":473,\"nreturned\":1,\"queryHash\":\"ABD7C887\",\"planCacheKey\":\"5B43D47C\",\"reslen\":359,\"locks\":{\"ReplicationStateTransition\":{\"acquireCount\":{\"w\":475}},\"Global\":{\"acquireCount\":{\"r\":475}},\"Database\":{\"acquireCount\":{\"r\":475}},\"Collection\":{\"acquireCount\":{\"r\":475}},\"Mutex\":{\"acquireCount\":{\"r\":2}}},\"readConcern\":{\"provenance\":\"implicitDefault\"},\"writeConcern\":{\"w\":1,\"wtimeout\":0,\"provenance\":\"implicitDefault\"},\"storage\":{},\"protocol\":\"op_msg\",\n\"durationMillis\":685}}\n", "text": "We run the community version 4.4.1 and we split our data over weekly collections.We have 4 new collections per week, and some 1200M records are stored in them every week.Each of the collections is split into 3600 chunks, which are moved evenly over 108 shards of 3 shard-servers each, while the collection is freshly created and thus entirely empty (=no documents). 
Auto-balancer is disabled of course.\nSo per week we have 4x3600 = 14400 chunks.\nWe have now some 19 weeks of 4 collections/week, and target is to have 52 weeks of collections.Thus now we have some 19x14400 = 273K chunksBut where our chunk-movements went very fast when we started with this setup, it now goes increasingly slow, even while there is no single other activity going on to/from our cluster (no loading, no queries):\nEvery chunk movement takes roughly 40 seconds.When I check the logs of our config servers for some indication of this slowness, I see following logs:I identify the “moveChunk” actions that we initiate, a “config.changelog” namespace, a “COLLSCAN” (collection-scan?), the “docsExamined”=473718, and most suspicious is the “keysExamined”=0\nFinally the “durationMillis”=685Does this mean there is no optimal configuration to scan the list of chunks in the configuration database?\n“KeysExamined” sounds to me like there is no index used, or whatever I should interprete as “keys”I find no hint in the documentation or this forum about how I can check for any detailed causes of slow chunk movements.Hoping that any of you can shed some light in this darkness thx in advance!\nRob", "username": "Rob_De_Langhe" }, { "code": "{\"t\":{\"$date\":\"2020-12-16T15:49:21.492+01:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn1736393\",\"msg\":\"Slow query\",\"attr\": {\"type\":\"command\",\"ns\":\"config.changelog\",\"appName\":\"MongoDB Shell\",\"command\":{\"aggregate\":\"changelog\",\"pipeline\":[{\"$match\":{\"time\":{\"$gt\":{\"$date\":\"2020-12-15T14:49:20.088Z\"}}, \n\"what\":\"moveChunk.from\",\n\"$or\":[{\"details.errmsg\":{\"$exists\":true}},{\"details.note\":{\"$ne\":\"success\"}}]}},{\"$group\":{\"_id\":{\"msg\":\"$details.errmsg\",\"from\":\"$details.from\",\"to\":\"$details.to\"},\"count\":{\"$sum\":1.0}}},{\"$project\": {\"_id\":{\"$ifNull\":\n[\"$_id.msg\",\"aborted\"]},\n\"from\":\"$_id.from\",\"to\":\"$_id.to\",\"count\":\"$count\"}}],\"cursor\":{},\"lsid\":{\"id\":{\"$uuid\":\"e1c5cc31-476e-44bb-be55-db1687d3b7e4\"}},\"$clusterTime\":{\"clusterTime\":{\"$timestamp\":{\"t\":1608130173,\"i\":28}},\"signature\":{\"hash\":{\"$binary\":{\"base64\":\"DYpcel5ZTCQ5+Sn+iFO/V5Pvz8g=\",\"subType\":\"0\"}},\"keyId\":6880911790037794833}},\"$db\":\"config\"},\"nShards\":1,\ncursorExhausted\":true,\"numYields\":0,\n\"nreturned\":0,\"reslen\":230,\"protocol\":\"op_msg\",\"durationMillis\":802}}\n", "text": "I notice that many of these ‘moveChunk’ commands have been kind of queued, but not executed:Log sample:These logs entries keep on going on, as if there is an awful long queue of chunk-movement attempts that are queued for long time, then aborting for some reason (“cursorExhausted=true” ?)\nAny new attempt to move another chunk is terminating within a few millisecs, but nothing is effectively done when I check “sh.status”.\nSo to me it looks like chunk movements get queued, but that queue is not processed (blocked for some reason?) and finally these attempts are aborted.\n=> how to get rid of this queue?\n=> how to find the reason why the moveChunk commands don’t get executed?rgds\nRob", "username": "Rob_De_Langhe" }, { "code": "", "text": "ok, turns out I had to add the new (since release 4.4) option “forceJumbo: true” since my chunks are otherwise too big.-> resolved", "username": "Rob_De_Langhe" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Slow chunk movements when there are already MANY chunks in config database
2020-12-10T10:31:57.369Z
Slow chunk movements when there are already MANY chunks in config database
2,576
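For reference, the forceJumbo option mentioned in the resolution above is an argument to the moveChunk command (available from MongoDB 4.4); the namespace, shard-key value, and destination shard below are placeholders.

// Run via mongos; migrates the chunk even if it exceeds the configured chunk size.
db.adminCommand({
  moveChunk: "mydb.week50_collection",
  find: { myShardKey: "some-value" },
  to: "myShard_042",
  forceJumbo: true
});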
null
[ "atlas-online-archive" ]
[ { "code": "", "text": "We are using archiving feature with aws connectivitity, we have already connected mongo atlas over vpc peering which is working fine with main cluster, but while connecting archive db url with mongo shell getting error for connection.2020-12-12T05:41:15.283+0000 W NETWORK [thread1] Failed to connect to xx.xxx.xx.xx:port after 5000ms milliseconds, giving up. 2020-12-12T05:41:15.283+0000 E QUERY [thread1] Error: couldn’t connect to server atlas-online-archive-5da8542bcf09a2545f54fa24-jsraa.a.query.mongodb.net:27017, connection attempt failed : connect@src/mongo/shell/mongo.js:275:13 @(connect):1:6 exception: connect failedkindly assist me over this.Thank you", "username": "Shailesh_Karle" }, { "code": "", "text": "Hello Shailesh,Are you attempting to connect to your archive over VPC Peering as well? Currently Online Archive connections are only supported with Standard Connections with TLS - https://docs.atlas.mongodb.com/online-archive/manage-online-archive#how-service-archives-data .We are looking at supporting Private Link as well but that is not yet available.Best,Ben", "username": "Benjamin_Flast" } ]
Archive DB connection issue with VPC peering
2020-12-15T06:11:55.188Z
Archive DB connection issue with VPC peering
4,179
null
[ "devops" ]
[ { "code": "", "text": "Hi,I setup Realm hosting using a git repository and this successfully replaces files on the hosting service when I make a change. However, this also seems to undo changes I made in the UI for non-hosting files.I don’t really want to go back to picking files one-by-one in the UI for front-end deploys. I want it to be automated CD. Is there any way to get changes made in the Realm UI to sync back to github? It seems like a one-way binding.Is there a way to have both worlds? I want to use the Realm web UI + automate Front-end deploys.", "username": "Travis_N_A" }, { "code": "", "text": "Hey Travis,Have you considered using the automated GitHub deploys for both hosted files and other changes to your Realm app (like functions, triggers, etc)? More information is available at https://docs.mongodb.com/realm/deploy/deploy-automatically-with-github.I’ve successfully used this for my project and found it really helpful. All of the details of how I setup the auto deploys as well as a video demo are included in the project readme: GitHub - mongodb-developer/SocialStats.", "username": "Lauren_Schaefer" }, { "code": "hosting/files", "text": "Hey thanks @Lauren_Schaefer !Yes, I’m using github deploys for the hosting - and that’s where I ran into trouble.\nI build+copy my hosted files over from the front-end app into the hosting/files directory, commit and push and it auto-deploys.The problem I run into is that it overrides the changes (like say, a login or schema/graphql change) that I made in the UI, reverting them back to previous settings.\nThose changes don’t seem to get back to github. Should they? Do you think I set something up incorrectly?", "username": "Travis_N_A" }, { "code": "", "text": "I am going to take a look at that project you referenced though. It looks super thorough and I’d love to get a 4-stage pipeline going. Thanks again!", "username": "Travis_N_A" }, { "code": "", "text": "I think that what you’re describing may be expected. Let me ask around, and I’ll get back to you.", "username": "Lauren_Schaefer" }, { "code": "", "text": "@Travis_N_A I was able to replicate the issue you’re hitting. I talked with the team, and this is a bug. We’ve filed a ticket around the GraphQL changes being lost.In general, changes made in the UI that have been deployed should not be lost when you push changes to your GitHub repo. The changes made in the UI will not be pushed to your GitHub repo.", "username": "Lauren_Schaefer" }, { "code": "", "text": "Thanks @Lauren_Schaefer - is the bug list somewhere online? I just ran into the issue again when re-deploying and would like to track it so I know when its resolved.Also took a look at your example repo/video and learned a lot, thanks! It’ll be a while before I can setup that kind of pipeline but eventually…", "username": "Travis_N_A" }, { "code": "", "text": "Hi @Travis_N_A! Sorry for the significant delay in my response! I was out a week for Thanksgiving and then a week for illness.I checked the ticket, and it’s in progress. I can’t find an external version of the ticket. I’ve asked, and I’ll let you know if I find one.I’m glad you found the repo and video helpful! Hopefully, the repo will help you get your pipeline setup more quickly!", "username": "Lauren_Schaefer" }, { "code": "", "text": "Hey @Travis_N_A - I spoke with an engineer on the team today. He made a fix related to this problem that he thought would fix the issue. 
It didn’t fix the issue I was hitting, but maybe it will fix yours.The issue I was hitting was more a misunderstanding of how the system works. So, below is what I learned.Custom resolvers are stored as code in the GitHub repo. When you make changes in the web ui, those changes are not stored in the GitHub repo. Whenever you push to your repo and trigger an auto-deploy, any changes not stored in the GitHub repo (like custom resolvers) get tossed away. So once you start using auto-deploys you need to either:I’m passing feedback on to the product manager that we need some warnings in the web ui that your changes will be lost in your next deploy, or, even better, have the option of pushing the changes to your associated GitHub repo.", "username": "Lauren_Schaefer" }, { "code": "realm-cli export --app-id=my-appid --for-source-control --include-hostinggit checkout -b changes-from-ui\ncp -a ../new-realm-export/* . # over-write files with the new download */\n\ngit checkout hosting/* # I'm replacing these with a new front-end version in the next step\ngit statusservices/mongodb-atlas/rules/functionsvaluesfunctionsvaluesgit add .\ngit commit -m \"import ui changes from realm\"\nhostinghosting", "text": "Thank you for trying to help with this. Sadly this didn’t work.In this case I did what you said, exporting to a new directory:realm-cli export --app-id=my-appid --for-source-control --include-hostingThen in my original repo for realm I:git status at this point showed that there were changes to config.json which seemed reasonable and to 2 files in services/mongodb-atlas/rules/. There were also new files in functions and values.Anyway, assuming that all of that is correct I proceeded:Then I replaced the hosting directory with the latest version of my front-end app. That results in changes to my hosting directory ONLY, which I also commit.Then I pushed it all to master.The Deploy tab showed that it failed with the message “Failed: No download link found for values/auth_providers”. Since only realm-UI changes were made there I expect that it should have JUST worked.I performed the above steps twice and got the same error message both times. At this point, I’m no longer able to deploy front-end changes.If I could make one suggestion it would be to allow using a separate front-end repo.", "username": "Travis_N_A" }, { "code": "Failed: No download link found for values/auth_providers--for-source-controlrm -fr values\nrealm-cli --for-source-control", "text": "Out of frustration I tried again and I was able to deploy with just the front-end changes ignoring all of the above recommendations. But it once again overrode the significant number of changes I’d made in the Realm UI and as a result broke the graphql queries the front-end was using. That was to be expected. However…I tried just pushing the downloaded copy (from realm-cli) but that hits the same Failed: No download link found for values/auth_providers deploy error. If I could give any feedback at this point, it is that the code that realm-cli exports with the --for-source-control flag really must be deployable and it is not.I thought I was dead in the water here - unable to deploy the previous back-end and unable to develop the new front-end since it required changes that had been made in that back-end. But I have found a way forward. This time I did the same steps as above but I added a step:This makes it deployable. 
Again I want to emphasize that realm-cli --for-source-control really should result in deployable code and it does not.Obviously I’m a bit frustrated at this point, please forgive me if I haven’t been as cordial as usual.", "username": "Travis_N_A" }, { "code": "", "text": "Hey @Travis_N_A,Sorry - I didn’t get a notification about your reply. (I’m going to dig into the settings to see if I can get a notification when someone replies to a post I’ve commented in without explicitly tagging me.)And sorry this has been so frustrating for you!!! It sounds like you’ve made a way forward. Are you still stuck on a piece of this?I’ll pass on your feedback to the product manager. I spoke with him last week about the issues we were seeing. He showed me the plan for the future, and the plan looks good. Positive changes are coming. This will get easier.", "username": "Lauren_Schaefer" } ]
CD for hosting while using UI for Realm
2020-11-11T18:43:15.909Z
CD for hosting while using UI for Realm
2,126
null
[]
[ { "code": "", "text": "VPC peering limit to our shared network in Google Cloud has been reached.\nWe have been creating mongo atlas project for each customer / environment (dev/prod)\n1 customer - 2 projects. 10 customers - 20 projects. Peering limit is 25.\nWhat could be the suggestions to decrease the amount of peering connections without having to migrate clusters between the projects or having significant downtime?\nCan I define a whitelist just for a specific cluster inside the project?", "username": "Andrei_Bandarovich" }, { "code": "", "text": "Hi Andrei,Can you break out the app tier into VPC per customer just like you’ve structured the data tier?Alternatively, you can use public IP access list management instead of VPC peering: the downside of this is that you’d want to get static IP(s) for your app tier but there is no additional latency or cost in this model (often times folks assume the opposite).Cheers\n-Andrew", "username": "Andrew_Davidson" } ]
VPC peering limit
2020-12-15T17:17:49.620Z
VPC peering limit
1,597
https://www.mongodb.com/…4_2_1024x576.png
[ "realm-web" ]
[ { "code": "", "text": "Screen Shot 2020-12-16 at 11.17.57 (3)1920×1080 292 KBAny help would be massively appreciated ", "username": "Salman_Alam" }, { "code": "", "text": "I do believe this is the same underlying issue as you’ve posted about here: Issue with Realm Web SDK", "username": "kraenhansen" } ]
Realm Web SDK & VueJS
2020-12-16T05:43:06.023Z
Realm Web SDK &amp; VueJS
2,500
null
[ "aggregation" ]
[ { "code": "{\n\t\"name\": \"Test\",\n\t\"votes\": [\n\t\t{\n\t\t\t\"user\": \"joe\",\n\t\t\t\"action\": \"up\"\n\t\t},\n\t\t{\n\t\t\t\"user\": \"john\",\n\t\t\t\"action\": \"up\"\n\t\t},\n\t\t{\n\t\t\t\"user\": \"emma\",\n\t\t\t\"action\": \"down\"\n\t\t}\t\t\n\t]\n}\n{\n\t\"up\": 2,\n\t\"down\": 1\n}\n", "text": "HelloIs there a more efficient way to count specific values from an array of object rather than sending the entire array to the client and “unwind” it there?Test data:I’d like to get something like:so i can calculate a rating. BTW, I’m using GO. I think I could translate the script from shell and most other languages.Thanks", "username": "Roger_Bieri" }, { "code": "", "text": "Stages of a pipeline, including $unwind are done on the server. The array is not sent to the client, just the final result of the pipeline. This means if $unwind is the last stage of the pipeline, then yes the array is sent over. If you have a $group, like to count specific values, the client only receives the counts.", "username": "steevej" }, { "code": "", "text": "helloYou can use $reduce ,and keep the array as it isAnd make the counting ,without the need to $unwind on server or do something like “unwind” on the driver,on driver only the reduce results could be sended.", "username": "Takis" }, { "code": "", "text": "could somebody please give an example for that? I’m all new to Mongo…", "username": "Roger_Bieri" }, { "code": "db.collection.aggregate([\n {\n \"$project\": {\n \"_id\": 0,\n \"upDown\": {\n \"$reduce\": {\n \"input\": \"$votes\",\n \"initialValue\": [\n 0,\n 0\n ],\n \"in\": {\n \"$let\": {\n \"vars\": {\n \"votes\": \"$$value\",\n \"vote\": \"$$this\"\n },\n \"in\": {\n \"$cond\": [\n {\n \"$eq\": [\n \"$$vote.action\",\n \"up\"\n ]\n },\n [\n {\n \"$add\": [\n {\n \"$arrayElemAt\": [\n \"$$votes\",\n 0\n ]\n },\n 1\n ]\n },\n {\n \"$arrayElemAt\": [\n \"$$votes\",\n 1\n ]\n }\n ],\n [\n {\n \"$arrayElemAt\": [\n \"$$votes\",\n 0\n ]\n },\n {\n \"$add\": [\n {\n \"$arrayElemAt\": [\n \"$$votes\",\n 1\n ]\n },\n 1\n ]\n }\n ]\n ]\n }\n }\n }\n }\n }\n }\n },\n {\n \"$project\": {\n \"up\": {\n \"$arrayElemAt\": [\n \"$upDown\",\n 0\n ]\n },\n \"down\": {\n \"$arrayElemAt\": [\n \"$upDown\",\n 1\n ]\n }\n }\n }\n])\ndb.collection.aggregate([\n {\n \"$project\": {\n \"_id\": 0,\n \"up\": {\n \"$size\": {\n \"$filter\": {\n \"input\": \"$votes\",\n \"as\": \"vote\",\n \"cond\": {\n \"$eq\": [\n \"$$vote.action\",\n \"up\"\n ]\n }\n }\n }\n },\n \"down\": {\n \"$size\": {\n \"$filter\": {\n \"input\": \"$votes\",\n \"as\": \"vote\",\n \"cond\": {\n \"$eq\": [\n \"$$vote.action\",\n \"down\"\n ]\n }\n }\n }\n }\n }\n }\n])\ndb.collection.aggregate([\n {\n \"$unwind\": {\n \"path\": \"$votes\"\n }\n },\n {\n \"$replaceRoot\": {\n \"newRoot\": {\n \"$cond\": [\n {\n \"$eq\": [\n \"$votes.action\",\n \"up\"\n ]\n },\n {\n \"up\": 1\n },\n {\n \"down\": 1\n }\n ]\n }\n }\n },\n {\n \"$group\": {\n \"_id\": null,\n \"up\": {\n \"$sum\": \"$up\"\n },\n \"down\": {\n \"$sum\": \"$down\"\n }\n }\n },\n {\n \"$project\": {\n \"_id\": 0\n }\n }\n])\n", "text": "HelloReduce read the array and sums\nFilter read the array 2 times,one sum the up the other sums the downs\nUnwind the votes,replace each document with {up 1} or {down 1} depend on what it voted.\nGroup and sum.I think the easier is the filter,just reads the array 2 times(reduce reads it 1 time)\nI think it does what you want.1)Run Reduce2)Run Filter3)Run unwind and group", "username": "Takis" }, { "code": "", "text": "wow that’s way more complex 
than I expected. Going to be tricky to “translate” this for the Golang driver. Thanks ", "username": "Roger_Bieri" }, { "code": "db.collection.aggregate([ \n { \"$unwind\": \"$votes\" },\n { \"$group\": {\n \"_id\": \"$name\",\n \"ups\": {$sum: {$cond: {\"if\": {\"$eq\": [\"$votes.action\",\"up\"]},then: 1,else: 0}}},\n \"not_ups\": {$sum: {$cond: {\"if\": {\"$ne\": [\"$votes.action\",\"up\"]},then: 1,else: 0}}},\n \"score\": {$sum: {$cond: {\"if\": {\"$eq\": [\"$votes.action\",\"up\"]},then: 1,else: -1}}}}\n }])\n", "text": "{\n“name”: “Test”,\n“votes”: [\n{\n“user”: “joe”,\n“action”: “up”\n},\n{\n“user”: “john”,\n“action”: “up”\n},\n{\n“user”: “emma”,\n“action”: “down”\n}\n]\n}Something like:should get you there…\nYou might want to account for “dirty” data where an action is neither up nor down in some way too?Check out Mongo playground", "username": "Nuri_Halperin" }, { "code": "", "text": "I have a simpler answer for you:db.test01.aggregate([\n… { $unwind: “$votes” },\n… { $sortByCount: “$votes.action” }\n… ])\n{ “_id” : “up”, “count” : 2 }\n{ “_id” : “down”, “count” : 1 }All the best,– Rodrigo", "username": "logwriter" }, { "code": "name", "text": "Does it allocate vote per name field?", "username": "Nuri_Halperin" }, { "code": "", "text": "It sorts by and counts across all documents in the collection.Collection:db.test01.find()\n{ “_id” : ObjectId(“5fd906d828eb4534ccbceb9a”), “name” : “Test”, “votes” : [ { “user” : “joe”, “action” : “up” }, { “user” : “john”, “action” : “up” }, { “user” : “jane”, “action” : “down” } ] }\n{ “_id” : ObjectId(“5fd90b3728eb4534ccbceb9b”), “name” : “Test1”, “votes” : [ { “user” : “joe”, “action” : “up” }, { “user” : “john”, “action” : “up” }, { “user” : “jane”, “action” : “down” } ] }Aggregation result:db.test01.aggregate([ { $unwind: “$votes” }, { $sortByCount: “$votes.action” } ])\n{ “_id” : “up”, “count” : 4 }\n{ “_id” : “down”, “count” : 2 }Just add a $match to get per name:db.test01.aggregate([ { $match: { name: “Test” } }, { $unwind: “$votes” }, { $sortByCount: “$votes.action” } ])\n{ “_id” : “up”, “count” : 2 }\n{ “_id” : “down”, “count” : 1 }All the best,– Rodrigo", "username": "logwriter" }, { "code": "", "text": "One of the thing that I do is to never have my aggregation pipeline in the native language I am using. I always keep them as a json document that I parsed at run time. This way I can use the same source pipeline code whatever I playing with the shell or coding in a specific language. My pipelines become like a dynamic library.", "username": "steevej" }, { "code": "$match", "text": "Nice! I missed the $match the first time around.", "username": "Nuri_Halperin" }, { "code": "", "text": "Do you think this logic can be integrated with a FindOne Projection? As you can see in my sample data, the “votes” array is part (nested) of another “parent” object (I’ve included only a “name” field here) - my goal is to read that parent document - probably as a projection - and aggregate those votes…", "username": "Roger_Bieri" }, { "code": "> db.test01.aggregate([\n... { $match: { name: \"Test\" } },\n... { $facet: {\n... \"parent_fields\": [\n... { $project: { name: 1 } }\n... ],\n... \"counted_votes\": [\n... { $unwind: \"$votes\" },\n... { $sortByCount: \"$votes.action\" }\n... ]\n... }\n... }\n... 
])\n{ \"parent_fields\" : [ { \"_id\" : ObjectId(\"5fd906d828eb4534ccbceb9a\"), \"name\" : \"Test\" } ], \"counted_votes\" : [ { \"_id\" : \"up\", \"count\" : 2 }, { \"_id\" : \"down\", \"count\" : 1 } ] }\n>\n", "text": "Hey,I don’t believe you can use FindOne. But here is a slightly modified version that allows you to grab all information you want from the parent document and also count the votes.I hope it helps.All the best,– Rodrigo", "username": "logwriter" } ]
Count values in array of objects
2020-12-14T19:47:47.192Z
Count values in array of objects
16,317
null
[ "aggregation", "dot-net" ]
[ { "code": "db.collection(\"A\")\n .aggregate()\n .lookup(\n \"B\",\n \"array.id\", //localField\n \"id\", //foreignField\n \"b_full\", //result list\n )\ndb.CollectionA //IMongoDbCollection\n .Aggregate()\n .LookUp<A, B, AwB>(\n db.CollectionB, //IMongoDbCollection\n a => a.Array.Id, // PROBLEM HERE => I can't select the property Id of the property \"Array\"\n b => b.Id,\n a => BCollectionFull\n )\n", "text": "Hello,\nI have this situation and I need to find out if it possible to resolve.This is what I want to do (this is working in mongodb compass)but I can’t with typed classThe property “Array” of collection “A” is a List< MyClass > where MyClass is a simple class with 3 properties Id, Description, Date\nThe connection between collection A and collection B is with the Id inside the ListThanks in advance\nKind Regards", "username": "Andrea_Zanini" }, { "code": "", "text": "Also, it is possible to pre filter CollectionB and use it in the Lookup ?\n(always with typed classes and no bsondocument)Thanks", "username": "Andrea_Zanini" } ]
C# Typed Lookup with localField as List<MyClass>
2020-12-13T14:32:01.571Z
C# Typed Lookup with localField as List<MyClass>
4,916
null
[ "queries" ]
[ { "code": " [\n { \n _id: \"5fd90412350de61e44657c8b\"\n description: 'Test1',\n completed: true\n },\n { \n _id: \"5fd9040a350de61e44657c8a\"\n description: 'Test2',\n completed: false\n },\n { \n _id: \"5f1d7d723bfe781024f734d9\"\n description: 'Test3',\n completed: true\n }\n ]\n completed: truecompleted: falsedb.Task.find()\n", "text": "What I am trying to accomplish here, is to retrieve data from MongoDB filtering first by the documents that contain completed: true, and the documents with completed: false should appear last.\nIs it possible?", "username": "Soulaimane_Benmessao" }, { "code": "> db.test02.find( { } ).sort( { completed: -1} )\n{ \"_id\" : ObjectId(\"5fd927ab63959199d5596799\"), \"description\" : \"Test1\", \"completed\" : true }\n{ \"_id\" : ObjectId(\"5fd927c463959199d559679d\"), \"description\" : \"Test5\", \"completed\" : true }\n{ \"_id\" : ObjectId(\"5fd927b363959199d559679a\"), \"description\" : \"Test2\", \"completed\" : false }\n{ \"_id\" : ObjectId(\"5fd927b963959199d559679b\"), \"description\" : \"Test3\", \"completed\" : false }\n{ \"_id\" : ObjectId(\"5fd927be63959199d559679c\"), \"description\" : \"Test4\", \"completed\" : false }\n>\n", "text": "Hey,You could create an index for the “completed” field, and then run a find({ }).sort({ completed: -1}).", "username": "logwriter" }, { "code": "", "text": "It works perfectly for me. Thank you very much for your help!", "username": "Soulaimane_Benmessao" } ]
Sort when retrieving data
2020-12-15T20:42:51.899Z
Sort when retrieving data
1,620
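A small companion to the accepted answer above: the supporting index it mentions, followed by the same sorted query (collection name follows the Task example).

// Index so the sort on "completed" can use the index instead of sorting in memory:
db.Task.createIndex({ completed: -1 });

// Completed tasks first, pending ones last:
db.Task.find({}).sort({ completed: -1 });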
https://www.mongodb.com/…cb4219ca222b.png
[]
[ { "code": "", "text": "I am unable to access data using the Realm Web SDK.This particular line is throwing error s1724×227 19.5 KBMy browser gives an error “Cannot read property ‘mongodb’ of undefined”I checked that “app” is correct and that I am able to successfully authenticate as well", "username": "Salman_Alam" }, { "code": "const mongo = user.mongoClient(\"<atlas service name>\");\n", "text": "@Salman_Alam I suspect you’re using the recently released 1.0.0-rc.1 version of Realm Web, which introduced a breaking change to the API: Release Realm Web v1.0.0-rc.1 · realm/realm-js · GitHub. The mongodb client should be accessed viaDo you mind sharing a link to the tutorial that you’re following?", "username": "kraenhansen" }, { "code": "", "text": "Hi I am just coming back to this again after quite a while and almost made the same post. This is the main tutorial on MongoDB Realm docs under “Web SDK”.Here is the link: https://docs.mongodb.com/realm/web/mongodb", "username": "Salman_Alam" }, { "code": "userapp", "text": "Hi Salman,We (still) have an issue on tutorial and I’ve forwarded a request to get this updated as soon as possible.\nI see you also posed Realm Web SDK & VueJS - which refers to the same underlying issue.Did you try my suggestion on getting the MongoDB Client from the user object instead of the app?", "username": "kraenhansen" }, { "code": "", "text": "Hey @kraenhansen. Yes! Thank you so much! I wasted 2 hours trying to figure out what is wrong then I came back to check this old issue.Yes, please if you can somehow elevate that request to keep the docs for Realm Web SDK and Nodejs SDK to be up to date.We have a production app on Stitch legacy SDK, we thought when Realm first came out we should start migrating however back then I encountered similar issues. I thought by this time the docs should be up to date and all the gaps will be sorted out.It’s quite disheartening seeing that hasn’t happened yet Thank you so much nonetheless!", "username": "Salman_Alam" } ]
Issue with Realm Web SDK
2020-10-11T10:20:36.739Z
Issue with Realm Web SDK
3,579
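A minimal end-to-end sketch of the pattern the fix above describes for realm-web 1.0.0-rc.1 and later, where the MongoDB client is obtained from the authenticated user rather than from the app object; the app id, service name, database, and collection names are placeholders.

import * as Realm from "realm-web";

const app = new Realm.App({ id: "<your-realm-app-id>" });

async function run() {
  const user = await app.logIn(Realm.Credentials.anonymous());

  // "mongodb-atlas" is the usual linked-cluster service name; yours may differ.
  const mongo = user.mongoClient("mongodb-atlas");
  const docs = await mongo.db("mydb").collection("mycollection").find({});
  console.log(docs);
}

run().catch(console.error);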
null
[]
[ { "code": "", "text": "Hi,I’ll need some help to understand MongoDB Atlas encryption.\nI have an Atlas subscription for M10 cluster, encryption at rest enabled on it using Azure Key Vault and a database is created on the same cluster. My understanding is as the data is encrypted at rest, plain text in database should not be displayed when I access the database from Atlas or Compass (Enterprise). But as my IP is whitelist I am able to read the data as is, and if it’s not, connection to the cluster fails from MongoDB Compass.Is there a way to prevent users from reading plain text without performing CSFLE? OR\nDoes MongoDB Atlas provide a feature that doesn’t display plain text to any user role?If yes, can that be enabled for admin user role as well? OR can custom role setup restrict users from reading plain text?As per the documentations, disk level encryption ensures that the files at rest are encrypted, is there a way to access those encrypted files on/from Atlas itself?Also, can we arrange a call with the support team to discuss our queries in detail? If yes, could you please provide the details to contact support team directly and arrange a call, if and when required?Thank you!", "username": "Priyanka_Singh" }, { "code": "", "text": "Hi @Priyanka_Singh,The encryption at rest in Atlas encrypt the database files on disk and backups, so if someone would get there hands on this data they will not be able to restore it.However, the data is decrypted when authenticated and whitelisted clients like compass access the data. This decryption/encryption is by design and for the whole instance.The recommended features to encrypt client visible data is FLE :By the way any connection to atlas is TLS encrypted by default.", "username": "Pavel_Duchovny" }, { "code": "", "text": "Hi @Pavel_Duchovny, I have a question. What is the limitation of FLE (search, filter, regex, …)?", "username": "Quang_Huy" }, { "code": "", "text": "Hi @Quang_Huy,I suggest you read the operations manual\nhttps://docs.mongodb.com/drivers/security/client-side-field-level-encryption-guide#e.-perform-encrypted-read-write-operationsOne note I found isYou cannot directly query for documents on a randomly encrypted field, however you can use another field to find the document that contains an approximation of the randomly encrypted field data.Thanks\nPavel", "username": "Pavel_Duchovny" } ]
MongoDB Atlas Encryption - database level or collection level
2020-10-15T07:18:10.515Z
MongoDB Atlas Encryption - database level or collection level
3,363
null
[ "security" ]
[ { "code": "", "text": "Hi,I need to create processes that check when a user will be created the password need to follow this characteristic:The password has a minimum length of 16.\nThe password is not the same as the username.\nThe password has at least one alpha, one numeric, and one punctuation mark character.\nThe password is not a simple or obvious word, such as welcome, account, database, or user.\nThe password differs from the previous password by at least 3 characters.But i didn´t find how i can configure this", "username": "LUCAS_MAROLA" }, { "code": "", "text": "Hi @LUCAS_MAROLA welcome to the community!Currently there is no built in method to enforce password complexity requirement. However this is a feature request in SERVER-7363. Please watch/upvote the ticket for updates on this.Alternatively, you can use LDAP Authorization to enforce password complexity via LDAP. However, note that LDAP features requires the use of MongoDB Enterprise Advanced, which requires a subscription.Best regards,\nKevin", "username": "kevinadi" }, { "code": "", "text": "@kevinadi,Tks for your feedback.", "username": "LUCAS_MAROLA" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Configure strong password user
2020-12-15T01:21:36.288Z
Configure strong password user
8,260
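Since the server has no built-in password-complexity policy (per the answer above), one option is to enforce the listed rules in the provisioning script before calling db.createUser. A mongosh/JavaScript sketch: the "differs from the previous password by at least 3 characters" rule needs the old password and is left out, the "obvious word" check only covers the examples given, and the user, password, and role values are placeholders.

function passwordAcceptable(username, password) {
  const longEnough  = password.length >= 16;
  const notUsername = password.toLowerCase() !== username.toLowerCase();
  const hasAlpha    = /[A-Za-z]/.test(password);
  const hasDigit    = /[0-9]/.test(password);
  const hasPunct    = /[^A-Za-z0-9\s]/.test(password);  // any non-alphanumeric counts here
  const notObvious  = !/(welcome|account|database|user)/i.test(password);
  return longEnough && notUsername && hasAlpha && hasDigit && hasPunct && notObvious;
}

const username = "appUser";                  // placeholder
const password = "S3cur3-And-L0ng-Enough!";  // placeholder

if (passwordAcceptable(username, password)) {
  db.getSiblingDB("admin").createUser({
    user: username,
    pwd: password,
    roles: [ { role: "readWrite", db: "mydb" } ]
  });
} else {
  print("password rejected by local policy");
}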
null
[ "atlas-device-sync" ]
[ { "code": "", "text": "My app is getting integration errors and does not sync with Atlas.Here is the error message:Failed to integrate download with non-retryable error: error applying downloaded changesets to mongodb: error applying array operations as pure set, ArrayInsert.prior_size was 12 but built-up array was only of length 13Some details:\nMy macOS writer app processed CSV files and inserted 230k documents. Although the sync was very slow, the Realm server was writing to Atlas. But after writing about 190k documents, it started getting the integration error. My reader app can connect to the server. But it doesn’t seem to download the data.What would you recommend to fix this? Should I terminate the sync, delete the local file, and reinsert the remaining documents?", "username": "lonesometraveler" }, { "code": "", "text": "@lonesometraveler What SDK and version are you using?", "username": "Ian_Ward" }, { "code": "", "text": "I use realm/realm-cocoa v10.4.0.", "username": "lonesometraveler" }, { "code": "", "text": "@lonesometraveler OK - thats the latest, can you try terminating/re-enabling sync and see if you can reproduce the error again?", "username": "Ian_Ward" }, { "code": "", "text": "After terminating and reenabling sync, I am getting TranslatorFatalErrors.Error:initial sync failed: initial sync worker for ns=‘duet.Song’ stopped with error: failed to insert change into history during initial sync of duet.Song after copying 0 documents: error adding changes for ns=‘duet.Song’ to client history: context deadline exceeded\nSource:Error syncing MongoDB writeError:initial sync failed: initial sync worker for ns=‘duet.Artist’ stopped with error: failed to insert change into history during initial sync of duet.Artist after copying 0 documents: error adding changes for ns=‘duet.Artist’ to client history: connection(cluster0-shard-00-02.jryma.mongodb.net:27017[-6619474]) incomplete read of message header: context deadline exceeded\nSource:Error syncing MongoDB write", "username": "lonesometraveler" }, { "code": "TranslatorFatalErrorOtherClientError", "text": "@Ian_Ward Besides TranslatorFatalError, the log is now showing OtherClientError.Error:Ending session with error: integrating changesets failed: error inserting history entry batch for transaction batch ID 5fd8a342355bca1678eb5eb5: (NoSuchTransaction) Transaction 1 has been aborted. (ProtocolErrorCode=101)Source:Ending sync session to MongoDBIs there anything I can do to fix this?", "username": "lonesometraveler" }, { "code": "", "text": "@lonesometraveler Can you share your Realm Cloud URL please so we can investigate ?", "username": "Ian_Ward" }, { "code": "", "text": "@Ian_Ward Here is the URL.\nhttps://realm.mongodb.com/groups/5f8cd3781105f71e2433edd7/apps/5fd38bb899b7e23151cf0d79/dashboardThank you very much for your help.", "username": "lonesometraveler" }, { "code": "", "text": "@lonesometraveler You’re out of storage bud and trying to sync too much data - see logs:initial sync failed: initial sync worker for ns=‘duet.Song’ stopped with error: failed to insert change into history during initial sync of duet.Song after copying 64910 documents: error adding changes for ns=‘duet.Song’ to client history: (AtlasError) error incrementing the version counter for (appID=“5fd38bb899b7e23151cf0d79”, fileIdent=1); (AtlasError) you are over your space quota, using 517 MB of 512 MB", "username": "Ian_Ward" }, { "code": "", "text": "@Ian_Ward Thanks! 
Upgrading the cluster and terminating/reenabling sync fixed the issue.", "username": "lonesometraveler" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Failed to integrate download with non-retryable error: error applying downloaded changesets to mongodb
2020-12-14T15:06:22.692Z
Failed to integrate download with non-retryable error: error applying downloaded changesets to mongodb
3,639
null
[ "replication" ]
[ { "code": "", "text": "Hi,I have a replica set on which replication was going fine. Suddenly I am seeing below value in replication status output and looks like there is an issue with replication. What might be causing it and any resolution for this please?optimes\" : { “lastCommittedOpTime” : { “ts” : { “$timestamp” : { “t” : 0, “i” : 0 } }, “t” : -1 },Thanks,\nAkshaya Srinivasan", "username": "Akshaya_Srinivasan" }, { "code": "lastCommittedOpTimers.status()", "text": "Welcome @Akshaya_Srinivasan!To help investigate this problem can you please share:Thanks,\nStennie", "username": "Stennie_X" }, { "code": "lastCommittedOpTimers.status() myShard_0:PRIMARY> rs.status()\n {\n \"set\" : \"myShard_0\",\n \"date\" : ISODate(\"2020-03-16T06:53:24.078Z\"),\n \"myState\" : 1,\n \"term\" : NumberLong(57),\n \"heartbeatIntervalMillis\" : NumberLong(2000),\n \"optimes\" : {\n \"lastCommittedOpTime\" : {\n \"ts\" : Timestamp(0, 0),\n \"t\" : NumberLong(-1)\n },\n \"appliedOpTime\" : {\n \"ts\" : Timestamp(1584341601, 1),\n \"t\" : NumberLong(57)\n },\n \"durableOpTime\" : {\n \"ts\" : Timestamp(1584341601, 1),\n \"t\" : NumberLong(57)\n }\n },\n \"members\" : [\n {\n \"_id\" : 0,\n \"name\" : \"xxxx:27017\",\n \"health\" : 1,\n \"state\" : 2,\n \"stateStr\" : \"SECONDARY\",\n \"uptime\" : 256214,\n \"optime\" : {\n \"ts\" : Timestamp(1584084717, 1),\n \"t\" : NumberLong(2)\n },\n \"optimeDurable\" : {\n \"ts\" : Timestamp(1584084717, 1),\n \"t\" : NumberLong(2)\n },\n \"optimeDate\" : ISODate(\"2020-03-13T07:31:57Z\"),\n \"optimeDurableDate\" : ISODate(\"2020-03-13T07:31:57Z\"),\n \"lastHeartbeat\" : ISODate(\"2020-03-16T06:53:22.760Z\"),\n \"lastHeartbeatRecv\" : ISODate(\"2020-03-16T06:53:22.777Z\"\n ),\n \"pingMs\" : NumberLong(0),\n \"syncingTo\" : \"xxxx:27018\",\n \"configVersion\" : 104289\n },\n {\n \"_id\" : 1,\n \"name\" : \"xxxx:27018\",\n \"health\" : 1,\n \"state\" : 1,\n \"stateStr\" : \"PRIMARY\",\n \"uptime\" : 256217,\n \"optime\" : {\n \"ts\" : Timestamp(1584341601, 1),\n \"t\" : NumberLong(57)\n },\n \"optimeDate\" : ISODate(\"2020-03-16T06:53:21Z\"),\n \"electionTime\" : Timestamp(1584086043, 1),\n \"electionDate\" : ISODate(\"2020-03-13T07:54:03Z\"),\n \"configVersion\" : 104289,\n \"self\" : true\n }\n ],\n \"ok\" : 1\n }\n", "text": "Hi Stennie,", "username": "Akshaya_Srinivasan" }, { "code": "lastCommittedOpTimers.status()lastCommittedOpTimers.conf().protocolVersion", "text": "the specific version(s) of MongoDB server used in your replica set → MongoDB 3.4.5I would strongly recommend upgrading to the latest (and final) version of MongoDB 3.4 (3.4.24). There have been a significant number of bug fixes and stability improvements in the 3.4 release series and upgrading will confirm you are not encountering a known issue since 3.4.5 was released back in June, 2017. Minor releases do not introduce any backward-breaking changes.i would also note that MongoDB 3.4 reached end of life in January, 2020 so I would also plan to upgrade to 3.6 for continued stability & security improvements.the roles of the members showing lastCommittedOpTime set to 0 → I do not have authentication enabled on this server. Server is started and access as Windows administrator user.I was referring to the roles of replica set members (primary, secondary, arbiter). 
However, that isn’t relevant since you have provided the rs.status() info.A lastCommittedOpTime of -1 is expected if you are running a replica set with the older replication protocol 0 (pv0).You can confirm your protocol version using rs.conf().protocolVersion.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Thanks Stennie.Protocol version:\nmyShard_0:PRIMARY> rs.conf().protocolVersion\nNumberLong(1)\nmyShard_0:PRIMARY>", "username": "Akshaya_Srinivasan" }, { "code": "lastCommittedOpTimetrs.conf()", "text": "rs.conf().protocolVersion\nNumberLong(1)Hrm, in this case the lastCommittedOpTime should normally have a t value.Can you also share the output of rs.conf() from your primary with the hostnames redacted?In your original post you mentioned replication was going fine previously. Did anything specific change in your deployment (configuration, upgrades, election, …) to prompt noticing this change in the replication status output?Thanks,\nStennie", "username": "Stennie_X" }, { "code": "myShard_0:PRIMARY> rs.conf()\n{\n \"_id\" : \"myShard_0\",\n \"version\" : 104289,\n \"protocolVersion\" : NumberLong(1),\n \"members\" : [\n {\n \"_id\" : 0,\n \"host\" : \"xxx:27017\",\n \"arbiterOnly\" : false,\n \"buildIndexes\" : true,\n \"hidden\" : false,\n \"priority\" : 1,\n \"tags\" : {\n\n },\n \"slaveDelay\" : NumberLong(0),\n \"votes\" : 1\n },\n {\n \"_id\" : 1,\n \"host\" : \"cccwin:27018\",\n \"arbiterOnly\" : false,\n \"buildIndexes\" : true,\n \"hidden\" : false,\n \"priority\" : 0.5,\n \"tags\" : {\n\n },\n \"slaveDelay\" : NumberLong(0),\n \"votes\" : 1\n }\n ],\n \"settings\" : {\n \"chainingAllowed\" : true,\n \"heartbeatIntervalMillis\" : 2000,\n \"heartbeatTimeoutSecs\" : 10,\n \"electionTimeoutMillis\" : 10000,\n \"catchUpTimeoutMillis\" : 2000,\n \"getLastErrorModes\" : {\n\n },\n \"getLastErrorDefaults\" : {\n \"w\" : 1,\n \"wtimeout\" : 0\n },\n \"replicaSetId\" : ObjectId(\"5e6b3245ccc189cd553cf737\")\n }\n}\nmyShard_0:PRIMARY>", "text": "Thanks Stennie. I just did role reversal. Primary to secondary and vice versa.", "username": "Akshaya_Srinivasan" }, { "code": "", "text": "How can I resolve this @Stennie_X. Please help.Thanks,\nAkshaya Srinivasan", "username": "Akshaya_Srinivasan" }, { "code": "", "text": "Hi,I am seeing this issue again. i am using MongoDB version 3.6.20 , OS is RHEL 6.7 . Please can some one help? Also protocolVersion is not listed in the rs.conf() output.Secondary node:[root@akscent1 Log_Files]# /root/mongodb-linux-x86_64-rhel62-3.6.20/bin/mongo --port 27097 -u xxxx -p’xxxx’ --authenticationDatabase admin --eval “rs.status().optimes.lastCommittedOpTime”MongoDB shell version v3.6.20{ “ts” : Timestamp(0, 0), “t” : NumberLong(-1) }Secondary node:[root@akscent1 Log_Files]# /root/mongodb-linux-x86_64-rhel62-3.6.20/bin/mongo --port 27098 -u xxxx -p’xxxx’ --authenticationDatabase admin --eval “rs.status().optimes.lastCommittedOpTime”MongoDB shell version v3.6.20{ “ts” : Timestamp(0, 0), “t” : NumberLong(-1) }Primary node:[root@akscent1 Log_Files]# /root/mongodb-linux-x86_64-rhel62-3.6.20/bin/mongo --port 27099 -u xxxx -p’xxxxx’ --authenticationDatabase admin --eval “rs.status().optimes.lastCommittedOpTime”MongoDB shell version v3.6.20{ “ts” : Timestamp(1607574936, 1), “t” : NumberLong(-1) }Thanks,\nAkshaya Srinivasan", "username": "Akshaya_Srinivasan" }, { "code": "rs.status()rs.conf()rs.printReplicationInfo()", "text": "Hi @Akshaya_SrinivasanWhat’s the whole output of rs.status() and rs.conf()? 
Also please show the output of rs.printReplicationInfo().Best regards,\nKevin", "username": "kevinadi" } ]
Replication status shows lastCommittedOpTime as 0
2020-03-15T05:46:06.417Z
Replication status shows lastCommittedOpTime as 0
3,032
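For readers hitting the same symptom, a few shell checks pull together everything asked for in that thread. This is a minimal sketch for mongosh or the legacy shell, run against the primary; it is not tied to any particular deployment:

```javascript
// Replication protocol in use (pv1 is expected on supported versions)
rs.conf().protocolVersion;

// Commit / applied / durable optimes reported by this member
rs.status().optimes;

// Oplog window on this member
rs.printReplicationInfo();

// Quick per-member summary, useful for spotting a secondary whose optime
// is days behind the primary, as in the output pasted in the thread
rs.status().members.map(m => ({ name: m.name, state: m.stateStr, optime: m.optimeDate }));
```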
null
[ "aggregation" ]
[ { "code": "{\n\"_id\" : \"20110418123336260\",\n\"radar\" : {\n \"mileage\" : \"15\",\n \"direction\" : \"ascending\",\n \"speed limit\" : \"180\"\n },\n\"Record\" : {\n \"date\" : \"18/04/2007\",\n \"time\" : \"12:33:36.260\", //Hour<24 and .260 are ms\n \"speed\" : \"193\"\n}\ndb.traffic.aggregate( \n [\n {\n $group:\n {\n _id: \"$radar\",\n poppularHour: { $max: \"$Record.time\" }\n }\n }\n ]\n)\ndb.traffic.aggregate( [ { $unwind: \"$radar\" },{ $sortByCount: \"$Record.time\" } ] )\ndb.trafico.aggregate([\n { \"$unwind\": \"$radar\" },\n {\n \"$group\": {\n \"_id\": \"$Record.time\",\n \"count\": { \"$sum\": 1 }\n }\n },\n { \"$sort\": { \"count\": -1 } },\n { \"$limit\": 1 } \n])\n", "text": "Hello everyone,I’m learning MongoDB and I’m practicing with a traffic database, and I’d like to know how to get for each radar the time of the day (“Record.time”) when the most speeding tickets are recorded (the most repeated hour for each radar, something like radar X (with its attributes) and its “happy hour” )This is how a document looks like in the database:The closest I’ve ever come is to find out the latest hour that each radar has fined with this query:Also I found all the occurrences of all the hours with:or the most repeated hour with it’s occurences with:but as I said, I’m looking for each radar his most repeated hour, and even if I try everything I can’t get it.Could please someone help me?Thank you in advance!!PD: When I say the most repeated hour I mean the same string Record.time (“HH:MM:SS:FFF”)", "username": "ReyBinario" }, { "code": "db.trafico.aggregate([\n { \"$unwind\": \"$radar\" },\n {\n \"$group\": {\n \"_id\": {\"radar\": \"$radar\", \"time\" : \"$Record.time\"},\n \"count\": { \"$sum\": 1 }\n }\n },\n { \"$sort\": { \"_id.radar\" :1, \"count\": -1 } },\n \"$group\": {\n \"_id\": {\"radar\": \"$_id.radar\", \"count\" : \"$count\"},\n topTimePerRadar: { $first: \"$$ROOT\"}\n }\n }\n])\n", "text": "Hi @ReyBinario,Welcome to MongoDB community!If you need the time per radar why not to group per radar and per time in a compound expression:Haven’t tested the query but the idea should work.Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "db.trafico.aggregate([{\n $project: {\n radar: {\n mileage: '$radar.mileage',\n direction: '$radar.direction',\n 'speed limit': '$radar.speed limit'\n },\n Record: {\n date: '$Record.date',\n time: '$Record.time',\n speed: '$Record.speed'\n }\n }\n}, {\n $group: {\n _id: {\n mileage: '$radar.mileage',\n direction: '$radar.direction',\n 'speed limit': '$radar.speed limit',\n hora: '$Record.time'\n },\n total: {\n $sum: 1\n }\n }\n}, {\n $project: {\n _id: {\n mileage: '$_id.mileage',\n direction: '$_id.direction',\n 'speed limit': '$_id.speed limit'\n },\n hora: '$_id.hora',\n total: '$total',\n maxQuantity: {\n $max: '$total'\n }\n }\n}, {\n $project: {\n _id: {\n mileage: '$_id.mileage',\n direction: '$_id.direction',\n 'speed limit': '$_id.speed limit'\n },\n hora: '$_id.hora',\n total: '$total',\n maxQuantity: '$maxQuantity',\n maximum: {\n $eq: ['$total', '$maxQuantity']\n }\n }\n}, {\n $match: {\n maximum: true\n }\n}\n]);\n", "text": "Hi @Pavel_Duchovny,Thank you so much for your answer!! 
Your query works perfect.\nAlso I have learned new operators like “first” or “root” that I didn’t know, so thanks again for your time.In order to improve the thread of the conversation, I would like to show you (and to everyone) the latest version I was working on in case someone else gets into this thread with doubts and helps to solve them.*There is a small bug that keeps repeating radars even though in the last filter-stage the “count” should be equal to the maxQuantity, if you could solve the bug I would really appreciate.Thanks in advance,\nRB.", "username": "ReyBinario" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How can I count and return the most repeated attribute in MongoDB?
2020-12-15T01:21:05.677Z
How can I count and return the most repeated attribute in MongoDB?
9,600
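Pavel's pipeline in that thread is missing an opening brace before its second $group stage, so it will not parse exactly as written. Below is a cleaned-up sketch of the same idea (count per radar/time pair, sort, then keep the most frequent time per radar), assuming the traffic collection and field names used in the question:

```javascript
db.traffic.aggregate([
  // 1. Count tickets per (radar, time) pair
  { $group: { _id: { radar: "$radar", time: "$Record.time" }, count: { $sum: 1 } } },
  // 2. Within each radar, put the most frequent time first
  { $sort: { "_id.radar": 1, count: -1 } },
  // 3. Keep only that first (most frequent) time per radar
  { $group: {
      _id: "$_id.radar",
      busiestTime: { $first: "$_id.time" },
      tickets: { $first: "$count" }
  } }
]);
```

The $sort followed by $group/$first is the usual pattern for a "top 1 per group" result without resorting to $unwind.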
null
[ "aggregation", "queries" ]
[ { "code": "db.getCollection('crawler').find( { link: { $regex: /^https:\\/\\//i } } )", "text": "For example, I am getting results with this query:db.getCollection('crawler').find( { link: { $regex: /^https:\\/\\//i } } )How can I also make ‘link’ distinct?Much appreciated", "username": "Russell_Jones" }, { "code": "db.crawler.aggregate([\n {$match: {\"link\": /^https:\\/\\//}}, \n {$group: {\"_id\": \"$link\"}}\n])\nhttps://", "text": "will give you all links that start with https://, distinct. May not be very efficient.", "username": "Nuri_Halperin" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to combine regex find and distinct?
2020-12-15T17:17:31.351Z
How to combine regex find and distinct?
3,454
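For completeness: distinct() also accepts a query filter as its second argument, so the same result can be had without an aggregation, using the collection and field names from the question:

```javascript
// Distinct links that start with https:// (case-insensitive)
db.crawler.distinct("link", { link: { $regex: /^https:\/\//i } });
```

For very large result sets the $group approach shown in the thread is safer, since distinct() has to return all values in a single document.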
null
[ "mongodb-live-2021" ]
[ { "code": "", "text": "MongoDB.live is where the world’s fastest-growing data community comes to connect, explore, and learn. We’re looking for speakers who can inspire attendees by introducing them to new technologies, ideas, and solutions.Whether you want to do a 30 minute conference session, a 60-90 minutes deep dive tutorial, or a 10 minutes lightning talk - give us your talk ideas. If you have a great idea but don’t feel ready for the stage / stream: don’t worry. We offer speaker storyline workshops, speaker coaching sessions, and more.Call for speakers closes February 11th 2021.If you have any questions please comment on the thread - We’re here to help.\nSubmit your talk today", "username": "Sven_Peters" }, { "code": "", "text": "", "username": "Stennie_X" } ]
MongoDB.live 2021 Call for speakers is open!
2020-12-15T14:44:23.591Z
MongoDB.live 2021 Call for speakers is open!
1,993
null
[ "atlas-device-sync" ]
[ { "code": "const realm = new Realm({\n schema:[ItemSchema],\n sync:{\n user:user,\n partitionValue: user.id,\n }\n })\n", "text": "Hello everyone,I am new to MongoDB Realm, however had some experience with MongoDB.Before any comments about the same problem, I already have been to all discussions and nothing have helped me yet.I have consulted these discussions:But sadly, either the mentioned solutions doesn’t work for me or I am not able to understand those.I am currently using Realm in Nodejs. I have partition key as restroName while activating sync. But constantly I keep getting 212 error while I connect using:failed to validate upload changesets: SET instruction had incorrect partition value for key “restroName” (ProtocolErrorCode=212)When i try to use partionValue: “anything”, it gives:user does not have permission to sync on partition (ProtocolErrorCode=206)So I assume it needs to be the user.id on the partitionValue but I want to store data in the realms with restroName.Also I always clear the realm data (wipe it) before I make changes.I hope I can get help.", "username": "Maneez_Paudel" }, { "code": "", "text": "What version of realm-js are you using? What do the server-side logs say? What does your sync configuration look like?For permissions you can take a look at this doc -\nhttps://docs.mongodb.com/realm/sync/permissions", "username": "Ian_Ward" }, { "code": "Error:\n\nfailed to validate upload changesets: SET instruction had incorrect partition value for key \"restroName\" (ProtocolErrorCode=212)\nPartition:\nmyuserid\n\nWrite Summary:\n{\n \"item\": {\n \"inserted\": [\n \"5fd1d25d53e7d937bcc478bb\",\n \"5fd31c6b14e8f1e40bc40bb8\",\n \"5fd34db34824056b8c62445a\",\n \"5fd1d22be3b5216bdb0af01b\"\n ],\n \"deleted\": [\n \"5fd1d22be3b5216bdb0af01c\",\n \"5fd1d25d53e7d937bcc478bc\",\n \"5fd31c6b14e8f1e40bc40bb9\",\n \"5fd34db34824056b8c62445b\"\n ]\n }\n}\nSDK:\nnode.js vRealmJS/10.0.1\nPlatform Version:\nv14.15.0\nattempted to bind on illegal realm partition (\"\\\"5fbe16ffa476d2d81049ef6d\\\"\"): expected partition to have type objectId but found string (ProtocolErrorCode=204)\nError:\n\nEnding session with error: user does not have write access to partition (ProtocolErrorCode=206)\nLogs:\n[\n \"Session was active for: 1s\"\n]\nPartition:\n\nObjectID(\"5fd1d22979ac94bd7d61fab7\")\nSession Metrics:\n{\n \"downloads\": 1\n}\nSDK:\nnode.js vRealmJS/10.0.1\nPlatform Version:\nv14.15.0\n{\n \"%%partition\": \"%%user.id\"\n}\n", "text": "I am using realm-js version 10.0.1.\nThe server log saysI see the problem here because I have the partition key as restroName and instead I gave partition value my user.id.\nWhen I set the partition key as _id, I receive the error:I have set the id as BSON.objectId, then I get this:I have tried my best to do everything I found on other discussions.\nOk, here is the real deal.\nI would like to create a Realm App where the users who work in the same restaurant can read and write the data.\nFor this I configured partition key in RealmSync as “restroName”.\nThen for permissions, where I think I messed up, I set permission as users can read and write their own data.The errors when I change the partition key are mentioned above.\nWhat would be the best thing for me to do? Should I make custom authentication function and store restroName in Atlas and set permissions as “%%partition”: “%%user.customData.restroName”. 
I am currently trying to do so but I think that won’t help either.\nPlease guide me through it, I think I am misunderstanding some things in the documentation.", "username": "Maneez_Paudel" }, { "code": "", "text": "@Maneez_Paudel If I understand your use case correctly then yes I would set the partitionKey as the restaurant name and use custom user data as a way to store which restaurants a user has access to. You can see an example of this here which uses projects instead of restaurants -You’re on the right path with the permissions expression but it is flipped - see here:\nhttps://docs.mongodb.com/realm/sync/permissions#permissions-based-on-user-data", "username": "Ian_Ward" }, { "code": "", "text": "Thanks @Ian_Ward, I already did so, and it worked using the custom data", "username": "Maneez_Paudel" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
BadChangeSet Error: failed to validate upload changesets: ProtocolErrorCode=212
2020-12-07T07:25:37.555Z
BadChangeSet Error: failed to validate upload changesets: ProtocolErrorCode=212
3,686
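For anyone landing on that thread with the same 206/212 errors, here is a rough realm-js v10 sketch of the approach that worked: log in, refresh custom user data, and open the Realm with the restaurant name as the partition value. The app ID, the schema, and the restroName custom-data field are placeholders, and the server-side Sync permissions still have to allow that partition as described in the linked docs:

```javascript
const Realm = require("realm");

const app = new Realm.App({ id: "<your-realm-app-id>" }); // placeholder app ID

// Hypothetical schema; note the partition key field appears on the object
const ItemSchema = {
  name: "item",
  primaryKey: "_id",
  properties: { _id: "objectId", restroName: "string", name: "string" },
};

async function openRestaurantRealm(email, password) {
  const user = await app.logIn(Realm.Credentials.emailPassword(email, password));

  // Pull the latest custom user data (e.g. { restroName: "My Restaurant" })
  await user.refreshCustomData();

  return Realm.open({
    schema: [ItemSchema],
    sync: {
      user,
      partitionValue: user.customData.restroName, // must be a partition this user may sync
    },
  });
}
```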
null
[]
[ { "code": "", "text": "I have 3 replicas, the other 2 are fine.Here are some of the logs, from here all the connections are rejected.\nHow can I get this replica back?2020-12-14T17:37:59.453 +0000 I REPL_HB [replexec-1] Heartbeat to failed after 2 retries, response status: InterruptedAtShutdown: interrupted at shutdown2020-12-14T17:37:59.453 +0000 I REPL [replexec-1] Member is now in state RS_DOWN - interrupted at shutdown2020-12-14T17:38:01.455 +0000 I REPL_HB [replexec-1] Heartbeat to failed after 2 retries, response status: InterruptedAtShutdown: interrupted at shutdown2020-12-14T17:38:03.457 +0000 I REPL_HB [replexec-2] Heartbeat to failed after 2 retries, response status: InterruptedAtShutdown: interrupted at shutdown2020-12-14T17:38:05.459 +0000 I REPL_HB [replexec-3] Heartbeat to failed after 2 retries, response status: InterruptedAtShutdown: interrupted at shutdown2020-12-14T17:38:07.461 +0000 I REPL_HB [replexec-3] Heartbeat to failed after 2 retries, response status: InterruptedAtShutdown: interrupted at shutdown2020-12-14T17:38:10.572 +0000 I CONNPOOL [Replication] Ending connection to host due to bad connection status: HostUnreachable: Connection reset by peer; 0 connections to that host remain open2020-12-14T17:38:10.572 +0000 I CONNPOOL [Replication] Connecting to 2020-12-14T17:38:10.574 +0000 I REPL_HB [replexec-3] Heartbeat to failed after 2 retries, response status: HostUnreachable: Error connecting to (192.168.248.8:27017) :: caused by :: Connection refused", "username": "CJ_Jiang" }, { "code": "", "text": "Hi @CJ_Jiang,If this is an Atlas cluster I suggest you to contact our support.Seems that a member is down but there is no enough detail here to indicate why.Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "It is resolved. Thanks a lot.", "username": "CJ_Jiang" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
One of my replica is down for a few hours, how can I get it back?
2020-12-15T04:44:12.361Z
One of my replica is down for a few hours, how can I get it back?
2,645
null
[ "sharding", "indexes" ]
[ { "code": "_id_id_id_id", "text": "https://docs.mongodb.com/manual/core/index-unique/#sharded-clusters-and-unique-indexesIf the _id field is not the shard key or the prefix of the shard key, _id index only enforces the uniqueness constraint per shard and not across shards.If the _id field is not the shard key nor the prefix of the shard key, MongoDB expects applications to enforce the uniqueness of the _id values across the shards.What is the difference?", "username": "Ke_Wei_Tan" }, { "code": "", "text": "Hi @Ke_Wei_TanWelcome to MongoDB community.There is no contradiction between the two. They both say that if you must have uniqueness in _id field using default MongoDB generation it must be a prefix of the shard key. Otherwise there is possible duplicates.If your application does not care about the _id field it is not significant for you.Thanks\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Sharded Clusters and Unique Indexes
2020-12-15T04:44:16.337Z
Sharded Clusters and Unique Indexes
2,277
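A small mongosh sketch of the rule discussed in that thread: on a sharded collection, a unique index can only be enforced cluster-wide when it is prefixed by the shard key. Database, collection, and field names below are made up:

```javascript
sh.enableSharding("app");
sh.shardCollection("app.orders", { tenantId: 1 });

// OK: the unique index starts with the shard key, so each order number is
// unique per tenant across all shards
db.getSiblingDB("app").orders.createIndex({ tenantId: 1, orderNo: 1 }, { unique: true });

// Rejected: uniqueness on a field that is not prefixed by the shard key can
// only be checked inside a single shard, so the server refuses to build it
// db.getSiblingDB("app").orders.createIndex({ orderNo: 1 }, { unique: true });
```

The default _id index behaves the same way: unless _id is (a prefix of) the shard key, it is only unique per shard, and the application has to guarantee global uniqueness itself.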
null
[]
[ { "code": "", "text": "how to make mongodb index on query which contains lookup", "username": "harsh_sharma" }, { "code": "", "text": "Hi @harsh_sharma,Welcome to MongoDB communityIts hard to give specific recommendations without seeing the query discussed, but general one is to index all fields in previous to the lookup $match or $sort stages. In orderof fields from Equility , Sort and Finally Range.The last field/s should be the field used in source collection for lookup.\nThe target collection fields used to join should be indexed as well.Thanks\nPavel", "username": "Pavel_Duchovny" } ]
Indexing on mongodb
2020-12-15T04:44:22.762Z
Indexing on mongodb
1,390
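To make that advice concrete, here is a small hypothetical example (orders joined to customers): index the fields used by the leading $match/$sort on the source collection, and make sure the foreignField on the joined collection is indexed, since $lookup performs one lookup per input document. All names are invented:

```javascript
// Source collection: support the $match and $sort (equality field first)
db.orders.createIndex({ status: 1, orderedAt: -1 });

// Joined collection: index the foreignField (_id is indexed by default;
// a non-_id join key like this one needs its own index)
db.customers.createIndex({ customerId: 1 });

db.orders.aggregate([
  { $match: { status: "shipped" } },      // uses the orders index
  { $sort: { orderedAt: -1 } },
  { $lookup: {
      from: "customers",
      localField: "customerId",
      foreignField: "customerId",         // each probe can use the customers index
      as: "customer"
  } }
]);
```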
null
[]
[ { "code": "$merge", "text": "If I am using a mongodb transaction to update two documents at the same time.One of the documents I would like to update numerous documents at the same time. And I want to use mongodb aggregation to structure the documents in a certain way. Will $merge be part of the transaction and update all docs or is there a posibility of failure?", "username": "Perminus_Gaita" }, { "code": "db.collection.aggregate()$merge", "text": "No the transaction will not work. According to mongodb docs db.collection.aggregate() is allowed in transaction but the $merge stage is excluded. See more here.", "username": "Perminus_Gaita" } ]
Will transactions work with mongodb aggregation $merge?
2020-12-14T17:56:19.406Z
Will transactions work with mongodb aggregation $merge?
2,574
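A rough mongosh sketch of the split that follows from that answer: perform the multi-document updates inside the transaction, then run the $merge aggregation as a separate operation after the commit. It assumes a replica set (transactions require one), and the collection names are invented:

```javascript
const session = db.getMongo().startSession();
const appDb = session.getDatabase("app");

session.startTransaction();
try {
  // Both updates commit or roll back together
  appDb.accounts.updateOne({ _id: "a" }, { $inc: { balance: -10 } });
  appDb.accounts.updateOne({ _id: "b" }, { $inc: { balance: 10 } });
  session.commitTransaction();
} catch (e) {
  session.abortTransaction();
  throw e;
} finally {
  session.endSession();
}

// $merge cannot run inside the transaction, so it happens after the commit;
// it is not atomic with the updates above.
db.getSiblingDB("app").accounts.aggregate([
  { $group: { _id: null, total: { $sum: "$balance" } } },
  { $merge: { into: "account_totals", whenMatched: "replace", whenNotMatched: "insert" } }
]);
```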
null
[ "queries" ]
[ { "code": "", "text": "Hi I’m new to MongoDB and I’m facing an issue with the deleteMany method by using $nin.\nIrrespective of using $nin, the method is deleting all records in database.Please help me out with this issue. Thank you in Advance.", "username": "Raghu_Varma" }, { "code": "", "text": "Looks like a misspelled field name.A few, well formatted, sample documents and the exact query would be useful to provide a better diagnostic.", "username": "steevej" } ]
$nin is not working as expected,
2020-12-14T19:48:40.119Z
$nin is not working as expected,
2,013
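For reference, a tiny self-contained example of why $nin can appear to "delete everything": documents where the field is missing (or where the field name in the query is misspelled) also match $nin. Collection and field names here are invented:

```javascript
db.items.insertMany([
  { sku: "A", status: "active" },
  { sku: "B", status: "archived" },
  { sku: "C" }                        // no status field at all
]);

// Removes B and C: "archived" is not in the list, and a missing field
// matches $nin as well. If the query used a misspelled field name,
// every document would match and be deleted.
db.items.deleteMany({ status: { $nin: ["active"] } });
```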
null
[ "monitoring" ]
[ { "code": "", "text": "In my previous version of MongoDB, mongod was not a Windows service and I had to start it. This allowed me to watch its console outputs. For example, while creating an index, I could watch the index creation progress. Now that mongod is a service, I have no idea how I can watch its output like in the example above. Any ideas", "username": "Marcel_Oliveira" }, { "code": "", "text": "All this information should be in the log file.You cam also disable the service and start mongod manually if you are used to work this way.", "username": "steevej" }, { "code": "", "text": "Thanks Steeve. The log will do the Job.", "username": "Marcel_Oliveira" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Mongod Console Output
2020-12-14T17:55:39.179Z
Mongod Console Output
1,883
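Since the original motivation was watching index-build progress, one option besides tailing the log file is to query currentOp from a second shell while the build runs. This filter is adapted from the currentOp examples in the server documentation:

```javascript
// Show namespaces and progress messages for in-flight index builds
db.adminCommand({
  currentOp: true,
  $or: [
    { op: "command", "command.createIndexes": { $exists: true } },
    { op: "none", msg: /^Index Build/ }
  ]
}).inprog.forEach(op => print(op.ns + "  " + op.msg));
```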
https://www.mongodb.com/…b0f30799cac.jpeg
[]
[ { "code": "", "text": "Hi folks,I am new working with mongo db. I created a Cluster in Atlas and I was trying to connect to it via shell for windows, but I couldn´t. When trying to connect, the shell just displays ´…´.Dont know what could be happening for it to fail.Here I lend some details of what I have made that maybe could help understand the issue:This is the session information for my mongo db for windows after initiating:Mongo Shell Info display962×322 65.7 KBMay someone help me detect what I might be doing wrong?\nPlease Help.Kind Regards,\nLuis", "username": "Luis_Guzman" }, { "code": "", "text": "Hey,I’m just very novice to this, but I finally understand it and figure it out.If your’e new to this just like me, add the directory of where the mongo server files are installed in your program files, to the advanced settings of your windows environment variables. Once that’s added, just copy and paste the link provided to connect to your cluster for shell in the cmd, don’t do anything else. (of course just change the db name to a valid one). Enter your passwrod, and that’s it, you´ll be in.Thanks for reading this, hope it helps someone.Regards from Costa Rica!", "username": "Luis_Guzman" }, { "code": "", "text": "I’m just very novice to this, but I finally understand it and figure it out.Good you were able to resolve it yorself and thanks for sharing the fix\nWant to add few lines\nMongo and mongod commands should be run at os prompt\nIf your run these commands while already connected to mongo shell you will get syntax error or error like you got\n3 dots(…) means command is not complete and it is waiting for more\nIn your case you somehow were at mongo prompt (\">\" symbol) indicates mongo prompt in WindowsI could reproduce the error on WindowsCase 1:mongo “mongodb+srv://sandbox.xyz.mongodb.net/test” -u m001-student\n2020-12-14T20:28:03.360+0530 E QUERY [js] SyntaxError: missing ; before statement @(shell):1:6Case 2:mongo mongodb+srv://sandbox.muehb.mongodb.net/test -username m001-student\n…\nWhen i add -p and hit enter it gives syntax errormongo mongodb+srv://sandbox.muehb.mongodb.net/test -username m001-student\n… -p\n2020-12-14T20:30:53.706+0530 E QUERY [js] SyntaxError: missing ; before statement @(shell):1:6So correct way to runC:\\Users>mongo mongodb+srv://sandbox.xyz.mongodb.net/test -username m001-student on Windows", "username": "Ramachandra_Tummala" } ]
Shell Connection to Atlas Cluster not working
2020-12-14T03:54:37.963Z
Shell Connection to Atlas Cluster not working
1,967
null
[ "aggregation" ]
[ { "code": "", "text": "HelloIs it possible to group an array using reduce(without unwind+group),in the general case?For simple example (instead of numbers could be documents etc)\n[1 2 3 1 2 10 2] to become {“1” [1 1] , “2” [2 2 2] ,“3” [“3”] , “10” [10]}Also is it possible to reference a field,by constructing the reference inside the pipeline,\nlike concatStrings(\"$\" “myfield”)?\nIf i could do the second,i could check if the member is key to the reduced map,and update it\nbut i cant construct the reference based on what i see in the reduce.I can use $function stage and javascript but is there a way to do it with mongoQL?Thank you", "username": "Takis" }, { "code": "", "text": "Hi @Takis,I want to help but don’t understand the use case from your example.Can you provide a source document and the way the end desired documents should look like?Best pavel", "username": "Pavel_Duchovny" }, { "code": "let people = [\n { name: 'Alice', age: 21 },\n { name: 'Max', age: 20 },\n { name: 'Jane', age: 20 }\n];\n\nfunction groupBy(objectArray, property) {\n return objectArray.reduce(function (acc, obj) {\n let key = obj[property]\n if (!acc[key]) {\n acc[key] = []\n }\n acc[key].push(obj)\n return acc\n }, {})\n}\n\nlet groupedPeople = groupBy(people, 'age')\n// groupedPeople is:\n// { \n// 20: [\n// { name: 'Max', age: 20 }, \n// { name: 'Jane', age: 20 }\n// ], \n// 21: [{ name: 'Alice', age: 21 }] \n// }\n", "text": "Hello @Pavel_Duchovny thank you for your response.\nI want to do what this simple javascript code does, but with mongo.\nReduce over an array and group,without using uwnind+group stages.\nThe problem is that i cant do this\nlet key = obj[property];\nacc[key];\nIn mongoDB when you reference a field,you must provide its name\nbefore the pipeline runs\n(you cant say something like $$value =“myfield”; toReference(\"$\"+$$value))\nIn javascript i found the needed key while reducing.An example would be something like this javascript code,its simple code,just reduce\nover an array and group.I hope there is a simple and fast way to do it like the javascript code does.\nI think not being able to construct references inside the pipeline is causing many problems\nnot being able to group by is one of them.Thank you", "username": "Takis" }, { "code": "$objectToArraydb.people.insertMany([{ name: 'Alice', age: 21 }, { name: 'Max', age: 20 }, { name: 'Jane', age: 20 }])\n{ acknowleged: 1,\n insertedIds: \n { '0': '5f508cb9773ad5bb561ae1e2',\n '1': '5f508cb9773ad5bb561ae1e3',\n '2': '5f508cb9773ad5bb561ae1e4' } }\n// Optional do not project _id\n[{$project: {\n _id : 0\n}},\n// Grouping all documents by age and pushing them into \"ages\" array\n {$group: {\n _id: \"$age\",\n \"ages\": {\n $push : \"$$ROOT\"\n }\n}},\n// Building a [ k: <age> v: <docs array of associated ages>]\n {$group: {\n \"_id\": null,\n \"data\": {\n \"$push\": { \"k\": {$toString: \"$_id\"}, \"v\": \"$ages\" }\n }\n }},\n // Replacing the new root to be the age as a field and people under that age as array of docs\n {$replaceRoot: {\n newRoot: { \"$arrayToObject\": \"$data\" }\n}}]\ndb.people.aggregate(// Optional do not project _id\n[{$project: {\n _id : 0\n}},\n// Grouping all documents by age and pushing them into \"ages\" array\n {$group: {\n _id: \"$age\",\n \"ages\": {\n $push : \"$$ROOT\"\n }\n}},\n// Building a [ k: <age> v: <docs array of associated ages>]\n {$group: {\n \"_id\": null,\n \"data\": {\n \"$push\": { \"k\": {$toString: \"$_id\"}, \"v\": \"$ages\" }\n }\n }},\n // Replacing the new root to be the age as a 
field and people under that age as array of docs\n {$replaceRoot: {\n newRoot: { \"$arrayToObject\": \"$data\" }\n}}])\n[ { '20': [ { name: 'Max', age: 20 }, { name: 'Jane', age: 20 } ],\n '21': [ { name: 'Alice', age: 21 } ] } ]\n", "text": "Hi @Takis,Ok I did not find a good way to do this without using a simple $objectToArray command as I need values to become keys and this is the only way I know how to do this.Here is my example:Now my pipline is:The aggregation:Let me know if this is what you are looking for.Best regards,\nPavel", "username": "Pavel_Duchovny" }, { "code": "(def doc {\"myarray\" [\n { \"name\" \"Alice\" \"age\" 21 }\n { \"name\" \"Max\" \"age\" 20 }\n { \"name\" \"Jane\" \"age\" 20 }\n ]})\n\n(prn (reduce (fn [grouped-array doc]\n (let [cur-key (str (get doc \"age\"))] ;;the key,here is the age value,i make it string,to be like valid json\n (if (contains? grouped-array cur-key) ;;if the reduced map so far contains the key\n (update grouped-array cur-key conj doc) ;;update to add the doc to the already array {key [.. doc] ...}\n (assoc grouped-array cur-key [doc])))) ;;else {key [doc]} ,its the first doc found with that key\n {}\n (get doc \"myarray\")))\n\n;;prints {\"21\" [{\"name\" \"Alice\", \"age\" 21}], \"20\" [{\"name\" \"Max\", \"age\" 20} {\"name\" \"Jane\", \"age\" 20}]}\n\"pipeline\": [\n {\n \"$project\": {\n \"_id\": 0,\n \"mygroupedarray\": {\n \"$function\": {\n \"args\": [\n \"$myarray\"\n ],\n \"lang\": \"js\",\n \"body\": \"function groupBy(objectArray) {\\n return objectArray.reduce(function (acc, obj) {\\n let key = obj[\\\"age\\\"]+\\\"\\\";\\n if (!acc[key]) {\\n acc[key] = [];\\n }\\n acc[key].push(obj);\\n return acc;\\n }, {})\\n}\"\n }\n }\n }\n }\n ]\n\"mygroupedarray\": {\n \"20\": [\n {\n \"name\": \"Max\",\n \"age\": 20\n },\n {\n \"name\": \"Jane\",\n \"age\": 20\n }\n ],\n \"21\": [\n {\n \"name\": \"Alice\",\n \"age\": 21\n }\n ]\n }\nFor example for document {\"myarray\" [1,2,3]} ,this doesn't work.\n[{\"$project\":{\"myarray1\":{\"$concat\":[{\"$literal\":\"$\"},\"myarray\"]}}}]\nthis works,because i already know the name of the field reference before pipeline starts.\n[{\"$project\":{\"myarray1\":\"$myarray\"}}]\n", "text": "HelloThank you for trying to help me,but this isn’t what i want to do.\nI just think that mongoDB doesnt support it yet (refer to a field,when its value its computed during the pipeline)\nI am Clojure programmer and in clojure to reduce an array to a map is so easy.MongoDB doesn’t allow clojure code so one solution is to use the $function operator and javascript code\ni did it using the next command,after i inserted the above 1 document to the my collection {“myarray” …}Worked fine i took the same resultBut i want to do this using mongo query language reduce,without javascript.\nI want array -> reduce -> document (the represent the group) (not use unwind,not use group)Its not that i have a application that really needs it,but i dont know a way to make mongoDB,\nrefer to a field,that its name is found doing the pipeline.For example in javascript code\nif(!acc[key]) meaned that if the calculated key,is not contained in the object(document) acc.\nIn mongoDB you cant do this,refer to a field that you don’t know its value from before is not\npossible(objectToArray is a solution to this,treating the key as value,but can it help here?\nand can work in simple and fast way?)This problem can happen anytime i want to refer to a field,that its name is calculated during\nthe pipepline timeIf the above worked i could also group 
using reduce only i think.Thank you.", "username": "Takis" }, { "code": "\n\"mygroupedarray\": {\n \"20\": [\n {\n \"name\": \"Max\",\n \"age\": 20\n },\n {\n \"name\": \"Jane\",\n \"age\": 20\n }\n ],\n \"21\": [\n {\n \"name\": \"Alice\",\n \"age\": 21\n }\n ]\n }\n", "text": "Hi @Takis,The query I gave you produce the following output:The other outputs you show are not a valid JSON format therefore MongoDB will never output them like that and your application have to do the adjustments.Playing with arrayToObject and objectToArray is the only way I know to use a value and transform it to a field name…Best\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Thank you for trying to help me,i think its not possible to group an array only by using reduce in mongoql.\nI will make a new topic,asking about the root of the problem.Thank you alot", "username": "Takis" }, { "code": " db.people.find()\n { \"_id\" : 0, \"myarray\" : [ { \"name\" : \"Alice\", \"age\" : 21 }, { \"name\" : \"Max\", \"age\" : 20 }, { \"name\" : \"Jane\", \"age\" : 20 } ] } \n\n db.people.aggregate({$project:{ ages: {$map:{\n input:{$setUnion:\"$myarray.age\"}, \n as: \"a\", \n in: {\n age: \"$$a\", \n people: { $filter:{ input:\"$myarray\", cond:{$eq:[\"$$a\", \"$$this.age\"]}}}\n }\n }}}})\n { \"_id\" : 0, \"ages\" : [ \n { \"age\" : 20, \"people\" : [ { \"name\" : \"Max\", \"age\" : 20 }, { \"name\" : \"Jane\", \"age\" : 20 } ] }, \n { \"age\" : 21, \"people\" : [ { \"name\" : \"Alice\", \"age\" : 21 } ] } \n ] }\n", "text": "@TakisIt’s definitely possible to do what you are asking about. Starting with a single document with your array of people, here’s what it looks like:", "username": "Asya_Kamsky" }, { "code": "function (objectArray,property) {\n return objectArray.reduce(function (acc, obj) {\n var key = obj[property].valueOf().toString();\n if (!acc[key]) {\n acc[key] = [];\n }\n acc[key].push(obj);\n return acc;\n }, {})\n}\n10 members => js 5x slower\n100 members => js 3x slower\n1000 members => same\n10000 members => js 2x faster\n100000 members => js 3x faster\nput(doc,$$k,$$v) O(1)\ncontains?(doc,$$k) O(1) //in our case was O(n) because no other way\nget(doc,$$k) O(1)\nremove(doc,$$k) O(1)\nkeys(doc)\nvalues(doc)\n.....\n", "text": "HelloThank you for the reply,i know its possible with serial search but its slower than javascript.\nUsing $function operator i wrote a reduce in javascript (with O(1) contains(doc,key)) i got the below results.To do those fast we need document operators,with variables as arguments.I made put with variables using, $arrayToObject and $mergeObjects but its slow.\nSeems that $mergeObjects even if you add 1 key/value its O(n)k,v are variables\n{$mergeObjects doc {$arrayToObject [[k v]}}Its ok i will wait for document operators accepting variables to be added to MQL.\nAlso i think we dont have $push(array,$$v) in aggregation(only in group we have),\nand $concatArrays(array,[$$v]) is slow not O(1).", "username": "Takis" } ]
How to group an array using reduce, without unwind+group?
2020-08-31T17:34:29.839Z
How to group an array using reduce, without unwind+group?
10,526
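For the original integer-array example in that thread, the closest MQL-only equivalent of the JavaScript groupBy that I am aware of still goes through $arrayToObject rather than a bare $reduce: distinct values become keys, and a $filter collects each value's occurrences. Note it is O(n²) in the array length, so the $function route discussed above may still win for large arrays:

```javascript
db.numbers.insertOne({ myarray: [1, 2, 3, 1, 2, 10, 2] });

db.numbers.aggregate([
  { $project: {
      _id: 0,
      grouped: {
        $arrayToObject: {
          $map: {
            input: { $setUnion: ["$myarray", []] },   // distinct values
            as: "v",
            in: {
              k: { $toString: "$$v" },                // object keys must be strings
              v: { $filter: { input: "$myarray", cond: { $eq: ["$$this", "$$v"] } } }
            }
          }
        }
      }
  } }
]);
// => { grouped: { "1": [1, 1], "2": [2, 2, 2], "3": [3], "10": [10] } }
```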
null
[]
[ { "code": "", "text": "Hi there,can I repeat a lab after putting the wrong answer three times:(BR", "username": "Serkan_Kaya" }, { "code": "", "text": "Hi @Serkan_Kaya,You only get a maximum of three attempts in lab and final exam.~ Shubham", "username": "Shubham_Ranjan" }, { "code": "", "text": "There was a question in Chapter 4 Lab 2: Querying Arrays and Sub-Documents,\nThe question was really basic about “NEW YORK”, but i entered in the query as “New York”, i didn’t expect that there were two different keyword for new york, so i entered the answer as 5 and got it wrong, i just wanted to ask will it affect the final score? Does the certificate has percentage of score on it?", "username": "Pushyaraj_Asnani" }, { "code": "", "text": "Score will not be displayed on the course completion confirmation docIf you have exhausted 3 attempts you will not get any credit\nAll labs will add to final score\nYes it will affect final score but that’s fine as long as you score minimum required to pass the course", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Hi @Pushyaraj_Asnani,Thanks for sharing this. We will update the question to make it clear.And I hope you found @Ramachandra_37567’s response helpful. Let us know if you still have any questions.~ Shubham", "username": "Shubham_Ranjan" }, { "code": "", "text": "", "username": "system" } ]
Repeat a wrongly answered lab
2020-08-25T14:14:07.346Z
Repeat a wrongly answered lab
2,501
null
[ "mongodb-shell" ]
[ { "code": "", "text": "Hi,I was trying out MongoDB Atlas and was lead by the wizard to install the Mongo Shell via the Homebrew script “brew install mongodb/brew/mongodb-community-shell”.May I ask how do I uninstall the MongoDB shell? Is there another shell for uninstalling?I tried to search the internet and MangoDB FAQ but I can hardly find much information about Mango Shell. All the results are more for the DB than the shell!", "username": "Stan_Suen" }, { "code": "brew uninstallbrew installbrew helpbrew help uninstall", "text": "Welcome to the MongoDB community @Stan_Suen!May I ask how do I uninstall the MongoDB shell? Is there another shell for uninstalling?Use brew uninstall instead of brew install. Either of these should work:brew uninstall mongodb/brew/mongodb-community-shellbrew uninstall mongodb-community-shellFor more options see brew help and brew help uninstall.Regards,\nStennie", "username": "Stennie_X" }, { "code": "brew reinstall mongodb-community-shell", "text": "Hi,I actually did the “brew uninstall …” statement first before I reach for help. The reason was that the uninstallation only deleted the download package and did not seems to clean up all the long list of stuff /usr/local/bin or /usr/local/var. So out of curiosity I try to “reinstall” Mongo Shell again with the “brew install” again. Only that the second time brew did not work and came up with aWarning: mongodb/brew/mongodb-community-shell 4.2.0 is already installed and up-to-date\nTo reinstall 4.2.0, run brew reinstall mongodb-community-shellSo I thought the uninstallation process only deleted the download packages but did not do a real uninstall of mongo shell. It’s no big deal to leave mongo shell in my system but I am very curious in how to properly clean it up. Homebrew does a great job in spoiling people like me not to understand how things work underneath. ", "username": "Stan_Suen" } ]
How to uninstall Mongo Shell via homebrew
2020-12-13T05:12:26.398Z
How to uninstall Mongo Shell via homebrew
10,723
null
[ "connecting", "database-tools" ]
[ { "code": "mongod --dbpath /data --logpath /data/mongodb.log --fork\nError parsing command line: unrecognised option '--fork'\ntry 'mongod --help' for more information\n", "text": "I am trying to execute below command to connect MongoDB in Windows OS,Error:I knew the --fork option is only support in Linux OS, Is there any option to use in Windows OS?", "username": "turivishal" }, { "code": "", "text": "You may start mongod as a Windows service.", "username": "steevej" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
How to use --fork in Windows OS?
2020-12-13T07:53:32.147Z
How to use --fork in Windows OS?
3,659
null
[ "installation" ]
[ { "code": "Job for mongod.service failed because a timeout was exceeded. See \"systemctl status mongod.service\" and \"journalctl -xe\" for details.systemctl status mongod.service gives me this:\n`● mongod.service - MongoDB Database Server\n Loaded: loaded (/usr/lib/systemd/system/mongod.service; enabled; vendor preset: disabled)\n Active: failed (Result: timeout) since Fri 2020-12-11 04:49:27 UTC; 1min 58s ago\n Docs: https://docs.mongodb.org/manual\n Process: 26266 ExecStart=/usr/bin/mongod $OPTIONS (code=exited, status=0/SUCCESS)\n Process: 26263 ExecStartPre=/usr/bin/chmod 0755 /var/run/mongodb (code=exited, status=0/SUCCESS)\n Process: 26260 ExecStartPre=/usr/bin/chown mongod:mongod /var/run/mongodb (code=exited, status=0/SUCCESS)\n Process: 26257 ExecStartPre=/usr/bin/mkdir -p /var/run/mongodb (code=exited, status=0/SUCCESS)\njournalctl -f _COMM=mongod -- Logs begin at Mon 2020-03-23 05:08:30 UTC. -- Dec 05 02:36:33 chess.gates.com mongod[14579]: child process started successfully, parent exiting Dec 11 03:20:15 chess.gates.com mongod[22034]: about to fork child process, waiting until server is ready for connections. Dec 11 03:20:15 chess.gates.com mongod[22034]: forked process: 22036 Dec 11 03:20:17 chess.gates.com mongod[22034]: child process started successfully, parent exiting Dec 11 03:27:44 chess.gates.com mongod[22405]: about to fork child process, waiting until server is ready for connections. Dec 11 03:27:44 chess.gates.com mongod[22405]: forked process: 22407 Dec 11 03:27:45 chess.gates.com mongod[22405]: child process started successfully, parent exiting Dec 11 04:47:57 chess.gates.com mongod[26266]: about to fork child process, waiting until server is ready for connections. Dec 11 04:47:57 chess.gates.com mongod[26266]: forked process: 26268 Dec 11 04:47:58 chess.gates.com mongod[26266]: child process started successfully, parent exiting# mongod.conf\n\n# for documentation of all options, see:\n# http://docs.mongodb.org/manual/reference/configuration-options/\n\n# where to write logging data.\nsystemLog:\n destination: file\n logAppend: true\n path: /var/log/mongodb/mongod.log\n\n# Where and how to store data.\nstorage:\n dbPath: /var/lib/mongo\n journal:\n enabled: true\n# engine:\n# wiredTiger:\n\n# how the process runs\nprocessManagement:\n fork: true # fork and run in background\n pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile\n timeZoneInfo: /usr/share/zoneinfo\n\n# network interfaces\nnet:\n port: 27017\n bindIp: 127.0.0.1 # Enter 0.0.0.0,:: to bind to all IPv4 and IPv6 addresses or, alternatively, use the net.bindIpAll setting.\n\n#security:\n#operationProfiling:\n#replication:\n#sharding:\n## Enterprise-Only Options\n#auditLog:\n#snmp:\n{\"t\":{\"$date\":\"2020-12-11T04:47:58.316+00:00\"},\"s\":\"W\", \"c\":\"FTDC\", \"id\":23913, \"ctx\":\"initandlisten\",\"msg\":\"Error checking directory '{sysBlockPathStr}': {ec_message}\",\"attr\":{\"sysBlockPathStr\":\"/sys/block\",\"ec_message\":\"No such file or directory\"}}mkdir: cannot create directory ‘block’: No such file or directory", "text": "Hey everyone, I’m not really much of a MongoDB user, but I have a web application, part of which relies on Mongo, that I’m trying to install. I have a CentOS 7 server running WHM/cPanel and I’ve followed the setup instructions and it all seems to be installed, but when I try to start the service I get Job for mongod.service failed because a timeout was exceeded. 
See \"systemctl status mongod.service\" and \"journalctl -xe\" for details.Dec 11 04:47:57 chess.gates.com mongod[26266]: about to fork child process, waiting until server is ready for connections.\nDec 11 04:47:57 chess.gates.com mongod[26266]: forked process: 26268\nDec 11 04:47:58 chess.gates.com mongod[26266]: child process started successfully, parent exiting`and journalctl:journalctl -f _COMM=mongod -- Logs begin at Mon 2020-03-23 05:08:30 UTC. -- Dec 05 02:36:33 chess.gates.com mongod[14579]: child process started successfully, parent exiting Dec 11 03:20:15 chess.gates.com mongod[22034]: about to fork child process, waiting until server is ready for connections. Dec 11 03:20:15 chess.gates.com mongod[22034]: forked process: 22036 Dec 11 03:20:17 chess.gates.com mongod[22034]: child process started successfully, parent exiting Dec 11 03:27:44 chess.gates.com mongod[22405]: about to fork child process, waiting until server is ready for connections. Dec 11 03:27:44 chess.gates.com mongod[22405]: forked process: 22407 Dec 11 03:27:45 chess.gates.com mongod[22405]: child process started successfully, parent exiting Dec 11 04:47:57 chess.gates.com mongod[26266]: about to fork child process, waiting until server is ready for connections. Dec 11 04:47:57 chess.gates.com mongod[26266]: forked process: 26268 Dec 11 04:47:58 chess.gates.com mongod[26266]: child process started successfully, parent exitingMy mongod.conf is just the default:mongod.logThis bit seems relevant:{\"t\":{\"$date\":\"2020-12-11T04:47:58.316+00:00\"},\"s\":\"W\", \"c\":\"FTDC\", \"id\":23913, \"ctx\":\"initandlisten\",\"msg\":\"Error checking directory '{sysBlockPathStr}': {ec_message}\",\"attr\":{\"sysBlockPathStr\":\"/sys/block\",\"ec_message\":\"No such file or directory\"}}And it’s right, there’s no /sys/block folder on my server and it won’t let me create it either (mkdir: cannot create directory ‘block’: No such file or directory), and I’m not sure what the function of this folder is meant to be or why it’s not there.But the bit that actually stops it running seems to be this:{“t”:{“$date”:“2020-12-11T04:49:27.287+00:00”},“s”:“I”, “c”:“CONTROL”, “id”:23377, “ctx”:“SignalHandler”,“msg”:“Received signal”,“attr”:{“signal”:15,“error”:“Terminated”}}\n{“t”:{“$date”:“2020-12-11T04:49:27.287+00:00”},“s”:“I”, “c”:“CONTROL”, “id”:23378, “ctx”:“SignalHandler”,“msg”:“Signal was sent by kill(2)”,“attr”:{“pid”:1,“uid”:0}}\n{“t”:{“$date”:“2020-12-11T04:49:27.287+00:00”},“s”:“I”, “c”:“CONTROL”, “id”:23381, “ctx”:“SignalHandler”,“msg”:“will terminate after current cmd ends”}`I don’t know what this could mean other than the system is terminating mongo for some reason. This is where I could use some help: where do I start in figuring out what’s killing it? Has anyone successfully run mongo in a similar environment before with any success?Sorry for the long message and general lack of knowledge, and thanks in advance to anyone who can help me", "username": "Alex_Baker" }, { "code": "mongodmongosdiagnosticDataCollectionEnabled: falsesetParametersetParameter:\n diagnosticDataCollectionEnabled: false\n", "text": "It is very weird that your Centos7 system would not have a /sys/block/ directory. This is something you should look in to.The Full Time Diagnostic Data Capture (FTDC) requires access to record block device performance.I do not recommend doing the following but rather refer you to my first comment. 
However FTDC can be disabled.To disable FTDC, start up the mongod or mongos with the diagnosticDataCollectionEnabled: false option specified to the setParameter setting in your configuration file:", "username": "chris" }, { "code": "{\"t\":{\"$date\":\"2020-12-13T16:15:43.212+00:00\"},\"s\":\"W\", \"c\":\"FTDC\", \"id\":23913, \"ctx\":\"initandlisten\",\"msg\":\"Error checking directory '{sysBlockPathStr}': {ec_message}\",\"attr\":{\"sysBlockPathStr\":\"/sys/block\",\"ec_message\":\"No such file or directory\"}} {\"t\":{\"$date\":\"2020-12-13T16:15:43.212+00:00\"},\"s\":\"I\", \"c\":\"FTDC\", \"id\":20625, \"ctx\":\"initandlisten\",\"msg\":\"Initializingfull-time diagnostic data capture\",\"attr\":{\"dataDirectory\":\"/var/lib/mongo/diagnostic.data\"}}", "text": "Thanks for the reply. Okay, I didn’t realise how strange that was (didn’t know what /sys/block was for until this issue) but I’ll contact my hosting provider (GoDaddy vps) and see if they have any idea.I tried adding that parameter to my mongod.conf, but it didn’t change anything? I still have the same error, and in my log it still says{\"t\":{\"$date\":\"2020-12-13T16:15:43.212+00:00\"},\"s\":\"W\", \"c\":\"FTDC\", \"id\":23913, \"ctx\":\"initandlisten\",\"msg\":\"Error checking directory '{sysBlockPathStr}': {ec_message}\",\"attr\":{\"sysBlockPathStr\":\"/sys/block\",\"ec_message\":\"No such file or directory\"}} {\"t\":{\"$date\":\"2020-12-13T16:15:43.212+00:00\"},\"s\":\"I\", \"c\":\"FTDC\", \"id\":20625, \"ctx\":\"initandlisten\",\"msg\":\"Initializingfull-time diagnostic data capture\",\"attr\":{\"dataDirectory\":\"/var/lib/mongo/diagnostic.data\"}}Is there something else I need to do to get it to recognise that I’ve changed something in the config or something like that?Thanks", "username": "Alex_Baker" }, { "code": "", "text": "Actually something I noticed: I have /sys/dev/block but not /sys/block. I don’t really understand the relationship between these two and don’t know if this changes anything?", "username": "Alex_Baker" }, { "code": "Type=forking[service]rpmverify mongodb-org-serveryum reinstall mongodb-org-server", "text": "I was expecting FTDC not to run with that parameter. But I guess I am wrong.Anyway as you rightly point out mongod is being killed.Did you install mongodb using the package manager or tarball ?Looking at the provided default configuration for mongodb on centos7 by the packages mongod is setup as forking. Your logs show this is happening on the mongod side but it might be that some changes occurred to the systemd unit. The timeout seems like the Type=forking is missing from the [service] section.If you run rpmverify mongodb-org-server the only thing output should be your /etc/mongod.conf file(if it has been edited). A yum reinstall mongodb-org-server would resolve any unexpected deviations.", "username": "chris" } ]
Need help getting MongoDB working on CentOS 7: 'Job for mongod.service failed because a timeout was exceeded.'
2020-12-11T05:43:04.403Z
Need help getting MongoDB working on CentOS 7: ‘Job for mongod.service failed because a timeout was exceeded.’
14,127
https://www.mongodb.com/…dcdb93724a1f.png
[]
[ { "code": "", "text": "Hi, good day to you all.I am currently having a hard time restoring the MongoDB database. Somehow, I ruined the MongoDB server and it no longer opens the collections & documents data. I currently have a copy of MongoDB database data which was downloaded from my server. Here is the file structure. Could anyone can help me so I can open these collections & documents on my local MongoDB server?250×520 4.75 KB", "username": "Axiom_88" }, { "code": "", "text": "If that is complete, you can start a new mongod with this location(or a copy) as the data directory.", "username": "chris" } ]
Recovering Mongodb collections and documents data
2020-12-13T19:19:40.133Z
Recovering Mongodb collections and documents data
1,320
https://www.mongodb.com/…0_2_1024x882.png
[ "compass" ]
[ { "code": "", "text": "I was able to connect properly before (a week before) using Compass on the mongoDB database hosted in a private network.I would need to ssh first to an instance and then use mongo 192.168.155.153 if I was doing this normally, which works (on my Python code, on CMD and on node.js)For some reason, within this week, it doesn’t work anymore and I would either get a connection timed out error or an address in use error.I’m not entirely sure what’s going wrong, or if I should go back to a previous update.Screen Shot 2020-12-11 at 1.27.46 AM1234×1064 47.6 KB\nScreen Shot 2020-12-11 at 1.27.56 AM1228×1194 53.2 KBAnything wrong? Should I revert to older MongoDB Compass?", "username": "Seaver_Choy" }, { "code": "", "text": "I just post the same issue.\nI think it have something to do with the auto topology discovery", "username": "Ran_Schindler" }, { "code": "", "text": "Same issue reported on mongodb Jira\nhttps://jira.mongodb.org/projects/COMPASS/issues/COMPASS-4534?filter=allopenissues", "username": "Ran_Schindler" } ]
Compass fails to connect through ssh tunnel after update to 1.24.1
2020-12-10T19:53:54.868Z
Compass fails to connect through ssh tunnel after update to 1.24.1
8,307
null
[ "compass" ]
[ { "code": "", "text": "While creating new connection on Mongo Compass 1.24.1 the UI try to discover the entire replica set topology and connect to the primary/secondary IP.Is there a way to create a DIRECT connection to an HOST/IP+Port just like the clients api and disable the topology discovery step?From pymongo documentation: directConnection (optional): if True, forces this client to connect directly to the specified MongoDB host as a standalone . If false, the client connects to the entire replica set of which the given MongoDB host(s) is a part", "username": "Ran_Schindler" }, { "code": "", "text": "Same issue reported on mongodb Jira\nhttps://jira.mongodb.org/projects/COMPASS/issues/COMPASS-4534?filter=allopenissues", "username": "Ran_Schindler" } ]
Mongo Compass 1.24.1 forces replica set topology discovery on connection
2020-12-10T19:54:26.178Z
Mongo Compass 1.24.1 forces replica set topology discovery on connection
2,172
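On the driver side the feature being asked about is the directConnection option. Recent Node.js drivers accept it either as a URI parameter or as a client option, and later Compass releases also accept the same URI parameter in the connection string, so it is worth retrying on a newer version. The host and port below are placeholders:

```javascript
const { MongoClient } = require("mongodb");

// Connect to this one host only; skip replica set topology discovery.
const client = new MongoClient("mongodb://10.0.0.5:27017/?directConnection=true");
// equivalent: new MongoClient("mongodb://10.0.0.5:27017", { directConnection: true })

async function run() {
  await client.connect();
  console.log(await client.db("admin").command({ ping: 1 }));
  await client.close();
}
run().catch(console.error);
```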
null
[ "aggregation", "performance" ]
[ { "code": "", "text": "HelloIs there somewhere information of the Complexity of array/set/object operators? or how they work internally?I am doing some benchmarks and i get some not expected results.For example i have an array of integers and i reduce that array to itself\ndoing nothing just re-construct the array using reduce and $concatArrays\nEverytime i do {$concatArrays $$newArray [anumber] }\nThought it would be similar with Java’s ArrayList that is O(1) with dynamic array but its not.\nThere is no $push(works only in $group the one we have) in aggregation framework so i used $concatArraysAdding one member each time is so slow,unusable for large arrays\n($concatArrays is super fast if arrays are big,but one by one adding is so slow)\nFor 200000 members it takes 240sec\nFor 100000 members it takes 23sec\nFor 50000 members it takes 6 sec\nFor 10000 members 168.411066 msecHow to add an element to an array to the end or in specific location?\nIs it possible without $concatArrays which is slow for repeated calls?Also mongoDB offers set operators without sets but on arrays,\nEverytime i do one set operation i should think it as array->set->array\nIf i have a reduce that does set operations on an array at every step all array will become set\nand then back to array? Or will remain set(internally)?\nIf its not kept internally as set it will be very slow to repeat many set operations on an array.Thank you", "username": "Takis" }, { "code": "", "text": "The one thing that I suspect is that by growing the array it has to be moved to other blocks on disk over and over again.Thought it would be similar with Java’s ArrayList that is O(1) with dynamic array but its not.Order 1 is true when the array slots are allocated ahead of time. Otherwise the same issue about moving the array to other blocks occurs. But moving blocks in RAM is not as perceptible to us human, but disk is.I think that allocating the array ahead of time should work for MongoDB too.", "username": "steevej" }, { "code": "", "text": "Maybe for each call of $concatArrays there is new array allocation\nI thought it would be like dynamic array,add with O(1) and when allocate ,allocate extra space.\nBut even if i preallocate i cant add at specific position of an array,what aggregation operator\nto use?I only know $concatArrays ($push is only for groups)In general its useful to know the complexity of array/set/object operators,and how they work internally if possible.", "username": "Takis" }, { "code": "{ $set : { \"Path.To.Array.Index\" : ValueOrObject } }\n", "text": "i cant add at specific position of an array,I cannot test right now, but I am pretty and update withshould work.In general its useful to know the complexity of array/set/object operators,and how they work internally if possible.Agree. Hopefully someone with that knowledge can shed a light on the topic.", "username": "steevej" }, { "code": "{ $set : { \"Path.To.Array.Index\" : ValueOrObject } }", "text": "{ $set : { \"Path.To.Array.Index\" : ValueOrObject } }I dont think this works on aggregation at aggregation $arrayElemAt only works i think and only to read not to set.Also the index will come from a variable if reducing.\nIf we find a way to add to array at end or at specific position without $concatArrays , would be helpful", "username": "Takis" } ]
Aggregation operators Time Complexity
2020-12-11T04:14:07.191Z
Aggregation operators Time Complexity
2,903
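One point the thread leaves implicit: growing an array one element at a time with $reduce plus $concatArrays is quadratic, because every step copies the whole accumulator. If the goal is simply to place one element at a known position, a single $slice/$concatArrays expression does it in one pass. A minimal sketch (MongoDB 4.2+ for the $set stage), with assumed collection, field, index and value:

```js
// Sketch: collection "x", array field "arr", insert position and value assumed.
// (For idx = 0, concatenate [newValue] with "$arr" directly instead.)
const idx = 3, newValue = 42;
db.x.aggregate([
  { $set: {
      arr: { $concatArrays: [
        { $slice: ["$arr", 0, idx] },                   // elements before the insert point
        [ newValue ],                                    // the element being inserted
        { $slice: ["$arr", idx, { $size: "$arr" }] }     // the remaining elements
      ] }
  } }
])
```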
null
[ "connecting", "database-tools" ]
[ { "code": "", "text": "Trying to connect to my cloud server using mongoimport but its throwing an error \" error connecting to host: could not connect to server: connection() : auth error: sasl conversation error: unable to authenticate using mechanism “SCRAM-SHA-1”: (AtlasError) bad auth : Authentication failed. \" seem to be backend error please help", "username": "hue_man_N_A" }, { "code": "", "text": "Make sure you have whitelisted your own IP on Atlas under Network Access.\n", "username": "Jack_Woehr" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Importing data to mongod instance using mongoimport
2020-12-12T21:37:47.720Z
Importing data to mongod instance using mongoimport
7,202
null
[ "connector-for-bi" ]
[ { "code": "", "text": "Hello,I am trying to setup up MongoDB Atlas with Power Bi. However, I am not able to create a DSN. I have followed the instructions at - https://docs.mongodb.com/bi-connector/master/tutorial/create-system-dsn.After adding the password and the name of database that I am trying to connect to, I click test only to see the message:\nConnection Failed\n[MongoDB][ODBC 1.0(w) Driver] SSL connection error: protocol version mismatch.The database user I am trying to connect with has password based access (that works fine else where, eg. with NodeJS driver). I have added 0.0.0.0/0 in the network access section to enable access from anywhere. Any clues on how to resolve this will be highly appreciated.Thanks", "username": "Sagar_Setu" }, { "code": "", "text": "Updating to MongoDB ODBC Driver version 1.4.0 fixed this.", "username": "Sagar_Setu" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Issue with DSN configuration for BI
2020-12-12T19:30:28.638Z
Issue with DSN configuration for BI
3,859
null
[ "compass", "connecting" ]
[ { "code": "getaddrinfo ENOTFOUND 1ec7f415e93fmongodb://10.xxx.xxx.xxx:27017/?readPreference=primary&appname=MongoDB%20Compass&ssl=false", "text": "My Compass for macOS has been running for days and when I just got an update prompt I let it install its update. The first time around the icons in the app were all just squares as if an icon font was missing. It asked me again to install an update, which I did. It didn’t automatically restart, by the way. I started it manually, I can see I am now on version 1.24.1, but I can no longer connect to my MongoDB server running on a different computer as Docker container on the local network. My Node.js app running on my Mac can connect to it without issues, only Compass is struggling.When I click the “connect” button, it shows the green loading bar at the top and after a few seconds it displays a red error box: getaddrinfo ENOTFOUND 1ec7f415e93fMy connection string: mongodb://10.xxx.xxx.xxx:27017/?readPreference=primary&appname=MongoDB%20Compass&ssl=falseConnecting to Atlas works.", "username": "Nick" }, { "code": "{\"t\":{\"$date\":\"2020-12-10T13:15:30.537+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"10.10.10.2:60247\",\"connectionId\":887,\"connectionCount\":6}}\n{\"t\":{\"$date\":\"2020-12-10T13:15:30.541+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn887\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"10.10.10.2:60247\",\"client\":\"conn887\",\"doc\":{\"driver\":{\"name\":\"nodejs\",\"version\":\"3.6.3\"},\"os\":{\"type\":\"Darwin\",\"name\":\"darwin\",\"architecture\":\"x64\",\"version\":\"20.1.0\"},\"platform\":\"'Node.js v12.4.0, LE (unified)\",\"application\":{\"name\":\"MongoDB Compass\"}}}}\n{\"t\":{\"$date\":\"2020-12-10T13:15:30.548+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn887\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"10.10.10.2:60247\",\"connectionId\":887,\"connectionCount\":5}}\n", "text": "Additional information, not sure if it’s useful or not… The Docker container is logging this when I try to connect through Compass:", "username": "Nick" }, { "code": "", "text": "How do you start the container? Can you share the command you run or the docker/docker-compose file?", "username": "Massimiliano_Marcon" }, { "code": "docker run --name my-mongodb --restart unless-stopped -d -p 27017:27017 mongo:4 --replSet rs0\ndocker run -it --rm --link my-mongodb:mongo mongo:4 mongo --host mongo --eval \"rs.initiate()\"\nmongo mongodb://10.x.x.x:27017", "text": "Two commands:PS: Connecting with MongoDB shell version v4.2.0 from Mac to Linux/Docker also works without issues: mongo mongodb://10.x.x.x:27017", "username": "Nick" }, { "code": "", "text": "I can reproduce the problem. 
Let us look into it.", "username": "Massimiliano_Marcon" }, { "code": "docker run -it --rm --link my-mongodb:mongo mongo:4 mongo --host mongo --eval \"rs.initiate({_id: 'rs0', members: [{_id:1, host:'10.x.x.x:27017'}]})\"\n10.x.x.x", "text": "Ok, I think I know what the problem is.When you initialize the replica set with the default settings, because you are inside docker, the replica set member uses the docker host name, which is unreachable from outside docker, and the latest version of the node driver that Compass uses doesn’t like that.To work around that, you can initialize the replica set like this:where 10.x.x.x is the IP you use when connecting to it from Compass.That should work.", "username": "Massimiliano_Marcon" }, { "code": "", "text": "Awesome – it worked! Thank you so much for providing a solution so quickly! ", "username": "Nick" }, { "code": "", "text": "I’m having the same issue and would like to know when the fix will be added to a future release?", "username": "laurence_brazeau" }, { "code": " {\n \"_id\" : 0,\n \"host\" : \"localhost:27017\",\n \"arbiterOnly\" : false,\n \"buildIndexes\" : true,\n \"hidden\" : false,\n \"priority\" : 1,\n \"tags\" : {\n\n },\n \"slaveDelay\" : NumberLong(0),\n \"votes\" : 1\n }", "text": "Hello! The right config should use localhost:27017. I was able to connect via SSH until I updated to 1.24.1.\nNow I see:Could you tell me how to fix this issue?", "username": "Mihail_N_A" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Can't connect after upgrading to Compass 1.24.1
2020-12-10T13:07:28.886Z
Can’t connect after upgrading to Compass 1.24.1
8,879
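For readers who hit the same error against a replica set that is already initiated (including the localhost case in the last reply), the fix is the same idea: make the member's advertised host resolvable from the client. A minimal shell sketch, assuming a single-member set and a placeholder address:

```js
// Run in the mongo shell connected to the member; the address is a placeholder
// and must be reachable from wherever Compass runs.
cfg = rs.conf();
cfg.members[0].host = "10.0.0.5:27017";
rs.reconfig(cfg, { force: true });   // force is needed if the set currently has no primary
```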
null
[]
[ { "code": "", "text": "Within my account I have 2 Projects each with their own single cluster. One of the Clusters (ClusterA) is an M10 cluster in AWS Region eu-west-2. The other Cluster (ClusterB) is an M0 in AWS Region ue-west-1.I want to establish a PrivateLink to each of these Clusters. I have a working link to ClusterA but even though the Private Endpoint to ClusterB states that it is Available, the Connect options have the ‘Private Endpoint’ option ‘greyed’ out.Is it not possible to establish a PrivateLink to an M0 cluster or is this a problem with working in multiple Regions?", "username": "Chris_Hills" }, { "code": "M0M2M5", "text": "I think it is not available in Free Tier M0NOTEFeature unavailable in Free and Shared-Tier ClustersThis feature is not available for M0 (Free Tier), M2 , and M5 clusters. To learn more about which features are unavailable, see Atlas M0 (Free Tier), M2, and M5 Limitations.", "username": "Ramachandra_Tummala" }, { "code": "", "text": "Thanks for the links, it does appear to be pretty clear that this is not available with M0", "username": "Chris_Hills" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Unable to make a Private EndPoint Connection to an M0 Cluster
2020-12-12T14:42:19.845Z
Unable to make a Private EndPoint Connection to an M0 Cluster
2,649
null
[ "queries" ]
[ { "code": " {\n \"_id\":{\"$oid\":\"5fa4386f3a93dc470c920d76\"},\n \"characters\":[\n \t{\"$oid\":\"5fa4389e3a93dc470c920d78\"},\n \t{\"$oid\":\"5fa4389e3a88888888888888\"},\n \t{\"$oid\":\"5fa4389e3a88888888888888\"},\n \t{\"$oid\":\"5fa4389e3a88888888888888\"}\n \t]\n }\n User.findByIdAndUpdate(\"5fa4386f3a93dc470c920d76\", { $pull: { characters: \"5fa4389e3a88888888888888\" } }, (err) => {});", "text": "Hello,I tryied to remove only one row of object id 5fa4389e3a88888888888888 in my characters array.\nBy doing… User.findByIdAndUpdate(\"5fa4386f3a93dc470c920d76\", { $pull: { characters: \"5fa4389e3a88888888888888\" } }, (err) => {});The problem is that every row with “5fa4389e3a88888888888888” value are removed.\nAnd I’d just like to pull 1.Is it possible ?Thanks for helping,", "username": "Julien_LeGa" }, { "code": "{ \"_id\" : 1, \"characters\" : [ \"9999\", \"8888\", \"5555\", \"8888\" ] }\"8888\"db.collection.updateOne( \n { _id: 1 },\n [ \n { $set: { \n characters: {\n $let: {\n vars: { ix: { $indexOfArray: [ \"$characters\", \"8888\" ] } },\n in: { $concatArrays: [\n { $slice: [ \"$characters\", 0, \"$$ix\"] },\n [ ],\n { $slice: [ \"$characters\", { $add: [ 1, \"$$ix\" ] }, { $size: \"$characters\" } ] }\n ]\n }\n }\n }\n }}\n] )", "text": "Hello @Julien_LeGa, welcome to the MongoDB Community forum.Here is an example about how to update / pull the first matching element from the array.Consider a document: { \"_id\" : 1, \"characters\" : [ \"9999\", \"8888\", \"5555\", \"8888\" ] }You want to remove the first matching \"8888\". Use the following aggregation update (requires MongoDB v4.2 or greater):", "username": "Prasad_Saya" }, { "code": "User.findByIdAndUpdate(\"5fa4386f3a93dc470c920d76\",\n[\n {\n $set: {\n characters: {\n $function: {\n body: function(characters) {\n for (var i=0; i<characters.length; i++) {\n if (characters[i] == \"5fa4389e3a88888888888888\") {\n delete characters[i];\n break;\n }\n }\n return characters;\n },\n args: [\"$characters\"],\n lang: \"js\"\n }\n }\n }\n }\n])\n", "text": "I respect answer by @Prasad_Saya, I am just adding the another option Starting from MongoDB v4.4,You can use update with aggregation pipeline, and use the $function operator to define custom functions to implement behavior not supported by the MongoDB Query Language.", "username": "turivishal" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Pull only one item in an array of instances in MongoDB?
2020-12-11T23:47:36.205Z
Pull only one item in an array of instances in MongoDB?
6,348
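Applied to the original document, where the characters array holds ObjectId values, the $indexOfArray/$slice approach from the first answer looks roughly like this (MongoDB 4.2+; the collection name is an assumption):

```js
// Sketch: collection name assumed; removes only the first matching ObjectId.
// (As written it assumes the match is not at index 0.)
const dup = ObjectId("5fa4389e3a88888888888888");
db.users.updateOne(
  { _id: ObjectId("5fa4386f3a93dc470c920d76") },
  [ { $set: {
      characters: {
        $let: {
          vars: { ix: { $indexOfArray: ["$characters", dup] } },
          in: { $concatArrays: [
            { $slice: ["$characters", 0, "$$ix"] },                                        // before the match
            { $slice: ["$characters", { $add: [1, "$$ix"] }, { $size: "$characters" }] }   // after the match
          ] }
        }
      }
  } } ]
)
```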
https://www.mongodb.com/…4_2_1024x512.png
[ "queries" ]
[ { "code": "", "text": "As outlined in this document:As the references are stored in a array, it makes sense to preserve the order.", "username": "Adriano_Tirloni" }, { "code": "", "text": "Hi @Adriano_Tirloni,Welcome to MongoDB communityMongoDB only gurantee order of documents based on a sort operation. Therefore, even if some queries will return the order you need it won’t be guaranteed for every query.Thanks\nPavel", "username": "Pavel_Duchovny" } ]
Does the $in operator preserve the order of the array?
2020-12-11T20:46:54.765Z
Does the $in operator preserve the order of the array?
3,908
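The practical consequence of the answer: if the result order must follow the order of the $in array, sort explicitly. A minimal sketch (MongoDB 4.2+ for the $unset stage; collection name and ids are placeholders):

```js
// Sketch: re-sort matched documents to follow the order of the input id list.
const ids = [3, 1, 2];
db.items.aggregate([
  { $match: { _id: { $in: ids } } },
  { $addFields: { __order: { $indexOfArray: [ids, "$_id"] } } },  // position in the input list
  { $sort: { __order: 1 } },                                       // follow that position
  { $unset: "__order" }
])
```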
null
[]
[ { "code": "db.x.find( { path: { $gt: \"/1\"} } ).sort({ path:1 })\n\n// x collection field path type string:\n/1607721617028\n/1607721618266\n/1607721619028\n", "text": "Why do i get empty result then compare strings with slash?", "username": "alexov_inbox" }, { "code": "$gt$regex", "text": "I don’t think $gt works that way. You might want to use $regex instead or do an aggregation that strips the leading slash and then do a numeric comparison.", "username": "Jack_Woehr" }, { "code": "", "text": "", "username": "alexov_inbox" }, { "code": "> db.test.insertOne( { path : \"/1607721617028\" } )\n{\n\t\"acknowledged\" : true,\n\t\"insertedId\" : ObjectId(\"5fd41c6a3be099655d525122\")\n}\n> db.test.insertOne( { path : \"/1607721618266\" } )\n{\n\t\"acknowledged\" : true,\n\t\"insertedId\" : ObjectId(\"5fd41c913be099655d525123\")\n}\n> db.test.find( { path: { $gt: \"/1\"} } )\n{ \"_id\" : ObjectId(\"5fd41c6a3be099655d525122\"), \"path\" : \"/1607721617028\" }\n{ \"_id\" : ObjectId(\"5fd41c913be099655d525123\"), \"path\" : \"/1607721618266\" }\n", "text": "When I use your sample data and your query I have the expected result.There is not reason why I have output and you don’t. I suspect that there is something you are not sharing that point us to the wrong direction.", "username": "steevej" } ]
Mongodb string comparison with slash
2020-12-11T22:56:28.790Z
Mongodb string comparison with slash
5,179
null
[ "performance", "configuration" ]
[ { "code": "", "text": "Hello,Recently we upgraded our MongoDB cluster from 3.4 to 3.6. It is sharded PSA ( Priimary, Secondary, Arbitor) setup with an extra hidden secondary for backups.After about 2 months of running we started to get secondaries ( including the hidden secondary ) start to fail with out of file descriptors. A simple lsof on the process shows it is at its max of 64K.When restarting the secondary nodes with high number of file descriptors the logs are getting filled with messages of closing the map reduce temporary tables like2020-12-04T18:55:49.537+0000 I REPL [signalProcessingThread] Completing collection drop for customerwtbtestcustomerorgid86.system.drop.1607080266i359t-1.tmp.mr.masters_28783 with drop optime { ts: Timestamp(1607080266, 359), t: -1 } (notification optime: { ts: Timestamp(4294967295, 4294967295), t: 9223372036854775807 })2020-12-04T18:55:49.538+0000 I STORAGE [signalProcessingThread] Finishing collection drop for customerwtbtestcustomerorgid86.system.drop.1607080266i359t-1.tmp.mr.masters_28783 (no UUID).Checking the optime timstamp they go back the 2 months since the upgrade.We are able to monitor the leakage of file descriptors on our replicas including the hidden secondaries with this this metric\ndb.serverStatus().wiredTiger.connection. on the Files Currently Open", "username": "Jonathan_Stairs" }, { "code": "", "text": "Do you use XFS file system or EXT4?Check this out:", "username": "jellyx" }, { "code": "", "text": "Thank you for the response, we are using XFS. I will check out the link.", "username": "Jonathan_Stairs" }, { "code": "", "text": "Hi @Jonathan_Stairs, welcome to the community!This may be a long shot, but there was an issue where a sharded map-reduce fail to clean up temporary collections in some cases (SERVER-36966). According to the ticket, this was fixed in MongoDB 4.0.5. Is it possible for you to upgrade to at least the latest release in the 4.0 series (4.0.21) and see if the issue persists? Note that MongoDB 3.6 series will be out of support soon (April 2021).Best regards,\nKevin", "username": "kevinadi" }, { "code": "", "text": "Hello Kevin,Thank you for the response, we are planning the next upgrade to 4.0 in February will have to watch for this fix then.", "username": "Jonathan_Stairs" } ]
Mongo 3.6 WiredTiger with Map Reduce High Number of Open Files on Replica Sets
2020-12-08T21:05:40.073Z
Mongo 3.6 WiredTiger with Map Reduce High Number of Open Files on Replica Sets
2,317
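Two shell checks that can be useful while a cluster is in this state: the serverStatus metric the poster mentions, and a quick scan for leftover map-reduce temporary collections. Database name is a placeholder and the metric key is quoted as it appears in serverStatus output:

```js
// Open file handles held by the storage engine (run on each affected member):
db.serverStatus().wiredTiger.connection["files currently open"]

// Leftover map-reduce temporary collections in one database (names contain "tmp.mr"):
db.getSiblingDB("customerdb").getCollectionNames()
  .filter(function (name) { return name.indexOf("tmp.mr") !== -1; })
```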
null
[ "atlas-device-sync" ]
[ { "code": "", "text": "Hi,I’m in the quest of building a small application in React Native. Imagine it’s a book in the form of an application, and I’ve been questioning myself if the chosen solution is the right way to build it.The app must work offline, meaning when you first install the app, you already have all the content of the book. Then when you come online, it should check for updates and new content from API serverMy questions are:Local storage : I’m leaning more into using Realm. Would it be the best/easiest solution? Is handling data sync with Realm in React Native straight forward?Prepopulate data : my initial idea is to prepopulate data is to ship the app with a bunch of JSON files (~100 files in my case), then read and insert all data from JSON files to Realm after installing the app. And content will be read from Realm afterward.Cache images from server for offline used:\nseems to be the one? Does anyone have experience with using it?I keep wondering if my solution is right? or there is a better way to handle prepopulating data?Hope you can help to point me out the direction with some pieces of adviceMany thanks in advance,", "username": "Winfield_Kohler" }, { "code": "", "text": "@Winfield_Kohler Welcome! Realm Sync seems to be what you are looking for, once the data is downloaded the user can go offline, modify it if they have permission to, come back online and push their changes up and get any queued changes from the server-side.I’d encourage you to take a look at our tutorial here -If you want to use sync though, I would not bundle a realm with the app installation, instead I would download the data on first app launch. Presumably, when people first install the app they then open it, in which case they would have a connection and be able to download the data.You cannot bundle a synced realm with app installed from the App Store. This is because the Server and Client need to exchange a variety of metadata in order to keep their histories straight and for sync to work.We typically do not recommend caching images using Realm Sync as the operation log which Realm Sync depends on will start to really slow down when binary data is transferred. Instead, store them on disk and just store the reference to the path in Realm.Hope this helps\n-Ian", "username": "Ian_Ward" } ]
How would you build an offline app which occasionally synced with the server
2020-12-03T06:59:37.503Z
How would you build an offline app which occasionally synced with the server
2,512
null
[ "golang" ]
[ { "code": "", "text": "Unable to use mtools commands inside go script.\n\"“exit status 2\"”", "username": "Aayushi_Mangal" }, { "code": "exec", "text": "Hi @Aayushi_Mangal,Can you provide more information on what you are trying to do, including a Go code snippet and relevant software versions?I suspect you may be referring to using a Go package like exec to invoke one of the mtools Python scripts.With a better understanding of what you are trying to achieve, there may be an alternative approach to recommend.Thanks,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Hello @Stennie_X,With a better understanding of what you are trying to achieve, there may be an alternative approach to recommend.We wanted to execute mtools like “mloginfo logfilename.log” from the go script, is that possible?We tried like this:\nout, err = exec.Command(“mloginfo /path/to_file/mongo.log”).Output()output will be:\nfork/exec /usr/local/go/bin/mloginfo /path/to_file/mongo.log.log: no such file or directory", "username": "Aayushi_Mangal" }, { "code": "", "text": "Hi @Stennie_X,With respect to @Aayushi_Mangal’s reply above, when trying like this\nout, err = exec.Command(\"/bin/bash\",\"-c\",“mloginfo /path/to_file/mongo.log”).Output()we are getting “exit status 2” as error.\nPlease suggest if any alternate approach for running mtool commands from golang.", "username": "Pankaj_Gupta" }, { "code": "exec.Command()", "text": "We tried like this:\nout, err = exec.Command(“mloginfo /path/to_file/mongo.log”).Output()Hi @Aayushi_Mangal,I suspect the issue is that you need to pass arguments like the log file path separately to exec.Command(). The first parameter should only be the name of the command to execute: exec package - os/exec - Go Packages.Regards,\nStennie", "username": "Stennie_X" }, { "code": "", "text": "Hi @Stennie_XWe tried passing arguments to exec.Command in all ways but we are still getting “exit status 2” while trying to run any mtools command.\nPlease suggest if any alternate approach for running mtool commands from golang.Thanks,\nPankaj", "username": "Pankaj_Gupta" }, { "code": "", "text": "Verify existence and write permission of directory /path/to_file/.I am really surprised that you pass /path/to_file/mongo.log yet the error message specifies /path/to_file/mongo.log.log.", "username": "steevej" }, { "code": "", "text": "I am really surprised that you pass /path/to_file/mongo.log yet the error message specifies /path/to_file/mongo.log.log .That is typo, it is path to file, I mentioned that manually.\nIn short we would like to run mtools utility using golang. please provide any working sample code snipet if you could that will be helpful.", "username": "Aayushi_Mangal" }, { "code": "exec.Command( \"mloginfo\" , \"/path/to_file/mongo.log\" )\n", "text": "That is typo, it is path to file, I mentioned that manually.Does it means that the code snippet and the error message you shared is not exactly the one you got?It is hard to help if what you shared is something you manually typed rather that a real cut-n-paste from real code and real error message. Because we are getting side tracked from the real issue.I just went and look at the link providedThe first parameter should only be the name of the command to execute: exec package - os/exec - Go Packages.and indeed your usage is wrong. 
Try", "username": "steevej" }, { "code": "", "text": "Hi @steevej,The issue is real, only file and path is changed due to some data security policy.\nIt would be great help if we get help to execute mtools from golang.\nWe checked for the file permission also.", "username": "Aayushi_Mangal" }, { "code": "", "text": "I just edited my previous post with some information.", "username": "steevej" } ]
Mtools from go script is not working
2020-12-08T09:42:15.600Z
Mtools from go script is not working
4,488
null
[ "replication", "containers", "devops" ]
[ { "code": "{\"t\":{\"$date\":\"2020-12-10T19:02:25.896+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23285, \"ctx\":\"main\",\"msg\":\"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.898+00:00\"},\"s\":\"W\", \"c\":\"ASIO\", \"id\":22601, \"ctx\":\"main\",\"msg\":\"No TransportLayer configured during NetworkInterface startup\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.898+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4648601, \"ctx\":\"main\",\"msg\":\"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize.\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.898+00:00\"},\"s\":\"W\", \"c\":\"ASIO\", \"id\":22601, \"ctx\":\"main\",\"msg\":\"No TransportLayer configured during NetworkInterface startup\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.898+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4615611, \"ctx\":\"initandlisten\",\"msg\":\"MongoDB starting\",\"attr\":{\"pid\":1,\"port\":27032,\"dbPath\":\"/data/db\",\"architecture\":\"64-bit\",\"host\":\"f74b3b84f2e8\"}}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.898+00:00\"},\"s\":\"W\", \"c\":\"CONTROL\", \"id\":20720, \"ctx\":\"initandlisten\",\"msg\":\"Available memory is less than system memory\",\"attr\":{\"availableMemSizeMB\":512,\"systemMemSizeMB\":3933}}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.898+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23403, \"ctx\":\"initandlisten\",\"msg\":\"Build Info\",\"attr\":{\"buildInfo\":{\"version\":\"4.4.2\",\"gitVersion\":\"15e73dc5738d2278b688f8929aee605fe4279b0e\",\"openSSLVersion\":\"OpenSSL 1.1.1 11 Sep 2018\",\"modules\":[],\"allocator\":\"tcmalloc\",\"environment\":{\"distmod\":\"ubuntu1804\",\"distarch\":\"x86_64\",\"target_arch\":\"x86_64\"}}}}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.898+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":51765, \"ctx\":\"initandlisten\",\"msg\":\"Operating System\",\"attr\":{\"os\":{\"name\":\"Ubuntu\",\"version\":\"18.04\"}}}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.898+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":21951, \"ctx\":\"initandlisten\",\"msg\":\"Options set by command line\",\"attr\":{\"options\":{\"net\":{\"bindIp\":\"*\",\"port\":27032},\"replication\":{\"enableMajorityReadConcern\":false,\"replSet\":\"jm-replica\"},\"storage\":{\"dbPath\":\"/data/db\",\"journal\":{\"enabled\":true}}}}}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"E\", \"c\":\"STORAGE\", \"id\":20557, \"ctx\":\"initandlisten\",\"msg\":\"DBException in initAndListen, terminating\",\"attr\":{\"error\":\"DBPathInUse: Unable to lock the lock file: /data/db/mongod.lock (Resource temporarily unavailable). 
Another mongod instance is already running on the /data/db directory\"}}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"REPL\", \"id\":4784900, \"ctx\":\"initandlisten\",\"msg\":\"Stepping down the ReplicationCoordinator for shutdown\",\"attr\":{\"waitTimeMillis\":10000}}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":4784901, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the MirrorMaestro\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"SHARDING\", \"id\":4784902, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the WaitForMajorityService\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":20562, \"ctx\":\"initandlisten\",\"msg\":\"Shutdown: going to close listening sockets\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4784905, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the global connection pool\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4784906, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the FlowControlTicketholder\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"-\", \"id\":20520, \"ctx\":\"initandlisten\",\"msg\":\"Stopping further Flow Control ticket acquisitions.\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"REPL\", \"id\":4784907, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the replica set node executor\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4784918, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the ReplicaSetMonitor\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"SHARDING\", \"id\":4784921, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the MigrationUtilExecutor\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":4784925, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down free monitoring\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"FTDC\", \"id\":4784926, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down full-time data capture\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4784927, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down the HealthLog\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4784929, \"ctx\":\"initandlisten\",\"msg\":\"Acquiring the global lock for shutdown\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"-\", \"id\":4784931, \"ctx\":\"initandlisten\",\"msg\":\"Dropping the scope cache for shutdown\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":20565, \"ctx\":\"initandlisten\",\"msg\":\"Now exiting\"}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23138, \"ctx\":\"initandlisten\",\"msg\":\"Shutting down\",\"attr\":{\"exitCode\":100}}\n{\"t\":{\"$date\":\"2020-12-10T19:02:25.899+00:00\"},\"s\":\"E\", \"c\":\"STORAGE\", \"id\":20557, \"ctx\":\"initandlisten\",\"msg\":\"DBException in initAndListen, terminating\",\"attr\":{\"error\":\"DBPathInUse: Unable to lock the lock file: /data/db/mongod.lock (Resource temporarily unavailable). 
Another mongod instance is already running on the /data/db directory\"}}\n\"members\" : [\n\t{\n\t\t\"_id\" : 0,\n\t\t\"name\" : \"mongo-1:27030\",\n\t\t\"health\" : 0,\n\t\t\"state\" : 8,\n\t\t\"stateStr\" : \"(not reachable/healthy)\",\n\t\t\"uptime\" : 0,\n\t\t\"optime\" : {\n\t\t\t\"ts\" : Timestamp(0, 0),\n\t\t\t\"t\" : NumberLong(-1)\n\t\t},\n\t\t\"optimeDurable\" : {\n\t\t\t\"ts\" : Timestamp(0, 0),\n\t\t\t\"t\" : NumberLong(-1)\n\t\t},\n\t\t\"optimeDate\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\"optimeDurableDate\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\"lastHeartbeat\" : ISODate(\"2020-12-11T11:34:57.208Z\"),\n\t\t\"lastHeartbeatRecv\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\"pingMs\" : NumberLong(0),\n\t\t\"lastHeartbeatMessage\" : \"Error connecting to mongo-1:27030 :: caused by :: Could not find address for mongo-1:27030: SocketException: Host not found (authoritative)\",\n\t\t\"syncSourceHost\" : \"\",\n\t\t\"syncSourceId\" : -1,\n\t\t\"infoMessage\" : \"\",\n\t\t\"configVersion\" : -1,\n\t\t\"configTerm\" : -1\n\t},\n\t{\n\t\t\"_id\" : 1,\n\t\t\"name\" : \"mongo-2:27031\",\n\t\t\"health\" : 1,\n\t\t\"state\" : 2,\n\t\t\"stateStr\" : \"SECONDARY\",\n\t\t\"uptime\" : 107,\n\t\t\"optime\" : {\n\t\t\t\"ts\" : Timestamp(1607686093, 1),\n\t\t\t\"t\" : NumberLong(41)\n\t\t},\n\t\t\"optimeDate\" : ISODate(\"2020-12-11T11:28:13Z\"),\n\t\t\"syncSourceHost\" : \"\",\n\t\t\"syncSourceId\" : -1,\n\t\t\"infoMessage\" : \"\",\n\t\t\"configVersion\" : 95732,\n\t\t\"configTerm\" : -1,\n\t\t\"self\" : true,\n\t\t\"lastHeartbeatMessage\" : \"\"\n\t},\n\t{\n\t\t\"_id\" : 2,\n\t\t\"name\" : \"mongo-3:27032\",\n\t\t\"health\" : 0,\n\t\t\"state\" : 8,\n\t\t\"stateStr\" : \"(not reachable/healthy)\",\n\t\t\"uptime\" : 0,\n\t\t\"optime\" : {\n\t\t\t\"ts\" : Timestamp(0, 0),\n\t\t\t\"t\" : NumberLong(-1)\n\t\t},\n\t\t\"optimeDurable\" : {\n\t\t\t\"ts\" : Timestamp(0, 0),\n\t\t\t\"t\" : NumberLong(-1)\n\t\t},\n\t\t\"optimeDate\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\"optimeDurableDate\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\"lastHeartbeat\" : ISODate(\"2020-12-11T11:34:57.096Z\"),\n\t\t\"lastHeartbeatRecv\" : ISODate(\"2020-12-11T11:34:56.923Z\"),\n\t\t\"pingMs\" : NumberLong(0),\n\t\t\"lastHeartbeatMessage\" : \"replica set IDs do not match, ours: 5fd22a776b0ed74077c65ca8; remote node's: 5fd29e358b77ac2c30973a4c\",\n\t\t\"syncSourceHost\" : \"\",\n\t\t\"syncSourceId\" : -1,\n\t\t\"infoMessage\" : \"\",\n\t\t\"configVersion\" : -1,\n\t\t\"configTerm\" : -1\n\t}\n],\nreplica set IDs do not match{\n\t\"set\" : \"jm-replica\",\n\t\"date\" : ISODate(\"2020-12-11T11:45:57.587Z\"),\n\t\"myState\" : 2,\n\t\"term\" : NumberLong(42),\n\t\"syncSourceHost\" : \"\",\n\t\"syncSourceId\" : -1,\n\t\"heartbeatIntervalMillis\" : NumberLong(2000),\n\t\"majorityVoteCount\" : 2,\n\t\"writeMajorityCount\" : 2,\n\t\"votingMembersCount\" : 3,\n\t\"writableVotingMembersCount\" : 3,\n\t\"optimes\" : {\n\t\t\"lastCommittedOpTime\" : {\n\t\t\t\"ts\" : Timestamp(0, 0),\n\t\t\t\"t\" : NumberLong(-1)\n\t\t},\n\t\t\"lastCommittedWallTime\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\"appliedOpTime\" : {\n\t\t\t\"ts\" : Timestamp(1607686302, 2),\n\t\t\t\"t\" : NumberLong(41)\n\t\t},\n\t\t\"durableOpTime\" : {\n\t\t\t\"ts\" : Timestamp(1607686302, 2),\n\t\t\t\"t\" : NumberLong(41)\n\t\t},\n\t\t\"lastAppliedWallTime\" : ISODate(\"2020-12-11T11:31:42.115Z\"),\n\t\t\"lastDurableWallTime\" : ISODate(\"2020-12-11T11:31:42.115Z\")\n\t},\n\t\"members\" : [\n\t\t{\n\t\t\t\"_id\" : 0,\n\t\t\t\"name\" : 
\"mongo-1:27030\",\n\t\t\t\"health\" : 0,\n\t\t\t\"state\" : 8,\n\t\t\t\"stateStr\" : \"(not reachable/healthy)\",\n\t\t\t\"uptime\" : 0,\n\t\t\t\"optime\" : {\n\t\t\t\t\"ts\" : Timestamp(0, 0),\n\t\t\t\t\"t\" : NumberLong(-1)\n\t\t\t},\n\t\t\t\"optimeDurable\" : {\n\t\t\t\t\"ts\" : Timestamp(0, 0),\n\t\t\t\t\"t\" : NumberLong(-1)\n\t\t\t},\n\t\t\t\"optimeDate\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\t\"optimeDurableDate\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\t\"lastHeartbeat\" : ISODate(\"2020-12-11T11:45:57.535Z\"),\n\t\t\t\"lastHeartbeatRecv\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\t\"pingMs\" : NumberLong(0),\n\t\t\t\"lastHeartbeatMessage\" : \"replica set IDs do not match, ours: 5fd29e358b77ac2c30973a4c; remote node's: 5fd22a776b0ed74077c65ca8\",\n\t\t\t\"syncSourceHost\" : \"\",\n\t\t\t\"syncSourceId\" : -1,\n\t\t\t\"infoMessage\" : \"\",\n\t\t\t\"configVersion\" : -1,\n\t\t\t\"configTerm\" : -1\n\t\t},\n\t\t{\n\t\t\t\"_id\" : 1,\n\t\t\t\"name\" : \"mongo-2:27031\",\n\t\t\t\"health\" : 0,\n\t\t\t\"state\" : 8,\n\t\t\t\"stateStr\" : \"(not reachable/healthy)\",\n\t\t\t\"uptime\" : 0,\n\t\t\t\"optime\" : {\n\t\t\t\t\"ts\" : Timestamp(0, 0),\n\t\t\t\t\"t\" : NumberLong(-1)\n\t\t\t},\n\t\t\t\"optimeDurable\" : {\n\t\t\t\t\"ts\" : Timestamp(0, 0),\n\t\t\t\t\"t\" : NumberLong(-1)\n\t\t\t},\n\t\t\t\"optimeDate\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\t\"optimeDurableDate\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\t\"lastHeartbeat\" : ISODate(\"2020-12-11T11:45:57.524Z\"),\n\t\t\t\"lastHeartbeatRecv\" : ISODate(\"1970-01-01T00:00:00Z\"),\n\t\t\t\"pingMs\" : NumberLong(0),\n\t\t\t\"lastHeartbeatMessage\" : \"replica set IDs do not match, ours: 5fd29e358b77ac2c30973a4c; remote node's: 5fd22a776b0ed74077c65ca8\",\n\t\t\t\"syncSourceHost\" : \"\",\n\t\t\t\"syncSourceId\" : -1,\n\t\t\t\"infoMessage\" : \"\",\n\t\t\t\"configVersion\" : -1,\n\t\t\t\"configTerm\" : -1\n\t\t},\n\t\t{\n\t\t\t\"_id\" : 2,\n\t\t\t\"name\" : \"mongo-3:27032\",\n\t\t\t\"health\" : 1,\n\t\t\t\"state\" : 2,\n\t\t\t\"stateStr\" : \"SECONDARY\",\n\t\t\t\"uptime\" : 96,\n\t\t\t\"optime\" : {\n\t\t\t\t\"ts\" : Timestamp(1607686302, 2),\n\t\t\t\t\"t\" : NumberLong(41)\n\t\t\t},\n\t\t\t\"optimeDate\" : ISODate(\"2020-12-11T11:31:42Z\"),\n\t\t\t\"syncSourceHost\" : \"\",\n\t\t\t\"syncSourceId\" : -1,\n\t\t\t\"infoMessage\" : \"\",\n\t\t\t\"configVersion\" : 1,\n\t\t\t\"configTerm\" : 41,\n\t\t\t\"self\" : true,\n\t\t\t\"lastHeartbeatMessage\" : \"\"\n\t\t}\n\t],\n\t\"ok\" : 1,\n\t\"$clusterTime\" : {\n\t\t\"clusterTime\" : Timestamp(1607687155, 1),\n\t\t\"signature\" : {\n\t\t\t\"hash\" : BinData(0,\"AAAAAAAAAAAAAAAAAAAAAAAAAAA=\"),\n\t\t\t\"keyId\" : NumberLong(0)\n\t\t}\n\t},\n\t\"operationTime\" : Timestamp(1607686302, 2)\n}\n", "text": "Hi everyone,I’m using a Docker image “mongo” which is created by Docker officials.Everything works great except when I increase number of replicas. In logs, can see the following:Probably the most interesting part is this:I found this issue happens when we use volume and mount it becasue Dockerfile creates “mongodb” user and volume is created by root.More on that here: mongodb - Mongo docker with volume get error - Stack OverflowHowever, I also face another error:It says replica set IDs do not match.Have you ever had these errors? If I try to execute rs.reconfig() nothing will happen. Simply unsolvable.I am worried if these two errors are connected. 
If yes, then it’s fine because I’ll somehow find a way to solve the first (and then second won’t appear again).However, I am worried if these two are not connected because I am not able to solve it. Whatever I try - nothing.Thanks.EDIT: Interestng is that I redeployed stack again and my script gets documents although my latest rs.status looks like this:Thanks", "username": "jellyx" }, { "code": "", "text": "Seems like it is not possible to increase # of replicas in Swarm. If I wish to have more replicas I need to add more services (additional secondary nodes). Am I right? Thanks.", "username": "jellyx" } ]
Mongod.lock when replica increases + replica set IDs do not match
2020-12-11T11:42:20.810Z
Mongod.lock when replica increases + replica set IDs do not match
4,855
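Regarding the follow-up question: the usual pattern is one Swarm service (with its own data volume) per replica-set member, rather than scaling a single service, and the set is then initiated once with every member listed explicitly. A sketch using the hostnames and ports that appear in this thread:

```js
// Run once, from a shell connected to one member, after all three *separate*
// services (each with its own data volume) are up.
rs.initiate({
  _id: "jm-replica",
  members: [
    { _id: 0, host: "mongo-1:27030" },
    { _id: 1, host: "mongo-2:27031" },
    { _id: 2, host: "mongo-3:27032" }
  ]
})
```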
null
[ "containers", "installation" ]
[ { "code": "cat /etc/os-releaseNAME=\"Debian GNU/Linux\"\nVERSION_ID=\"10\"\nVERSION=\"10 (buster)\"\nVERSION_CODENAME=buster\nID=debian\nHOME_URL=\"https://www.debian.org/\"\nSUPPORT_URL=\"https://www.debian.org/support\"\nBUG_REPORT_URL=\"https://bugs.debian.org/\"\napt-get install -y mongodb-orgDone.\n/var/lib/dpkg/info/mongodb-org-server.postinst: 43: /var/lib/dpkg/info/mongodb-org-server.postinst: systemctl: not found\ndpkg: error processing package mongodb-org-server (--configure):\n installed mongodb-org-server package post-installation script subprocess returned error exit status 127\n", "text": "I am following the directions here: https://docs.mongodb.com/manual/tutorial/install-mongodb-on-debian/I am attempting to create a docker image based on the openjdk image and wish to install a mongdb server there, which I’m using internally to manage temporary databases.I wanted to make sure what I was running, so I ran cat /etc/os-release in my image, and it displayed the following data:When I get to the command apt-get install -y mongodb-org I get the following errorI searched for the error, including this forum, and couldn’t find it, so I’m posting here. Please advise.Thanks.", "username": "Thom_Hehl" }, { "code": "systemctldocker network", "text": "Hi @Thom_Hehl and welcome in the MongoDB Community !Usually Docker images are as small as possible so most of the time, they only embed what is absolutely necessary for them to work.systemctl is not part of the OpenJDK image you are trying to use apparently (makes sense to me) which is required for MongoDB installed by the package manager because it would configure MongoDB to start automatically when your computer starts, etc. Which doesn’t make sense in a Docker container.So a few advises:Cheers,\nMaxime.", "username": "MaBeuLux88" }, { "code": "mongod", "text": "There are perfectly valid reasons for installing the MongoDB packages in a container environment. For instance, we use Gitlab pipelines (which run in docker containers) for testing, where the test runner expects to find a mongod executable. So far, we could provide the binary by installing the Debian/Ubuntu packages. This is not possible now with the hard requirement on systemd.", "username": "languitar" }, { "code": "", "text": "Hi @languitar,When you install MongoDB a the packet manager, you are installing it as a service on your machine which will start during the boot. It’s hard to do that without systemd / systemctl and it’s not what you want in a docker container - indeed.If you just want to install MongoDB binaries without all the service configurations, use the tarball like this one for MDB 4.4.2 for Ubuntu 20.04: https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu2004-4.4.2.tgz.You can find all these links in the MongoDB Download Center right next to the download button once you have selected what you need.", "username": "MaBeuLux88" }, { "code": "", "text": "But why can’t it just install itself including the systemd service files but without registering? 
Many other packages do that as well.As a side note: a dependency is probably missing here then, because the missing systemd is not caught via package dependencies.", "username": "languitar" }, { "code": "", "text": "The official container installs by linking /bin/true to /usr/local/bin/systemctl and removes it after installing mongodb-org-* packages.", "username": "chris" }, { "code": "", "text": "this post and workaround method deserves more than a hundred upvotes.IMHO we can not assume systemctl always present.", "username": "Er_ZH" }, { "code": "ln -T /bin/true /usr/bin/systemctl && apt-get update && apt-get install -y mongodb-org && rm /usr/bin/systemctl", "text": "This worked for me in a debian 10 container: ln -T /bin/true /usr/bin/systemctl && apt-get update && apt-get install -y mongodb-org && rm /usr/bin/systemctl", "username": "Edward_Bordin" }, { "code": "", "text": "This solution fixed my problem.\n@Edward_Bordin posted an example code on how to fix the two comments below.", "username": "Vladyslav_Petrykov" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Mongodb-org-server.postinst: systemctl: not found
2020-09-16T14:40:27.768Z
Mongodb-org-server.postinst: systemctl: not found
6,743
null
[ "dot-net", "indexes" ]
[ { "code": "PeopleDocumentIDCardCopytaxCopyPermitCopyIDCardCopydb.PeopleDocument.createIndex( { \"lastModifiedDate\": 1 }, { expireAfterSeconds: 31622400} ) \nIDCardCopyvar collInternDocuments = _database.GetCollection<BsonDocument>(\"PPLDocuments\");\n\n\n\nvar doc = new BsonDocument{\n {\"Documentid\", strDocumentID},\n { \"FileType\", typeOfDocument}, \n { \"FileContent\", file.ContentType},\n { \"FileName\", file.FileName},\n { \"UploadTime\", DateTime.Now},\n { \"UploadedBy\", uploadedBy},\n { \"GridFSFileID\", strGridFSFileID}\n };\n \n\ncollInternDocuments.UpdateOne( Builders<BsonDocument>.Filter.Eq(\"_id\", internUserID), Builders<BsonDocument>.Update.AddToSet(\"Documents\", doc));\ndb.PeopleDocument.createIndex( {UploadTime:1}, \n {\n expireAfterSeconds:900,\n partialFilterExpression:{\n FileType:{$eq:\"IDCardCopy\"}\n }\n })\n {\n \"v\":2,\n \"key\":{\n \"_id\":1\n },\n \"name\" :\"_id_\",\n \"ns\" : \"people.PeopleDocument\"\n},\n{ \n\n \"v\":2,\n \"key\":{\n \"UploadTime\":1\n },\n \"name\" :\"UploadTime_1\",\n \"ns\" : \"people.PeopleDocument\",\n \"expireAfterSeconds\":900,\n \"partialFilterExpression\":{\n \"FileType\": { \"$eq\":\"IDCardCopy\"\n }\n }\n }\n", "text": "I have a collection called PeopleDocument. This collection contains three different types of files: IDCardCopy, taxCopy, PermitCopy. Users can upload any of these files. I want to autodelete IDCardCopy one year after it was uploaded. I am looking at MongoDB TTL, however I have some questions:If I create an index like the one above, I think it will delete all files in PeopleDocument after 1 year, is it possible to only delete IDCardCopy?More detail:This is a C# code I use to insert Document:This is how I created Index:This is the results for db.PeopleDocument.getIndexes():This didn’t delete the file after 900 sec, could this be a date issue?", "username": "Sujinthan_Satkunaraj" }, { "code": "lastModifiedDate{ \"FileType\", typeOfDocument },\n{ \"UploadTime\", DateTime.Now }\n\ndb.PeopleDocument.createIndex( \n { UploadTime: 1 }, \n {\n expireAfterSeconds: 900,\n partialFilterExpression: { FileType: { $eq: \"IDCardCopy\" } }\n }\n)\nFileType: { $eq: \"IDCardCopy\" }900UploadTimemongodb.PeopleDocument.aggregate([\n { $project: { UploadTimeType: { $type: \"$UploadTime\" } } }\n])\n\"UploadTimeType\" : \"date\"mongo// Collection 'test' with two documents:\n{ _id:1, dt: ISODate(\"2020-12-07T06:54:12.562Z\"), fld: 8 },\n{ _id:2, dt: ISODate(\"2020-12-07T06:54:12.562Z\"), fld: 220 }\n\n// Create TTL Index with Partial Index\ndb.test.createIndex( { dt: 1 }, {\n expireAfterSeconds: 150, // 2.5 mins\n partialFilterExpression: {\n fld: { $gt: 100 }\n }\n});\n\ndb.test.getIndexes()\n{\n\t\"v\" : 2,\n\t\"key\" : {\n\t\t\"dt\" : 1\n\t},\n\t\"name\" : \"dt_1\",\n\t\"ns\" : \"test.test\",\n\t\"expireAfterSeconds\" : 150,\n\t\"partialFilterExpression\" : {\n\t\t\t\"fld\" : {\n\t\t\t\t\"$gt\" : 100\n\t\t\t}\n\t}\n}\n_id: 22.5", "text": "Hello @Sujinthan_Satkunaraj, welcome to the MongoDB Community forum.I have a collection called PeopleDocument. This collection contains three different types of files: IDCardCopy, taxCopy, PermitCopy. Users can upload any of these files. I want to autodelete IDCardCopy one year after it was uploaded. I am looking at MongoDB TTL.This will work using TTL index along with a Partial Index. The documents with FileType = “IDCardCopy” will be deleted after one year, when you specify the Partial Index filter for the “IDCardCopy” FileType. 
Note the TTL index field must be a BSON Date field.db.PeopleDocument.createIndex( { “lastModifiedDate”: 1 }, { expireAfterSeconds: 31622400} )\nIf I create an index like the one above, I think it will delete all files in PeopleDocument after 1 year, is it possible to only delete IDCardCopy?Yes, the lastModifiedDate must be a BSON Date field.The above code looks okay to me. The documents of FileType: { $eq: \"IDCardCopy\" } should be deleted after 900 seconds (15 mins).Can you verify the field type of UploadTime from the mongo shell using this query?The output should be: \"UploadTimeType\" : \"date\"Here is a similar example I had tried from mongo shell and this worked fine. I am using MongoDB v4.2.8.After the index was created, the document with _id: 2 was deleted after the 2.5 mins.", "username": "Prasad_Saya" }, { "code": "\"UploadTimeType\" : \"string\"BsonDateTime uploadDate = new BsonDateTime(DateTime.Now);\nvar doc = new BsonDocument{\n {\"Documentid\", strDocumentID},\n { \"FileType\", typeOfDocument}, \n { \"FileContent\", file.ContentType},\n { \"FileName\", file.FileName},\n { \"UploadTime\", uploadDate },\n { \"UploadedBy\", uploadedBy},\n { \"GridFSFileID\", strGridFSFileID}\n };\n", "text": "I get \"UploadTimeType\" : \"string\".So I guess UploadTime is not type: Date.\nHow do I insert UploadTime as type Date?I tried the following it didn’t work:This did not work.", "username": "Sujinthan_Satkunaraj" }, { "code": "mongo", "text": "I tried the following it didn’t work:Can you explain what is that did not work?Are you able to insert the document? If so, what does it look like when queried from mongo shell? Please post the result of your insert.", "username": "Prasad_Saya" }, { "code": "BsonDateTime uploadDate = new BsonDateTime(DateTime.Now); \nuploadDateBsonDateTimeuploadDateDateTimedb.PeopleDocument.aggregate([\n{ $project: { UploadTimeType: { $type: \"$UploadTime\" } } }])\nUploadTimeType: string", "text": "I thought by inserting uploadDate as a BsonDateTime it will store uploadDate as type DateTime, however that didn’t work. When I run:it returns UploadTimeType: string.I solved this by creating a C# console app that will delete documents created one year ago, I created a Task Scheduler that will run everyday to run this app.", "username": "Sujinthan_Satkunaraj" } ]
Auto-delete a certain type of document in a MongoDB collection
2020-12-06T19:43:50.667Z
Auto-delete a certain type of document in a MongoDB collection
6,508
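An alternative to the external console app, assuming MongoDB 4.2+ and that the stored UploadTime strings are in a format $toDate can parse (test on a copy first): convert the existing strings to BSON dates in place, after which the TTL index with the partial filter can do the deleting.

```js
// Sketch: converts string UploadTime values to BSON dates so the TTL index applies.
// Assumes the string format is one $toDate understands; verify on a sample first.
db.PeopleDocument.updateMany(
  { UploadTime: { $type: "string" } },
  [ { $set: { UploadTime: { $toDate: "$UploadTime" } } } ]
)
```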
null
[ "php" ]
[ { "code": "$updateDocQuery = new MongoCode ('var bulkOp = db.bsd_CampaignDetailStats.initializeOrderedBulkOp();\n db.bsd_CampaignDetailStats.find({\\'CampDetail.Steps.StepID\\': ' . $stepID . '}).forEach(function(doc) {\n for(var i=0; i < doc.CampDetail.length; i++) {\n if (doc.CampDetail[i].ActionType == \\'Offer\\') {\n for(var j=0; j < doc.CampDetail[i].Steps.length; j++) {\n var updateDocument = {};\n updateDocument[\\'CampDetail.\\' + i + \\'.Steps.\\' + j + \\'.RedeemedCount\\'] = ' . $doc['value']['count'] . '; \n updateDocument[\\'CampDetail.\\' + i + \\'.Steps.\\' + j + \\'.RedeemedAmount\\'] = ' . $doc['value']['total'] . '; \n bulkOp.find({\\'_id\\': doc._id}).update({\n \\'$set\\': updateDocument \n });\n bulkOp.execute(); \n }\n }\n } \n })');\n $db->execute($updateDocQuery);", "text": "Looking for an alternative way to implement the following php code as MongoCode and $db->execute have been deprecated.", "username": "Human_Being" }, { "code": "", "text": "Just rewrite it as a PHP function. Nothing you are doing there is exotic.\nWhat you’re basically doing is writing a program in Javascript and embedding in a PHP program.\nJust rewrite your little program in PHP.", "username": "Jack_Woehr" } ]
PHP execute and MongoCode alternatives
2020-12-10T19:54:11.973Z
PHP execute and MongoCode alternatives
1,929
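One way to follow that advice without embedding JavaScript at all: since MongoDB 4.2, the same per-document loop can be expressed as a single pipeline update, which the PHP library can send directly. A sketch in shell syntax; stepId, redeemedCount and redeemedAmount are placeholders for the values the PHP code supplies, and this is an alternative technique rather than the library's own eval mechanism:

```js
// Placeholders standing in for the PHP-side values.
const stepId = 123, redeemedCount = 5, redeemedAmount = 100;

// Pipeline update (MongoDB 4.2+) replacing the embedded-JavaScript loop:
// for every CampDetail entry of ActionType "Offer", set the counters on each Step.
db.bsd_CampaignDetailStats.updateMany(
  { "CampDetail.Steps.StepID": stepId },
  [ { $set: {
      CampDetail: {
        $map: {
          input: "$CampDetail", as: "cd",
          in: {
            $cond: [
              { $eq: ["$$cd.ActionType", "Offer"] },
              { $mergeObjects: ["$$cd", {
                  Steps: {
                    $map: {
                      input: "$$cd.Steps", as: "s",
                      in: { $mergeObjects: ["$$s", {
                        RedeemedCount: redeemedCount,
                        RedeemedAmount: redeemedAmount
                      }] }
                    }
                  }
              }] },
              "$$cd"
            ]
          }
        }
      }
  } } ]
)
```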
null
[ "dot-net", "legacy-realm-cloud" ]
[ { "code": "", "text": "Upgrading from realm dotnet 10.0.0.beta.2 to beta.3, application fails on call to\nRealm.GetInstanceAsync(config).Error:\n“SSL Server certificate rejected”", "username": "Richard_Fairall" }, { "code": "", "text": "Interesting - just to confirm, are you using it with MongoDB Realm (asking because the tag says legacy-realm-cloud which is the one hosted on https://realm.io).", "username": "nirinchev" }, { "code": "", "text": "Yes it’s MongoDB realm. I just upgraded from 10.0.0-beta.2 to beta.3.\nI seem to have lost the link to the 10.0.0 forum.This occurs using Credentials.EmailPassword Authentication.\nI switched to anonymous connection and get the same result.", "username": "Richard_Fairall" }, { "code": "", "text": "What platform are you seeing this exception on? Also, are you by any chance behind some corporate proxy/firewall that does its own SSL termination?", "username": "nirinchev" }, { "code": "", "text": "All on a home standard network.\nOn iMac, Parallels Windows 10, Mcaffee .\nNever had a problem before beta.3.I published the app and ran it on a standard Windows 10 PC.\nSame result. SSL rejected.", "username": "Richard_Fairall" }, { "code": "", "text": "Thanks, I was able to reproduce it - we’ll look into hotfixing that, but for now my suggestion would be to downgrade to 10.0.0-beta.2.I filed realm-core/4213 to track resolving it.", "username": "nirinchev" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
Realm 10.0.0-beta.3 SSL error on call to Realm.GetInstanceAsync(config)
2020-12-10T16:08:36.878Z
Realm 10.0.0-beta.3 SSL error on call to Realm.GetInstanceAsync(config)
4,163
null
[ "swift", "atlas-device-sync" ]
[ { "code": "class PublicData: Object {\n @objc dynamic var _id: String = \"\"\n override class func primaryKey() -> String? { return \"_id\" }\n\n // ...\n}\n\nclass PrivateObject: Object {\n var publicDatas: List<String>\n\n // ...\n\n var realData: [PublicData] {\n return Array(publicDatas.map { id in\n publicRealm.object(ofType: PublicData.self, forPrimaryKey: id)!\n })\n }\n}\n", "text": "I’m using Sync. I have some data in a collection in a PUBLIC realm, accessible by all users, and a collection of private objects in each user’s personal Realm. I want to setup a many-to-many relationship between these two collections.Since the collections are not in the same Realm, I have to use foreign keys (strings, the type of my primary keys). I can get back the object by querying the primary keys.For a list (to-many relationship), here’s what it would look like (example in Swift):My question: is that efficient, querying primary keys in a loop? Is there a better way to do this, am I missing something?", "username": "Jean-Baptiste_Beau" }, { "code": "", "text": "Hi @Jean-Baptiste_Beau,Would that make more sense to use the Realm partition key to sync and filter on the MongoDB Rules side the data which is public or specific user id oriented.Then it feels like you can verify who sees what rather than using relationships which are not so recommended for MongoDB architctures.Best regards,\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "Hi @Pavel_Duchovny,Thank you for your reply. However, I’m not sure to understand your point.The goal of having my static data sitting in a ‘PUBLIC’ Realm is to be able to update it if needed.Basically, my users will sync to two Realms:If I’d want to use only one Realm instead of two, I’d have to copy the static data in every user Realm. The data is not that big, but still this solution feels bad, and I’m not sure maintenance would be easy for this.Does that address your point or have I misunderstood your answer?\nThanks a lot!", "username": "Jean-Baptiste_Beau" }, { "code": "", "text": "Hi @Jean-Baptiste_Beau,I think the following section should answer most of your concerns:https://docs.mongodb.com/realm/sync/partitioning/#example-partitioning-data-across-groupsIt feels like some parts of the presented design covers your idea. Let me know if you still have questions.Best\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "@Pavel_Duchovny in the given example, there are indeed multiple Realms, including one ‘PUBLIC’ and some others, same case as mine. The examples however don’t explain how to structure relationships in this case, since you can’t make relationships between objects in different Realms.Do you need to store IDs as strings and query the associated objects with a regular query? Or can you somehow structure this in such a way you can have “real” relationships?", "username": "Jean-Baptiste_Beau" }, { "code": "", "text": "Hi @Jean-Baptiste_Beau,Realm allows you to set relationships in your code:\nhttps://docs.mongodb.com/realm/node/relationships/index.html#key-conceptsAs well as on the collection rule side under the relationship tab you can define a list type which is many to many:Best\nPavel", "username": "Pavel_Duchovny" }, { "code": "", "text": "@Pavel_Duchovny Yes, but these relationships don’t work for objects located in different Realms. How to proceed then?", "username": "Jean-Baptiste_Beau" }, { "code": "", "text": "Hi @Jean-Baptiste_Beau,What do you mean? 
What kind of relationship will be there??", "username": "Pavel_Duchovny" }, { "code": "{\n name: \"Theme\",\n primaryKey: \"_id\",\n properties: {\n _id: \"string\",\n _partition: \"string\",\n name_fr: \"string\",\n name_eng: \"string\",\n description_fr: \"string\",\n description_eng: \"string\",\n },\n}\n\n{\n name: \"JournalEntry\",\n primaryKey: \"_id\",\n properties: {\n _id: \"string\",\n _partition: \"string\",\n title: \"string\",\n text: \"string\",\n theme: Theme,\n },\n}\nPUBLICuser_idtheme: Themetheme_id: stringrealm.object(type, primary_key)", "text": "@Pavel_Duchovny I recreated a small example similar to my situation:Say that I have two schemas:The themes would be public and static. The journal entries would be added by users, and private. This means that I need to use two Realms, one for the partition key PUBLIC, where the themes will be, and one for each user with partition key user_id.However, I still want a relation between these two objects. In this example, I made the relationship the standard way (theme: Theme), but this won’t work since the objects are in different Realms.Therefore, from what I understand, I have two solutions:I hope I managed to made myself clearer. This public data / private data combination seems a quite common use case to me, and yet I can’t find any doc that addresses this specifically. What do others do in this case? Did I structure something wrong?", "username": "Jean-Baptiste_Beau" }, { "code": "", "text": "Hey @Jean-Baptiste_Beau,Shame there isn’t an answer to this, I am about to implement something similar myself. Out of interest did you find a good solution?Many thanks!Will", "username": "varyamereon" }, { "code": "theme_id: stringrealm.object(type, primary_key)", "text": "instead of a theme, store in the journal entries theme_id: string , and query the theme when I need it using the normal realm.object(type, primary_key) .@Jean-Baptiste_Beau Yes - this is exactly what we recommend. You can see a presentation I did that covers this topic - here:", "username": "Ian_Ward" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
To-many relationship with foreign keys: best practice
2020-08-19T09:03:20.810Z
To-many relationship with foreign keys: best practice
9,678
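A minimal Realm JS sketch of the pattern recommended in the last reply, storing the foreign key as a plain string and resolving it against the public realm when needed; field and variable names are assumptions:

```js
// Store the foreign key as a string and resolve it manually across realms.
const JournalEntrySchema = {
  name: "JournalEntry",
  primaryKey: "_id",
  properties: {
    _id: "string",
    _partition: "string",
    title: "string",
    text: "string",
    themeId: "string"   // primary key of a Theme object in the PUBLIC realm
  }
};

// Later, with both realms open and `entry` being a JournalEntry instance:
const theme = publicRealm.objectForPrimaryKey("Theme", entry.themeId);
```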
null
[ "node-js", "typescript" ]
[ { "code": "[8:16:58 PM] Starting compilation in watch mode...\n\nnode_modules/@nestjs/mongoose/dist/factories/schema.factory.d.ts:4:60 - error TS2315: Type \n'Schema' is not generic.\n\n4 static createForClass<T = any>(target: Type<unknown>): mongoose.Schema<T>;\n ~~~~~~~~~~~~~~~~~~\nnode_modules/@types/mongoose/index.d.ts:79:1 - error TS6200: Definitions of the following \nidentifiers conflict with those in another file: DocumentDefinition, FilterQuery, \nUpdateQuery, NativeError, Mongoose, CastError, Collection, Connection, Error, QueryCursor, \nVirtualType, Schema, Subdocument, Array, DocumentArray, Buffer, ObjectId, Decimal128, Map, \nAggregate, SchemaType, Document\n\n79 declare module \"mongoose\" {\n node_modules/mongoose/index.d.ts:1:1\n 1 declare module \"mongoose\" {\n ~~~~~~~\n Conflicts are in this file.\n\n node_modules/@types/mongoose/index.d.ts:226:14 - error TS2403: Subsequent variable \n declarations must have the same type. Variable 'SchemaTypes' must be of type 'typeof \n Types', but here has type 'typeof Types'.\n\n 226 export var SchemaTypes: typeof Schema.Types;\n ~~~~~~~~~~~\n\n node_modules/mongoose/index.d.ts:45:14\n 45 export var SchemaTypes: typeof Schema.Types;\n ~~~~~~~~~~~\n 'SchemaTypes' was also declared here.\n\n node_modules/@types/mongoose/index.d.ts:822:24 - error TS2314: Generic type \n 'Query<ResultType, DocType, T>' requires 3 type argument(s).\n\n 822 constructor(query: Query<T>, options: any);\n ~~~~~~~~\n\n node_modules/@types/mongoose/index.d.ts:1013:19 - error TS2314: Generic type \n 'Query<ResultType, DocType, T>' requires 3 type argument(s).\n\n 1013 pre<T extends Query<any> = Query<any>>(\n ~~~~~~~~~~\n\n node_modules/@types/mongoose/index.d.ts:1013:32 - error TS2314: Generic type \n 'Query<ResultType, DocType, T>' requires 3 type argument(s).\n\n 1013 pre<T extends Query<any> = Query<any>>(\n ~~~~~~~~~~\n\n node_modules/@types/mongoose/index.d.ts:1036:48 - error TS2314: Generic type \n 'Query<ResultType, DocType, T>' requires 3 type argument(s).\n\n 1036 pre<T extends Document | Model<Document> | Query<any> | Aggregate<any>>(\n", "text": "I finally came here after putting lot of efforts but no success. I learn Nestjs/Angular/MongoDB. So far I got success running both Angular server & Nestjs server simaltaneously. But I get huge list of errors 147 (mostly related to schema) when I initialize mongoDB with them. Everything seems fine, connection between mongdb compass & atlas work perfectly.\ngrateful for your advisefollowing an error.", "username": "Kash_XAEED" }, { "code": "", "text": "I had the same error into Typescript + Express + Mongoose application. Try adding skipLibCheck: true option to your tsconfig.json. I found the solution here, it is was published just 2 days ago: https://github.com/DefinitelyTyped/DefinitelyTyped/issues/33290#issuecomment-740513790", "username": "Valentyn_Kuznetsov" }, { "code": "", "text": "Also you don’t need to use @types/mongoose since mongoose 5.11 was published with its own type definitions. Hope it helps. Typescript Schema definition is incorrect · Issue #9606 · Automattic/mongoose · GitHub", "username": "Valentyn_Kuznetsov" } ]
Schema Error while running backend server nestjs/angular
2020-12-02T09:21:53.266Z
Schema Error while running backend server nestjs/angular
9,989
null
[ "aggregation" ]
[ { "code": "{_id:\"doc1\", score:29, length:7, time:3},\n{_id:\"doc2\", score:19, length:11, time:13}\n{_id:\"doc3\", score:3, length:7, time:13}\n...\nXproperty:\"max value\"{_id:\"doc1\", score:29, length:7, time:3, scoreProp:\"maxValue\"},\n{_id:\"doc2\", score:19, length:11, lengthProp:\"maxValue\", time:13}\n{_id:\"doc3\", score:3, length:7, time:13}\n$group_id_ids$switch", "text": "I’ve been thinking about this example:Is there any way I could set up a query to retrieve, on each document, a field like this Xproperty:\"max value\"?It requires a scan over the array of documents, so I guess it’s only using $group we can start. Maybe push out the _id or _ids. But, what then?I tried using $switch but it does not work as plainJS (breaks out by default after a match.)Tried this approach too, which doesn’t look nice", "username": "santimir" }, { "code": "$function$accumulator$function{\n $project: {\n allDocs: {\n $function: {\n body: function(allDocs) {\n var length, score, time;\n for (var i=0; i<allDocs.length; i++) {\n if (!length || allDocs[i].length > length.length) length = allDocs[i];\n if (!score || allDocs[i].score > score.score) score = allDocs[i];\n if (!time || allDocs[i].time > time.time) time = allDocs[i];\n }\n for (var i=0; i<allDocs.length; i++) {\n if (allDocs[i]._id == length._id) allDocs[i].lengthProp = \"maxValue\";\n if (allDocs[i]._id == score._id) allDocs[i].scoreProp = \"maxValue\";\n if (allDocs[i]._id == time._id) allDocs[i].timeProp = \"maxValue\";\n }\n return allDocs;\n },\n args: [\"$allDocs\"],\n lang: \"js\"\n }\n }\n }\n},\nallDocsqueryResult[0]['allDocs']", "text": "I think its better to do in your client side language, or You can use the $function operator to define custom functions to implement behavior not supported by the MongoDB Query Language. See also $accumulator Start from MongoDB v4.4.You can change JS code inside $function as per your understanding,Stage 1: Group all in a array\nStage 2:Stage 3: unwind array\nStage 4: replace object to rootYou can skip 3rd and 4th stages, query will return only single document and we know the field name is allDocs, can directly access like queryResult[0]['allDocs'].", "username": "turivishal" }, { "code": "function", "text": "Very interesting, never tried function yet.\nThanks!", "username": "santimir" }, { "code": "scoreProp:\"maxValue\"maxField:\"score\"maxValue : the value of score fieldmaxField : \"score\"{_id:\"doc1\", score:29, length:7, time:3},\n{_id:\"doc2\", score:19, length:11, time:13}\n{_id:\"doc3\", score:3, length:7, time:13}\n{_id:\"doc1\", score:29, length:7, time:3, max : { v:29, k:\"score\" } },\n{_id:\"doc2\", score:19, length:11, time:13, max: { v:19, k:\"score\"} },\n{_id:\"doc3\", score:3, length:7, time:13, max: { v:3, k:\"score\"} }\n{_id:\"doc1\", score:29, length:7, time:3, max : { v:29, k:\"score\"},\n{_id:\"doc2\", score:19, length:11, time:13, max : { v:19, k:\"score\"}\n{_id:\"doc3\", score:3, length:7, time:13, max : { v:7, k:\"length\"}\n{_id:\"doc1\", score:29, length:7, time:3, max : { v:29, k:\"score\"}},\n{_id:\"doc2\", score:19, length:11, time:13, max : { v:19, k:\"score\"}},\n{_id:\"doc3\", score:3, length:7, time:13, max : { v:13, k:\"time\"}}\n", "text": "I would like to add two comments despite the thread being solved.First, personally, I would reverse the added field name and value. Rather than scoreProp:\"maxValue\" I would have created maxField:\"score\". My reasoning is that documents will have a consistent schema. 
This helps since you can create an index of this new field and simply queries. Having a polymorphic schema is not a problem (and is something I like very much about MongoDB) but when there is an alternative I prefer the consistent version.My second comment is only relevant if the transformation is a one time thing to update a collection because a new requirement has popped up. I would do the transformation in multiple passes simply because I find it is simpler to implement.So if we start withIn pass 1 all documents are modified to becomeIn pass 2 we only modify documents where length > maxValue to getSince we have 3 source fields we have 3 passes and the third pass will update document where time > max.v to get the final resultThere are many drawbacks with this approach. One of them is that some documents are modified multiple time. Another one is that all documents have the new schema after the first pass but they contain invalid data until all passes are completed.To the keen observer that saw the switch from maxValue:number and maxField:string to max:{v:number,k:string}, I realized while writing that if I have max as an object I could $project : { max : 1 } to get both the maximum value and the key of the field that has the maximum value.", "username": "steevej" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
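A mongo shell sketch of the three passes described above, written as pipeline-style updates (MongoDB 4.2+). The collection name docs is an assumption.

```js
// Pass 1: seed max with the score field on every document
db.docs.updateMany({}, [ { $set: { max: { v: "$score", k: "score" } } } ]);

// Pass 2: overwrite max where length is larger than the current max.v
db.docs.updateMany(
  { $expr: { $gt: [ "$length", "$max.v" ] } },
  [ { $set: { max: { v: "$length", k: "length" } } } ]
);

// Pass 3: overwrite max where time is larger than the current max.v
db.docs.updateMany(
  { $expr: { $gt: [ "$time", "$max.v" ] } },
  [ { $set: { max: { v: "$time", k: "time" } } } ]
);
```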
Add field on document, compared to the rest of documents
2020-12-10T07:28:25.796Z
Add field on document, compared to the rest of documents
2,097
null
[]
[ { "code": "", "text": "Hey everyone,\nI have a little problem with mongodb.\nI use MongoDB Atlas (cloud google Belgium) for a web porject.\nBut I am in a public administration and it’s very securised. There is a firewall with whitelist IP adress.\nSo I need the IP adress of MongoDB Atlas to open the port and the IP adress and to use my cluster of my project.\nFor more details, the project is made with Node.js and I use mongoose.", "username": "Quentin_TREHEUX" }, { "code": "", "text": "This answer from @steevej shows how to lookup your hosts from the mongodb+srv uri.From there resolve the hosts returned in the SRV records.Do to the cloudy nature of Atlas I do not think you have any guarantees of the ips remaining as they are over the clusters lifetime. Happy to be corrected here.This outline when the IPs change and when they do not.", "username": "chris" } ]
IP address MongoDB Atlas
2020-12-09T19:23:24.767Z
IP address MongoDB Atlas
1,895
null
[]
[ { "code": "", "text": "Hi Team,I have registered for MongoDB Certified Developer Certification Exam on 19 Jan 2021. But due to some unavoidable circumstances, I want to reschedule the exam for the next February slot. I am able to see the available slots till march, but when I am trying to transfer an exam session from MongoDB university it shows no upcoming exams. and while trying to reschedule it from examity it shows date out of range.Could you please confirm if I can book the February slot or I have to stick to the January slot?Regards,\nKrushna", "username": "Krushna_Borkar" }, { "code": "", "text": "Hi @Krushna_Borkar,Please send your query to [email protected]", "username": "kanikasingla" }, { "code": "", "text": "", "username": "Stennie_X" }, { "code": "", "text": "", "username": "Stennie_X" }, { "code": "", "text": "", "username": "Stennie_X" } ]
Rescheduling MongoDB Developer Certification
2020-12-10T06:18:03.493Z
Rescheduling MongoDB Developer Certification
1,943
https://www.mongodb.com/…4_2_1024x512.png
[ "configuration" ]
[ { "code": "", "text": "Hi\nI’ve seen that from this link that it’s strongly recommended to use the XFS file system on Linux:However the ubuntu instructions don’t mention setting up XFS or how to do so:I’m struggling to find any instructions elsewhere for doing this (I want to set up an Ubuntu 20.04 image with the XFS file system, for me to then install MongoDB 4.4).I don’t really want to have multiple file systems mounted on my image to keep things as simple as possible, but I’m not very familiar with Linux setup so any links to how to do this would be greatly appreciatedThanks", "username": "Ted_Francis" }, { "code": "disksfdiskgdiskmkfsdisks", "text": "If you are setting up a production mongodb on Ubuntu a certain level of proficiency(or access to some who as) is going to be necessary. Aside from installation and configuration there will be a need for ongoing maintenance of the system.If you are setting up a system to learn mongodb, xfs is not going to be required.Mongodb Atlas alleviates this and allows you to concentrate on mongodb.You are going to need to create or identify a partition to use and format it XFS.\nThere is no reason not to use XFS for the entire system, RHEL7+ use XFS by default, you’ll have to reinstall though.Operating on existing partitions and filesystems can be a destructive process and render your system unusable.You can use disks(graphical) to partition and format.\nOr fdisk/gdisk to partition and mkfs to create the filesystem.\nThe filesystem will need to be mounted at boot for mongod to auto start, disks again can do this otherwise you can edit /etc/fstab.", "username": "chris" }, { "code": "", "text": "Thanks for your reply much appreciated.\nIt is going to be (eventually) a production system and I’m very familiar with mongodb, and pretty familiar with Linux in general just less so with the particular commands I need to mount a different filesystem.I’m happy to use XFS as the only file system if that’s easy to do and I’m setting up everything from scratch anyway so no problem there.Thanks for the pointers - can you clarify if I can use fdisk/gdisk to use XFS for the entire file system?", "username": "Ted_Francis" }, { "code": "", "text": "Hi there\nI’m still unclear from your reply how I could use XFS for the entire file system and then reinstall.What I want to be able to do is:\nWrite the Ubuntu OS on to my SD card using Raspberry Pi Imager ideally or another mechanism is fine\nSet up the XFS file system on part of that SD card - how can I do this without wiping out the OS, while running this on a single SD card?", "username": "Ted_Francis" }, { "code": "", "text": "Hi @Ted_FrancisIf you’re looking to install MongoDB on a Raspberry Pi, there’s a guide here: Install & Configure MongoDB on the Raspberry Pi.Note that installing and managing MongoDB on a Raspberry Pi vs. a regular Linux server may involve different steps and set of skills (i.e. a small embedded server vs. a full high performance server). The XFS recommendation is mainly for a regular Linux server, and may not necessarily apply to your use case.Best regards,\nKevin", "username": "kevinadi" }, { "code": "", "text": "Thanks @kevinadi that guide is certainly useful - I’m glad to say I had already done (almost) all of those steps based on information found elsewhere, but there were a few new things in there that I hadn’t considered. 
Much appreciated.Thanks also for the info about the XFS recommendation being more appropriate to a full high performance server, that’s very useful to know.", "username": "Ted_Francis" } ]
Instructions for setting up Ubuntu 20.04 image with XFS file system
2020-11-23T13:21:06.857Z
Instructions for setting up Ubuntu 20.04 image with XFS file system
8,124
https://www.mongodb.com/…be0ea097404d.png
[ "aggregation", "queries" ]
[ { "code": "", "text": "Hi. I am running a query on my mongodb using the driver and writing the code in javascript. I am quite new to this.\nSo my question is , after i run the query i get the results for each individual document with the field i am querrying. How can i combine all of those results to give me one number. I am posting my results below.\nimage596×551 34.2 KBAs you can see above i get three results , one for each document. How can I add those field values together. Dont know if it is a javascript or mongo technique.Thank you", "username": "johan_potgieter" }, { "code": "", "text": "You should take a look at https://docs.mongodb.com/manual/aggregation/. The $match and $group stages would be a good starting point.MongoDB university offers a an aggregation framework specific course (M121). You may find more information at https://university.mongodb.com/.But you can do it in JavaScript also using any JavaScript arithmetic operators.", "username": "steevej" }, { "code": "", "text": "Noi understand how to run the aggregation inmongo and a query in JS. My question is how do i add all the individual values from a certain field in al the different documents together.", "username": "johan_potgieter" } ]
Adding values of multiple documents. JavaScript and MongoDB
2020-12-09T10:19:52.056Z
Adding values of multiple documents. JavaScript and MongoDB
2,021
https://www.mongodb.com/…e_2_1024x116.png
[]
[ { "code": "", "text": "ERROR: pyzmq has an invalid wheel, multiple .dist-info directories found: libsodium-1.0.17.dist-info, pyzmq-18.1.0.dist-infois shown while installing requirements.txt for the M220P course.\nimage1271×145 17.4 KB", "username": "Tanay_Patel" }, { "code": "", "text": "Hi @Tanay_Patel,While we’re working to integrate the University and community forums, this isn’t quite ready yet, so you might find a faster answer in the University forums here: Working with Data - MongoDB Developer Community Forums", "username": "Jamie" }, { "code": "", "text": "thanks @Jamie\nI will post it on the mentioned forum.", "username": "Tanay_Patel" } ]
Unable to install requirements.txt for M220P
2020-12-10T07:19:36.919Z
Unable to install requirements.txt for M220P
2,377
null
[ "java", "performance" ]
[ { "code": "", "text": "Hey there,I just upgraded our Spring Boot Application and therefore went with the new driver. After a long journey of debugging, I figured out that somehow the MongoCollections count method is (at least for me) broken. It offers two CountStrategys, whereas AGGREGATE is chosen by default but this leads to a never finishing response. The other option, COMMAND, does work. Also other queries like find works, it just this combination which does lead to a non starting application.\nI’m not sure if this question is too specific here, but what do i miss? Are there some properties my running mongo 4.0 instance is not fulfilling or why does this (new?) count method is not doing what it is supposed to be.Best, Richard", "username": "Richard_Kwasnicki" }, { "code": "MongoCollection#countMongoCollection#countDocumentsMongoCollection#countMongoCollection#estimatedDocumentCountMongoCollection#countMongoCollection#countDocumentsMongoCollection#estimatedDocumentCount", "text": "Hi Richard,Can you post a working code example somewhere that we can take a look at? I’d like to see the path through which the MongoCollection#count is invoked. The answer to that will help us arrive at the best solution to your problem.For background: MongoCollection#count was deprecated in 3.x and no longer included in 4.x. There are two replacements, depending on the behavior you want. If you want an accurate count of the documents in the collection (which MongoCollection#count provided if and only if you included a non-empty query filter), then you should use MongoCollection#countDocuments. If you want an estimated count of the documents in the collection (which MongoCollection#count provided if and only if you included an empty query filter), then you should use MongoCollection#estimatedDocumentCountMy hypothesis is that Spring Data MongoDB replaced use of MongoCollection#count with MongoCollection#countDocuments instead of MongoCollection#estimatedDocumentCount, and that’s why you’re seeing the behavior change.Thanks,\nJeff", "username": "Jeffrey_Yemin" }, { "code": "", "text": "Hey, thanks for your fast reply. Your hypothesis seems to be right, here is a Picture of the stacktrace reaching executeCount function. Screenshot from 2020-12-09 09-12-14901×199 42.5 KBAfter application startup, the same call does seem to work, at least for me thats strange…", "username": "Richard_Kwasnicki" }, { "code": "", "text": "The query filter here for this count is empty in both cases, the working and non-working call…", "username": "Richard_Kwasnicki" }, { "code": "", "text": "It’s not that countDocuments won’t ever complete, it’s just that it requires a complete scan of the collection to do so, whereas estimatedDocumentCount just uses index metadata to provide an estimated count.", "username": "Jeffrey_Yemin" }, { "code": "", "text": "Okay, that explains why it’s slow on testsystem (docs ~200k) and horrible on bigger testsystem (19kk docs). I realized I tried different different collections, so even after startup its horribly slow using count via spring. 
I think I’ll open a bug ticket on spring mongo…", "username": "Richard_Kwasnicki" }, { "code": "", "text": "OK, drop the link to the Spring ticket in here so I can comment on it if necessary.", "username": "Jeffrey_Yemin" }, { "code": "", "text": "**[Richard Kwasnicki](https://jira.spring.io/secure/ViewProfile.jspa?name=JIRAUS…ER50775)** opened **[DATAMONGO-2669](https://jira.spring.io/browse/DATAMONGO-2669?redirect=false)** and commented\n\nWhen using SimpleMongoRepository.count(), MongoDB is now performing a collscan instead of using estimated count by metadata. I had a short discussion with a MongoDB developer at https://developer.mongodb.com/community/forums/t/upgrading-java-mongodriver-3-x-to-4-x-leads-to-lock-with-aggregate-countstrategy/12754\n\nHe advised to never use a non-empty query on exact count and better go with `MongoCollection#estimatedDocumentCount` instead of  `MongoCollection#countDocuments`. \n\nFor us it took minutes to perform a count query on a quite large collection on startup\n\n\n---\n\n**Affects:** 3.1.1 (2020.0.1)", "username": "Richard_Kwasnicki" } ]
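The same distinction is easy to try from the mongo shell; a sketch, with the collection name and filter as assumptions:

```js
// Metadata-based count: fast, approximate, takes no filter
db.myCollection.estimatedDocumentCount();

// Exact count: accurate, but may scan many documents, especially with an empty filter
db.myCollection.countDocuments({ status: "ACTIVE" });
```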
Upgrading Java MongoDriver 3.x to 4.x leads to lock with aggregate CountStrategy
2020-12-08T16:56:17.562Z
Upgrading Java MongoDriver 3.x to 4.x leads to lock with aggregate CountStrategy
4,102
null
[ "security", "field-encryption" ]
[ { "code": "", "text": "Hi,I’m hoping someone could point me in the right direction here.We are working with highly sensitive data and have subsequently starting working on implement CSFLE on sensitive data in sensitive collections. We have got CSFLE working using an Azure KMS and all is well.However, it seems that when using an encryption enabled MongoClient, any unsupported operations are blocked on all collections regardless of whether they have encrypted fields or not. Is this the desired behaviour of the driver and if so, what is a suitable workaround?We are using the v2.16 of the C# driver for reference.The only way I can see us working around this is by registering 2 clients:Is this the recommended approach? My concern is the number of connections to the database will increase as from my understanding the connections are handled by the MongoClient and 2 clients would result in 2 collection pools.In summary, my questions are:Thanks,\nLuke", "username": "Luke_Warren" }, { "code": "", "text": "Hey, can you provide a command you run that fails with csfle?", "username": "Dmitry_Lukyanov" }, { "code": "", "text": "Hi Demitry,I ended up logging a support ticket. It seems that this is a known bug with the driver: https://jira.mongodb.org/browse/SERVER-68371Please note to anyone is future that you cannot run your search pipelines with an encryption enabled Mongo Client.Commands run via the Mongocryptd process which does not recognise $search stages in aggregations. You will also get errors with lookup aggregations as they are not supported.The workaround is to register 2 mongo clients - one for dealing with encrypted collections and one for running aggregations. You will then need to specifically omit encrypted fields to avoid serialisation issues.This is not ideal as you will use more DB connections because Mongo cannot pool across 2 different clients with different configurations.", "username": "Luke_Warren" } ]
Client side field level encryption
2020-12-10T06:18:28.418Z
Client side field level encryption
1,679
null
[ "queries", "node-js" ]
[ { "code": " events.js:291\n2020-12-09T16:08:01.009305+00:00 app[worker.1]: throw er; // Unhandled 'error' event\n2020-12-09T16:08:01.009305+00:00 app[worker.1]: ^\n2020-12-09T16:08:01.009306+00:00 app[worker.1]: \n2020-12-09T16:08:01.009306+00:00 app[worker.1]: MongoError: Unrecognized expression '$or:'\nconst profPipeLine = [\n {\n '$match': {\n '$expr': {\n '$or:': [\n {'$eq': ['operationType', 'insert']},\n {'$eq': ['operationType', 'update']},\n {'$eq': ['operationType', 'delete']}\n ]\n }\n }\n }\n\n const options = { 'fullDocument': 'updateLookup' };\n\n const profileStream = MatchProfile.watch(profPipeLine, options); \n\n profileStream.on('change', next => {\n...\n})\n", "text": "I have an app deployed on heroku, which keeps crashing because of this error:And here’s the implementation of my change stream:Idk why I keep getting that error about $or, because it works without errors locally, and its not the first time i’ve used it. I’m also using nodejs event emmitters in my project, if that may have anything to do with it since I’m sure change streams use the same event emmitter library from nodejs.", "username": "Project_PlaDat" }, { "code": "'$or:': [:$or'$or:'", "text": "'$or:': [Its probably the syntax error. Looks like the colon : after the $or ('$or:') is causing the error.", "username": "Prasad_Saya" }, { "code": "'$match' : { 'operationType' : { '$in' : [ 'insert', 'update', 'delete' ] } }\n", "text": "If I may. I suggest that you look at https://docs.mongodb.com/manual/reference/operator/aggregation/in/ to replace the whole $expr. The following should be equivalent:", "username": "steevej" }, { "code": "$in$in$in", "text": "I suggest that you look at https://docs.mongodb.com/manual/reference/operator/aggregation/in/ to replace the whole $expr.The $in operator usage is correct, but the link should be: $in — MongoDB ManualNote that there is an $in operator and then there is a $in aggregation operator.", "username": "Prasad_Saya" } ]
Getting 'Unknown top level operator $or'
2020-12-09T19:22:44.072Z
Getting &lsquo;Unknown top level operator $or&rsquo;
37,463
null
[ "replication", "connecting", "containers", "devops" ]
[ { "code": "version: '3.7'\n\nservices:\n mongo-1:\n image: mongo:4.4.2\n container_name: mongo-1\n ports:\n - 27030:27017\n networks: \n - mongo\n restart: always\n command: /usr/bin/mongod --bind_ip_all --replSet rs0 --journal --dbpath /data/db --enableMajorityReadConcern false\n volumes:\n - ./mongo-1/db:/data/db\n\n mongo-2:\n image: mongo:4.4.2\n container_name: mongo-2\n ports:\n - 27031:27017\n networks: \n - mongo\n restart: always\n command: /usr/bin/mongod --bind_ip_all --replSet rs0 --journal --dbpath /data/db --enableMajorityReadConcern false\n volumes:\n - ./mongo-2/db:/data/db\n\n mongo-3:\n image: mongo:4.4.2\n container_name: mongo-3\n ports: \n - 27032:27017\n networks: \n - mongo\n restart: always\n command: /usr/bin/mongod --bind_ip_all --replSet rs0 --journal --dbpath /data/db --enableMajorityReadConcern false\n volumes:\n - ./mongo-3/db:/data/db\n\nvolumes:\n mongo-1:\n mongo-2:\n mongo-3:\n\nnetworks:\n mongo:\ndocker container lsCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n4de461506c94 mongo:4.4.2 \"docker-entrypoint.s…\" 18 seconds ago Up 16 seconds 0.0.0.0:27031->27017/tcp mongo-2\n2f6ff3f6cbef mongo:4.4.2 \"docker-entrypoint.s…\" 18 seconds ago Up 16 seconds 0.0.0.0:27032->27017/tcp mongo-3\n8a467ad33809 mongo:4.4.2 \"docker-entrypoint.s…\" 18 seconds ago Up 16 seconds 0.0.0.0:27030->27017/tcp mongo-1\ndocker exec -it mongo-1 mongors.stats() rs.initiate({\n _id : 'rs0',\n members: [\n { _id : 0, host : \"mongo-1:27017\" },\n { _id : 1, host : \"mongo-2:27017\" },\n { _id : 2, host : \"mongo-3:27017\" } \n ]\n });\nCannot connect to replica set \"localhost:replica\"[localhost:27030].\n\nSet's primary is unreachable.\n\nReason:\n\nNo member of the set is reachable. Reason: Connect failed\nMembers:\nlocalhost:27030\nlocalhost:27031\nlocalhost:27032\n\nSet Name: rs0\nextra_hosts:\n - mongo-1:127.0.0.1\n", "text": "Hi everyne,This is how my docker-compose.yml looks like:I’ll create volumes in production instead biding, but not important right now.After I execute docker container ls I can see the following:and then I do the next:docker exec -it mongo-1 mongowhich gets me into the mongo shell.If I execte rs.stats() it will get me the following:rs.status();\n{\n“operationTime” : Timestamp(0, 0),\n“ok” : 0,\n“errmsg” : “no replset config has been received”,\n“code” : 94,\n“codeName” : “NotYetInitialized”,\n“$clusterTime” : {\n“clusterTime” : Timestamp(0, 0),\n“signature” : {\n“hash” : BinData(0,“AAAAAAAAAAAAAAAAAAAAAAAAAAA=”),\n“keyId” : NumberLong(0)\n}\n}\n}Now I want to add replica members and I’ll execute the next:And this is where problems begin.I cannot login to replica set, but can on individual nodes. For example, I create a new document in primary replica and without any problems I can see that docment in secondary nodes (when I log in individually).I tried this on my Mac OS X, but the same thing is on Linux too.This is my error message:and this is how my connection looks:Note that this is is Robo 3T. It works on Atlas, for example.Does anyone know what should I do?I spent last 2 days on this and I think I checked every tutorial, every YouTube video, bu still nothing.Seems like Mongo Dockerfile is hard coded to expose it to port 27017, so it’s not possible to change that.I also tried with:in each service but nothing.Thanks.", "username": "jellyx" }, { "code": "", "text": "See if you can make this work for you.Updates the hosts file to match your replicas.", "username": "chris" }, { "code": "/etc/hosts", "text": "Nothing. 
I already tried this by changing /etc/hosts.I also created username, password, and database. I can log in directly (without replica).", "username": "jellyx" }, { "code": "services:\n mongo-0-a:\n ports:\n - 127.0.10.1:27017:27017\n...\n mongo-0-b:\n ports:\n - 127.0.10.2:27017:27017\n...\n mongo-0-c:\n ports:\n - 127.0.10.3:27017:27017\n...\nversion: '3.7'\n\nservices:\n mongo-0-a:\n image: mongo:4.4\n ports:\n - 127.0.10.1:27017:27017\n volumes:\n - mongo-0-a:/data/db\n restart: unless-stopped\n command: \"--wiredTigerCacheSizeGB 0.25 --replSet rs0\"\n\n mongo-0-b:\n image: mongo:4.4\n ports:\n - 127.0.10.2:27017:27017\n volumes:\n - mongo-0-b:/data/db\n restart: unless-stopped\n command: \"--wiredTigerCacheSizeGB 0.25 --replSet rs0\"\n\n mongo-0-c:\n image: mongo:4.4\n ports:\n - 127.0.10.3:27017:27017\n volumes:\n - mongo-0-c:/data/db\n restart: unless-stopped\n command: \"--wiredTigerCacheSizeGB 0.25 --replSet rs0\"\nvolumes:\n mongo-0-a:\n mongo-0-b:\n mongo-0-c:\n...\n127.0.10.1 mongo-0-a\n127.0.10.2 mongo-0-b\n127.0.10.3 mongo-0-c\n...\nmongo --host 127.0.10.2 --eval 'var m = db.isMaster(); print(\"Is Primary?\", m.ismaster); print(\"hosts:\", m.hosts); print(\"Primary:\", m.primary)' --quiet\nIs Primary? false\nhosts: mongo-0-a:27017,mongo-0-b:27017,mongo-0-c:27017\nPrimary: mongo-0-c:27017\nmongo --host rs0/127.0.10.2 --quiet\n2020-12-09T15:25:53.458-0500 I NETWORK [js] Starting new replica set monitor for rs0/127.0.10.2:27017\n2020-12-09T15:25:53.458-0500 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to 127.0.10.2:27017\n2020-12-09T15:25:53.460-0500 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to mongo-0-c:27017\n2020-12-09T15:25:53.463-0500 I NETWORK [ReplicaSetMonitor-TaskExecutor] Confirmed replica set for rs0 is rs0/mongo-0-a:27017,mongo-0-b:27017,mongo-0-c:27017\n2020-12-09T15:25:53.463-0500 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to mongo-0-a:27017\n2020-12-09T15:25:53.463-0500 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to mongo-0-b:27017\n\n", "text": "The ports in your replicaset have to change too, to match the exposed ports. With a replicaset it is the config in the replica set that a client will connect to.I prefer to use different ip’s and keep 27017 as the port.My ports in compose look like:This command on the secondary below returns the hosts of the replicaset a client must connect to. These hostnames must resolve and the ports have to match. This is why exposing on a different port to what is in the rs.conf() does not work.Connecting to replicaSet:", "username": "chris" }, { "code": "Adding the hostnames to the hosts file did not work for me. I think if all hostnames refers to the same host IP (e.g. 127.0.0.1), it's not going to work if all docker ports are the same (e.g. 27017). The replica set is composed by `mongo1:27017, mongo2:27017 and mongo3:27017` inside docker. Outside docker it corresponds to `127.0.0.1:27017, 127.0.0.1:27017 and 127.0.0.1:27017` which won't work. 
To fix the issue I had to set a different port for each node.\n\n```\ndocker network create mongo-cluster\ndocker run --name mongo1 -d --net mongo-cluster -p 9042:9042 mongo:3.6 mongod --replSet docker-rs --port 9042\ndocker run --name mongo2 -d --net mongo-cluster -p 9142:9142 mongo:3.6 mongod --replSet docker-rs --port 9142\ndocker run --name mongo3 -d --net mongo-cluster -p 9242:9242 mongo:3.6 mongod --replSet docker-rs --port 9242\ndocker exec -it mongo1 mongo --port 9042\nconfig = {\"_id\" : \"docker-rs\", \"members\" : [{\"_id\" : 0,\"host\" : \"mongo1:9042\"},{\"_id\" : 1,\"host\" : \"mongo2:9142\"},{\"_id\" : 2,\"host\" : \"mongo3:9242\"}]}\nrs.initiate(config)\nrs.status() \n```\n\nand finally add the hostnames to the hosts file\n\n```\n127.0.0.1 mongo1 mongo2 mongo3\n```\n", "text": "Many thanks for this!I also found another working solution here:By user Sydney.It says:I tested on 4.4.2 and it works. Anyway, I’ll go with your solution because I like it more.However, I wonder why solution from this guy works when it uses ports 9042, 9142, 9242, but does’t work when I set 27030, 27031, 27032, for example. Or even 8042, 8142, 8242.Do yo know why it happens?As I said, I won’t go with this solution, but just too curios. Thanks!", "username": "jellyx" }, { "code": "config = {\"_id\" : \"docker-rs\", \"members\" : [{\"_id\" : 0,\"host\" : \"mongo1:9042\"},{\"_id\" : 1,\"host\" : \"mongo2:9142\"},{\"_id\" : 2,\"host\" : \"mongo3:9242\"}]}", "text": "Because they init the replicaset with the same ports as the ones exposed.config = {\"_id\" : \"docker-rs\", \"members\" : [{\"_id\" : 0,\"host\" : \"mongo1:9042\"},{\"_id\" : 1,\"host\" : \"mongo2:9142\"},{\"_id\" : 2,\"host\" : \"mongo3:9242\"}]}I tested on 4.4.2 and it works. Anyway, I’ll go with your solution because I like it more.Using ip’s in the 127.0.0.0/8 is great, its a whole /8 network!! And you can use default ports over and over again.", "username": "chris" }, { "code": "docker runrs.status()", "text": "Yes, but I changed everything. Wherever 9042 is mentioned I changed it. In docker run, in mongo shell when creating an initialization, etc.My rs.status() returns members that container 8042, 8142, 8242, for example.EDIT: I missed something. It’s okay now. Thank you again for your help.", "username": "jellyx" }, { "code": "ERROR: for docker-mongodb_mongo-0-a_1 Cannot start service mongo-0-a: Ports are not available: listen tcp 127.0.10.1:27017: bind: can't assign requested address\n\nERROR: for mongo-0-c Cannot start service mongo-0-c: Ports are not available: listen tcp 127.0.10.3:27017: bind: can't assign requested address\n\nERROR: for mongo-0-b Cannot start service mongo-0-b: Ports are not available: listen tcp 127.0.10.2:27017: bind: can't assign requested address\n\nERROR: for mongo-0-a Cannot start service mongo-0-a: Ports are not available: listen tcp 127.0.10.1:27017: bind: can't assign requested address", "text": "Ah, something weird is happening:", "username": "jellyx" }, { "code": "", "text": "This topic was automatically closed 5 days after the last reply. New replies are no longer allowed.", "username": "system" } ]
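The crux of the fix above is that clients connect to whatever host:port pairs are stored in the replica set config, so those pairs must be resolvable and reachable from outside Docker. A mongo shell sketch, using the hostnames from the compose file earlier in the thread:

```js
// Initiate the set with the same hostnames and ports that are exposed to the host
rs.initiate({
  _id: "rs0",
  members: [
    { _id: 0, host: "mongo-0-a:27017" },
    { _id: 1, host: "mongo-0-b:27017" },
    { _id: 2, host: "mongo-0-c:27017" }
  ]
});

// A client then connects with something like:
//   mongodb://mongo-0-a:27017,mongo-0-b:27017,mongo-0-c:27017/?replicaSet=rs0
// provided mongo-0-a/b/c resolve to 127.0.10.1/.2/.3 in the hosts file.
```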
MongoDB Replica Docker: Cannot connect on replica, only individual connection
2020-12-09T16:35:15.576Z
MongoDB Replica Docker: Cannot connect on replica, only individual connection
39,148
null
[ "java", "change-streams" ]
[ { "code": "\t MongoClient mongoClient = MongoClients.create(\"mongodb://localhost:27017,localhost:27018,localhost:27019/?replicaSet=replica\");\n \tMongoDatabase database = mongoClient.getDatabase(\"test\");\t\t\n \tMongoCollection<Document> collectionStream = database.getCollection(\"myCollection\");\n \tList<Bson> pipeline = Arrays.asList(Aggregates.match(Filters.and(Filters.in(\"operationType\", Arrays.asList(\"insert\", \"update\", \"replace\", \"invalidate\")))));\n \tMongoCursor<ChangeStreamDocument<Document>> cursor = collectionStream.watch(pipeline).fullDocument(FullDocument.UPDATE_LOOKUP).iterator();\n \tChangeStreamDocument<Document> streamedEvent = cursor.next();\n \tSystem.out.println(\"Streamed event: \" + streamedEvent);\n Exception in thread \"main\" com.mongodb.MongoExecutionTimeoutException: Error waiting for snapshot not less than { ts: Timestamp(1605805914, 1), t: -1 }, current relevant optime is { ts: Timestamp(1605805864, 1), t: 71 }. :: caused by :: operation exceeded time limit\n \tat com.mongodb.internal.connection.ProtocolHelper.createSpecialException(ProtocolHelper.java:239)\n \tat com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException(ProtocolHelper.java:171)\n \tat com.mongodb.internal.connection.InternalStreamConnection.receiveCommandMessageResponse(InternalStreamConnection.java:359)\n \tat com.mongodb.internal.connection.InternalStreamConnection.sendAndReceive(InternalStreamConnection.java:280)\n \tat com.mongodb.internal.connection.UsageTrackingInternalConnection.sendAndReceive(UsageTrackingInternalConnection.java:100)\n \tat com.mongodb.internal.connection.DefaultConnectionPool$PooledConnection.sendAndReceive(DefaultConnectionPool.java:490)\n \tat com.mongodb.internal.connection.CommandProtocolImpl.execute(CommandProtocolImpl.java:71)\n \tat com.mongodb.internal.connection.DefaultServer$DefaultServerProtocolExecutor.execute(DefaultServer.java:259)\n \tat com.mongodb.internal.connection.DefaultServerConnection.executeProtocol(DefaultServerConnection.java:202)\n \tat com.mongodb.internal.connection.DefaultServerConnection.command(DefaultServerConnection.java:118)\n \tat com.mongodb.internal.connection.DefaultServerConnection.command(DefaultServerConnection.java:110)\n \tat com.mongodb.internal.operation.CommandOperationHelper.executeCommand(CommandOperationHelper.java:345)\n \tat com.mongodb.internal.operation.CommandOperationHelper.executeCommand(CommandOperationHelper.java:336)\n \tat com.mongodb.internal.operation.CommandOperationHelper.executeCommandWithConnection(CommandOperationHelper.java:222)\n \tat com.mongodb.internal.operation.CommandOperationHelper$5.call(CommandOperationHelper.java:208)\n \tat com.mongodb.internal.operation.OperationHelper.withReadConnectionSource(OperationHelper.java:583)\n \tat com.mongodb.internal.operation.CommandOperationHelper.executeCommand(CommandOperationHelper.java:205)\n \tat com.mongodb.internal.operation.AggregateOperationImpl.execute(AggregateOperationImpl.java:189)\n \tat com.mongodb.internal.operation.ChangeStreamOperation$1.call(ChangeStreamOperation.java:325)\n \tat com.mongodb.internal.operation.ChangeStreamOperation$1.call(ChangeStreamOperation.java:321)\n \tat com.mongodb.internal.operation.OperationHelper.withReadConnectionSource(OperationHelper.java:583)\n \tat com.mongodb.internal.operation.ChangeStreamOperation.execute(ChangeStreamOperation.java:321)\n \tat com.mongodb.internal.operation.ChangeStreamOperation.execute(ChangeStreamOperation.java:60)\n \tat 
com.mongodb.client.internal.MongoClientDelegate$DelegateOperationExecutor.execute(MongoClientDelegate.java:178)\n \tat com.mongodb.client.internal.ChangeStreamIterableImpl.execute(ChangeStreamIterableImpl.java:204)\n \tat com.mongodb.client.internal.ChangeStreamIterableImpl.cursor(ChangeStreamIterableImpl.java:158)\n \tat com.mongodb.client.internal.ChangeStreamIterableImpl.iterator(ChangeStreamIterableImpl.java:153)\n \tat com.softstrategy.ProvaWatcher.ProvaWatcherApplication.main(ProvaWatcherApplication.java:34)\n", "text": "Hi all,This post is based on one of my previous one regarding Change Event Stream\nhttps://www.mongodb.com/community/forums/t/change-events-stream-stops-working-when-a-node-fails-in-replicaset/11043/2I am still trying to keep working my watcher when one of my data-bearing node of a replica set PSA\ngoes down by setting in the file .conf of every node the following instruction: replication.enableMajorityReadConcern equals to false.Here my java code with API mongodb-driver-sync 4.2.0-beta1:When my settings are like above, by stopping one of the primary or secondary node, I am always getting the following exception in console:On the other hand, if I comment out the enableMajorityReadConcern in the every file .conf nodes as it is by default, that exception does not appear.\n(Nonetheless, my watcher still does not stream…)Hence my questions are the following two ones:Thanks in advance.", "username": "Giovanni_Desiderio" }, { "code": "", "text": "Hi @Giovanni_Desiderio,Thanks for linking the other thread for background information.If majority read concern is set to false, change streams goes into a “speculative majority read”. In this case, change streams read at the current point in time and waits for it to be majority committed. If it cannot do majority commit, it throws an exception. When the secondary (only other data-bearing) node is unavailable the majority commit point cannot advance, and thus the current point in time will never become majority committed as well.When the arbiter is unavailable, the majority commit point still advances. As the secondary (the other data-bearing node) able to advance the majority commit point. In this case, the system operates normally.I am still trying to keep working my watcher when one of my data-bearing node of a replica set PSAIn your case, I’d recommend to substitute your arbiter for another secondary. This is because it requires a majority committed data (i.e. 
change stream) with good redundancy.Regards,\nWan.", "username": "wan" }, { "code": "", "text": "Thank you for your detailed answer.\nBy the way, reading the documentation (https://docs.mongodb.com/manual/reference/read-concern-majority/), it seems that disabling ReadConcern majority should not affect in any way the change streaming watcher in mongodb 4.2 or higher.\nOn the contrary, your previuos answer seems to be totally in contrast with what is written in documentation.\nAm I not right?Regarding a phrase of your answerchange streams read at the current point in time and waits for it to be majority committedwhich kind of commit are you talking about?I suppose you are pointing to the WriteConcern parameter, meaning that a new data must be majority persisted, to be available for the streaming watcher.\nBut in my replica set configuration, the parameter WriteConcern is equal to 1.\nHence, I think if I had an insert/update operation, the commit of this operation, in order to be receveid by the majority of actors, would require just one actor.\nThat is, just the node that is the primary, in that moment.What is wrong with my thougt?\nHave I misunderstood anything?Greetings,\nGiovanni", "username": "Giovanni_Desiderio" }, { "code": "", "text": "Hi @Giovanni_Desiderio,On the contrary, your previuos answer seems to be totally in contrast with what is written in documentation.I think the documentation does not cover the situation where the data-bearing secondary is not available longer than the point-in-time majority commit. I’ll discuss this with the documentation team for clarification.That is, just the node that is the primary, in that moment.Change streams only notify on data changes that have persisted to a majority of data-bearing members in the replica set. This ensures that notifications are triggered only by majority-committed changes that are durable in failure scenarios.Regards,\nWan.", "username": "wan" } ]
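A shell sketch of the reconfiguration recommended above, swapping the arbiter for a data-bearing member so the majority commit point can keep advancing while one node is down. Hostnames are placeholders, and the new member needs its own dbpath and running mongod before it is added.

```js
// Placeholders: remove the arbiter and add a third data-bearing member instead
rs.remove("arbiter.example.net:27017");
rs.add({ host: "secondary2.example.net:27017", priority: 1, votes: 1 });

// Verify that no member reports the ARBITER state any more
rs.status().members.forEach(m => print(m.name, m.stateStr));
```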
ReadConcernMajority disabled raises MongoExecutionTimeoutException in Change Stream
2020-11-19T21:42:23.353Z
ReadConcernMajority disabled raises MongoExecutionTimeoutException in Change Stream
3,402
null
[ "server", "upgrading" ]
[ { "code": " { \"_id\" : \"reports-z1-0\", \"host\" : \"reports-z1-0/mongodb-shard-reports-0-0.mongodb-shard-reports-0.default:27018,mongodb-shard-reports-0-1.mongodb-shard-reports-0.default:27018,mongodb-shard-reports-0-2.mongodb-shard-reports-0.default:27018\", \"state\" : 1, \"tags\" : [ \"z1\" ] }\n { \"_id\" : \"reports-z1-1\", \"host\" : \"reports-z1-1/mongodb-shard-reports-1-0.mongodb-shard-reports-1.default:27018,mongodb-shard-reports-1-1.mongodb-shard-reports-1.default:27018,mongodb-shard-reports-1-2.mongodb-shard-reports-1.default:27018\", \"state\" : 1, \"tags\" : [ \"z1\" ] }\n { \"_id\" : \"shard-z0-0\", \"host\" : \"shard-z0-0/mongodb-shard-data-0-0.mongodb-shard-data-0.default:27018,mongodb-shard-data-0-1.mongodb-shard-data-0.default:27018,mongodb-shard-data-0-2.mongodb-shard-data-0.default:27018\", \"state\" : 1, \"tags\" : [ \"z0\" ] }\n { \"_id\" : \"shard-z0-1\", \"host\" : \"shard-z0-1/mongodb-shard-data-1-0.mongodb-shard-data-1.default:27018,mongodb-shard-data-1-1.mongodb-shard-data-1.default:27018,mongodb-shard-data-1-2.mongodb-shard-data-1.default:27018\", \"state\" : 1, \"tags\" : [ \"z0\" ] }\n { \"_id\" : \"shard-z0-2\", \"host\" : \"shard-z0-2/mongodb-shard-data-2-0.mongodb-shard-data-2.default:27018,mongodb-shard-data-2-1.mongodb-shard-data-2.default:27018,mongodb-shard-data-2-2.mongodb-shard-data-2.default:27018\", \"state\" : 1, \"tags\" : [ \"z0\" ] }\ndb.adminCommand( { setFeatureCompatibilityVersion: \"4.4\" } )\n{\n\t\"operationTime\" : Timestamp(1607534165, 15),\n\t\"ok\" : 0,\n\t\"errmsg\" : \"No chunks were found for the collection\",\n\t\"code\" : 117,\n\t\"codeName\" : \"ConflictingOperationInProgress\",\n\t\"$gleStats\" : {\n\t\t\"lastOpTime\" : {\n\t\t\t\"ts\" : Timestamp(1607534165, 15),\n\t\t\t\"t\" : NumberLong(79)\n\t\t},\n\t\t\"electionId\" : ObjectId(\"7fffffff000000000000004f\")\n\t},\n\t\"lastCommittedOpTime\" : Timestamp(1607534165, 15),\n\t\"$configServerState\" : {\n\t\t\"opTime\" : {\n\t\t\t\"ts\" : Timestamp(1607534165, 4),\n\t\t\t\"t\" : NumberLong(73)\n\t\t}\n\t},\n\t\"$clusterTime\" : {\n\t\t\"clusterTime\" : Timestamp(1607534165, 15),\n\t\t\"signature\" : {\n\t\t\t\"hash\" : BinData(0,\"nQDtxjtK93fj0wnyN8Wy19Phb9U=\"),\n\t\t\t\"keyId\" : NumberLong(\"6860912798809980930\")\n\t\t}\n\t}\n}\nThe server generated these startup warnings when booting:\n 2020-12-09T17:13:20.379+00:00: A featureCompatibilityVersion upgrade did not complete. To fix this, use the setFeatureCompatibilityVersion command to resume upgrade to 4.4\n 2020-12-09T17:13:20.379+00:00: currentfeatureCompatibilityVersion: upgrading to 4.4\ndb.adminCommand( { getParameter: 1, featureCompatibilityVersion: 1 } )\n{\n\t\"featureCompatibilityVersion\" : {\n\t\t\"version\" : \"4.2\",\n\t\t\"targetVersion\" : \"4.4\"\n\t},\n\t\"ok\" : 1,\n\t\"$gleStats\" : {\n\t\t\"lastOpTime\" : Timestamp(0, 0),\n\t\t\"electionId\" : ObjectId(\"7fffffff0000000000000049\")\n\t},\n\t\"lastCommittedOpTime\" : Timestamp(1607534584, 1),\n\t\"$clusterTime\" : {\n\t\t\"clusterTime\" : Timestamp(1607534584, 1),\n\t\t\"signature\" : {\n\t\t\t\"hash\" : BinData(0,\"MfD/ygGU2rnKml3T/d91iImtIdk=\"),\n\t\t\t\"keyId\" : NumberLong(\"6860912798809980930\")\n\t\t}\n\t},\n\t\"operationTime\" : Timestamp(1607534584, 1)\n}\n\"ConflictingOperationInProgress: Chunks cannot be split while a feature compatibility version upgrade or downgrade is in progress\"\n\"ctx\":\"initandlisten\",\"msg\":\"A featureCompatibilityVersion upgrade did not complete. 
To fix this, use the setFeatureCompatibilityVersion command to resume upgrade to 4.4\",\"attr\":{\"currentfeatureCompatibilityVersion\":\"upgrading to 4.4\"},\"tags\":[\"startupWarnings\"]}\n", "text": "Hi,Long time mongodb user, this is the first time I’m stuck on a weird issue…I have a cluster with 5 shards, as:I did upgrade from 4.2 to latest 4.4.2When setting theThe command returnI checked all nodes by hand and some are showing:andThe config node and shard-z0-0 are stuck in that state.shard-z0-1 and shard-z0-2 still show FCV set to 4.2the reports-z1-0 and reports-z1-1 show the correct FCV of 4.4.I tried to restart everything it doesnt help.Due to that all chunk splitting are stuck:I did spot that message on data nodes:How can I recover from that state?Thanks", "username": "Cyril_Peponnet" }, { "code": "{\"t\":{\"$date\":\"2020-12-09T20:59:13.160+00:00\"},\"s\":\"I\", \"c\":\"SH_REFR\", \"id\":24103, \"ctx\":\"ConfigServerCatalogCacheLoader-14\",\"msg\":\"Error refreshing cached collection\",\"attr\":{\"namespace\":\"config.system.sessions\",\"durationMillis\":1,\"error\":\"ConflictingOperationInProgress: No chunks were found for the collection\"}}\n{\"t\":{\"$date\":\"2020-12-09T20:59:13.162+00:00\"},\"s\":\"I\", \"c\":\"SH_REFR\", \"id\":24103, \"ctx\":\"ConfigServerCatalogCacheLoader-14\",\"msg\":\"Error refreshing cached collection\",\"attr\":{\"namespace\":\"config.system.sessions\",\"durationMillis\":1,\"error\":\"ConflictingOperationInProgress: No chunks were found for the collection\"}}\n{\"t\":{\"$date\":\"2020-12-09T20:59:13.164+00:00\"},\"s\":\"I\", \"c\":\"SH_REFR\", \"id\":24103, \"ctx\":\"ConfigServerCatalogCacheLoader-14\",\"msg\":\"Error refreshing cached collection\",\"attr\":{\"namespace\":\"config.system.sessions\",\"durationMillis\":1,\"error\":\"ConflictingOperationInProgress: No chunks were found for the collection\"}}\n", "text": "Adding a bit of logs from data node:", "username": "Cyril_Peponnet" } ]
MongoDB 4.4 update: setFeatureCompatibilityVersion: 4.4 is stuck due to ConflictingOperationInProgress
2020-12-09T19:22:48.927Z
MongoDB 4.4 update: setFeatureCompatibilityVersion: 4.4 is stuck due to ConflictingOperationInProgress
3,347