Provided by: libmongoc-doc_1.9.2+dfsg-1build1_all

NAME

       mongoc_guides - Guides

COMMON TASKS

       Drivers  for  some  other  languages  provide  helper functions to perform certain common tasks. In the C
       Driver we must explicitly build commands to send to the server.

       This snippet contains example code for the explain, copydb and cloneCollection commands.
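
        As a minimal sketch of building and sending a raw command yourself (a simple ping against the admin
        database; client here is assumed to be an already-connected mongoc_client_t pointer):

           bson_t *command;
           bson_t reply;
           bson_error_t error;

           command = BCON_NEW ("ping", BCON_INT32 (1));

           if (mongoc_client_command_simple (
                  client, "admin", command, NULL, &reply, &error)) {
              /* command succeeded; inspect the reply document */
           } else {
              fprintf (stderr, "ping failed: %s\n", error.message);
           }

           bson_destroy (&reply);
           bson_destroy (command);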

   Setup
        First we'll write some code to insert sample data: doc-common-insert.c

          /* Don't try to compile this file on its own. It's meant to be #included
             by example code */

          /* Insert some sample data */
          bool
          insert_data (mongoc_collection_t *collection)
          {
             mongoc_bulk_operation_t *bulk;
             enum N { ndocs = 4 };
             bson_t *docs[ndocs];
             bson_error_t error;
             int i = 0;
             bool ret;

             bulk = mongoc_collection_create_bulk_operation_with_opts (collection, NULL);

             docs[0] = BCON_NEW ("x", BCON_DOUBLE (1.0), "tags", "[", "dog", "cat", "]");
             docs[1] = BCON_NEW ("x", BCON_DOUBLE (2.0), "tags", "[", "cat", "]");
             docs[2] = BCON_NEW (
                "x", BCON_DOUBLE (2.0), "tags", "[", "mouse", "cat", "dog", "]");
             docs[3] = BCON_NEW ("x", BCON_DOUBLE (3.0), "tags", "[", "]");

             for (i = 0; i < ndocs; i++) {
                mongoc_bulk_operation_insert (bulk, docs[i]);
                bson_destroy (docs[i]);
                docs[i] = NULL;
             }

             ret = mongoc_bulk_operation_execute (bulk, NULL, &error);

             if (!ret) {
                fprintf (stderr, "Error inserting data: %s\n", error.message);
             }

             mongoc_bulk_operation_destroy (bulk);
             return ret;
          }

          /* A helper which we'll use a lot later on */
          void
          print_res (const bson_t *reply)
          {
             BSON_ASSERT (reply);
             char *str = bson_as_canonical_extended_json (reply, NULL);
             printf ("%s\n", str);
             bson_free (str);
          }

   explain Command
        This is how to use the explain command in MongoDB 3.2+: explain.c

          bool
          explain (mongoc_collection_t *collection)
          {
             bson_t *command;
             bson_t reply;
             bson_error_t error;
             bool res;

             command = BCON_NEW ("explain",
                                 "{",
                                 "find",
                                 BCON_UTF8 (COLLECTION_NAME),
                                 "filter",
                                 "{",
                                 "x",
                                 BCON_INT32 (1),
                                 "}",
                                 "}");
             res = mongoc_collection_command_simple (
                collection, command, NULL, &reply, &error);
             if (!res) {
                fprintf (stderr, "Error with explain: %s\n", error.message);
                goto cleanup;
             }

             /* Do something with the reply */
             print_res (&reply);

          cleanup:
             bson_destroy (&reply);
             bson_destroy (command);
             return res;
          }

   copydb Command
        This example requires two instances of mongod to be running.

        Here's how to use the copydb command to copy a database from another instance of MongoDB: copydb.c

          bool
          copydb (mongoc_client_t *client, const char *other_host_and_port)
          {
             mongoc_database_t *admindb;
             bson_t *command;
             bson_t reply;
             bson_error_t error;
             bool res;

             BSON_ASSERT (other_host_and_port);
             /* Must do this from the admin db */
             admindb = mongoc_client_get_database (client, "admin");

             command = BCON_NEW ("copydb",
                                 BCON_INT32 (1),
                                 "fromdb",
                                 BCON_UTF8 ("test"),
                                 "todb",
                                 BCON_UTF8 ("test2"),

                                 /* If you want from a different host */
                                 "fromhost",
                                 BCON_UTF8 (other_host_and_port));
             res =
                mongoc_database_command_simple (admindb, command, NULL, &reply, &error);
             if (!res) {
                fprintf (stderr, "Error with copydb: %s\n", error.message);
                goto cleanup;
             }

             /* Do something with the reply */
             print_res (&reply);

          cleanup:
             bson_destroy (&reply);
             bson_destroy (command);
             mongoc_database_destroy (admindb);

             return res;
          }

   cloneCollection Command
        This example requires two instances of mongod to be running.

        Here's an example of using the cloneCollection command to clone a collection from another instance of
        MongoDB: clone-collection.c

          bool
          clone_collection (mongoc_database_t *database, const char *other_host_and_port)
          {
             bson_t *command;
             bson_t reply;
             bson_error_t error;
             bool res;

             BSON_ASSERT (other_host_and_port);
             command = BCON_NEW ("cloneCollection",
                                 BCON_UTF8 ("test.remoteThings"),
                                 "from",
                                 BCON_UTF8 (other_host_and_port),
                                 "query",
                                 "{",
                                 "x",
                                 BCON_INT32 (1),
                                 "}");
             res =
                mongoc_database_command_simple (database, command, NULL, &reply, &error);
             if (!res) {
                fprintf (stderr, "Error with clone: %s\n", error.message);
                goto cleanup;
             }

             /* Do something with the reply */
             print_res (&reply);

          cleanup:
             bson_destroy (&reply);
             bson_destroy (command);

             return res;
          }

   Running the Examples
        common-operations.c

          /*
           * Copyright 2016 MongoDB, Inc.
           *
           * Licensed under the Apache License, Version 2.0 (the "License");
           * you may not use this file except in compliance with the License.
           * You may obtain a copy of the License at
           *
           *   http://www.apache.org/licenses/LICENSE-2.0
           *
           * Unless required by applicable law or agreed to in writing, software
           * distributed under the License is distributed on an "AS IS" BASIS,
           * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
           * See the License for the specific language governing permissions and
           * limitations under the License.
           */

          #include <mongoc.h>
          #include <stdio.h>

          const char *COLLECTION_NAME = "things";

          #include "../doc-common-insert.c"
          #include "explain.c"
          #include "copydb.c"
          #include "clone-collection.c"

          int
          main (int argc, char *argv[])
          {
             mongoc_database_t *database = NULL;
             mongoc_client_t *client = NULL;
             mongoc_collection_t *collection = NULL;
             char *host_and_port;
             int res = 0;
             char *other_host_and_port = NULL;

             if (argc < 2 || argc > 3) {
                fprintf (stderr,
                         "usage: %s MONGOD-1-CONNECTION-STRING "
                         "[MONGOD-2-HOST-NAME:MONGOD-2-PORT]\n",
                         argv[0]);
                fprintf (stderr,
                         "MONGOD-1-CONNECTION-STRING can be "
                         "of the following forms:\n");
                fprintf (stderr, "localhost\t\t\t\tlocal machine\n");
                fprintf (stderr, "localhost:27018\t\t\t\tlocal machine on port 27018\n");
                fprintf (stderr,
                         "mongodb://user:pass@localhost:27017\t"
                         "local machine on port 27017, and authenticate with username "
                         "user and password pass\n");
                return 1;
             }

             mongoc_init ();

             if (strncmp (argv[1], "mongodb://", 10) == 0) {
                host_and_port = bson_strdup (argv[1]);
             } else {
                host_and_port = bson_strdup_printf ("mongodb://%s", argv[1]);
             }
             other_host_and_port = argc > 2 ? argv[2] : NULL;

             client = mongoc_client_new (host_and_port);

             if (!client) {
                fprintf (stderr, "Invalid hostname or port: %s\n", host_and_port);
                res = 2;
                goto cleanup;
             }

             mongoc_client_set_error_api (client, 2);
             database = mongoc_client_get_database (client, "test");
             collection = mongoc_database_get_collection (database, COLLECTION_NAME);

             printf ("Inserting data\n");
             if (!insert_data (collection)) {
                res = 3;
                goto cleanup;
             }

             printf ("explain\n");
             if (!explain (collection)) {
                res = 4;
                goto cleanup;
             }

             if (other_host_and_port) {
                printf ("copydb\n");
                if (!copydb (client, other_host_and_port)) {
                   res = 5;
                   goto cleanup;
                }

                printf ("clone collection\n");
                if (!clone_collection (database, other_host_and_port)) {
                   res = 6;
                   goto cleanup;
                }
             }

          cleanup:
             if (collection) {
                mongoc_collection_destroy (collection);
             }

             if (database) {
                mongoc_database_destroy (database);
             }

             if (client) {
                mongoc_client_destroy (client);
             }

             bson_free (host_and_port);
             mongoc_cleanup ();
             return res;
          }
        First launch two separate instances of mongod (must be done from separate shells):

           $ mongod

           $ mkdir /tmp/db2
           $ mongod --dbpath /tmp/db2 --port 27018 # second instance

       Now compile and run the example program:

           $ cd examples/common_operations/
           $ gcc -Wall -o example common-operations.c $(pkg-config --cflags --libs libmongoc-1.0)
           $ ./example localhost:27017 localhost:27018
          Inserting data
          explain
          {
             "executionStats" : {
                "allPlansExecution" : [],
                "executionStages" : {
                   "advanced" : 19,
                   "direction" : "forward" ,
                   "docsExamined" : 76,
                   "executionTimeMillisEstimate" : 0,
                   "filter" : {
                      "x" : {
                         "$eq" : 1
                      }
                   },
                   "invalidates" : 0,
                   "isEOF" : 1,
                   "nReturned" : 19,
                   "needTime" : 58,
                   "needYield" : 0,
                   "restoreState" : 0,
                   "saveState" : 0,
                   "stage" : "COLLSCAN" ,
                   "works" : 78
                },
                "executionSuccess" : true,
                "executionTimeMillis" : 0,
                "nReturned" : 19,
                "totalDocsExamined" : 76,
                "totalKeysExamined" : 0
             },
             "ok" : 1,
             "queryPlanner" : {
                "indexFilterSet" : false,
                "namespace" : "test.things",
                "parsedQuery" : {
                   "x" : {
                      "$eq" : 1
                   }
                },
                "plannerVersion" : 1,
                "rejectedPlans" : [],
                "winningPlan" : {
                   "direction" : "forward" ,
                   "filter" : {
                      "x" : {
                         "$eq" : 1
                      }
                   },
                   "stage" : "COLLSCAN"
                }
             },
             "serverInfo" : {
                "gitVersion" : "05552b562c7a0b3143a729aaa0838e558dc49b25" ,
                "host" : "MacBook-Pro-57.local",
                "port" : 27017,
                "version" : "3.2.6"
             }
          }
          copydb
          { "ok" : 1 }
          clone collection
          { "ok" : 1 }

ADVANCED CONNECTIONS

       The following guide contains information specific to certain types of MongoDB configurations.

       For  an  example of connecting to a simple standalone server, see the Tutorial. To establish a connection
       with authentication options enabled, see the Authentication page.

   Connecting to a Replica Set
       Connecting to a replica set is much like connecting to a standalone MongoDB server.  Simply  specify  the
       replica set name using the ?replicaSet=myreplset URI option.

          #include <bson.h>
          #include <mongoc.h>

          int
          main (int argc, char *argv[])
          {
             mongoc_client_t *client;

             mongoc_init ();

             /* Create our MongoDB Client */
             client = mongoc_client_new (
                "mongodb://host01:27017,host02:27017,host03:27017/?replicaSet=myreplset");

             /* Do some work */
             /* TODO */

             /* Clean up */
             mongoc_client_destroy (client);
             mongoc_cleanup ();

             return 0;
          }

       TIP:
          Multiple  hostnames  can  be  specified  in the MongoDB connection string URI, with a comma separating
          hosts in the seed list.

          It is recommended to use a seed list of members of the replica set to allow the driver to  connect  to
          any node.

   Connecting to a Sharded Cluster
       To connect to a sharded cluster, specify the mongos nodes the client should connect to. The C Driver will
       automatically detect that it has connected to a mongos sharding server.

       If  more  than  one  hostname  is  specified, a seed list will be created to attempt failover between the
       mongos instances.

       WARNING:
          Specifying the replicaSet parameter when connecting to a mongos sharding server is invalid.

          #include <bson.h>
          #include <mongoc.h>

          int
          main (int argc, char *argv[])
          {
             mongoc_client_t *client;

             mongoc_init ();

             /* Create our MongoDB Client */
             client = mongoc_client_new ("mongodb://myshard01:27017/");

             /* Do something with client ... */

             /* Free the client */
             mongoc_client_destroy (client);

             mongoc_cleanup ();

             return 0;
          }
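
        If the deployment has more than one mongos, list them all in the URI (hostnames here are illustrative)
        and the driver will use the seed list for failover:

           client = mongoc_client_new ("mongodb://myshard01:27017,myshard02:27017,myshard03:27017/");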

   Connecting to an IPv6 Address
       The MongoDB C Driver will automatically resolve IPv6 addresses from host names. However,  to  specify  an
       IPv6 address directly, wrap the address in [].

          mongoc_uri_t *uri = mongoc_uri_new ("mongodb://[::1]:27017");

   Connecting to a UNIX Domain Socket
       On  UNIX-like  systems, the C Driver can connect directly to a MongoDB server using a UNIX domain socket.
       Pass the URL-encoded path to the socket, which must be suffixed with .sock. For example, to connect to  a
       domain socket at /tmp/mongodb-27017.sock:

          mongoc_uri_t *uri = mongoc_uri_new ("mongodb://%2Ftmp%2Fmongodb-27017.sock");

       Include username and password like so:

          mongoc_uri_t *uri = mongoc_uri_new ("mongodb://user:pass@%2Ftmp%2Fmongodb-27017.sock");

   Connecting to a server over SSL
       These are instructions for configuring TLS/SSL connections.

       To run a server locally (on port 27017, for example):

          $ mongod --port 27017 --sslMode requireSSL --sslPEMKeyFile server.pem --sslCAFile ca.pem

       Add /?ssl=true to the end of a client URI.

          mongoc_client_t *client = NULL;
          client = mongoc_client_new ("mongodb://localhost:27017/?ssl=true");

        MongoDB requires client certificates by default, unless the --sslAllowConnectionsWithoutCertificates
        option is provided. The C Driver can be configured to present a client certificate using a
        mongoc_ssl_opt_t:

          const mongoc_ssl_opt_t *ssl_default = mongoc_ssl_opt_get_default ();
          mongoc_ssl_opt_t ssl_opts = { 0 };

          /* optionally copy in a custom trust directory or file; otherwise the default is used. */
          memcpy (&ssl_opts, ssl_default, sizeof ssl_opts);
           ssl_opts.pem_file = "client.pem";

          mongoc_client_set_ssl_opts (client, &ssl_opts);

        The client certificate provided by pem_file must be issued by one of the Certificate Authorities the
        server trusts, i.e. those listed in --sslCAFile, or, if that option is omitted, by a CA in the server's
        native certificate store.

        To verify the server certificate against a specific CA, provide a PEM-armored file containing a CA
        certificate (or a concatenated list of CA certificates) with the ca_file option, or a c_rehash directory
        structure of CAs with the ca_dir option. When neither ca_file nor ca_dir is provided, the driver uses
        the CAs in the native platform certificate store.
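
        For example, a sketch of verifying the server against a specific CA file (file names are illustrative),
        in addition to presenting the client certificate shown above:

           mongoc_ssl_opt_t ssl_opts = { 0 };

           memcpy (&ssl_opts, mongoc_ssl_opt_get_default (), sizeof ssl_opts);
           ssl_opts.pem_file = "client.pem"; /* client certificate, as above */
           ssl_opts.ca_file = "ca.pem";      /* CA used to verify the server */

           mongoc_client_set_ssl_opts (client, &ssl_opts);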

       See mongoc_ssl_opt_t for more information on the various SSL related options.

   Compressing data to and from MongoDB
        MongoDB 3.4 added Snappy compression support, and MongoDB 3.6 added zlib compression. To enable
        compression, configure the client with the compressors it should use:

          mongoc_client_t *client = NULL;
          client = mongoc_client_new ("mongodb://localhost:27017/?compressors=snappy,zlib");

       The  compressors option specifies the priority order of compressors the client wants to use. Messages are
       compressed if the client and server share any compressors in common.

       Note that the compressor used by the server might not be the same compressor as  the  client  used.   For
       example,  if  the  client  uses  the  connection  string compressors=zlib,snappy the client will use zlib
       compression to send data (if possible), but the server might still reply using snappy, depending  on  how
       the server was configured.

        The driver must be built with zlib and/or snappy support to enable compression; any unknown (or
        not-compiled-in) compressor value is ignored.

   Additional Connection Options
       The full list of connection options can be found in the mongoc_uri_t docs.

       Certain socket/connection related options are not configurable:
                      ─────────────────────────────────────────────────────────────────────────
                        Option          Description                    Value
                      ─────────────────────────────────────────────────────────────────────────
                        SO_KEEPALIVE    TCP Keep Alive                 Enabled
                      ─────────────────────────────────────────────────────────────────────────
                        TCP_KEEPIDLE    How long a connection  needs   300 seconds
                                        to  remain  idle  before TCP
                                        starts   sending   keepalive
                                        probes
                      ─────────────────────────────────────────────────────────────────────────
                        TCP_KEEPINTVL   The  time in seconds between   10 seconds
                                        TCP probes
                      ─────────────────────────────────────────────────────────────────────────
                        TCP_KEEPCNT     How  many  probes  to  send,   9 probes
                                        without     acknowledgement,
                                        before     dropping      the
                                        connection
                      ─────────────────────────────────────────────────────────────────────────
                        TCP_NODELAY     Send   packets  as  soon  as   Enabled (no buffering)
                                        possible  or  buffer   small
                                        packets (Nagle algorithm)
                       ─────────────────────────────────────────────────────────────────────────

CONNECTION POOLING

CURSORS

   Handling Cursor Failures
       Cursors  exist  on  a  MongoDB  server.  However, the mongoc_cursor_t structure gives the local process a
       handle to the cursor. It is possible for errors to occur on the server while iterating a  cursor  on  the
       client.  Even  a  network  partition may occur. This means that applications should be robust in handling
       cursor failures.

       While iterating cursors, you should check to see if an error has occurred. See the following example  for
       how to robustly check for errors.

          static void
          print_all_documents (mongoc_collection_t *collection)
          {
             mongoc_cursor_t *cursor;
             const bson_t *doc;
             bson_error_t error;
             bson_t query = BSON_INITIALIZER;
             char *str;

             cursor = mongoc_collection_find_with_opts (collection, query, NULL, NULL);

             while (mongoc_cursor_next (cursor, &doc)) {
                str = bson_as_canonical_extended_json (doc, NULL);
                printf ("%s\n", str);
                bson_free (str);
             }

             if (mongoc_cursor_error (cursor, &error)) {
                fprintf (stderr, "Failed to iterate all documents: %s\n", error.message);
             }

             mongoc_cursor_destroy (cursor);
          }

   Destroying Server-Side Cursors
        The MongoDB C driver will automatically destroy a server-side cursor when mongoc_cursor_destroy() is
        called. Failure to call this function when done with a cursor will leak memory on the client as well as
        consume extra memory on the server. If the cursor was configured to never time out, it will become a
        memory leak on the server.
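
        As a sketch, even if iteration stops early, the cursor should still be destroyed so the server-side
        cursor is killed (cursor and doc are as in the previous example; matches() is a hypothetical predicate):

           while (mongoc_cursor_next (cursor, &doc)) {
              if (matches (doc)) { /* hypothetical predicate */
                 break;            /* stop iterating early */
              }
           }

           /* always destroy the cursor, even after an early break, so the
            * server-side cursor is killed and client memory is freed */
           mongoc_cursor_destroy (cursor);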

   Tailable Cursors
       Tailable cursors are cursors that remain open even after they've returned a final result.  This  way,  if
       more  documents  are  added  to a collection (i.e., to the cursor's result set), then you can continue to
       call mongoc_cursor_next() to retrieve those additional results.

       Here's a complete test case that demonstrates the use of tailable cursors.

       NOTE:
          Tailable cursors are for capped collections only.

        Here's an example of tailing the oplog from a replica set: mongoc-tail.c

          #include <bson.h>
          #include <mongoc.h>
          #include <stdio.h>
          #include <stdlib.h>

          #ifdef _WIN32
          #define sleep(_n) Sleep ((_n) *1000)
          #endif

          static void
          print_bson (const bson_t *b)
          {
             char *str;

             str = bson_as_canonical_extended_json (b, NULL);
             fprintf (stdout, "%s\n", str);
             bson_free (str);
          }

          static mongoc_cursor_t *
          query_collection (mongoc_collection_t *collection, uint32_t last_time)
          {
             mongoc_cursor_t *cursor;
             bson_t query;
             bson_t gt;
             bson_t opts;

             BSON_ASSERT (collection);

             bson_init (&query);
             BSON_APPEND_DOCUMENT_BEGIN (&query, "ts", &gt);
             BSON_APPEND_TIMESTAMP (&gt, "$gt", last_time, 0);
             bson_append_document_end (&query, &gt);

             bson_init (&opts);
             BSON_APPEND_BOOL (&opts, "tailable", true);
             BSON_APPEND_BOOL (&opts, "awaitData", true);

             cursor = mongoc_collection_find_with_opts (collection, &query, &opts, NULL);

             bson_destroy (&query);
             bson_destroy (&opts);

             return cursor;
          }

          static void
          tail_collection (mongoc_collection_t *collection)
          {
             mongoc_cursor_t *cursor;
             uint32_t last_time;
             const bson_t *doc;
             bson_error_t error;
             bson_iter_t iter;

             BSON_ASSERT (collection);

             last_time = (uint32_t) time (NULL);

             while (true) {
                cursor = query_collection (collection, last_time);
                while (!mongoc_cursor_error (cursor, &error) &&
                       mongoc_cursor_more (cursor)) {
                   if (mongoc_cursor_next (cursor, &doc)) {
                      if (bson_iter_init_find (&iter, doc, "ts") &&
                          BSON_ITER_HOLDS_TIMESTAMP (&iter)) {
                         bson_iter_timestamp (&iter, &last_time, NULL);
                      }
                      print_bson (doc);
                   }
                }
                if (mongoc_cursor_error (cursor, &error)) {
                   if (error.domain == MONGOC_ERROR_SERVER) {
                      fprintf (stderr, "%s\n", error.message);
                      exit (1);
                   }
                }

                mongoc_cursor_destroy (cursor);
                sleep (1);
             }
          }

          int
          main (int argc, char *argv[])
          {
             mongoc_collection_t *collection;
             mongoc_client_t *client;

             if (argc != 2) {
                fprintf (stderr, "usage: %s MONGO_URI\n", argv[0]);
                return EXIT_FAILURE;
             }

             mongoc_init ();

             client = mongoc_client_new (argv[1]);
             if (!client) {
                fprintf (stderr, "Invalid URI: \"%s\"\n", argv[1]);
                return EXIT_FAILURE;
             }

             mongoc_client_set_error_api (client, 2);

             collection = mongoc_client_get_collection (client, "local", "oplog.rs");

             tail_collection (collection);

             mongoc_collection_destroy (collection);
             mongoc_client_destroy (client);

             return 0;
          }
          $ gcc -Wall -o mongoc-tail mongoc-tail.c $(pkg-config --cflags --libs libmongoc-1.0)
          $ ./mongoc-tail mongodb://example.com/?replicaSet=myReplSet
          {
              "h" : -8458503739429355503,
              "ns" : "test.test",
              "o" : {
                  "_id" : {
                      "$oid" : "5372ab0a25164be923d10d50"
                  }
              },
              "op" : "i",
              "ts" : {
                  "$timestamp" : {
                      "i" : 1,
                      "t" : 1400023818
                  }
              },
              "v" : 2
          }

        The output above is a sample oplog entry produced by running db.test.insert({}) in the mongo shell on
        the replica set.

       See also mongoc_cursor_set_max_await_time_ms.
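
        For example, a sketch of capping how long each getMore blocks on the tailable, awaitData cursor created
        by query_collection() above (the 5000 ms value is illustrative):

           cursor = query_collection (collection, last_time);

           /* let the server wait at most ~5 seconds for new oplog entries
            * before returning an empty batch */
           mongoc_cursor_set_max_await_time_ms (cursor, 5000);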

BULK WRITE OPERATIONS

       This tutorial explains how to take advantage of MongoDB C driver bulk write operation features. Executing
       write operations in batches reduces the number of network round trips, increasing write throughput.

   Bulk Insert
       First we need to fetch a bulk operation handle from the mongoc_collection_t.

          mongoc_bulk_operation_t *bulk =
             mongoc_collection_create_bulk_operation_with_opts (collection, NULL);

       We  can  now start inserting documents to the bulk operation. These will be buffered until we execute the
       operation.

       The  bulk  operation  will  coalesce  insertions  as  a  single  batch  for  each  consecutive  call   to
       mongoc_bulk_operation_insert(). This creates a pipelined effect when possible.

       To   execute  the  bulk  operation  and  receive  the  result  we  call  mongoc_bulk_operation_execute().
        bulk1.c

          #include <assert.h>
          #include <bcon.h>
          #include <mongoc.h>
          #include <stdio.h>

          static void
          bulk1 (mongoc_collection_t *collection)
          {
             mongoc_bulk_operation_t *bulk;
             bson_error_t error;
             bson_t *doc;
             bson_t reply;
             char *str;
             bool ret;
             int i;

             bulk = mongoc_collection_create_bulk_operation_with_opts (collection, NULL);

             for (i = 0; i < 10000; i++) {
                doc = BCON_NEW ("i", BCON_INT32 (i));
                mongoc_bulk_operation_insert (bulk, doc);
                bson_destroy (doc);
             }

             ret = mongoc_bulk_operation_execute (bulk, &reply, &error);

             str = bson_as_canonical_extended_json (&reply, NULL);
             printf ("%s\n", str);
             bson_free (str);

             if (!ret) {
                fprintf (stderr, "Error: %s\n", error.message);
             }

             bson_destroy (&reply);
             mongoc_bulk_operation_destroy (bulk);
          }

          int
          main (int argc, char *argv[])
          {
             mongoc_client_t *client;
             mongoc_collection_t *collection;

             mongoc_init ();

             client = mongoc_client_new ("mongodb://localhost/?appname=bulk1-example");
             mongoc_client_set_error_api (client, 2);
             collection = mongoc_client_get_collection (client, "test", "test");

             bulk1 (collection);

             mongoc_collection_destroy (collection);
             mongoc_client_destroy (client);

             mongoc_cleanup ();

             return 0;
          }

        Example reply document:

          {"nInserted"   : 10000,
           "nMatched"    : 0,
           "nModified"   : 0,
           "nRemoved"    : 0,
           "nUpserted"   : 0,
           "writeErrors" : []
           "writeConcernErrors" : [] }

   Mixed Bulk Write Operations
       MongoDB C driver also supports executing mixed bulk write operations. A  batch  of  insert,  update,  and
       remove operations can be executed together using the bulk write operations API.

   Ordered Bulk Write Operations
       Ordered  bulk  write  operations  are  batched  and  sent  to the server in the order provided for serial
        execution. The reply document describes the type and count of operations performed: bulk2.c

          #include <assert.h>
          #include <bcon.h>
          #include <mongoc.h>
          #include <stdio.h>

          static void
          bulk2 (mongoc_collection_t *collection)
          {
             mongoc_bulk_operation_t *bulk;
             bson_error_t error;
             bson_t *query;
             bson_t *doc;
             bson_t *opts;
             bson_t reply;
             char *str;
             bool ret;
             int i;

             bulk = mongoc_collection_create_bulk_operation_with_opts (collection, NULL);

             /* Remove everything */
             query = bson_new ();
             mongoc_bulk_operation_remove (bulk, query);
             bson_destroy (query);

             /* Add a few documents */
             for (i = 1; i < 4; i++) {
                doc = BCON_NEW ("_id", BCON_INT32 (i));
                mongoc_bulk_operation_insert (bulk, doc);
                bson_destroy (doc);
             }

             /* {_id: 1} => {$set: {foo: "bar"}} */
             query = BCON_NEW ("_id", BCON_INT32 (1));
             doc = BCON_NEW ("$set", "{", "foo", BCON_UTF8 ("bar"), "}");
             mongoc_bulk_operation_update_many_with_opts (bulk, query, doc, NULL, &error);
             bson_destroy (query);
             bson_destroy (doc);

             /* {_id: 4} => {'$inc': {'j': 1}} (upsert) */
             opts = BCON_NEW ("upsert", BCON_BOOL (true));
             query = BCON_NEW ("_id", BCON_INT32 (4));
             doc = BCON_NEW ("$inc", "{", "j", BCON_INT32 (1), "}");
             mongoc_bulk_operation_update_many_with_opts (bulk, query, doc, opts, &error);
             bson_destroy (query);
             bson_destroy (doc);
             bson_destroy (opts);

             /* replace {j:1} with {j:2} */
             query = BCON_NEW ("j", BCON_INT32 (1));
             doc = BCON_NEW ("j", BCON_INT32 (2));
             mongoc_bulk_operation_replace_one_with_opts (bulk, query, doc, NULL, &error);
             bson_destroy (query);
             bson_destroy (doc);

             ret = mongoc_bulk_operation_execute (bulk, &reply, &error);

             str = bson_as_canonical_extended_json (&reply, NULL);
             printf ("%s\n", str);
             bson_free (str);

             if (!ret) {
                printf ("Error: %s\n", error.message);
             }

             bson_destroy (&reply);
             mongoc_bulk_operation_destroy (bulk);
          }

          int
          main (int argc, char *argv[])
          {
             mongoc_client_t *client;
             mongoc_collection_t *collection;

             mongoc_init ();

             client = mongoc_client_new ("mongodb://localhost/?appname=bulk2-example");
             mongoc_client_set_error_api (client, 2);
             collection = mongoc_client_get_collection (client, "test", "test");

             bulk2 (collection);

             mongoc_collection_destroy (collection);
             mongoc_client_destroy (client);

             mongoc_cleanup ();

             return 0;
          }

        Example reply document:

          { "nInserted"   : 3,
            "nMatched"    : 2,
            "nModified"   : 2,
            "nRemoved"    : 10000,
            "nUpserted"   : 1,
            "upserted"    : [{"index" : 5, "_id" : 4}],
            "writeErrors" : []
            "writeConcernErrors" : [] }

       The index field in the upserted array is the 0-based index of the upsert operation; in this example,  the
       sixth operation of the overall bulk operation was an upsert, so its index is 5.

   Unordered Bulk Write Operations
       Unordered  bulk  write operations are batched and sent to the server in arbitrary order where they may be
       executed in parallel. Any errors that occur are reported after all operations are attempted.

       In the next example the first and third operations fail due to the unique constraint on _id. Since we are
        doing unordered execution the second and fourth operations succeed: bulk3.c

          #include <assert.h>
          #include <bcon.h>
          #include <mongoc.h>
          #include <stdio.h>

          static void
          bulk3 (mongoc_collection_t *collection)
          {
             bson_t opts = BSON_INITIALIZER;
             mongoc_bulk_operation_t *bulk;
             bson_error_t error;
             bson_t *query;
             bson_t *doc;
             bson_t reply;
             char *str;
             bool ret;

             /* false indicates unordered */
             BSON_APPEND_BOOL (&opts, "ordered", false);
             bulk = mongoc_collection_create_bulk_operation_with_opts (collection, &opts);
             bson_destroy (&opts);

             /* Add a document */
             doc = BCON_NEW ("_id", BCON_INT32 (1));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

             /* remove {_id: 2} */
             query = BCON_NEW ("_id", BCON_INT32 (2));
             mongoc_bulk_operation_remove_one (bulk, query);
             bson_destroy (query);

             /* insert {_id: 3} */
             doc = BCON_NEW ("_id", BCON_INT32 (3));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

             /* replace {_id:4} {'i': 1} */
             query = BCON_NEW ("_id", BCON_INT32 (4));
             doc = BCON_NEW ("i", BCON_INT32 (1));
             mongoc_bulk_operation_replace_one (bulk, query, doc, false);
             bson_destroy (query);
             bson_destroy (doc);

             ret = mongoc_bulk_operation_execute (bulk, &reply, &error);

             str = bson_as_canonical_extended_json (&reply, NULL);
             printf ("%s\n", str);
             bson_free (str);

             if (!ret) {
                printf ("Error: %s\n", error.message);
             }

             bson_destroy (&reply);
             mongoc_bulk_operation_destroy (bulk);
          }

          int
          main (int argc, char *argv[])
          {
             mongoc_client_t *client;
             mongoc_collection_t *collection;

             mongoc_init ();

             client = mongoc_client_new ("mongodb://localhost/?appname=bulk3-example");
             mongoc_client_set_error_api (client, 2);
             collection = mongoc_client_get_collection (client, "test", "test");

             bulk3 (collection);

             mongoc_collection_destroy (collection);
             mongoc_client_destroy (client);

             mongoc_cleanup ();

             return 0;
          }

        Example reply document:

          { "nInserted"    : 0,
            "nMatched"     : 1,
            "nModified"    : 1,
            "nRemoved"     : 1,
            "nUpserted"    : 0,
            "writeErrors"  : [
              { "index"  : 0,
                "code"   : 11000,
                "errmsg" : "E11000 duplicate key error index: test.test.$_id_ dup key: { : 1 }" },
              { "index"  : 2,
                "code"   : 11000,
                "errmsg" : "E11000 duplicate key error index: test.test.$_id_ dup key: { : 3 }" } ],
            "writeConcernErrors" : [] }

          Error: E11000 duplicate key error index: test.test.$_id_ dup key: { : 1 }

       The bson_error_t domain is MONGOC_ERROR_COMMAND and its code is 11000.
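
        A sketch of inspecting those fields after mongoc_bulk_operation_execute(), using the variables from
        bulk3() above:

           ret = mongoc_bulk_operation_execute (bulk, &reply, &error);

           if (!ret && error.domain == MONGOC_ERROR_COMMAND && error.code == 11000) {
              /* duplicate key: the other (unordered) operations were still attempted */
              fprintf (stderr, "Duplicate key: %s\n", error.message);
           }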

   Bulk Operation Bypassing Document Validation
       This feature is only available when using MongoDB 3.2 and later.

        By default bulk operations are validated against the schema, if any is defined. In certain cases,
        however, it may be necessary to bypass the document validation: bulk5.c

          #include <assert.h>
          #include <bcon.h>
          #include <mongoc.h>
          #include <stdio.h>

          static void
          bulk5_fail (mongoc_collection_t *collection)
          {
             mongoc_bulk_operation_t *bulk;
             bson_error_t error;
             bson_t *doc;
             bson_t reply;
             char *str;
             bool ret;

             bulk = mongoc_collection_create_bulk_operation_with_opts (collection, NULL);

             /* Two inserts */
             doc = BCON_NEW ("_id", BCON_INT32 (31));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

             doc = BCON_NEW ("_id", BCON_INT32 (32));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

              /* The above documents do not comply with the schema validation rules
               * we created previously, so this will result in an error */
             ret = mongoc_bulk_operation_execute (bulk, &reply, &error);

             str = bson_as_canonical_extended_json (&reply, NULL);
             printf ("%s\n", str);
             bson_free (str);

             if (!ret) {
                printf ("Error: %s\n", error.message);
             }

             bson_destroy (&reply);
             mongoc_bulk_operation_destroy (bulk);
          }

          static void
          bulk5_success (mongoc_collection_t *collection)
          {
             mongoc_bulk_operation_t *bulk;
             bson_error_t error;
             bson_t *doc;
             bson_t reply;
             char *str;
             bool ret;

             bulk = mongoc_collection_create_bulk_operation_with_opts (collection, NULL);

             /* Allow this document to bypass document validation.
              * NOTE: When authentication is enabled, the authenticated user must have
              * either the "dbadmin" or "restore" roles to bypass document validation */
             mongoc_bulk_operation_set_bypass_document_validation (bulk, true);

             /* Two inserts */
             doc = BCON_NEW ("_id", BCON_INT32 (31));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

             doc = BCON_NEW ("_id", BCON_INT32 (32));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

             ret = mongoc_bulk_operation_execute (bulk, &reply, &error);

             str = bson_as_canonical_extended_json (&reply, NULL);
             printf ("%s\n", str);
             bson_free (str);

             if (!ret) {
                printf ("Error: %s\n", error.message);
             }

             bson_destroy (&reply);
             mongoc_bulk_operation_destroy (bulk);
          }

          int
          main (int argc, char *argv[])
          {
             bson_t *options;
             bson_error_t error;
             mongoc_client_t *client;
             mongoc_collection_t *collection;
             mongoc_database_t *database;

             mongoc_init ();

             client = mongoc_client_new ("mongodb://localhost/?appname=bulk5-example");
             mongoc_client_set_error_api (client, 2);
             database = mongoc_client_get_database (client, "testasdf");

             /* Create schema validator */
             options = BCON_NEW (
                "validator", "{", "number", "{", "$gte", BCON_INT32 (5), "}", "}");
             collection =
                mongoc_database_create_collection (database, "collname", options, &error);

             if (collection) {
                bulk5_fail (collection);
                bulk5_success (collection);
                mongoc_collection_destroy (collection);
             } else {
                fprintf (stderr, "Couldn't create collection: '%s'\n", error.message);
             }

              bson_destroy (options);
             mongoc_database_destroy (database);
             mongoc_client_destroy (client);

             mongoc_cleanup ();

             return 0;
          }
          { "nInserted" : 0,
            "nMatched" : 0,
            "nModified" : 0,
            "nRemoved" : 0,
            "nUpserted" : 0,
            "writeErrors" : [
              { "index" : 0,
                "code" : 121,
                "errmsg" : "Document failed validation" } ] }

          Error: Document failed validation

          { "nInserted" : 2,
            "nMatched" : 0,
            "nModified" : 0,
            "nRemoved" : 0,
            "nUpserted" : 0,
            "writeErrors" : [] }

       The bson_error_t domain is MONGOC_ERROR_COMMAND.

   Bulk Operation Write Concerns
       By  default  bulk  operations  are  executed  with  the write_concern of the collection they are executed
       against. A custom write concern can be passed to the  mongoc_collection_create_bulk_operation_with_opts()
       method.  Write  concern  errors  (e.g.  wtimeout)  will  be  reported after all operations are attempted,
        regardless of execution order: bulk4.c

          #include <assert.h>
          #include <bcon.h>
          #include <mongoc.h>
          #include <stdio.h>

          static void
          bulk4 (mongoc_collection_t *collection)
          {
             bson_t opts = BSON_INITIALIZER;
             mongoc_write_concern_t *wc;
             mongoc_bulk_operation_t *bulk;
             bson_error_t error;
             bson_t *doc;
             bson_t reply;
             char *str;
             bool ret;

             wc = mongoc_write_concern_new ();
             mongoc_write_concern_set_w (wc, 4);
             mongoc_write_concern_set_wtimeout (wc, 100); /* milliseconds */
             mongoc_write_concern_append (wc, &opts);

             bulk = mongoc_collection_create_bulk_operation_with_opts (collection, &opts);

             /* Two inserts */
             doc = BCON_NEW ("_id", BCON_INT32 (10));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

             doc = BCON_NEW ("_id", BCON_INT32 (11));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

             ret = mongoc_bulk_operation_execute (bulk, &reply, &error);

             str = bson_as_canonical_extended_json (&reply, NULL);
             printf ("%s\n", str);
             bson_free (str);

             if (!ret) {
                printf ("Error: %s\n", error.message);
             }

             bson_destroy (&reply);
             mongoc_bulk_operation_destroy (bulk);
             mongoc_write_concern_destroy (wc);
             bson_destroy (&opts);
          }

          int
          main (int argc, char *argv[])
          {
             mongoc_client_t *client;
             mongoc_collection_t *collection;

             mongoc_init ();

             client = mongoc_client_new ("mongodb://localhost/?appname=bulk4-example");
             mongoc_client_set_error_api (client, 2);
             collection = mongoc_client_get_collection (client, "test", "test");

             bulk4 (collection);

             mongoc_collection_destroy (collection);
             mongoc_client_destroy (client);

             mongoc_cleanup ();

             return 0;
          }

        Example reply document and error message:

          { "nInserted"    : 2,
            "nMatched"     : 0,
            "nModified"    : 0,
            "nRemoved"     : 0,
            "nUpserted"    : 0,
            "writeErrors"  : [],
            "writeConcernErrors" : [
              { "code"   : 64,
                "errmsg" : "waiting for replication timed out" }
          ] }

          Error: waiting for replication timed out

       The bson_error_t domain is MONGOC_ERROR_WRITE_CONCERN if there are write  concern  errors  and  no  write
       errors. Write errors indicate failed operations, so they take precedence over write concern errors, which
       mean merely that the write concern is not satisfied yet.
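
        A sketch of distinguishing the two cases after executing the bulk operation, using the variables from
        bulk4() above:

           ret = mongoc_bulk_operation_execute (bulk, &reply, &error);

           if (!ret) {
              if (error.domain == MONGOC_ERROR_WRITE_CONCERN) {
                 /* the writes were applied, but the write concern was not satisfied */
                 fprintf (stderr, "Write concern error: %s\n", error.message);
              } else {
                 /* at least one write operation failed */
                 fprintf (stderr, "Write error: %s\n", error.message);
              }
           }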

   Setting Collation Order
        This feature is only available when using MongoDB 3.4 and later: bulk-collation.c

          #include <bcon.h>
          #include <mongoc.h>
          #include <stdio.h>

          static void
          bulk_collation (mongoc_collection_t *collection)
          {
             mongoc_bulk_operation_t *bulk;
             bson_t *opts;
             bson_t *doc;
             bson_t *selector;
             bson_t *update;
             bson_error_t error;
             bson_t reply;
             char *str;
             uint32_t ret;

             /* insert {_id: "one"} and {_id: "One"} */
             bulk = mongoc_collection_create_bulk_operation_with_opts (
                collection, NULL);
             doc = BCON_NEW ("_id", BCON_UTF8 ("one"));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

             doc = BCON_NEW ("_id", BCON_UTF8 ("One"));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

             /* "One" normally sorts before "one"; make "one" come first */
             opts = BCON_NEW ("collation",
                              "{",
                              "locale",
                              BCON_UTF8 ("en_US"),
                              "caseFirst",
                              BCON_UTF8 ("lower"),
                              "}");

             /* set x=1 on the document with _id "One", which now sorts after "one" */
             update = BCON_NEW ("$set", "{", "x", BCON_INT64 (1), "}");
             selector = BCON_NEW ("_id", "{", "$gt", BCON_UTF8 ("one"), "}");
             mongoc_bulk_operation_update_one_with_opts (
                bulk, selector, update, opts, &error);

             ret = mongoc_bulk_operation_execute (bulk, &reply, &error);

             str = bson_as_canonical_extended_json (&reply, NULL);
             printf ("%s\n", str);
             bson_free (str);

             if (!ret) {
                printf ("Error: %s\n", error.message);
             }

             bson_destroy (&reply);
             bson_destroy (update);
             bson_destroy (selector);
             bson_destroy (opts);
             mongoc_bulk_operation_destroy (bulk);
          }

          int
          main (int argc, char *argv[])
          {
             mongoc_client_t *client;
             mongoc_collection_t *collection;

             mongoc_init ();

             client = mongoc_client_new ("mongodb://localhost/?appname=bulk-collation");
             mongoc_client_set_error_api (client, 2);
             collection = mongoc_client_get_collection (client, "db", "collection");
             bulk_collation (collection);

             mongoc_collection_destroy (collection);
             mongoc_client_destroy (client);

             mongoc_cleanup ();

             return 0;
          }
          { "nInserted" : 2,
             "nMatched" : 1,
             "nModified" : 1,
             "nRemoved" : 0,
             "nUpserted" : 0,
             "writeErrors" : [  ]
          }

   Unacknowledged Bulk Writes
       Set  "w"  to  zero  for  an unacknowledged write. The driver sends unacknowledged writes using the legacy
        opcodes OP_INSERT, OP_UPDATE, and OP_DELETE: bulk6.c

          #include <bcon.h>
          #include <mongoc.h>
          #include <stdio.h>

          static void
          bulk6 (mongoc_collection_t *collection)
          {
             bson_t opts = BSON_INITIALIZER;
             mongoc_write_concern_t *wc;
             mongoc_bulk_operation_t *bulk;
             bson_error_t error;
             bson_t *doc;
             bson_t *selector;
             bson_t reply;
             char *str;
             bool ret;

             wc = mongoc_write_concern_new ();
             mongoc_write_concern_set_w (wc, 0);
             mongoc_write_concern_append (wc, &opts);

             bulk = mongoc_collection_create_bulk_operation_with_opts (collection, &opts);

             doc = BCON_NEW ("_id", BCON_INT32 (10));
             mongoc_bulk_operation_insert (bulk, doc);
             bson_destroy (doc);

             selector = BCON_NEW ("_id", BCON_INT32 (11));
             mongoc_bulk_operation_remove_one (bulk, selector);
             bson_destroy (selector);

             ret = mongoc_bulk_operation_execute (bulk, &reply, &error);

             str = bson_as_canonical_extended_json (&reply, NULL);
             printf ("%s\n", str);
             bson_free (str);

             if (!ret) {
                printf ("Error: %s\n", error.message);
             }

             bson_destroy (&reply);
             mongoc_bulk_operation_destroy (bulk);
             mongoc_write_concern_destroy (wc);
             bson_destroy (&opts);
          }

          int
          main (int argc, char *argv[])
          {
             mongoc_client_t *client;
             mongoc_collection_t *collection;

             mongoc_init ();

             client = mongoc_client_new ("mongodb://localhost/?appname=bulk6-example");
             mongoc_client_set_error_api (client, 2);
             collection = mongoc_client_get_collection (client, "test", "test");

             bulk6 (collection);

             mongoc_collection_destroy (collection);
             mongoc_client_destroy (client);

             mongoc_cleanup ();

             return 0;
          }

        The reply document is empty:

          { }

   Further Reading
       See the Driver Bulk API Spec, which describes bulk write operations for all MongoDB drivers.

AGGREGATION FRAMEWORK EXAMPLES

       This document provides a number of practical examples that display the capabilities  of  the  aggregation
       framework.

        The Aggregations using the Zip Codes Data Set examples use a publicly available data set of all
        zipcodes and populations in the United States. These data are available at: zips.json.

   Requirements
       Let's check if everything is installed.

        Use the following command to load the zips.json data set into a mongod instance:

          $ mongoimport --drop -d test -c zipcodes zips.json

       Let's use the MongoDB shell to verify that everything was imported successfully.

          $ mongo test
          MongoDB shell version: 2.6.1
          connecting to: test
          > db.zipcodes.count()
          29467
          > db.zipcodes.findOne()
          {
                "_id" : "35004",
                "city" : "ACMAR",
                "loc" : [
                        -86.51557,
                        33.584132
                ],
                "pop" : 6055,
                "state" : "AL"
          }

   Aggregations using the Zip Codes Data Set
       Each document in this collection has the following form:

          {
            "_id" : "35004",
            "city" : "Acmar",
            "state" : "AL",
            "pop" : 6055,
            "loc" : [-86.51557, 33.584132]
          }

       In these documents:

       • The _id field holds the zipcode as a string.

       • The city field holds the city name.

       • The state field holds the two letter state abbreviation.

       • The pop field holds the population.

        • The loc field holds the location as a [longitude, latitude] array.

   States with Populations Over 10 Million
       To  get  all  states  with  a population greater than 10 million, use the following aggregation pipeline:
        aggregation1.c

          #include <mongoc.h>
          #include <stdio.h>

          static void
          print_pipeline (mongoc_collection_t *collection)
          {
             mongoc_cursor_t *cursor;
             bson_error_t error;
             const bson_t *doc;
             bson_t *pipeline;
             char *str;

             pipeline = BCON_NEW ("pipeline",
                                  "[",
                                  "{",
                                  "$group",
                                  "{",
                                  "_id",
                                  "$state",
                                  "total_pop",
                                  "{",
                                  "$sum",
                                  "$pop",
                                  "}",
                                  "}",
                                  "}",
                                  "{",
                                  "$match",
                                  "{",
                                  "total_pop",
                                  "{",
                                  "$gte",
                                  BCON_INT32 (10000000),
                                  "}",
                                  "}",
                                  "}",
                                  "]");

             cursor = mongoc_collection_aggregate (
                collection, MONGOC_QUERY_NONE, pipeline, NULL, NULL);

             while (mongoc_cursor_next (cursor, &doc)) {
                str = bson_as_canonical_extended_json (doc, NULL);
                printf ("%s\n", str);
                bson_free (str);
             }

             if (mongoc_cursor_error (cursor, &error)) {
                fprintf (stderr, "Cursor Failure: %s\n", error.message);
             }

             mongoc_cursor_destroy (cursor);
             bson_destroy (pipeline);
          }

          int
          main (int argc, char *argv[])
          {
             mongoc_client_t *client;
             mongoc_collection_t *collection;

             mongoc_init ();

             client = mongoc_client_new (
                "mongodb://localhost:27017?appname=aggregation-example");
             mongoc_client_set_error_api (client, 2);
             collection = mongoc_client_get_collection (client, "test", "zipcodes");

             print_pipeline (collection);

             mongoc_collection_destroy (collection);
             mongoc_client_destroy (client);

             mongoc_cleanup ();

             return 0;
          }
          { "_id" : "PA", "total_pop" : 11881643 }
          { "_id" : "OH", "total_pop" : 10847115 }
          { "_id" : "NY", "total_pop" : 17990455 }
          { "_id" : "FL", "total_pop" : 12937284 }
          { "_id" : "TX", "total_pop" : 16986510 }
          { "_id" : "IL", "total_pop" : 11430472 }
          { "_id" : "CA", "total_pop" : 29760021 }

       The above aggregation pipeline is built from two pipeline operators: $group and $match.

       The $group pipeline operator requires an _id field, which specifies the grouping key; the remaining
       fields specify how to generate the composite values and must use one of the group accumulators:
       $addToSet, $first, $last, $max, $min, $avg, $push or $sum. The syntax of the $match pipeline operator
       is the same as the read operation query syntax.

       The $group stage reads all documents and creates a separate output document for each state, for example:

          { "_id" : "WA", "total_pop" : 4866692 }

       The  total_pop field uses the $sum aggregation function to sum the values of all pop fields in the source
       documents.

       Documents created by $group are piped to the $match pipeline operator, which returns only the documents
       whose total_pop value is greater than or equal to 10 million.
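
       If the BCON syntax is hard to read, the same pipeline document can also be built from its JSON form
       with bson_new_from_json. The following is an equivalent sketch, not part of the original example, with
       error handling abbreviated:

          bson_error_t error;
          bson_t *pipeline;
          const char *json = "{\"pipeline\" : ["
                             "  {\"$group\" : {\"_id\" : \"$state\","
                             "                 \"total_pop\" : {\"$sum\" : \"$pop\"}}},"
                             "  {\"$match\" : {\"total_pop\" : {\"$gte\" : 10000000}}}"
                             "]}";

          /* Parse the JSON into a bson_t; returns NULL and fills "error" on failure */
          pipeline = bson_new_from_json ((const uint8_t *) json, -1, &error);
          if (!pipeline) {
             fprintf (stderr, "Failed to parse pipeline: %s\n", error.message);
          }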

   Average City Population by State
       To  get  the  first  three  states  with  the  greatest  average  population  per city, use the following
       aggregation:

          pipeline = BCON_NEW ("pipeline", "[",
             "{", "$group", "{", "_id", "{", "state", "$state", "city", "$city", "}", "pop", "{", "$sum", "$pop", "}", "}", "}",
             "{", "$group", "{", "_id", "$_id.state", "avg_city_pop", "{", "$avg", "$pop", "}", "}", "}",
             "{", "$sort", "{", "avg_city_pop", BCON_INT32 (-1), "}", "}",
             "{", "$limit", BCON_INT32 (3) "}",
          "]");

       This aggregation pipeline produces:

          { "_id" : "DC", "avg_city_pop" : 303450.0 }
          { "_id" : "FL", "avg_city_pop" : 27942.29805615551 }
          { "_id" : "CA", "avg_city_pop" : 27735.341099720412 }

       The above aggregation pipeline is built from three pipeline operators: $group, $sort and $limit.

       The first $group operator creates the following documents:

          { "_id" : { "state" : "WY", "city" : "Smoot" }, "pop" : 414 }

       Note that the $group operator cannot use nested documents except in the _id field.

       The second $group uses these documents to create the following documents:

          { "_id" : "FL", "avg_city_pop" : 27942.29805615551 }

       These documents are sorted by the avg_city_pop field in descending order. Finally,  the  $limit  pipeline
       operator returns the first 3 documents from the sorted set.
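
       The snippet above only builds the pipeline document. To run it, pass it to mongoc_collection_aggregate
       exactly as in print_pipeline earlier. The following sketch assumes "collection" is a handle to the
       zipcodes collection and "pipeline" is the document built above:

          mongoc_cursor_t *cursor;
          const bson_t *doc;
          char *str;

          cursor = mongoc_collection_aggregate (
             collection, MONGOC_QUERY_NONE, pipeline, NULL, NULL);

          /* Print each result document as extended JSON */
          while (mongoc_cursor_next (cursor, &doc)) {
             str = bson_as_canonical_extended_json (doc, NULL);
             printf ("%s\n", str);
             bson_free (str);
          }

          mongoc_cursor_destroy (cursor);
          bson_destroy (pipeline);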

DISTINCT AND MAPREDUCE

       This  document  provides  some  practical,  simple,  examples  to  demonstrate the distinct and mapReduce
       commands.

   Setup
       First we'll write some code to insert sample data: doc-common-insert.c

          /* Don't try to compile this file on its own. It's meant to be #included
             by example code */

          /* Insert some sample data */
          bool
          insert_data (mongoc_collection_t *collection)
          {
             mongoc_bulk_operation_t *bulk;
             enum N { ndocs = 4 };
             bson_t *docs[ndocs];
             bson_error_t error;
             int i = 0;
             bool ret;

             bulk = mongoc_collection_create_bulk_operation_with_opts (collection, NULL);

             docs[0] = BCON_NEW ("x", BCON_DOUBLE (1.0), "tags", "[", "dog", "cat", "]");
             docs[1] = BCON_NEW ("x", BCON_DOUBLE (2.0), "tags", "[", "cat", "]");
             docs[2] = BCON_NEW (
                "x", BCON_DOUBLE (2.0), "tags", "[", "mouse", "cat", "dog", "]");
             docs[3] = BCON_NEW ("x", BCON_DOUBLE (3.0), "tags", "[", "]");

             for (i = 0; i < ndocs; i++) {
                mongoc_bulk_operation_insert (bulk, docs[i]);
                bson_destroy (docs[i]);
                docs[i] = NULL;
             }

             ret = mongoc_bulk_operation_execute (bulk, NULL, &error);

             if (!ret) {
                fprintf (stderr, "Error inserting data: %s\n", error.message);
             }

             mongoc_bulk_operation_destroy (bulk);
             return ret;
          }

          /* A helper which we'll use a lot later on */
          void
          print_res (const bson_t *reply)
          {
             BSON_ASSERT (reply);
             char *str = bson_as_canonical_extended_json (reply, NULL);
             printf ("%s\n", str);
             bson_free (str);
          }

   distinct command
       This is how to use the distinct command to get the distinct values of x which are greater than 1:
       distinct.c

          bool
          distinct (mongoc_database_t *database)
          {
             bson_t *command;
             bson_t reply;
             bson_error_t error;
             bool res;
             bson_iter_t iter;
             bson_iter_t array_iter;
             double val;

             command = BCON_NEW ("distinct",
                                 BCON_UTF8 (COLLECTION_NAME),
                                 "key",
                                 BCON_UTF8 ("x"),
                                 "query",
                                 "{",
                                 "x",
                                 "{",
                                 "$gt",
                                 BCON_DOUBLE (1.0),
                                 "}",
                                 "}");
             res =
                mongoc_database_command_simple (database, command, NULL, &reply, &error);
             if (!res) {
                fprintf (stderr, "Error with distinct: %s\n", error.message);
                goto cleanup;
             }

             /* Do something with reply (in this case iterate through the values) */
             if (!(bson_iter_init_find (&iter, &reply, "values") &&
                   BSON_ITER_HOLDS_ARRAY (&iter) &&
                   bson_iter_recurse (&iter, &array_iter))) {
                fprintf (stderr, "Couldn't extract \"values\" field from response\n");
                goto cleanup;
             }

             while (bson_iter_next (&array_iter)) {
                if (BSON_ITER_HOLDS_DOUBLE (&array_iter)) {
                   val = bson_iter_double (&array_iter);
                   printf ("Next double: %f\n", val);
                }
             }

          cleanup:
             /* cleanup */
             bson_destroy (command);
             bson_destroy (&reply);
             return res;
          }

   mapReduce - basic example
       A  simple  example  using  the  map reduce framework. It simply adds up the number of occurrences of each
       "tag".

       First define the map and reduce functions: constants.c

          const char *const COLLECTION_NAME = "things";

          /* Our map function just emits a single (key, 1) pair for each tag
             in the array: */
          const char *const MAPPER = "function () {"
                                     "this.tags.forEach(function(z) {"
                                     "emit(z, 1);"
                                     "});"
                                     "}";

          /* The reduce function sums over all of the emitted values for a
             given key: */
          const char *const REDUCER = "function (key, values) {"
                                      "var total = 0;"
                                      "for (var i = 0; i < values.length; i++) {"
                                      "total += values[i];"
                                      "}"
                                      "return total;"
                                      "}";
          /* Note: we can't just return values.length, as the reduce function
             might be called iteratively on the results of other reduce
             steps. */

       Run the mapReduce command: map-reduce-basic.c

          bool
          map_reduce_basic (mongoc_database_t *database)
          {
             bson_t reply;
             bson_t *command;
             bool res;
             bson_error_t error;
             mongoc_cursor_t *cursor;
             const bson_t *doc;

             bool map_reduce_done = false;
             bool query_done = false;

             const char *out_collection_name = "outCollection";
             mongoc_collection_t *out_collection;

             /* Empty find query */
             bson_t find_query = BSON_INITIALIZER;

             /* Construct the mapReduce command */

             /* Other arguments can also be specified here, like "query" or
                "limit" and so on */
             command = BCON_NEW ("mapReduce",
                                 BCON_UTF8 (COLLECTION_NAME),
                                 "map",
                                 BCON_CODE (MAPPER),
                                 "reduce",
                                 BCON_CODE (REDUCER),
                                 "out",
                                 BCON_UTF8 (out_collection_name));
             res =
                mongoc_database_command_simple (database, command, NULL, &reply, &error);
             map_reduce_done = true;

             if (!res) {
                fprintf (stderr, "MapReduce failed: %s\n", error.message);
                goto cleanup;
             }

             /* Do something with the reply (it doesn't contain the mapReduce results) */
             print_res (&reply);

             /* Now we'll query outCollection to see what the results are */
             out_collection =
                mongoc_database_get_collection (database, out_collection_name);
             cursor = mongoc_collection_find_with_opts (
                out_collection, &find_query, NULL, NULL);
             query_done = true;

             /* Do something with the results */
             while (mongoc_cursor_next (cursor, &doc)) {
                print_res (doc);
             }

             if (mongoc_cursor_error (cursor, &error)) {
                fprintf (stderr, "ERROR: %s\n", error.message);
                res = false;
                goto cleanup;
             }

          cleanup:
             /* cleanup */
             if (query_done) {
                mongoc_cursor_destroy (cursor);
                mongoc_collection_destroy (out_collection);
             }

             if (map_reduce_done) {
                bson_destroy (&reply);
                bson_destroy (command);
             }

             return res;
          }

   mapReduce - more complicated example
       You must have a replica set running for this.

       In this example we contact a secondary in the replica set and do an "inline" map reduce, so the results
       are returned immediately: map-reduce-advanced.c

          bool
          map_reduce_advanced (mongoc_database_t *database)
          {
             bson_t *command;
             bson_error_t error;
             bool res = true;
             mongoc_cursor_t *cursor;
             mongoc_read_prefs_t *read_pref;
             const bson_t *doc;

             /* Construct the mapReduce command */
             /* Other arguments can also be specified here, like "query" or "limit"
                and so on */

             /* Read the results inline from a secondary replica */
             command = BCON_NEW ("mapReduce",
                                 BCON_UTF8 (COLLECTION_NAME),
                                 "map",
                                 BCON_CODE (MAPPER),
                                 "reduce",
                                 BCON_CODE (REDUCER),
                                 "out",
                                 "{",
                                 "inline",
                                 "1",
                                 "}");

             read_pref = mongoc_read_prefs_new (MONGOC_READ_SECONDARY);
             cursor = mongoc_database_command (
                database, MONGOC_QUERY_NONE, 0, 0, 0, command, NULL, read_pref);

             /* Do something with the results */
             while (mongoc_cursor_next (cursor, &doc)) {
                print_res (doc);
             }

             if (mongoc_cursor_error (cursor, &error)) {
                fprintf (stderr, "ERROR: %s\n", error.message);
                res = false;
             }

             mongoc_cursor_destroy (cursor);
             mongoc_read_prefs_destroy (read_pref);
             bson_destroy (command);

             return res;
          }

   Running the Examples
       Here's how to run the example code: basic-aggregation.c

          /*
           * Copyright 2016 MongoDB, Inc.
           *
           * Licensed under the Apache License, Version 2.0 (the "License");
           * you may not use this file except in compliance with the License.
           * You may obtain a copy of the License at
           *
           *   http://www.apache.org/licenses/LICENSE-2.0
           *
           * Unless required by applicable law or agreed to in writing, software
           * distributed under the License is distributed on an "AS IS" BASIS,
           * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
           * See the License for the specific language governing permissions and
           * limitations under the License.
           */

          #include <mongoc.h>
          #include <stdio.h>

          #include "constants.c"

          #include "../doc-common-insert.c"
          #include "distinct.c"
          #include "map-reduce-basic.c"
          #include "map-reduce-advanced.c"

          int
          main (int argc, char *argv[])
          {
             mongoc_database_t *database = NULL;
             mongoc_client_t *client = NULL;
             mongoc_collection_t *collection = NULL;
             char *host_and_port = NULL;
             int res = 0;

             if (argc != 2) {
                fprintf (stderr, "usage: %s CONNECTION-STRING\n", argv[0]);
                fprintf (stderr,
                         "the connection string can be of the following forms:\n");
                fprintf (stderr, "localhost\t\t\t\tlocal machine\n");
                fprintf (stderr, "localhost:27018\t\t\t\tlocal machine on port 27018\n");
                fprintf (stderr,
                         "mongodb://user:pass@localhost:27017\t"
                         "local machine on port 27017, and authenticate with username "
                         "user and password pass\n");
                return 1;
             }

             mongoc_init ();

             if (strncmp (argv[1], "mongodb://", 10) == 0) {
                host_and_port = bson_strdup (argv[1]);
             } else {
                host_and_port = bson_strdup_printf ("mongodb://%s", argv[1]);
             }

             client = mongoc_client_new (host_and_port);

             if (!client) {
                fprintf (stderr, "Invalid hostname or port: %s\n", host_and_port);
                res = 2;
                goto cleanup;
             }

             mongoc_client_set_error_api (client, 2);
             database = mongoc_client_get_database (client, "test");
             collection = mongoc_database_get_collection (database, COLLECTION_NAME);

             printf ("Inserting data\n");
             if (!insert_data (collection)) {
                res = 3;
                goto cleanup;
             }

             printf ("distinct\n");
             if (!distinct (database)) {
                res = 4;
                goto cleanup;
             }

             printf ("map reduce\n");
             if (!map_reduce_basic (database)) {
                res = 5;
                goto cleanup;
             }

             printf ("more complicated map reduce\n");
             if (!map_reduce_advanced (database)) {
                res = 6;
                goto cleanup;
             }

          cleanup:
             if (collection) {
                mongoc_collection_destroy (collection);
             }

             if (database) {
                mongoc_database_destroy (database);
             }

             if (client) {
                mongoc_client_destroy (client);
             }

             if (host_and_port) {
                bson_free (host_and_port);
             }

             mongoc_cleanup ();
             return res;
          }
       If you want to try the advanced map reduce example, you must have a replica set running (instructions
       for how to do this can be found in the MongoDB manual). Otherwise, just start a single instance of
       MongoDB:

          $ mongod

       Now compile and run the example program:

          $ cd examples/basic_aggregation/
          $ gcc -Wall -o agg-example basic-aggregation.c $(pkg-config --cflags --libs libmongoc-1.0)
          $ ./agg-example localhost

          Inserting data
          distinct
          Next double: 2.000000
          Next double: 3.000000
          map reduce
          { "result" : "outCollection", "timeMillis" : 155, "counts" : { "input" : 84, "emit" : 126, "reduce" : 3, "output" : 3 }, "ok" : 1 }
          { "_id" : "cat", "value" : 63 }
          { "_id" : "dog", "value" : 42 }
          { "_id" : "mouse", "value" : 21 }
          more complicated map reduce
          { "results" : [ { "_id" : "cat", "value" : 63 }, { "_id" : "dog", "value" : 42 }, { "_id" : "mouse", "value" : 21 } ], "timeMillis" : 14, "counts" : { "input" : 84, "emit" : 126, "reduce" : 3, "output" : 3 }, "ok" : 1 }

USING LIBMONGOC IN A MICROSOFT VISUAL STUDIO PROJECT

       Download and install libmongoc on your system, then open Visual Studio, select "File→New→Project...", and
       create a new Win32 Console Application.  [image]

       Remember to switch the platform from 32-bit to 64-bit: [image]

       Right-click on your console application in the Solution Explorer and select "Properties". Choose to  edit
       properties  for  "All  Configurations",  expand  the  "C/C++"  options  and  choose "General". Add to the
       "Additional Include Directories" these paths:

          C:\mongo-c-driver\include\libbson-1.0
          C:\mongo-c-driver\include\libmongoc-1.0
       [image]

       (If you chose a different CMAKE_INSTALL_PREFIX when you ran CMake, your include paths will be different.)

       Also in the Properties dialog, expand the "Linker" options and choose "Input", and add to the "Additional
       Dependencies" these libraries:

          C:\mongo-c-driver\lib\bson-1.0.lib
          C:\mongo-c-driver\lib\mongoc-1.0.lib
       [image]

       Adding these libraries as dependencies provides linker symbols to build your application, but to actually
       run it, libbson's and libmongoc's DLLs must be  in  your  executable  path.  Select  "Debugging"  in  the
       Properties dialog, and set the "Environment" option to:

          PATH=c:/mongo-c-driver/bin
       [image]

       Finally, include "mongoc.h" in your project's "stdafx.h":

          #include <mongoc.h>
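
       To verify the project is configured correctly, you can build and run a minimal program such as the
       following sketch (not part of the original guide). It only initializes the driver and prints the
       library version with mongoc_get_version:

          #include "stdafx.h" /* includes <mongoc.h> as described above */
          #include <stdio.h>

          int
          main (void)
          {
             mongoc_init ();

             /* Prints the driver version, e.g. "1.9.2", if the include and
                library paths are set up correctly */
             printf ("libmongoc version: %s\n", mongoc_get_version ());

             mongoc_cleanup ();
             return 0;
          }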

   Static linking
       Following  the  instructions  above,  you  have  dynamically  linked  your application to the libbson and
       libmongoc DLLs. This is usually the right choice. If you want to link  statically  instead,  update  your
       "Additional  Dependencies" list by removing bson-1.0.lib and mongoc-1.0.lib and replacing them with these
       libraries:

          C:\mongo-c-driver\lib\bson-static-1.0.lib
          C:\mongo-c-driver\lib\mongoc-static-1.0.lib
          ws2_32.lib
          Secur32.lib
          Crypt32.lib
          BCrypt.lib
       [image]

       (To explain the purpose  of  each  library:  bson-static-1.0.lib  and  mongoc-static-1.0.lib  are  static
       archives  of  the  driver  code.  The socket library ws2_32 is required by libbson, which uses the socket
       routine gethostname to help guarantee ObjectId uniqueness. The BCrypt library is used  by  libmongoc  for
       SSL  connections  to  MongoDB, and Secur32 and Crypt32 are required for enterprise authentication methods
       like Kerberos.)

       Finally, define two preprocessor symbols before including mongoc.h in your stdafx.h:

          #define BSON_STATIC
          #define MONGOC_STATIC
          #include <mongoc.h>

       Making these changes to your  project  is  only  required  for  static  linking;  for  most  people,  the
       dynamic-linking instructions above are preferred.

   Next Steps
       Now  you  can  build  and  debug applications in Visual Studio that use libbson and libmongoc. Proceed to
       making-a-connection in the tutorial to learn how to connect to MongoDB and perform operations.

CREATING INDEXES

       To   create   indexes   on   a   MongoDB   collection,   execute   the   createIndexes    command    with
       mongoc_database_write_command_with_opts.  See  the MongoDB Manual entry for the createIndexes command for
       details.

   Example
       example-create-indexes.c

          /* gcc example-create-indexes.c -o example-create-indexes $(pkg-config --cflags
           * --libs libmongoc-1.0) */

          /* ./example-create-indexes [CONNECTION_STRING [COLLECTION_NAME]] */

          #include <mongoc.h>
          #include <stdio.h>
          #include <stdlib.h>

          int
          main (int argc, char *argv[])
          {
             mongoc_client_t *client;
             mongoc_database_t *db;
             const char *uristr = "mongodb://127.0.0.1/?appname=create-indexes-example";
             const char *collection_name = "test";
             bson_t keys;
             char *index_name;
             bson_t *create_indexes;
             bson_t reply;
             char *reply_str;
             bson_error_t error;
             bool r;

             mongoc_init ();

             if (argc > 1) {
                uristr = argv[1];
             }

             if (argc > 2) {
                collection_name = argv[2];
             }

             client = mongoc_client_new (uristr);

             if (!client) {
                fprintf (stderr, "Failed to parse URI.\n");
                return EXIT_FAILURE;
             }

             mongoc_client_set_error_api (client, 2);
             db = mongoc_client_get_database (client, "test");

             /* ascending index on field "x" */
             bson_init (&keys);
             BSON_APPEND_INT32 (&keys, "x", 1);
             index_name = mongoc_collection_keys_to_index_string (&keys);
             create_indexes = BCON_NEW ("createIndexes",
                                        BCON_UTF8 (collection_name),
                                        "indexes",
                                        "[",
                                        "{",
                                        "key",
                                        BCON_DOCUMENT (&keys),
                                        "name",
                                        BCON_UTF8 (index_name),
                                        "}",
                                        "]");

             r = mongoc_database_write_command_with_opts (
                db, create_indexes, NULL /* opts */, &reply, &error);

             reply_str = bson_as_json (&reply, NULL);
             printf ("%s\n", reply_str);

             if (!r) {
                fprintf (stderr, "Error in createIndexes: %s\n", error.message);
             }

             bson_free (index_name);
             bson_free (reply_str);
             bson_destroy (&reply);
             bson_destroy (create_indexes);
             mongoc_database_destroy (db);
             mongoc_client_destroy (client);

             mongoc_cleanup ();

             return r ? EXIT_SUCCESS : EXIT_FAILURE;
          }

AUTHOR

       MongoDB, Inc

COPYRIGHT

       2018, MongoDB, Inc

1.9.3-dev                                         Feb 05, 2018                                  MONGOC_GUIDES(3)