Golang mgo Fetching an item by label in a nested bson array - mongodb

I'm having trouble finding objects in a nested array. I need to find home/away values inside the league array, where each league element has its own events array.
Example JSON:
{
  "sportId": 4,
  "last": 266178326,
  "league": [
    {
      "id": 423,
      "name": "Germany - Bundesliga",
      "events": [
        {
          "id": 1125584543,
          "starts": "2020-06-07T17:00:00Z",
          "home": "SC Rasta Vechta",
          "away": "EnBW Ludwigsburg",
          "rotNum": "2601",
          "liveStatus": 0,
          "status": "I",
          "parlayRestriction": 0,
          "altTeaser": false,
          "resultingUnit": "Regular"
        },
        {
          "id": 1125585441,
          "starts": "2020-06-10T18:30:00Z",
          "home": "Ratiopharm Ulm",
          "away": "Crailsheim Merlins",
          "rotNum": "2617",
          "liveStatus": 0,
          "status": "I",
          "parlayRestriction": 0,
          "altTeaser": false,
          "resultingUnit": "Regular"
        }
      ]
    },
    {
      "id": 268,
      "name": "ABA - Adriatic League",
      "events": [
        {
          "id": 1122419811,
          "starts": "2020-05-07T19:34:00Z",
          "home": "Test 1(Do Not Wager)",
          "away": "Test 2(Do Not Wager)",
          "rotNum": "999998",
          "liveStatus": 0,
          "status": "I",
          "parlayRestriction": 1,
          "altTeaser": false,
          "resultingUnit": "Regular"
        }
      ]
    },
    {
      "id": 487,
      "name": "NBA",
      "events": [
        {
          "id": 1120192519,
          "starts": "2020-05-01T17:00:00Z",
          "home": "Test Team B",
          "away": "Test Team A",
          "rotNum": "123",
          "liveStatus": 0,
          "status": "O",
          "parlayRestriction": 0,
          "altTeaser": false,
          "resultingUnit": "Regular"
        }
      ]
    }
  ]
}
For example, to find the league by name (e.g. "Germany - Bundesliga") I solved it like this:
// retrieve league by searching in the fixture collection
func FindLeagueFixture(name string) (pinnacle.Fixtures, pinnacle.League, error) {
var fixtures []pinnacle.Fixtures
err := db.C(FIXTURES).Find(
bson.M{"league.name": bson.RegEx{
Pattern: name,
Options: "i",
}}).All(&fixtures)
if err != nil {
return pinnacle.Fixtures{}, pinnacle.League{}, err
}
But now I have to find event home/away names within the league events, for example "SC Rasta Vechta". What's the best way to handle this?
I've tried something like the following (no regex yet, since I'm already having trouble; I'm only counting matches for now, not doing the full unmarshaling):
// retrieve sport team by searching in the fixture collection
func FindHomeOrAwayFixture(name string) (pinnacle.Fixtures, pinnacle.League, error) {
    var fixtures []pinnacle.Fixtures
    _ = fixtures // not used yet; only counting matches for now
    // find home
    c, err := db.C(FIXTURES).Find(
        bson.M{"league": bson.M{"$elemMatch": bson.M{"home": name}}}).Count()
    if err != nil {
        return pinnacle.Fixtures{}, pinnacle.League{}, err
    }
    fmt.Println(c)
    return pinnacle.Fixtures{}, pinnacle.League{}, nil // placeholder return so this compiles
}
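For reference, a sketch of one way this nested lookup could be expressed with mgo, reusing the db, FIXTURES, and pinnacle names from the question (untested against the real schema; the query shape itself is a suggestion, not the confirmed answer). Dot notation reaches into league.events, and $elemMatch with $or matches a single event whose home or away name matches case-insensitively:

// Sketch: match a team name inside the nested league.events array.
func FindHomeOrAwayFixture(name string) ([]pinnacle.Fixtures, error) {
    var fixtures []pinnacle.Fixtures
    re := bson.RegEx{Pattern: name, Options: "i"}
    err := db.C(FIXTURES).Find(bson.M{
        "league.events": bson.M{
            "$elemMatch": bson.M{
                "$or": []bson.M{
                    {"home": re},
                    {"away": re},
                },
            },
        },
    }).All(&fixtures)
    if err != nil {
        return nil, err
    }
    return fixtures, nil
}

Note that this returns whole fixture documents (containing every league), just like the league-name query above; narrowing the result to the specific league or event would still need to happen in Go or via an aggregation pipeline.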

Related

MongoDB change stream returns empty fullDocument on insert

MongoDB 4.4 and the corresponding Go driver are used. The database's replica set runs locally at localhost:27017 and localhost:27020. I've also tried Atlas's sandbox cluster, which gave the same results.
According to MongoDB's documentation, when an insert is handled the fullDocument field of the event data is supposed to contain the newly inserted document, which for some reason is not the case for me. The ns field, which should hold the database and collection names, and documentKey, which should hold the affected document's _id, are empty as well. The operationType field contains the correct operation type. In another test it appeared that update operations don't show up in the change stream at all.
It used to work as it should, but now it doesn't. Why does this happen, and what am I doing wrong?
Code
// ds is the connection to discord, required for doing stuff inside handlers
func iterateChangeStream(stream *mongo.ChangeStream, ds *discordgo.Session, ctx context.Context, cancel context.CancelFunc) {
    defer stream.Close(ctx)
    defer cancel() // for graceful crashing
    for stream.Next(ctx) {
        var event bson.M
        err := stream.Decode(&event)
        if err != nil {
            log.Print(errors.Errorf("Failed to decode event: %w\n", err))
            return
        }
        rv := reflect.ValueOf(event["operationType"]) // getting operation type
        opType, ok := rv.Interface().(string)
        if !ok {
            log.Print("String expected in operationType\n")
            return
        }
        // event["fullDocument"] will be empty even when handling insertion
        // models.Player is a struct representing a document of the collection
        // I'm watching over
        doc, ok := event["fullDocument"].(models.Player)
        if !ok {
            log.Print("Failed to convert document into Player type")
            return
        }
        handlerCtx := context.WithValue(ctx, "doc", doc)
        // handlerToEvent maps operationType to respective handler
        go handlerToEvent[opType](ds, handlerCtx, cancel)
    }
}
func WatchEvents(ds *discordgo.Session, ctx context.Context, cancel context.CancelFunc) {
    pipeline := mongo.Pipeline{
        bson.D{{
            "$match",
            bson.D{{
                "$or", bson.A{
                    bson.D{{"operationType", "insert"}}, // !!!
                    bson.D{{"operationType", "delete"}},
                    bson.D{{"operationType", "invalidate"}},
                },
            }},
        }},
    }
    // mongo instance is initialized on program startup and stored in a global variable
    opts := options.ChangeStream().SetFullDocument(options.UpdateLookup)
    stream, err := db.Instance.Collection.Watch(ctx, pipeline, opts)
    if err != nil {
        log.Panic(err)
    }
    defer stream.Close(ctx)
    iterateChangeStream(stream, ds, ctx, cancel)
}
My issue might be related to this, except that it consistently occurs on insertion instead of only occurring sometimes on updates.
If you know how to enable the change stream optimization feature flag mentioned in the link above, let me know.
Feel free to ask for more clarifications.
The question was answered here.
TLDR
You need to create the following struct to unmarshal the event into:
type CSEvent struct {
    OperationType string        `bson:"operationType"`
    FullDocument  models.Player `bson:"fullDocument"`
}

var event CSEvent
err := stream.Decode(&event)
event will contain a copy of the inserted document.
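For context, a sketch of how the question's loop might decode into this typed struct instead of bson.M plus reflection (untested; it reuses the handlerToEvent map, ds session, and cancel function from the question's code):

for stream.Next(ctx) {
    var event CSEvent
    if err := stream.Decode(&event); err != nil {
        log.Printf("Failed to decode event: %v", err)
        return
    }
    if event.OperationType == "insert" {
        // event.FullDocument is the inserted models.Player
        handlerCtx := context.WithValue(ctx, "doc", event.FullDocument)
        go handlerToEvent[event.OperationType](ds, handlerCtx, cancel)
    }
}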
From the sample events shown at this link, we can see that fullDocument exists only when operationType is 'insert'.
{
  _id: { _data: '825DE67A42000000072B022C0100296E5A10046BBC1C6A9CBB4B6E9CA9447925E693EF46645F696400645DE67A42113EA7DE6472E7680004' },
  operationType: 'insert',
  clusterTime: Timestamp { _bsontype: 'Timestamp', low_: 7, high_: 1575385666 },
  fullDocument: {
    _id: 5de67a42113ea7de6472e768,
    name: 'Sydney Harbour Home',
    bedrooms: 4,
    bathrooms: 2.5,
    address: { market: 'Sydney', country: 'Australia' } },
  ns: { db: 'sample_airbnb', coll: 'listingsAndReviews' },
  documentKey: { _id: 5de67a42113ea7de6472e768 }
}
{
  _id: { _data: '825DE67A42000000082B022C0100296E5A10046BBC1C6A9CBB4B6E9CA9447925E693EF46645F696400645DE67A42113EA7DE6472E7680004' },
  operationType: 'delete',
  clusterTime: Timestamp { _bsontype: 'Timestamp', low_: 8, high_: 1575385666 },
  ns: { db: 'sample_airbnb', coll: 'listingsAndReviews' },
  documentKey: { _id: 5de67a42113ea7de6472e768 }
}
So I recommend that you either limit your $match to insert, or add an if statement on operationType:
if opType == "insert" {
    doc, ok := event["fullDocument"].(models.Player)
    if !ok {
        log.Print("Failed to convert document into Player type")
        return
    }
    handlerCtx := context.WithValue(ctx, "doc", doc)
    // handlerToEvent maps operationType to respective handler
    go handlerToEvent[opType](ds, handlerCtx, cancel)
    return
}
Or make sure you fetch the document yourself, using the id from event["documentKey"]["_id"], e.g. playersCollection.findOne({_id: event["documentKey"]["_id"]}).
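A sketch of that fallback lookup with the Go driver, extending the CSEvent struct from the TLDR with a documentKey field (untested; db.Instance.Collection, models.Player, and ctx are taken from the question's code, and primitive comes from go.mongodb.org/mongo-driver/bson/primitive):

type CSEvent struct {
    OperationType string        `bson:"operationType"`
    FullDocument  models.Player `bson:"fullDocument"`
    DocumentKey   struct {
        ID primitive.ObjectID `bson:"_id"`
    } `bson:"documentKey"`
}

// ...inside the stream loop, after stream.Decode(&event), fetch the affected
// document explicitly (useful for events where fullDocument is missing):
var player models.Player
err := db.Instance.Collection.FindOne(ctx, bson.M{"_id": event.DocumentKey.ID}).Decode(&player)
if err != nil {
    log.Printf("lookup by documentKey failed: %v", err)
}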

How can I compare two bson.M data sets using Golang

I have the following code which retrieves two data sets from two different collections in a MongoDB database
opts := options.Find()
opts.SetProjection(bson.M{
    "productId": 1,
    "_id":       0,
})

cursor, err := currentProductsCollection.Find(ctx, bson.M{}, opts)
var oldProducts []bson.M
err = cursor.All(ctx, &oldProducts)

cursor, err = newProductsCollection.Find(ctx, bson.M{}, opts)
var newProducts []bson.M
err = cursor.All(ctx, &newProducts)
I want to compare oldProducts with newProducts to find out which new productIds have appeared and which old productIds have disappeared.
Both variables load fine and I can inspect them in the debugger, but I can't find a way to compare them. I had hoped to range over each in turn, doing a lookup on the other and collecting a couple of slices of missing values, but I haven't found a way to do it.
I've been going round the houses with this for the last three hours, so if anyone has any suggestions I would more than welcome them.
I am using the vanilla go.mongodb.org/mongo-driver drivers, not mgo
Create a map for both the old and the new products, keyed by product id:
oldProductsMap := make(map[interface{}]bson.M)
for _, oldp := range oldProducts {
    oldProductsMap[oldp["productId"]] = oldp
}

newProductsMap := make(map[interface{}]bson.M)
for _, newp := range newProducts {
    newProductsMap[newp["productId"]] = newp
}
Then, for disappeared products, check whether each old product is in newProductsMap. If it isn't, that product disappeared:
var disProducts []bson.M
for _, oldp := range oldProducts {
    if _, ok := newProductsMap[oldp["productId"]]; !ok {
        disProducts = append(disProducts, oldp)
    }
}
For newly appeared products, check whether each new product is in oldProductsMap. If it isn't, that product newly appeared:
var appProducts []bson.M
for _, newp := range newProducts {
    if _, ok := oldProductsMap[newp["productId"]]; !ok {
        appProducts = append(appProducts, newp)
    }
}
Note: you can also build these slices while constructing the map for the new products.
If you are sure all entries have the productId field:
func exists(in []bson.M, id interface{}) bool {
    for _, p := range in {
        if id == p["productId"] {
            return true
        }
    }
    return false
}
Then use this to scan both lists:
for _, oldp := range oldProducts {
    if !exists(newProducts, oldp["productId"]) {
        // Removed
    }
}

for _, newp := range newProducts {
    if !exists(oldProducts, newp["productId"]) {
        // Added
    }
}
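For reference, a minimal self-contained version of the map-based diff with made-up product IDs. Since bson.M is just map[string]interface{}, plain maps stand in for it here so the sketch runs without the Mongo driver:

package main

import "fmt"

func main() {
    oldProducts := []map[string]interface{}{{"productId": 1}, {"productId": 2}}
    newProducts := []map[string]interface{}{{"productId": 2}, {"productId": 3}}

    // Index both sets by productId.
    oldByID := make(map[interface{}]bool)
    for _, p := range oldProducts {
        oldByID[p["productId"]] = true
    }
    newByID := make(map[interface{}]bool)
    for _, p := range newProducts {
        newByID[p["productId"]] = true
    }

    // Old products missing from the new set have disappeared.
    for _, p := range oldProducts {
        if !newByID[p["productId"]] {
            fmt.Println("disappeared:", p["productId"]) // prints: disappeared: 1
        }
    }
    // New products missing from the old set have appeared.
    for _, p := range newProducts {
        if !oldByID[p["productId"]] {
            fmt.Println("appeared:", p["productId"]) // prints: appeared: 3
        }
    }
}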

How to fix: Golang "append" method pushing same elements to slice

I'm trying to map data from the DB (Mongo) to a slice in Go. Everything works fine if I return a simple []string, but if I change the type to []*models.Organization the code returns a slice of identical elements.
func (os *OrganizationService) GetAll() ([]*models.Organization, error) {
    var organizations []*models.Organization
    results := os.MongoClient.Collection("organizations").Find(bson.M{})
    organization := &models.Organization{}
    for results.Next(organization) {
        fmt.Println(organization)
        organizations = append(organizations, organization)
    }
    return organizations, nil
}
I expect the output [{ Name: "someOrg", ID: "someId" }, { Name: "someOrg2", ID: "someID" }, ... ], but the actual output is [{ Name: "someOrg", ID: "someId" }, { Name: "someOrg", ID: "someId" }, ... ].
I'm using the bongo package.
The application appends the same pointer, organization, on every iteration of the loop, so every element of the slice ends up pointing at the same value. Fix it by creating a new value inside the loop:
func (os *OrganizationService) GetAll() ([]*models.Organization, error) {
    var organizations []*models.Organization
    results := os.MongoClient.Collection("organizations").Find(bson.M{})
    organization := &models.Organization{}
    for results.Next(organization) {
        fmt.Println(organization)
        organizations = append(organizations, organization)
        organization = &models.Organization{} // new value for next iteration
    }
    return organizations, nil
}
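A standalone sketch (no Mongo involved, hypothetical Organization struct) that reproduces the aliasing problem and the fix:

package main

import "fmt"

type Organization struct{ Name string }

func main() {
    names := []string{"someOrg", "someOrg2"}

    // Buggy: the same pointer is appended every time, so all elements alias
    // the last value written.
    var buggy []*Organization
    org := &Organization{}
    for _, n := range names {
        org.Name = n
        buggy = append(buggy, org)
    }
    fmt.Println(*buggy[0], *buggy[1]) // {someOrg2} {someOrg2}

    // Fixed: a fresh value is created on each iteration.
    var fixed []*Organization
    for _, n := range names {
        fixed = append(fixed, &Organization{Name: n})
    }
    fmt.Println(*fixed[0], *fixed[1]) // {someOrg} {someOrg2}
}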

Is there a way to get slice as result of Find()?

Now I'm doing:
sess := mongodb.DB("mybase").C("mycollection")

var users []struct {
    Username string `bson:"username"`
}
err = sess.Find(nil).Select(bson.M{"username": 1, "_id": 0}).All(&users)
if err != nil {
    fmt.Println(err)
}

var myUsers []string
for _, user := range users {
    myUsers = append(myUsers, user.Username)
}
Is there a more effective way to get a slice of usernames from Find (or another query method) directly, without the intermediate struct and range loop?
The result of a MongoDB find() is always a list of documents. So if you want a list of values, you have to convert it manually just as you did.
Using a custom type (derived from string)
Also note that if you create your own type (derived from string), you can override its unmarshaling logic and "extract" just the username from the document.
This is how it could look:
type Username string

func (u *Username) SetBSON(raw bson.Raw) (err error) {
    doc := bson.M{}
    if err = raw.Unmarshal(&doc); err != nil {
        return
    }
    *u = Username(doc["username"].(string))
    return
}
And then querying the usernames into a slice:
c := mongodb.DB("mybase").C("mycollection") // Obtain collection

var uns []Username
err = c.Find(nil).Select(bson.M{"username": 1, "_id": 0}).All(&uns)
if err != nil {
    fmt.Println(err)
}
fmt.Println(uns)
Note that []Username is not the same as []string, so this may or may not be sufficient for you. Should you need a user name as a string value instead of Username when processing the result, you can simply convert a Username to string.
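For instance, if a []string is needed downstream, the []Username result from the query above can be converted with a short loop (a sketch reusing the uns slice from the previous snippet):

strs := make([]string, len(uns))
for i, u := range uns {
    strs[i] = string(u)
}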
Using Query.Iter()
Another way to avoid the slice copying is to call Query.Iter(), iterate over the results, and extract and store the username manually, similar to what the custom unmarshaling logic above does.
This is how it could look:
var uns []string
it := c.Find(nil).Select(bson.M{"username": 1, "_id": 0}).Iter()
defer it.Close()

for doc := (bson.M{}); it.Next(&doc); {
    uns = append(uns, doc["username"].(string))
}
if err := it.Err(); err != nil {
    fmt.Println(err)
}
fmt.Println(uns)
I don't see what could be more effective than a simple range loop with appends. Without all the Mongo stuff, your code is basically this, and that's exactly how I would do it:
package main

import (
    "fmt"
)

type User struct {
    Username string
}

func main() {
    var users []User
    users = append(users, User{"John"}, User{"Jane"}, User{"Jim"}, User{"Jean"})
    fmt.Println(users)

    // Interesting part starts here.
    var myUsers []string
    for _, user := range users {
        myUsers = append(myUsers, user.Username)
    }
    // Interesting part ends here.

    fmt.Println(myUsers)
}
https://play.golang.com/p/qCwENmemn-R

Insert data in MongoDB using mgo

I'm trying to insert some data into MongoDB using mgo, but the outcome is not what I wanted.
My struct
type Slow struct {
    Endpoint string
    Time     string
}
My insert statement
err := collection.Insert(&Slow{endpoint, e})
if err != nil {
    panic(err)
}
How I'm trying to print it
var results []Slow
err := collection.Find(nil).All(&results)
if err != nil {
    panic(err)
}

s, _ := json.MarshalIndent(results, " ", " ")
w.Write(s)
My output (Marshaled JSON)
[{
    "Endpoint": "/api/endpoint1",
    "Time": "0.8s"
},
{
    "Endpoint": "/api/endpoint2",
    "Time": "0.7s"
}]
What I wanted
{
    "/api/endpoint1":"0.8s",
    "/api/endpoint2":"0.7s"
}
// No brackets
Thank you.
First, you seem to want the results sorted by Endpoint. If you don't specify any sort order when querying, you have no guarantee of any specific order. So query them like this:
err := collection.Find(nil).Sort("endpoint").All(&results)
Next, what you want is not the JSON representation of the results. To get the format you want, use the following loop:
w.Write([]byte{'{'})
for i, slow := range results {
    if i > 0 {
        w.Write([]byte{','})
    }
    w.Write([]byte(fmt.Sprintf("\n\t\"%s\":\"%v\"", slow.Endpoint, slow.Time)))
}
w.Write([]byte("\n}"))
Output is as you expect it (try it on the Go Playground):
{
    "/api/endpoint1":"0.8s",
    "/api/endpoint2":"0.7s"
}
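An alternative sketch, assuming the same results slice, Slow struct, and w writer as above: build a map and let encoding/json produce the object form directly. Note that encoding/json sorts map keys alphabetically, which here matches the by-endpoint ordering:

m := make(map[string]string, len(results))
for _, slow := range results {
    m[slow.Endpoint] = slow.Time // endpoint becomes the key, time the value
}
s, err := json.MarshalIndent(m, " ", " ")
if err != nil {
    panic(err)
}
w.Write(s)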