I want to update one of the documents in CLIENTDATA using _id and clientID as the filter keys. How can I update it, and is there any method to do this through aggregation? For example, how can I change the name "swapnil" to something else using _id and clientID as my filters?
// UpdateClient is used to update clientData
func UpdateClient(Data structure.ClientDataUpdate) bool {
    connection := GetConnection()
    if connection == nil {
        return false
    }
    collection := connection.Database("IAGENT").Collection("CLIENTDATA")
    filter := bson.M{"$and": []interface{}{bson.M{"_id": Data.ID}, bson.M{"clientData.clientID": Data.ClientID}}}
    update := bson.M{"$set": bson.M{"clientData.name": Data.Name, "clientData.policy": Data.Policy, "clientData.expiryDate": Data.ExpiryDate, "clientData.metaData": Data.Metadata, "clientData.mobile": Data.Phone}}
    _, err := collection.UpdateOne(context.TODO(), filter, update)
    if err != nil {
        fmt.Println("updating the Data", err)
        return false
    }
    return true
}
Here is the image of my MongoDB database with the above collection.
You need to use the positional operator to update an element inside an array, so instead of clientData.name you should use clientData.$.name:
// UpdateClient is used to update clientData
func UpdateClient(Data structure.ClientDataUpdate) bool {
    connection := GetConnection()
    if connection == nil {
        return false
    }
    collection := connection.Database("IAGENT").Collection("CLIENTDATA")
    filter := bson.M{"$and": []interface{}{bson.M{"_id": Data.ID}, bson.M{"clientData.clientID": Data.ClientID}}}
    update := bson.M{"$set": bson.M{"clientData.$.name": Data.Name, "clientData.$.policy": Data.Policy, "clientData.$.expiryDate": Data.ExpiryDate, "clientData.$.metaData": Data.Metadata, "clientData.$.mobile": Data.Phone}}
    _, err := collection.UpdateOne(context.TODO(), filter, update)
    if err != nil {
        fmt.Println("updating the Data", err)
        return false
    }
    return true
}
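If the array element ever needs to be matched on a field that is not part of the top-level filter, the filtered positional operator $[<identifier>] together with arrayFilters is an alternative. A minimal sketch, reusing collection and Data from the code above (the elem identifier and the reduced field list are just for illustration):
// Sketch: update the matching clientData element via arrayFilters
// instead of the positional $ operator.
filter := bson.M{"_id": Data.ID}
update := bson.M{"$set": bson.M{"clientData.$[elem].name": Data.Name}}
opts := options.Update().SetArrayFilters(options.ArrayFilters{
    Filters: []interface{}{bson.M{"elem.clientID": Data.ClientID}},
})
_, err := collection.UpdateOne(context.TODO(), filter, update, opts)
if err != nil {
    fmt.Println("updating the Data", err)
}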
There is a field called bitfield, and inside it is a map. Every time I set bits, for example bit 5000, I get key 4096; but if I set bit 1000, I get key 0. However, I set both of them, so both keys 0 and 4096 should exist. How can I update the existing field without deleting the other data, so that both keys 0 and 4096 exist?
func UpsertBitsArray(tx models.Transactions, sendingQrlAddress string) (error, string) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    optionss := options.Find().
        SetProjection(bson.M{"bitfield": 1}).
        SetLimit(1)
    results, err := addressesCollections.Find(ctx, bson.M{"id": sendingQrlAddress}, optionss)
    if err != nil {
        return err, "Failed"
    }
    var bitfieldStruct []wallet.Wallet
    err = results.All(ctx, &bitfieldStruct)
    if err != nil {
        return err, "Failed"
    }
    // signature, err := base64.StdEncoding.DecodeString(tx.Signature)
    // if err != nil {
    //     glog.Info("%v", err)
    // }
    otsKeyIndex := big.NewInt(10000)
    for _, bitfieldExist := range bitfieldStruct {
        if bitfieldExist.Paged == nil {
            fmt.Println("No array/bitfield, creating new one..")
            bitfieldExist.Paged = bitfield.NewBig()
            bitfieldExist.Paged.Set(otsKeyIndex)
            filter := bson.D{{"id", sendingQrlAddress}}
            update := bson.D{
                {
                    Key: "$set",
                    Value: bson.D{
                        {Key: "bitfield", Value: bitfieldExist.Paged},
                    },
                },
            }
            opts := options.Update().SetUpsert(true)
            result, err := addressesCollections.UpdateOne(context.TODO(), filter, update, opts)
            if err != nil {
                glog.Info("%v", err)
            }
            fmt.Printf("Number of documents updated: %v\n", result.ModifiedCount)
            fmt.Printf("Number of documents upserted: %v\n", result.UpsertedCount)
        } else {
            fmt.Println("Bitfield/array exists, setting it..")
            bitfieldExist.Paged.Set(otsKeyIndex)
            filter := bson.D{{"id", sendingQrlAddress}}
            update := bson.D{
                {
                    Key: "$set",
                    Value: bson.D{
                        {Key: "bitfield", Value: bitfieldExist.Paged},
                    },
                },
            }
            opts := options.Update().SetUpsert(true)
            result, err := addressesCollections.UpdateOne(context.TODO(), filter, update, opts)
            if err != nil {
                glog.Info("%v", err)
            }
            fmt.Printf("Number of documents updated: %v\n", result.ModifiedCount)
            fmt.Printf("Number of documents upserted: %v\n", result.UpsertedCount)
        }
    }
    return nil, "Success"
}
Here is what I mean:
Edit:
The wallet struct:
type Wallet struct {
    _id    primitive.ObjectID `json:"_id,omitempty"`
    Id     string             `json:"id,omitempty"`
    Amount int                `json:"amount,omitempty"`
    Paged  bitfield.Big       `json:"bitfield"`
}
What I'm storing:
package bitfield

import (
    "math/big"
)

const (
    fieldSize = 1024
)

type Big map[string]Bitfield

func NewBig() Big {
    return make(map[string]Bitfield)
}

func (fields Big) Set(i *big.Int) {
    number, pageNumber := indexOfBig(i, fieldSize)
    page, exists := fields[pageNumber.String()]
    if !exists {
        page = New(fieldSize)
        fields[pageNumber.String()] = page
    }
    page.Set(uint(number.Uint64()))
}
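One way to keep the existing keys is to $set only the pages that changed, using dot notation on the map key, instead of replacing the whole bitfield value. A minimal sketch for the branch where the bitfield already exists, reusing the variables from UpsertBitsArray above:
// Sketch: write each in-memory page under its own "bitfield.<page>" key
// so pages already stored in the document are left untouched.
bitfieldExist.Paged.Set(otsKeyIndex)
set := bson.D{}
for pageKey, page := range bitfieldExist.Paged {
    set = append(set, bson.E{Key: "bitfield." + pageKey, Value: page})
}
filter := bson.D{{"id", sendingQrlAddress}}
update := bson.D{{Key: "$set", Value: set}}
opts := options.Update().SetUpsert(true)
_, err := addressesCollections.UpdateOne(context.TODO(), filter, update, opts)
if err != nil {
    glog.Info("%v", err)
}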
I am trying to create a transaction in MongoDB with Golang and Iris. The problem is that the transaction does not accept the Iris context and connection, and I don't know why this happens. Can you tell me what I am doing wrong here?
Main.go Using Iris
func main() {
    app := iris.New()
    app.Logger().SetLevel("debug")
    app.Use(recover.New())
    app.Use(logger.New())
    // Resource: http://localhost:8080
    app.Get("/", func(ctx iris.Context) {
        ctx.JSON(iris.Map{"message": "Welcome to Woft Bank"})
    })
    // API endpoints
    router.SetRoute(app)
    app.Listen(PORT)
}
Router
func SetRoute(app *iris.Application) {
    userRoute := app.Party("/user")
    {
        userRoute.Post("/register", middleware.UserValidator, controller.CreateUser)
        userRoute.Get("/detail", middleware.UserValidator, controller.GetUserBalanceWithUserID)
        userRoute.Patch("/transfer", middleware.TransferValidator, controller.Transfer)
    }
}
Transaction function (the session was not created by this client)
func Transfer(ctx iris.Context) {
    senderID := ctx.URLParam("from")
    receiverID := ctx.URLParam("to")
    amount, _ := strconv.ParseInt(ctx.URLParam("amount"), 10, 64)
    session, err := Config.DB().StartSession()
    if err != nil {
        handleErr(ctx, err)
        return
    }
    defer session.EndSession(ctx)
    callback := func(sessCtx mongo.SessionContext) (interface{}, error) {
        upsert := false
        after := options.After
        opt := options.FindOneAndUpdateOptions{
            ReturnDocument: &after,
            Upsert:         &upsert,
        }
        sender := Models.User{}
        filter := bson.M{"username": senderID}
        update := bson.M{"$inc": bson.M{"balance": -amount}}
        // FindOneAndUpdate did not accept sessCtx
        err := UserCollection.FindOneAndUpdate(sessCtx, filter, update, &opt).Decode(&sender)
        if err != nil {
            return nil, err
        }
        if sender.Balance < 0 {
            return nil, errors.New("sender's balance is not enough")
        }
        filter = bson.M{"username": receiverID}
        update = bson.M{"$inc": bson.M{"balance": +amount}}
        _, err = UserCollection.UpdateOne(sessCtx, filter, update)
        if err != nil {
            return nil, err
        }
        return sender, nil
    }
    result, err := session.WithTransaction(ctx, callback)
    if err != nil {
        handleErr(ctx, err)
        return
    }
    response(result, "success", ctx)
}
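The "session was not created by this client" error usually means that UserCollection was created from a different *mongo.Client than the one returned by Config.DB() for StartSession(); every collection used inside the transaction callback must come from the same client that started the session. A minimal sketch under that assumption (the database and collection names are placeholders):
// Derive the collection from the same client that starts the session.
client := Config.DB()
session, err := client.StartSession()
if err != nil {
    handleErr(ctx, err)
    return
}
defer session.EndSession(ctx)
userCollection := client.Database("bank").Collection("users") // placeholder names
// ...then use userCollection inside the WithTransaction callback as above.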
I'm trying to switch a Go backend project from Postgres to MongoDB; the final missing piece that I couldn't fix is this:
err := db.Model(&users).Where("id in (?)", pg.In(ids)).Select()
Could anyone help me with its equivalent in MongoDB?
This is the code I want to change
const userloaderKey = "userloader"
func DataloaderMiddleware(db *pg.DB, next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        userloader := UserLoader{
            maxBatch: 100,
            wait:     1 * time.Millisecond,
            fetch: func(ids []string) ([]*models.User, []error) {
                var users []*models.User
                err := db.Model(&users).Where("id in (?)", pg.In(ids)).Select()
                if err != nil {
                    return nil, []error{err}
                }
                u := make(map[string]*models.User, len(users))
                for _, user := range users {
                    u[user.ID] = user
                }
                result := make([]*models.User, len(ids))
                for i, id := range ids {
                    result[i] = u[id]
                }
                return result, nil
            },
        }
        ctx := context.WithValue(r.Context(), userloaderKey, &userloader)
        next.ServeHTTP(w, r.WithContext(ctx))
    })
}

func getUserLoader(ctx context.Context) *UserLoader {
    return ctx.Value(userloaderKey).(*UserLoader)
}
The equivalent would be something like this
ids := []primitive.ObjectID{ /* contains some IDs */ }
coll := client.Database("myDatabase").Collection("users")
cur, err := coll.Find(context.TODO(), bson.M{"_id": bson.M{"$in": ids}})
if err != nil {
    // handle error
}
var users []*models.User
for cur.Next(context.TODO()) {
    var user *models.User
    if err := cur.Decode(&user); err != nil {
        log.Fatal(err)
    }
    users = append(users, user)
}
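Put together, the dataloader's fetch function might look roughly like this. This is only a sketch: it assumes the same client handle and models.User type as above, and that the string IDs handed to the loader are hex ObjectIDs (converted with primitive.ObjectIDFromHex), which is an assumption about your schema:
fetch: func(ids []string) ([]*models.User, []error) {
    // Convert the string IDs to ObjectIDs (assumes _id is an ObjectID).
    oids := make([]primitive.ObjectID, 0, len(ids))
    for _, id := range ids {
        oid, err := primitive.ObjectIDFromHex(id)
        if err != nil {
            return nil, []error{err}
        }
        oids = append(oids, oid)
    }
    coll := client.Database("myDatabase").Collection("users")
    cur, err := coll.Find(context.TODO(), bson.M{"_id": bson.M{"$in": oids}})
    if err != nil {
        return nil, []error{err}
    }
    var users []*models.User
    if err := cur.All(context.TODO(), &users); err != nil {
        return nil, []error{err}
    }
    // Re-order the results to match the requested IDs, as the original fetch did.
    u := make(map[string]*models.User, len(users))
    for _, user := range users {
        u[user.ID] = user
    }
    result := make([]*models.User, len(ids))
    for i, id := range ids {
        result[i] = u[id]
    }
    return result, nil
},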
This did the job, thanks!
Currently I am able to return all my products from the collection.
However, I want to be able to return products that come after a specific product ID (which would be the last one on the client side, so they can load more).
Current way (return all)
query := bson.M{}
var product ReturnedProduct
var products []ReturnedProduct
cur, err := mg.Db.Collection("products").Find(c.Request().Context(), query)
if err != nil {
    fmt.Println(err)
}
for cur.Next(c.Request().Context()) {
    err := cur.Decode(&product)
    if err != nil {
        fmt.Println(err)
    }
    products = append(products, product)
}
// return products list in JSON format
return c.JSON(http.StatusOK, products)
New Way Attempt(return based on page)
afterID := c.QueryParam("afterID")
if afterID == "" {
    // get from start of collection based on latest date
}
// get 10 products after this ID, if no ID then get from start
query := bson.M{}
var product ReturnedProduct
var products []ReturnedProduct
// .find(afterId).limit(10) - something like this?
cur, err := mg.Db.Collection("products").Find(c.Request().Context(), query)
if err != nil {
    fmt.Println(err)
}
for cur.Next(c.Request().Context()) {
    err := cur.Decode(&product)
    if err != nil {
        fmt.Println(err)
    }
    products = append(products, product)
}
// return products list in JSON format
return c.JSON(http.StatusOK, products)
The official MongoDB Go driver also has a *FindOptions optional parameter you could explore:
pageOptions := options.Find()
pageOptions.SetSkip(int64(page))   // 0-i
pageOptions.SetLimit(int64(limit)) // number of records to return
ctx := c.Request().Context()
cur, err := userCollection.Find(ctx, bson.D{{}}, pageOptions)
if err != nil {
    // handle error
}
defer cur.Close(ctx)
var products []ReturnedProduct
for cur.Next(ctx) {
    var product ReturnedProduct
    if err := cur.Decode(&product); err != nil {
        // handle error
    }
    products = append(products, product)
}
if err := cur.Err(); err != nil {
    // handle error
}
You may construct a query where _id is greater than afterID, in which case you should also specify sorting by _id. For sorting and for setting a limit, you may use options.FindOptions.
You should also use Cursor.All() to decode all the results instead of decoding them one by one.
This is how it could look:
query := bson.M{"_id": bson.M{"$gt": afterID}}
opts := options.Find().
    SetSort(bson.M{"_id": 1}).
    SetLimit(10)
ctx := c.Request().Context()
curs, err := mg.Db.Collection("products").Find(ctx, query, opts)
if err != nil {
    // Handle error
}
var products []ReturnedProduct
if err = curs.All(ctx, &products); err != nil {
    // Handle error
}
return c.JSON(http.StatusOK, products)
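Note that if _id is stored as an ObjectID, the afterID query parameter (a hex string) likely needs to be converted before it is used in the filter. A small sketch under that assumption:
afterOID, err := primitive.ObjectIDFromHex(afterID)
if err != nil {
    // Handle invalid ID
}
query := bson.M{"_id": bson.M{"$gt": afterOID}}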
While querying the below data, the returned cursor is empty, even though there are hundreds of documents which satisfy the condition.
{
    "_id": "5dd68c51a39809125944ffba",
    "status": "success",
    "balance": "0.000",
    "request_params": {
        "username": "test_user",
        "service_code": "MR"
    }
}
I am using the code below.
MongoDB driver "go.mongodb.org/mongo-driver/mongo"
func saveLog(data Log) bool {
    mongo, err := openMongo()
    if err != nil {
        log.Println(err)
        fmt.Println("Connection failed")
        return false
    } else {
        LogCollection := mongo.Database(LogDb).Collection(CollectionLog)
        insertedApi, err := LogCollection.InsertOne(context.TODO(), data)
        if err != nil {
            log.Println(err)
            fmt.Println("Insert failed")
            return false
        } else {
            log.Println(insertedApi.InsertedID)
            return true
        }
    }
}
func parseLog() {
    db, err := openMongo()
    if err != nil {
        fmt.Println(err)
        fmt.Println("Connection failed")
        return
    } else {
        logCollection := db.Database(LogDb).Collection(CollectionLog)
        var results []*Log
        find := bson.D{{"status", "success"}, {"request_params", bson.D{{"username", "test_user"}}}}
        fmt.Println(find)
        cur, err := logCollection.Find(context.TODO(), find)
        if err != nil {
            log.Fatal(err)
        } else {
            for cur.Next(context.TODO()) {
                var elem Log
                err := cur.Decode(&elem)
                if err != nil {
                    fmt.Println("Parse error : ", err)
                }
                fmt.Println("Log : ", elem)
                results = append(results, &elem)
            }
        }
    }
}
Log write
saveLog(Log{"success","0.000",RequestParams{"test_user","MR"}})
Log read
parseLog()
Log struct
type Log struct {
    Status        string        `bson:"status"`
    Balance       string        `bson:"balance"`
    RequestParams RequestParams `bson:"request_params"`
}
type RequestParams struct {
    Username    string `bson:"username"`
    ServiceCode string `bson:"service_code"`
}
MongoDB data
Filtering on status only returns the whole set of 8k documents:
bson.D{{"status", "success"}}
Isn't collection.Find() the right function for this?
The shell command returns the documents correctly:
db.log.find({"status":"success","request_params.username":"test_user"}).limit(10).pretty()
The issue here is caused by the query filter. There is a difference between the following queries:
// Query A: {"status": "success", "request_params": {"username":"test_user"}}
find := bson.D{{"status","success"},{"request_params",bson.D{{"username","test_user"}}}}
// Query B: {"status": "success", "request_params.username":"test_user"}
find := bson.D{{"status","success"},{"request_params.username","test_user"}}
Query A means that you would like to match documents whose request_params field is exactly equal to the object {"username":"test_user"}. None of the documents in your collection matches this criterion, because they also contain {"service_code":"MR"}. Query B uses dot notation, which means that you would like to match documents whose request_params field contains the value {"username":"test_user"}.
See also Query on Nested Field for more information.
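Applied to the parseLog function above, the corrected filter (with an optional limit to mirror the shell command) might look roughly like this, reusing logCollection and the Log type from that code:
// Dot-notation filter, matching the shell query shown earlier.
find := bson.D{{"status", "success"}, {"request_params.username", "test_user"}}
opts := options.Find().SetLimit(10)
cur, err := logCollection.Find(context.TODO(), find, opts)
if err != nil {
    log.Fatal(err)
}
var results []*Log
if err := cur.All(context.TODO(), &results); err != nil {
    log.Fatal(err)
}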