I'm currently integrating transaction logic into my Go + MongoDB API.
I already created this example endpoint. It retrieves a user document and sends it back to the client with JSON encoding.
func GetUser(w http.ResponseWriter, r *http.Request) {
    id := mux.Vars(r)["user-id"]
    objectId, _ := primitive.ObjectIDFromHex(id)
    user, err := UserById(objectId)
    if err != nil {
        // ...
    }
    // do some stuff with the user, whatever
    // eventually send it back
    json.NewEncoder(w).Encode(user)
}
func UserById(id primitive.ObjectID) (models.StoredUser, error) {
    session, err := mongoClient.StartSession()
    if err != nil {
        return models.StoredUser{}, fmt.Errorf("failed starting session for transaction | %s", err.Error())
    }
    defer session.EndSession(context.TODO())
    callback := func(ctx mongo.SessionContext) (any, error) {
        res := usersCollection.FindOne(
            ctx,
            bson.M{
                "_id": id,
            },
        )
        if res.Err() != nil {
            return models.StoredUser{}, fmt.Errorf("failed querying db | %s", res.Err())
        }
        return res, nil
    }
    result, err := session.WithTransaction(context.TODO(), callback)
    if err != nil {
        return models.StoredUser{}, fmt.Errorf("failed executing transaction | %s", err.Error())
    }
    asserted := result.(*mongo.SingleResult)
    var ret models.StoredUser
    if err := asserted.Decode(&ret); err != nil {
        return models.StoredUser{}, fmt.Errorf("failed parsing user data in struct | %s", err.Error())
    }
    return ret, nil
}
Here are the big steps:
1. Parse the request content to get the user id
2. Create a session to perform the transaction
3. Declare the callback function using the id argument
4. Call the callback function from a transaction
5. Get back the *mongo.SingleResult as an interface{} and parse it back to its original type
6. Decode the BSON document contained in the *mongo.SingleResult into the return struct
This function works, but it is very verbose, and most of it would be duplicated in every query function.
I wonder if there is a way to avoid repeating the same code in each function I want to write. My previous wrapper-function attempts didn't lead to anything, as I actually need the variables where they are now at each call.
Still, steps 2 and 5 especially seem very redundant and inefficient.
Anyone got any ideas?
OK, I found the following:
func Transaction(callback func(ctx mongo.SessionContext) (any, error)) (any, error) {
    session, err := mongoClient.StartSession()
    if err != nil {
        return nil, fmt.Errorf("failed creating session | %s", err.Error())
    }
    defer session.EndSession(context.TODO())
    res, err := session.WithTransaction(context.TODO(), callback)
    if err != nil {
        return nil, fmt.Errorf("failed executing transaction | %s", err.Error())
    }
    return res, nil
}
Let's say I then want to fetch user documents:
func GetUsers() ([]models.User, error) {
    callback := func(ctx mongo.SessionContext) (any, error) {
        res, err := usersCollection.Find(ctx, bson.M{})
        if err != nil {
            return nil, fmt.Errorf("failed querying users collection | %s", err.Error())
        }
        var ret []models.User
        if err := res.All(ctx, &ret); err != nil {
            return nil, fmt.Errorf("failed parsing results in struct | %s", err.Error())
        }
        return ret, nil
    }
    result, err := Transaction(callback)
    if err != nil {
        return []models.User{}, fmt.Errorf("failed executing transaction | %s", err.Error())
    }
    users, _ := result.([]models.User)
    return users, nil
}
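A further refinement I am considering (a sketch, assuming Go 1.18+ generics; TypedTransaction is my own name, not part of the driver): pushing the type assertion of step 5 into the wrapper, so every caller gets a typed result back:

// TypedTransaction runs the callback inside a transaction and
// asserts the result to T, removing the per-caller type assertion.
func TypedTransaction[T any](callback func(ctx mongo.SessionContext) (any, error)) (T, error) {
    var zero T
    res, err := Transaction(callback)
    if err != nil {
        return zero, err
    }
    typed, ok := res.(T)
    if !ok {
        return zero, fmt.Errorf("unexpected result type %T", res)
    }
    return typed, nil
}

GetUsers then shrinks to the callback plus one call: users, err := TypedTransaction[[]models.User](callback).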
The error
(*errors.errorString) *{s: "pq: unexpected DataRow in simple query execution"}
appears after the line with the comment below. I didn't find any solution online. Since Stack Overflow asks for more details: this is an update query that is supposed to update a todo and a list of subtasks in the database. The exact error is in the question title. Here is the complete code for the function that returns the error.
func (t *TodoTable) UpdateTodo(ctx context.Context, todo *Todo, t_id int) error {
    tx, err := t.sqlxdb.BeginTxx(ctx, &sql.TxOptions{})
    if err != nil {
        return err
    }
    rollback_err := func(err error) error {
        if err2 := tx.Rollback(); err2 != nil {
            return fmt.Errorf("%v; %v", err, err2)
        }
        return err
    }
    row := tx.QueryRowxContext(ctx, "UPDATE todos SET todo_name=$1, deadline=$2, updated_at=$3 WHERE todo_id=$4 returning todo_id", todo.TodoName, todo.Deadline, todo.UpdatedAt, t_id)
    if row.Err() != nil {
        return rollback_err(err)
    }
    var subs_ids []int
    // Getting subs ids from database
    query := fmt.Sprintf("SELECT sub_id FROM subs WHERE todo_id=%d", t_id)
    // THE ERROR COMES AFTER EXECUTING THE LINE BELOW
    rows, err := tx.Query(query)
    if err != nil {
        rollback_err(err)
    }
    if rows != nil {
        for rows.Next() {
            var sub_id int
            err = rows.Scan(&sub_id)
            if err != nil {
                rollback_err(err)
            }
            subs_ids = append(subs_ids, sub_id)
        }
        if err := tx.Commit(); err != nil {
            return rollback_err(err)
        }
    }
    // Updating subs
    for i, sub := range todo.Subs {
        _, err = tx.ExecContext(ctx, fmt.Sprintf("UPDATE subs SET sub_name='%s' WHERE sub_id=%d", sub.Sub_name, subs_ids[i]))
        if err != nil {
            return rollback_err(err)
        }
    }
    return nil
}
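Two things stand out here (an educated guess, not verified against this schema): the *Row returned by QueryRowxContext is never scanned, so lib/pq leaves the DataRow response buffered on the connection and the next query on the same transaction trips over it; and tx.Commit() runs before the subs update loop, so the later ExecContext calls happen outside the transaction. A sketch of a restructured version, with parameterized queries instead of fmt.Sprintf:

func (t *TodoTable) UpdateTodo(ctx context.Context, todo *Todo, t_id int) error {
    tx, err := t.sqlxdb.BeginTxx(ctx, &sql.TxOptions{})
    if err != nil {
        return err
    }
    rollbackErr := func(err error) error {
        if err2 := tx.Rollback(); err2 != nil {
            return fmt.Errorf("%v; %v", err, err2)
        }
        return err
    }
    // Scan the RETURNING value so no unread DataRow is left on the connection.
    var updatedID int
    err = tx.QueryRowxContext(ctx,
        "UPDATE todos SET todo_name=$1, deadline=$2, updated_at=$3 WHERE todo_id=$4 RETURNING todo_id",
        todo.TodoName, todo.Deadline, todo.UpdatedAt, t_id).Scan(&updatedID)
    if err != nil {
        return rollbackErr(err)
    }
    // Parameterized query instead of string interpolation.
    rows, err := tx.QueryContext(ctx, "SELECT sub_id FROM subs WHERE todo_id=$1", t_id)
    if err != nil {
        return rollbackErr(err)
    }
    var subsIDs []int
    for rows.Next() {
        var subID int
        if err := rows.Scan(&subID); err != nil {
            rows.Close()
            return rollbackErr(err)
        }
        subsIDs = append(subsIDs, subID)
    }
    rows.Close()
    if err := rows.Err(); err != nil {
        return rollbackErr(err)
    }
    // Update subs before committing, not after.
    for i, sub := range todo.Subs {
        if _, err := tx.ExecContext(ctx,
            "UPDATE subs SET sub_name=$1 WHERE sub_id=$2", sub.Sub_name, subsIDs[i]); err != nil {
            return rollbackErr(err)
        }
    }
    return tx.Commit()
}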
I am creating a REST CRUD HTTP server written in Go. I am getting this error message from the database connection: "context deadline exceeded".
I have a CreateUsers() function to insert multiple users into the database. Currently, I am inserting one user at a time:
func CreateUsers(users []*models.User) ([]primitive.ObjectID, error) {
    client, ctx, cancel := database.GetConnection()
    defer cancel()
    defer client.Disconnect(ctx)
    var userIds []primitive.ObjectID
    if len(users) == 0 {
        log.Printf("No users to create")
        return userIds, errors.New("no users to create")
    }
    for _, user := range users {
        user.ID = primitive.NewObjectID()
        hashedPassword, err := utils.HashPassword(user.Password)
        if err != nil {
            log.Printf("Error while hashing password: %v", err)
            return userIds, err
        }
        user.Password = hashedPassword
        result, err := client.Database("users").Collection("users").InsertOne(ctx, user)
        if err != nil {
            log.Printf("Error while creating user: %v", err)
            return userIds, err
        }
        oid := result.InsertedID.(primitive.ObjectID)
        userIds = append(userIds, oid)
    }
    return userIds, nil
}
My database connection (database.GetConnection) is something like:
func GetConnection() (*mongo.Client, context.Context, context.CancelFunc) {
    client, err := mongo.NewClient(options.Client().ApplyURI(connectionURI))
    if err != nil {
        log.Fatalf("error while creating client: %v", err)
    }
    ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout*time.Second)
    err = client.Connect(ctx)
    if err != nil {
        log.Fatalf("cluster connection error: %v", err)
    }
    err = client.Ping(ctx, nil)
    if err != nil {
        log.Fatalf("cluster ping error")
    }
    log.Println("connected to mongodb")
    return client, ctx, cancel
}
Note: connectionTimeout is equal to 5.
I am not sure, but the deadline-exceeded error may be related to the InsertOne() approach. So, instead of focusing on solving that error, I should probably be asking how to turn the []*models.User into documents I can pass as parameters to InsertMany().
I think another advantage of the InsertMany approach is that the total query time will be much lower. Any suggestions?
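Regarding the InsertMany conversion: as far as I know, the driver marshals structs to BSON itself, so there is no need to build bson.D values by hand; widening the slice to []interface{} is enough. A sketch under those assumptions, reusing the same helpers (CreateUsersBulk is a placeholder name):

func CreateUsersBulk(users []*models.User) ([]primitive.ObjectID, error) {
    client, ctx, cancel := database.GetConnection()
    defer cancel()
    defer client.Disconnect(ctx)

    if len(users) == 0 {
        return nil, errors.New("no users to create")
    }
    // InsertMany takes []interface{}; the driver marshals each struct to BSON.
    docs := make([]interface{}, 0, len(users))
    for _, user := range users {
        user.ID = primitive.NewObjectID()
        hashedPassword, err := utils.HashPassword(user.Password)
        if err != nil {
            return nil, err
        }
        user.Password = hashedPassword
        docs = append(docs, user)
    }
    // One round trip instead of len(users) InsertOne calls.
    result, err := client.Database("users").Collection("users").InsertMany(ctx, docs)
    if err != nil {
        return nil, err
    }
    ids := make([]primitive.ObjectID, 0, len(result.InsertedIDs))
    for _, id := range result.InsertedIDs {
        ids = append(ids, id.(primitive.ObjectID))
    }
    return ids, nil
}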
I am new to gRPC and I am trying to implement basic CRUD plus listing. I use unary RPCs for the CRUD operations and a server stream for the listing. What I would like to do, however, is update the client whenever someone changes a record in the database that is being listed.
So, for example, user A is listing 10 companies, and user B updates one of those companies. I want user A's client to be updated once the update RPC is called.
This is what I have for now:
func RegisterCompanyServer(l hclog.Logger, gs *grpc.Server) {
    r := postgres.NewPostgresCompanyRepository()
    cs := NewCompanyServer(l, r)
    pb.RegisterCompanyServiceServer(gs, cs)
}

type CompanyServer struct {
    logger hclog.Logger
    repo   repo.CompanyRepository
    pb.UnimplementedCompanyServiceServer
}

func NewCompanyServer(l hclog.Logger, r repo.CompanyRepository) *CompanyServer {
    return &CompanyServer{
        logger: l,
        repo:   r,
    }
}

func (c *CompanyServer) ListCompany(req *pb.CompanyListRequest, stream pb.CompanyService_ListCompanyServer) error {
    // Somehow listen to CreateCompany() and update the client
    companies, err := c.repo.List(req.Query)
    if err != nil {
        return err
    }
    for _, c := range companies {
        bytes, err := json.Marshal(c)
        if err != nil {
            return err
        }
        out := &pb.Company{}
        if err = jsonEnc.Unmarshal(bytes, out); err != nil {
            return err
        }
        res := &pb.CompanyListResponse{
            Company: out,
        }
        err = stream.Send(res)
        if err != nil {
            return err
        }
    }
    return nil
}

func (c *CompanyServer) CreateCompany(context context.Context, req *pb.CompanyCreateRequest) (*pb.CompanyCreateResponse, error) {
    input := req.GetCompany()
    if input == nil {
        return nil, errors.New("Parsing Error")
    }
    bytes, err := jsonEnc.Marshal(input)
    if err != nil {
        return nil, err
    }
    company := &myCompany.Company{}
    if err = json.Unmarshal(bytes, company); err != nil {
        return nil, err
    }
    result, err := c.repo.Create(company)
    if err != nil {
        return nil, err
    }
    res := &pb.CompanyCreateResponse{
        Id: result,
    }
    // Somehow notify the stream that a company was created
    return res, nil
}
Is this even feasible with gRPC? What techniques are out there to do this? I am currently working with a PostgreSQL database.
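One common technique, assuming everything runs in a single server process: keep an in-process publish/subscribe hub; CreateCompany publishes to it after a successful repo.Create, and each ListCompany stream subscribes and forwards events until the client disconnects. The Hub type below is a hypothetical sketch (it needs the sync package), not part of any gRPC API; with multiple server instances you would replace it with PostgreSQL LISTEN/NOTIFY or an external message broker.

// Hub is a minimal in-process pub/sub: open streams subscribe,
// and mutations broadcast to every subscriber.
type Hub struct {
    mu   sync.Mutex
    subs map[chan *pb.Company]struct{}
}

func NewHub() *Hub { return &Hub{subs: make(map[chan *pb.Company]struct{})} }

func (h *Hub) Subscribe() chan *pb.Company {
    ch := make(chan *pb.Company, 16)
    h.mu.Lock()
    h.subs[ch] = struct{}{}
    h.mu.Unlock()
    return ch
}

func (h *Hub) Unsubscribe(ch chan *pb.Company) {
    h.mu.Lock()
    delete(h.subs, ch)
    h.mu.Unlock()
}

func (h *Hub) Publish(c *pb.Company) {
    h.mu.Lock()
    defer h.mu.Unlock()
    for ch := range h.subs {
        select {
        case ch <- c: // deliver if the subscriber is keeping up
        default: // drop rather than block the publisher
        }
    }
}

ListCompany would then, after sending the initial snapshot, block on the subscription instead of returning (this assumes CompanyServer gained a hub field):

// at the end of ListCompany, instead of `return nil`:
ch := c.hub.Subscribe()
defer c.hub.Unsubscribe(ch)
for {
    select {
    case <-stream.Context().Done():
        return stream.Context().Err()
    case company := <-ch:
        if err := stream.Send(&pb.CompanyListResponse{Company: company}); err != nil {
            return err
        }
    }
}

CreateCompany would call c.hub.Publish(input) once repo.Create succeeds.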
I have a struct like the one below. I also store the raw protobuf of the same struct in the DB. Every time I save data to Mongo, I have to refresh ReallyBigRaw from the proto, and every time I fetch, I have to unmarshal ReallyBigRaw back into ReallyBigObj to build responses. Is there a way I can implement some interface or provide some callback functions so that the Mongo driver does this automatically before saving or fetching data from the DB?
Also, I am using the official Go MongoDB driver, not mgo; I have read some answers explaining how this can be done with the mgo library.
import (
    "bytes"
    "encoding/json"

    "github.com/golang/protobuf/jsonpb"
    "go.mongodb.org/mongo-driver/bson"
    "go.mongodb.org/mongo-driver/bson/primitive"

    proto "github.com/dinesh/api/go"
)

type ReallyBig struct {
    ID      string `bson:"_id,omitempty"`
    DraftID string `bson:"draft_id,omitempty"`
    // Marshaled ReallyBigObj proto to map[string]interface{} stored in DB
    ReallyBigRaw map[string]interface{} `bson:"raw,omitempty"`
    ReallyBigObj *proto.ReallyBig       `bson:"-"`
    CreatedAt    primitive.DateTime     `bson:"created_at,omitempty"`
    UpdatedAt    primitive.DateTime     `bson:"updated_at,omitempty"`
}
func (r *ReallyBig) GetProto() (*proto.ReallyBig, error) {
    if r.ReallyBigObj != nil {
        return r.ReallyBigObj, nil
    }
    obj, err := getProto(r.ReallyBigRaw)
    if err != nil {
        return nil, err
    }
    r.ReallyBigObj = obj
    return r.ReallyBigObj, nil
}

func getRaw(r *proto.ReallyBig) (map[string]interface{}, error) {
    m := jsonpb.Marshaler{}
    b := bytes.NewBuffer([]byte{})
    // marshals proto to json format
    err := m.Marshal(b, r)
    if err != nil {
        return nil, err
    }
    var raw map[string]interface{}
    // unmarshal the raw data to an interface
    err = json.Unmarshal(b.Bytes(), &raw)
    if err != nil {
        return nil, err
    }
    return raw, nil
}
func getProto(raw map[string]interface{}) (*proto.ReallyBig, error) {
    b, err := json.Marshal(raw)
    if err != nil {
        return nil, err
    }
    u := jsonpb.Unmarshaler{}
    var reallyBigProto proto.ReallyBig
    err = u.Unmarshal(bytes.NewReader(b), &reallyBigProto)
    if err != nil {
        return nil, err
    }
    return &reallyBigProto, nil
}
I implemented the Marshaler and Unmarshaler interfaces. Since the Mongo driver calls MarshalBSON and UnmarshalBSON whenever a type implements Marshaler and Unmarshaler, calling bson.Marshal on the type itself would recurse into an infinite loop. To avoid that, we create a local defined type (often called an alias in this pattern). A defined type in Go inherits only the fields, not the methods, so converting to it lets us fall back to the default bson.Marshal and bson.Unmarshal behavior.
func (r *ReallyBig) MarshalBSON() ([]byte, error) {
    type ReallyBigAlias ReallyBig
    reallyBigRaw, err := getRaw(r.ReallyBigObj)
    if err != nil {
        return nil, err
    }
    r.ReallyBigRaw = reallyBigRaw
    return bson.Marshal((*ReallyBigAlias)(r))
}

func (r *ReallyBig) UnmarshalBSON(data []byte) error {
    type ReallyBigAlias ReallyBig
    err := bson.Unmarshal(data, (*ReallyBigAlias)(r))
    if err != nil {
        return err
    }
    reallyBigProto, err := getProto(r.ReallyBigRaw)
    if err != nil {
        return err
    }
    r.ReallyBigObj = reallyBigProto
    return nil
}
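With those two methods in place, the conversion happens transparently on every driver call. A quick sketch, assuming a *mongo.Collection handle named coll and a proto value named obj (both hypothetical):

// InsertOne calls MarshalBSON, which refreshes ReallyBigRaw from the proto.
doc := &ReallyBig{ID: "some-id", ReallyBigObj: obj}
if _, err := coll.InsertOne(context.TODO(), doc); err != nil {
    return err
}

// Decode calls UnmarshalBSON, which rebuilds ReallyBigObj from the raw map.
var out ReallyBig
if err := coll.FindOne(context.TODO(), bson.M{"_id": "some-id"}).Decode(&out); err != nil {
    return err
}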
I am getting a pq: sorry, too many clients already error when I call GetMessages() multiple times.
Please find the updated code:
main() code
func main() {
    dbConn, err := InitDB()
    if err != nil {
        Log.Error("Connection Error: ", err.Error())
        return
    }
    defer dbConn.Close()
    go run()
    var input string
    fmt.Scanln(&input)
}
Database connection code is:
func InitDB() (*sql.DB, error) {
    connectionString := fmt.Sprintf("user=%v password='%v' dbname=%v sslmode=disable", USER, PASSWORD, DATABASE)
    db, err = sql.Open(DRIVER, connectionString)
    return db, err
}
run goroutine:
func run() {
    for {
        messages, err := GetMessages()
        if err != nil {
            Log.Error("Connection Error: ", err.Error())
            return
        }
        log.Info(messages)
    }
}
GetMessages() function code:
func GetMessages() (messages []string, err error) {
    rows, err := db.Query(`SELECT message1, message2, message3, message4, message5,
        message6, message7, message8, message9, message10, message11, message12, message13,
        message14, message15, message16, message17, message18, message19, message20, message21,
        message22, message23, message24, message25, message26, message27, message28, message29,
        message30, message31, message32, message33, message34, message35, message36, message37,
        message38, message39, message40, message41, message42, message43, message44, message45,
        message46, message47, message48 FROM table1 WHERE id=1`)
    if err != nil {
        Log.Error("Query error", err)
        return messages, err
    }
    defer rows.Close()
    var pointers []interface{}
    for rows.Next() {
        pointers = make([]interface{}, 48)
        messages = make([]string, 48)
        for i := range pointers {
            pointers[i] = &messages[i]
        }
        err = rows.Scan(pointers...)
        if err != nil {
            Log.Error("Failed to scan row", err)
            return messages, err
        }
    }
    return messages, nil
}
I checked this answer and I have used Scan, but it still isn't working.
UPDATE
The issue was in another function. I was using db.Query without closing the returned rows object and was repeatedly calling that function. I've updated my code: I used db.Exec instead of db.Query, and it's working now. Thank you so much @mkopriva for the answer. :)
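To illustrate the leak (simplified; the table and column names here are placeholders, the real statement lives in the other function): db.Query checks a connection out of the pool and only returns it when the rows are closed, whereas db.Exec releases it as soon as the statement completes.

// Leaky: the *sql.Rows is never iterated or closed, so the connection
// is never returned to the pool. Repeat this often enough and
// Postgres runs out of client slots.
rows, err := db.Query("UPDATE some_table SET col=$1 WHERE id=1", val)
if err != nil {
    return err
}
_ = rows

// Fixed: Exec has no result set to keep open.
if _, err := db.Exec("UPDATE some_table SET col=$1 WHERE id=1", val); err != nil {
    return err
}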
Try setting SetMaxOpenConns. The default is 0 (unlimited). This may be causing the issue. It would help if you also had SetConnMaxLifetime; otherwise, Postgres will start holding connections longer, and you will notice an increase in memory usage.
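For example, right after sql.Open in InitDB (illustrative limits; tune them against your Postgres max_connections setting):

db, err := sql.Open(DRIVER, connectionString)
if err != nil {
    return nil, err
}
db.SetMaxOpenConns(20)                  // hard cap on concurrent connections
db.SetMaxIdleConns(10)                  // idle connections kept for reuse
db.SetConnMaxLifetime(30 * time.Minute) // recycle connections periodically
return db, nil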
I've had the same problem with my Postgres/Go project.
Eventually, this example worked flawlessly, without "eating" any DB connections:
// example params
firstName := "Jeremy"
lastName := "Baker"

// setup statement
stmt, err := db.Prepare(
    `INSERT INTO user (
        firstname,
        lastname) VALUES($1, $2)
    RETURNING id`) // id is the primary key of table: user
if err != nil {
    return err
}
defer stmt.Close()

// execute statement
var userID string
err = stmt.QueryRow(
    firstName,
    lastName).Scan(&userID)
if err != nil {
    return err
}