Hi, I'm trying to bind a MongoDB service to my Express.js app on AppFog.
I have a config file like this:
config.js
var config = {}
config.dev = {};
config.prod = {};
//DEV
config.dev.host = "localhost";
config.dev.port = 3000;
config.dev.mdbhost = "localhost";
config.dev.mdbport = 27017;
config.dev.db = "detysi";
//PROD
config.prod.service_type = "mongo-1.8";
config.prod.json = process.env.VCAP_SERVICES ? JSON.parse(process.env.VCAP_SERVICES) : '';
config.prod.credentials = process.env.VCAP_SERVICES ? config.prod.json[config.prod.service_type][0]["credentials"] : null;
config.prod.mdbhost = config.prod.credentials["host"];
config.prod.mdbport = config.prod.credentials["port"];
config.prod.db = config.prod.credentials["db"];
config.prod.port = process.env.VCAP_APP_PORT || process.env.PORT;
module.exports = config;
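For context, the lookup json[service_type][0]["credentials"] above assumes that VCAP_SERVICES parses to roughly the shape below. This is only a sketch; the exact top-level key ("mongo-1.8" here) and credential field names depend on how AppFog provisions the service:
{
  "mongo-1.8": [
    {
      "name": "mongodb",
      "credentials": { "host": "...", "port": 27017, "db": "...", "username": "...", "password": "..." }
    }
  ]
}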
And this is my MongoDB config depending on the environment:
app.js
if (process.env.VCAP_SERVICES) {
    server = new Server(config.prod.mdbhost, config.prod.mdbport, { auto_reconnect: true });
    db = new Db(config.prod.db, server);
} else {
    server = new Server(config.dev.mdbhost, config.dev.mdbport, { auto_reconnect: true });
    db = new Db(config.dev.db, server);
}
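For completeness: with the legacy node-mongodb-native driver, the Db object still has to be opened before queries will run. A minimal sketch, assuming db comes from the block above:
// Sketch: open the connection once at startup (legacy driver 1.x API).
db.open(function (err, openedDb) {
    if (err) throw err;
    // Connection established; safe to start the Express server and run queries here.
});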
I bound the service manually from https://console.appfog.com; my app uses the AWS Virginia infra. I also used the MongoHQ add-on to create one collection with two documents.
When I go to the Windows console and run af update myapp, it throws the following error:
Cannot read property '0' of undefined
This is because process.env.VCAP_SERVICES is undefined.
I investigated this, and it may be that my MongoDB service is incorrectly bound.
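One quick way to check the binding is to log what the app actually sees at startup. A small debugging sketch (plain Node, nothing AppFog-specific; the printed keys show whether "mongo-1.8" is really there):
console.log(process.env.VCAP_SERVICES); // the raw JSON string, or undefined if nothing is bound
var services = process.env.VCAP_SERVICES ? JSON.parse(process.env.VCAP_SERVICES) : {};
console.log(Object.keys(services));     // e.g. [ 'mongo-1.8' ] when the service is bound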
After that I tried to bind the MongoDB service from the Windows console like below:
af bind-service mongodb myapp
But it throws the following error:
Service mongodb and App myapp are not on the same infra
At this point I don't know what I can do.
I had the same problem.
What fixed it for me:
Go to: console > services > (re)start the MongoDB service.
Run the same command again.
Push again.
It should work.
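In CLI terms, the sequence would look roughly like this (a sketch from memory of the vmc-style af tooling; check af help for the exact commands):
af services                   # verify the mongodb service exists and sits on the same infra as the app
af bind-service mongodb myapp # re-run the bind after restarting the service
af update myapp               # push again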
We have a .NET Core console application accessing Azure SQL (Gen5, 4 vCores), deployed as a web job in Azure.
We recently upgraded our small console application to EF Core 6 (6.0.11).
For quite some time, the application has been intermittently throwing the exception below for READ operations (see the query in the code below):
Microsoft.Data.SqlClient.SqlException (0x80131904): A connection was successfully established with the server, but then an error occurred during the pre-login handshake. (provider: SSL Provider, error: 0 - The wait operation timed out.)
We are clueless about the root cause of this issue. Any hints on where to start looking?
Any pointer would be highly appreciated.
NOTE: The connection string has the following settings in Azure:
"ConnectionStrings": { "DBContext": "Server=Trusted_Connection=False;Encrypt=False;" }
The overall code looks something like this:
using System.IO;
using System.Linq;
using Microsoft.Data.SqlClient;
using Microsoft.EntityFrameworkCore;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;

var config = new ConfigurationBuilder()
    .SetBasePath(Directory.GetCurrentDirectory())
    .Build();

var builder = new SqlConnectionStringBuilder(config.GetConnectionString("DBContext"));
builder.Password = "";
builder.UserID = "";
builder.DataSource = "";
builder.InitialCatalog = "";
string _connection = builder.ConnectionString;

var sp = new ServiceCollection()
    .AddDbContext<DBContext>(x => x.UseSqlServer(_connection, providerOptions => providerOptions.EnableRetryOnFailure()))
    .BuildServiceProvider();

var db = sp.GetService<DBContext>();

lock (db)
{
    // This is the READ operation that intermittently times out.
    var NewTriggers = db.Triggers.Where(x => x.IsSubmitted == false && x.Error == null).OrderBy(x => x.CreateOn).ToList();
}
We migrated from EF Core 3.1 to EF Core 6.0.11 and were expecting a smooth transition.
I am able to create an azurerm_postgresql_flexible_server and azurerm_postgresql_flexible_server_database using Terraform.
I am not able to create a schema using TF, and I could not find much help in the documentation.
I also checked https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/postgresql_schema
but that uses a different provider. I am not sure what I am missing here.
This is the TF template that creates the Azure PostgreSQL server and DB:
module "common_modules" {
source = "../modules/Main"
}
provider "azurerm" {
features {}
}
locals {
#Construct Tag Data for Resource
resourceTags = {
environment = var.environment
createdBy = var.createdBy
managedBy = var.managedBy
colorBand = var.colorBand
purpose = var.purpose
lastUpdateOn = formatdate("DD-MM-YYYY hh:mm:ss ZZZ", timestamp())
}
}
resource "azurerm_postgresql_flexible_server" "postgreSQL" {
name = var.postgreSQL
location = var.location
resource_group_name = var.ckeditorResorceGroup
administrator_login = var.postgreSQLAdmin
administrator_password = var.password
sku_name = "B_Standard_B1ms"
version = "13"
storage_mb = 32768
backup_retention_days = 7
geo_redundant_backup_enabled = false
tags = local.resourceTags
}
resource "azurerm_postgresql_flexible_server_database" "postgreSQLDB" {
name = var.postgreSQLDB
server_id = azurerm_postgresql_flexible_server.postgreSQL.id
collation = "en_US.utf8"
charset = "utf8"
}
resource "azurerm_postgresql_flexible_server_firewall_rule" "postgreSQLFirewallRule" {
name = "allow_access_to_azure_services"
server_id = azurerm_postgresql_flexible_server.postgreSQL.id
start_ip_address = "0.0.0.0"
end_ip_address = "0.0.0.0"
}
Have a look at https://registry.terraform.io/providers/cyrilgdn/postgresql or https://github.com/cyrilgdn/terraform-provider-postgresql.
It is usable, but you need network connectivity to resolve names (Azure private DNS zone) and to connect to the PostgreSQL flexible server. The Terraform code should run in the same VNet as the flexible server.
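For illustration, a minimal sketch of wiring the cyrilgdn/postgresql provider against the flexible server created above (the version constraint, sslmode, and the schema name "app" are assumptions; the machine running Terraform must be able to reach the server, e.g. from the same VNet):
terraform {
  required_providers {
    postgresql = {
      source  = "cyrilgdn/postgresql"
      version = "~> 1.21"  # assumed version constraint
    }
  }
}
provider "postgresql" {
  host      = azurerm_postgresql_flexible_server.postgreSQL.fqdn
  port      = 5432
  database  = var.postgreSQLDB
  username  = var.postgreSQLAdmin
  password  = var.password
  sslmode   = "require"
  superuser = false  # the flexible-server admin account is not a true superuser
}
resource "postgresql_schema" "app" {
  name     = "app"  # hypothetical schema name
  database = azurerm_postgresql_flexible_server_database.postgreSQLDB.name
}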
Hello, I am trying to use Hangfire with MongoDB in an ASP.NET Core app.
I get the following exception in Startup.cs when launching my app:
Command listIndexes failed: ns does not exist: jobs.hangfire.mongo.jobParameter
I tried to create the collections manually using the prefix (jobParameter, statedata, ...), but then it tried to drop them and didn't succeed.
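To see what actually exists server-side before Hangfire runs its migration, a quick mongo shell sketch (assuming the jobs database and the hangfire.mongo prefix used in the code below):
use jobs
db.getCollectionNames().filter(function (n) { return n.indexOf("hangfire.mongo") === 0; })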
This is my code in Startup.cs:
var mongoUrlBuilder = new MongoUrlBuilder("mongodb://user:pwd@localhost:27017/jobs?authSource=admin");
var mongoClient = new MongoClient(mongoUrlBuilder.ToMongoUrl());
// Add Hangfire services. Hangfire.AspNetCore nuget required
services.AddHangfire(configuration => configuration
    .SetDataCompatibilityLevel(CompatibilityLevel.Version_170)
    .UseSimpleAssemblyNameTypeSerializer()
    .UseRecommendedSerializerSettings()
    .UseMongoStorage(mongoClient, mongoUrlBuilder.DatabaseName, new MongoStorageOptions
    {
        MigrationOptions = new MongoMigrationOptions
        {
            MigrationStrategy = new MigrateMongoMigrationStrategy(),
            BackupStrategy = new CollectionMongoBackupStrategy()
        },
        Prefix = "hangfire.mongo",
        CheckConnection = true
    })
);
// Add the processing server as IHostedService
services.AddHangfireServer(serverOptions =>
{
    serverOptions.ServerName = "Hangfire.Mongo server 1";
});
I am currently using:
Hangfire 1.7.27
Hangfire.Mongo 0.7.27
Mongo.Driver 2.14.0
Do you have any idea?
We have been trying to put a Cygnus container into production today and we haven't been able to connect it to MongoDB. In our case, we installed MongoDB with the Auth flag, and we created different users in order to test that everything works.
However, we couldn't find a way to connect Cygnus. It tries to connect to the sth_default database, but it requires enough privileges to create other databases.
The workaround was to start the MongoDB service without the Auth flag, which let us verify that everything works when the user has admin access without logging in. That is not the way we would like to work, since it is insecure.
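For reference, this is roughly the kind of account Cygnus would need when auth is enabled. A mongo shell sketch (the user name cygnus is hypothetical, and readWriteAnyDatabase is just one role that would let it create the sth_ databases on first write):
use admin
db.createUser({
  user: "cygnus",  // hypothetical account name
  pwd: "MyPassword",
  roles: [ { role: "readWriteAnyDatabase", db: "admin" } ]  // allows creating/writing the sth_* databases
})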
Are we missing anything?
Thanks in advance!
UPDATE
I'm adding the Cygnus agent.conf file here. I'm using the Docker image (docker-ngsi: https://hub.docker.com/r/fiware/cygnus-ngsi/) in its latest version.
cygnus-ngsi.sources = http-source
# Using both, Mongo and Postgres sinks
cygnus-ngsi.sinks = mongo-sink postgresql-sink
cygnus-ngsi.channels = mongo-channel postgresql-channel
cygnus-ngsi.sources.http-source.type = org.apache.flume.source.http.HTTPSource
cygnus-ngsi.sources.http-source.channels = mongo-channel postgresql-channel
cygnus-ngsi.sources.http-source.port = 5050
cygnus-ngsi.sources.http-source.handler = com.telefonica.iot.cygnus.handlers.NGSIRestHandler
cygnus-ngsi.sources.http-source.handler.notification_target = /notify
cygnus-ngsi.sources.http-source.handler.default_service = default
cygnus-ngsi.sources.http-source.handler.default_service_path = /
cygnus-ngsi.sources.http-source.interceptors = ts gi
cygnus-ngsi.sources.http-source.interceptors.ts.type = timestamp
cygnus-ngsi.sources.http-source.interceptors.gi.type = com.telefonica.iot.cygnus.interceptors.NGSIGroupingInterceptor$Builder
cygnus-ngsi.sources.http-source.interceptors.gi.grouping_rules_conf_file = /opt/apache-flume/conf/grouping_rules.conf
cygnus-ngsi.sinks.mongo-sink.type = com.telefonica.iot.cygnus.sinks.NGSIMongoSink
cygnus-ngsi.sinks.mongo-sink.channel = mongo-channel
#cygnus-ngsi.sinks.mongo-sink.enable_encoding = false
#cygnus-ngsi.sinks.mongo-sink.enable_grouping = false
#cygnus-ngsi.sinks.mongo-sink.enable_name_mappings = false
#cygnus-ngsi.sinks.mongo-sink.enable_lowercase = false
#cygnus-ngsi.sinks.mongo-sink.data_model = dm-by-entity
#cygnus-ngsi.sinks.mongo-sink.attr_persistence = row
cygnus-ngsi.sinks.mongo-sink.mongo_hosts = MyIP:MyPort
cygnus-ngsi.sinks.mongo-sink.mongo_username = MyUsername
cygnus-ngsi.sinks.mongo-sink.mongo_password = MyPassword
#cygnus-ngsi.sinks.mongo-sink.db_prefix = sth_
#cygnus-ngsi.sinks.mongo-sink.collection_prefix = sth_
#cygnus-ngsi.sinks.mongo-sink.batch_size = 1
#cygnus-ngsi.sinks.mongo-sink.batch_timeout = 30
#cygnus-ngsi.sinks.mongo-sink.batch_ttl = 10
#cygnus-ngsi.sinks.mongo-sink.data_expiration = 0
#cygnus-ngsi.sinks.mongo-sink.collections_size = 0
#cygnus-ngsi.sinks.mongo-sink.max_documents = 0
#cygnus-ngsi.sinks.mongo-sink.ignore_white_spaces = true
Thanks
The following configuration lines are missing:
cygnus-ngsi.sinks.mongo-sink.type = com.telefonica.iot.cygnus.sinks.NGSIMongoSink
cygnus-ngsi.sinks.mongo-sink.channel = mongo-channel
I.e. you have to specify the Java class implementing the MongoDB sink, and the channel that connects the source with such a sink.
If the configuration you are showing is the default one when Cygnus is installed through Docker, then the development team must be warned.
We have a project using Polymer as the front end and Node.js as the API consumed by Polymer, and our Node API takes a really long time to reply, especially if you leave the page alone for about 10 minutes. Upon further investigation, by inserting a date calculation around the MySQL query, I found that MySQL takes a really long time to respond. The code looks like this:
var query = dataStruct['formed_query'];
console.log(query);
var now = Date.now();
console.log("Getting Data for Foobar Query============ "+Date());
console.log(query);
GLOBAL.db_foobar.getConnection(function(err1, connection) {
    ////console.log("requesting MySQL connection");
    if (err1 == null) {
        connection.query(query, function(err, rows, fields) {
            console.log("response from MySQL Foobar Query============= " + Date());
            console.log("MySQL response Foobar Query=========> " + (Date.now() - now) + " ms");
            if (err == null) {
                //respond.respondJSON is just a res.json(msg); but I've added a similar calculation for response time starting from express router.route until res.json occurs
                respond.respondJSON(dataJSON['resVal'], res, req);
            } else {
                var msg = {
                    "status": "Error",
                    "desc": "[Foobar Query]Error Getting Connection",
                    "err": err,
                    "db_name": "common",
                    "query": query
                };
                respond.respondError(msg, res, req);
            }
            connection.release();
        });
    } else {
        var msg = {
            "status": "Error",
            "desc": "[Foobar Query]Error Getting Connection",
            "err": err1,
            "db_name": "common",
            "query": query
        };
        respond.respondJSON(msg, res, req);
        respond.emailError(msg);
        try {
            connection.release();
        } catch (err_release) {
            respond.LogInConsole(err_release);
            respond.LogInConsole(err_release.stack);
        }
    }
});
When Chrome Developer Tools reports a long "Pending" time for the API, this shows up in my log:
SELECT * FROM `foobar_table` LIMIT 0,20;
MySQL response Foobar Query=========> 10006 ms
I'm dumbfounded as to why this is happening.
Our system is hosted on Google Cloud. Our MySQL is a Google Cloud SQL instance with an activation policy of ALWAYS. We've also set our Node server, which runs on Google Compute Engine, to keep TCP4 connections alive via:
echo 'net.ipv4.tcp_keepalive_time = 60' | sudo tee -a /etc/sysctl.conf
sudo /sbin/sysctl --load=/etc/sysctl.conf
I'm using a mysql pool from node-mysql:
db_init.database = 'foobar_dbname';
db_init = ssl_set(db_init);
//GLOBAL.db_foobar = mysql.createConnection(db_init);
GLOBAL.db_foobar = mysql.createPool(db_init);
GLOBAL.db_foobar.on('connection', function (connection) {
    setTimeout(tryForceRelease, mysqlForceTimeOut, connection);
});
db_init looks like this:
db_init = {
host : 'ip_address_of_GCS_SQL',
user : 'user_name_of_GCS_SQL',
password : '',
database : '',
supportBigNumbers: true,
connectionLimit:100
};
I'm also forcing connections to be released if they haven't been released within 2 minutes, just to make sure:
function tryForceRelease(connection)
{
    try {
        //console.log("force releasing connection");
        connection.release();
    } catch (err) {
        //do nothing
        //console.log("connection already released");
    }
}
This is really racking my brain. If anyone can help, please do.
I'll post the same answer here as I posted in node-mysql pool experiences ETIMEDOUT.
The questions are sufficiently different that I'm not sure it's worth duping them.
I suspect the reason is that keepalive is not enabled on the connection to the MySQL server.
node-mysql does not have an option to enable keepalive and neither does node-mysql2, but node-mysql2 provides a way to supply a custom function for creating sockets which we can use to enable keepalive:
var mysql = require('mysql2');
var net = require('net');

var pool = mysql.createPool({
    connectionLimit : 100,
    host            : '123.123.123.123',
    user            : 'foo',
    password        : 'bar',
    database        : 'baz',
    // Supply our own socket factory so we can turn on TCP keepalive,
    // which neither node-mysql nor node-mysql2 exposes as a config option.
    stream          : function(opts) {
        var socket = net.connect(opts.config.port, opts.config.host);
        socket.setKeepAlive(true);
        return socket;
    }
});