We have a project using Polymer as the front end and Node.js as the API consumed by Polymer, and our Node API takes a really long time to respond, especially if you leave the page alone for about 10 minutes. Upon further investigation, by inserting a date calculation around the MySQL query, I found out that MySQL itself takes a really long time to respond. The query code looks like this:
var query = dataStruct['formed_query'];
console.log(query);
var now = Date.now();
console.log("Getting Data for Foobar Query============ " + Date());
console.log(query);
GLOBAL.db_foobar.getConnection(function(err1, connection) {
  ////console.log("requesting MySQL connection");
  if (err1 == null) {
    connection.query(query, function(err, rows, fields) {
      console.log("response from MySQL Foobar Query============= " + Date());
      console.log("MySQL response Foobar Query=========> " + (Date.now() - now) + " ms");
      if (err == null) {
        // respond.respondJSON is just a res.json(msg); I've added a similar calculation
        // for response time, starting from express router.route until res.json occurs
        respond.respondJSON(dataJSON['resVal'], res, req);
      } else {
        var msg = {
          "status": "Error",
          "desc": "[Foobar Query]Error Executing Query",
          "err": err,
          "db_name": "common",
          "query": query
        };
        respond.respondError(msg, res, req);
      }
      connection.release();
    });
  } else {
    var msg = {
      "status": "Error",
      "desc": "[Foobar Query]Error Getting Connection",
      "err": err1,
      "db_name": "common",
      "query": query
    };
    respond.respondJSON(msg, res, req);
    respond.emailError(msg);
    try {
      connection.release();
    } catch (err_release) {
      respond.LogInConsole(err_release);
      respond.LogInConsole(err_release.stack);
    }
  }
});
When Chrome Developer Tools reports a LONG PENDING time for the API, this is what shows up in my log:
SELECT * FROM `foobar_table` LIMIT 0,20;
MySQL response Foobar Query=========> 10006 ms
I'm dumbfounded as to why this is happening.
We have our system hosted on Google Cloud. Our MySQL is a Google Cloud SQL instance with an activation policy of ALWAYS. We've also configured our Node server, which runs on Google Compute Engine, to keep TCP4 connections alive via:
echo 'net.ipv4.tcp_keepalive_time = 60' | sudo tee -a /etc/sysctl.conf
sudo /sbin/sysctl --load=/etc/sysctl.conf
I'm using a mysql pool from node-mysql:
db_init.database = 'foobar_dbname';
db_init = ssl_set(db_init);
//GLOBAL.db_foobar = mysql.createConnection(db_init);
GLOBAL.db_foobar = mysql.createPool(db_init);
GLOBAL.db_foobar.on('connection', function (connection) {
  setTimeout(tryForceRelease, mysqlForceTimeOut, connection);
});
db_init looks like this:
db_init = {
  host : 'ip_address_of_GCS_SQL',
  user : 'user_name_of_GCS_SQL',
  password : '',
  database : '',
  supportBigNumbers: true,
  connectionLimit: 100
};
I'm also forcing connections to be released if they haven't been released within 2 minutes, just to make sure:
function tryForceRelease(connection)
{
  try {
    //console.log("force releasing connection");
    connection.release();
  } catch (err) {
    //do nothing
    //console.log("connection already released");
  }
}
This is really racking my brain. If anyone can help, please do.
I'll post the same answer here as I posted in node-mysql pool experiences ETIMEDOUT.
The questions are sufficiently different that I'm not sure it's worth duping them.
I suspect the reason is that keepalive is not enabled on the connection to the MySQL server.
node-mysql does not have an option to enable keepalive, and neither does node-mysql2, but node-mysql2 lets you supply a custom function for creating sockets, which we can use to enable keepalive:
var mysql = require('mysql2');
var net = require('net');

var pool = mysql.createPool({
  connectionLimit : 100,
  host            : '123.123.123.123',
  user            : 'foo',
  password        : 'bar',
  database        : 'baz',
  stream          : function(opts) {
    var socket = net.connect(opts.config.port, opts.config.host);
    socket.setKeepAlive(true);
    return socket;
  }
});
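For completeness, here is a minimal usage sketch (assuming the pool above and the foobar_table from the question). pool.query checks a connection out of the pool, runs the statement, and releases the connection back automatically:

pool.query('SELECT * FROM `foobar_table` LIMIT 0,20', function (err, rows, fields) {
  if (err) {
    console.error('query failed:', err);
    return;
  }
  console.log('got ' + rows.length + ' rows');
});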
I am trying to pre-bind an XMPP session via XMPPHP and pass the rid/sid/jid to a Strophe client to attach to the session.
Connection code here:
$conn = new CIRCUIT_BOSH('server.com', 7070, $username, $pass, $resource, 'server.com', $printlog=true, $loglevel=XMPPHP_Log::LEVEL_VERBOSE);
$conn->autoSubscribe();

try {
    $conn->connect('http://xmpp.server.com/http-bind', 1, true);
    $log->lwrite('Connected!');
} catch(XMPPHP_Exception $e) {
    die($e->getMessage());
}
I am getting the rid and sid, but the fulljid in the $conn object stays empty and I can't see a session started on my Openfire admin console.
If I create the jid manually by using the given resource and pass the jid/rid/sid to Strophe to use in attach, I get the ATTACHED status and I see calls from the client to the BOSH IP, but I still don't see a session and I can't use the connection.
Strophe Client Code:
Called on document ready:
var sid = $.cookie('sid');
var rid = $.cookie('rid');
var jid = $.cookie('jid');

$(document).trigger('attach', {
    sid: sid,
    rid: rid,
    jid: jid
});

$(document).bind('attach', function (ev, data) {
    var conn = new Strophe.Connection("http://xmpp.server.com/http-bind");
    conn.attach(data.jid, data.sid, data.rid, function (status) {
        if (status === Strophe.Status.CONNECTED) {
            $(document).trigger('connected');
        } else if (status === Strophe.Status.DISCONNECTED) {
            $(document).trigger('disconnected');
        } else if (status === Strophe.Status.ATTACHED) {
            $(document).trigger('attached');
        }
    });
    Object.connection = conn;
});
I think the problem starts on the XMPPHP side which is not creating the session properly.
'attached' is triggered, but never 'connected'. Is the 'connected' status supposed to be sent?
What am I missing?
OK, solved. I saw that the XMPPHP lib didn't create a session at all on the Openfire server, so I wrote a simple test for the XMPP class, which worked and created the session, and for the XMPP_BOSH class, which didn't manage to create one. Then I saw the issue report here: http://code.google.com/p/xmpphp/issues/detail?id=47. Comment no. 9 worked; it fixed the issue by copying the processUntil() function from XMLStream.php into BOSH.php. I still can't figure out why this works.
Then I found I also had an overlapping bug with some of the passwords set for users on the Openfire server. These passwords contained the characters ! # % ^, and for some reason XMPP_BOSH sends the password corrupted or changed, so I got an Auth Failed exception. Changing the password fixed that, and I can now attach to the session XMPPHP created with the Strophe.js library.
I'm using Resque/Redis on Heroku to send emails as a background job. It works fine for the first email I send, but after that I get the error (ActiveRecord::StatementInvalid: PG::Error: SSL error: decryption failed or bad record mac) ...
I've seen the other questions/answers that say to add to an initializer:
Resque.after_fork = Proc.new { ActiveRecord::Base.establish_connection }
OR
Resque.before_fork = Proc.new { ActiveRecord::Base.establish_connection }
However, I have done this (and tried everything else I can think of) and I'm still getting the same error. I have run the code with only before_fork and with only after_fork, to no avail.
I am also using apn_sender to send Apple push notifications. The workers for these have had no problems (but I'm using default Heroku workers for these rather than Resque workers).
Here are my relevant files; please help! This is my first SO question as well, so apologies if it's not done perfectly.
#config/resque.rb
after_fork do |server, worker, resque|
  logger.info("Got to after_fork in resque.rb config file")
  defined?(ActiveRecord::Base) and
    ActiveRecord::Base.establish_connection
end
--------------------
#config/initializers/resque.rb
require 'resque'
require 'resque/server'

heroku_environments = ["staging", "production"]
rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'
rails_env = ENV['RAILS_ENV'] || 'development'

#confusing wording.. determines whether or not we are in Production, tested and working
unless heroku_environments.include?(rails_env)
  resque_config = YAML.load_file(rails_root + '/config/resque.yml')
  Resque.redis = resque_config[rails_env]
else
  uri = URI.parse(ENV["REDISTOGO_URL"] || "redis://localhost:6379/")
  Resque.redis = Redis.new(:host => uri.host, :port => uri.port, :password => uri.password)
end
----------------------
#resque.rake
require 'resque/tasks'

task "resque:setup" => :environment do
  ENV['QUEUE'] = '*'
  Resque.after_fork = Proc.new { ActiveRecord::Base.establish_connection }
  Resque.before_fork = Proc.new { ActiveRecord::Base.establish_connection }
end

task "apn:setup" => :environment do
  ENV['QUEUE'] = '*'
end

desc "Alias for resque:work (To run workers on Heroku)"
task "jobs:work" => "resque:work"

desc "Alias for apn:work (To run workers on Heroku)"
task "jobs:work" => "apn:work"
-----------------------
#Sending the email
@event.guests.each do |g|
  Resque.enqueue(MailerCallback, "Notifier", :time_change, @event.id, g.id)
end
I am running one Heroku worker and one Resque worker via the heroku ps:scale command. As I said, the first email sends error-free, and then any email after that gets the above error.
Thanks in advance!
Mike
OK, so I couldn't figure this out...
I switched over to delayed_job and it's working much better. It's a little frustrating that I can't send push notifications with apn_sender anymore, but apn_on_rails is working fine.
Weird problem, still curious...
I'm trying to execute a simple PostgreSQL query with Erlang and epgsql.
I do:
{ok, C} = pgsql:connect("localhost", "shk", "qwerty", [{database, "mydb"}]).
>> {ok,<0.34.0>}
Then:
{ok, Cols, Rows} = pgsql:squery(C, "select * from users").
But I get an error:
=ERROR REPORT==== 27-Apr-2012::17:58:23 ===
** State machine <0.34.0> terminating
** Last message in was {'EXIT',<0.32.0>,
{{badmatch,
{error,
{error,'ð\236ð¨ð\230ð\221ð\232ð\220',<<"42P01">>,
<<208,190,209,130,208,189,208,190,209,136,208,181,
208,189,208,184,208,181,32,34,109,121,100,98,34,
32,208,189,208,181,32,209,129,209,131,209,137,
208,181,209,129,209,130,208,178,209,131,208,181,
209,130>>,
[{position,<<"15">>}]}}},
[{erl_eval,expr,3}]}}
** When State == ready
** Data == {state,undefined,<0.35.0>,5000,
[{<<"application_name">>,<<>>},
{<<"client_encoding">>,<<"UTF8">>},
{<<"DateStyle">>,<<"ISO, DMY">>},
{<<"integer_datetimes">>,<<"on">>},
{<<"IntervalStyle">>,<<"postgres">>},
{<<"is_superuser">>,<<"off">>},
{<<"server_encoding">>,<<"UTF8">>},
{<<"server_version">>,<<"9.0.7">>},
{<<"session_authorization">>,<<"shk">>},
{<<"standard_conforming_strings">>,<<"off">>},
{<<"TimeZone">>,<<"posixrules">>}],
undefined,undefined,undefined,
{30932,488494147},
{statement,undefined,undefined,undefined},
73}
** Reason for termination =
** {{badmatch,{error,{error,'ð\236ð¨ð\230ð\221ð\232ð\220',<<"42P01">>,
<<208,190,209,130,208,189,208,190,209,136,208,181,
208,189,208,184,208,181,32,34,109,121,100,98,34,
32,208,189,208,181,32,209,129,209,131,209,137,
208,181,209,129,209,130,208,178,209,131,208,181,
209,130>>,
[{position,<<"15">>}]}}},
[{erl_eval,expr,3}]}
What am I doing wrong? How can I fix it?
Thank you.
The error string seems to be in Russian, if I recognize the characters correctly.
To view the response you can use the following command:
io:format("~ts",[<<208,190,209,130,208,189,208,190,209,136,208,181,
208,189,208,184,208,181,32,34,109,121,100,98,34,
32,208,189,208,181,32,209,129,209,131,209,137,
208,181,209,129,209,130,208,178,209,131,208,181,
209,130>>]).
отношение "mydb" не существует (relation "mydb" does not exist)
A quick Google Translate makes me think the relation mydb does not exist or you do not have permission to use it.
Try simply doing
Response = pgsql:squery(C, "select * from mydb"),
io:format("~p~n",[Response]).
and see what it gives back from the server; maybe you have a typo or the table doesn't exist, etc.
Also check this out: http://www.erlangatwork.com/2009/01/erlang-and-postgresql.html
From epgsql docs:
Errors
Errors originating from the PostgreSQL backend are returned as {error, #error{}},
see pgsql.hrl for the record definition. epgsql functions may also return
{error, What} where What is one of the following:
{unsupported_auth_method, Method} - required auth method is unsupported
timeout - request timed out
closed - connection was closed
sync_required - error occured and pgsql:sync must be called
Try to include pgsql.hrl, capture the error, and print the error message; that should point you in the right direction.
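For example, here is a minimal sketch (assuming the connection C from the question) that matches both result shapes instead of crashing with a badmatch:

case pgsql:squery(C, "select * from users") of
    {ok, Columns, Rows} ->
        io:format("got ~p rows~n", [length(Rows)]);
    {error, Reason} ->
        %% Reason is either an #error{} record from the backend or one of
        %% the values listed above; printing it shows the real cause.
        io:format("query failed: ~p~n", [Reason])
end.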