Accessing config variables from other config files - sails.js

I'm having problems using, in one config file, a config variable set in another config file. E.g.
// file - config/local.js
module.exports = {
  mongo_db: {
    username: 'TheUsername',
    password: 'ThePassword',
    database: 'TheDatabase'
  }
}
// file - config/connections.js
module.exports.connections = {
  mongo_db: {
    adapter: 'sails-mongo',
    host: 'localhost',
    port: 27017,
    user: sails.config.mongo_db.username,
    password: sails.config.mongo_db.password,
    database: sails.config.mongo_db.database
  },
}
When I 'sails lift', I get the following error:
user: sails.config.mongo_db.username,
      ^
ReferenceError: sails is not defined
I can access the config variables in other places - e.g, this works:
// file - config/bootstrap.js
module.exports.bootstrap = function(cb) {
  console.log('Dumping config: ', sails.config);
  cb();
}
This dumps all the config settings to the console - I can even see the config settings for mongo_db in there!
I'm so confused.

You can't access sails inside config files, since the Sails config is still being loaded when those files are processed! In bootstrap.js you can access the config inside the bootstrap function, since that function gets called after Sails has loaded, but not above the function.
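To make the distinction concrete, here is a sketch of the failing vs. working placement in config/bootstrap.js (per the explanation above; the top-level line is commented out because it would run while Sails is still loading):

// file - config/bootstrap.js
// console.log(sails.config); // would fail: runs at load time, before sails exists
module.exports.bootstrap = function(cb) {
  console.log(sails.config); // works: runs after Sails has loaded
  cb();
};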
In any case, config/local.js gets merged on top of all the other config files, so you can get what you want this way:
// file - config/local.js
module.exports = {
  connections: {
    mongo_db: {
      username: 'TheUsername',
      password: 'ThePassword',
      database: 'TheDatabase'
    }
  }
}
// file - config/connections.js
module.exports.connections = {
  mongo_db: {
    adapter: 'sails-mongo',
    host: 'localhost',
    port: 27017
  },
}
If you really need to access one config file from another you can always use require, but it's not recommended: since Sails merges config files together based on several factors (including the current environment), it's possible you'd be reading some invalid options. It's best to do things the intended way: use config/env/* files for environment-specific settings (e.g. config/env/production.js), config/local.js for settings specific to a single system (like your computer), and the rest of the files for shared settings.
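For example, a minimal config/env/production.js that overrides the connection for production could look like this (a sketch; the host and credentials are placeholders, not values from the question):

// file - config/env/production.js
// Merged into sails.config only when the environment is production
module.exports = {
  connections: {
    mongo_db: {
      host: 'mongo.example.com',      // placeholder
      username: 'ProductionUsername', // placeholder
      password: 'ProductionPassword', // placeholder
      database: 'ProductionDatabase'  // placeholder
    }
  }
}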

Related

Gradle Liquibase SSH tunnel creating to Aurora Postgresql

I need your help: I need to connect to AWS Aurora PostgreSQL using Liquibase. It's already configured for the local machine and works fine, but I have issues with the SSH configuration to it.
I'm using id 'org.hidetake.ssh' version '2.10.1' and id 'org.liquibase.gradle' version '2.0.4'.
I'm able to run commands directly on the host machine, like getting the date with execute('date') below, but I have no idea why Liquibase fails with:
Unexpected error running Liquibase: liquibase.exception.DatabaseException: liquibase.exception.DatabaseException: Connection could not be created to jdbc:postgresql://xxxx.rds.amazonaws.com:5432/postgres with driver org.postgresql.Driver. The connection attempt failed.
Here are my build.gradle settings:
ssh.settings {
  knownHosts = allowAnyHosts
  logging = 'stdout'
  identity = file("${System.properties['user.home']}/myfolder/.ssh/id_rsa")
}
remotes {
  dev {
    host = 'xxx.xxx.xxx.xxx'
    port = 22
    user = 'ec2-user'
    identity = file("${System.properties['user.home']}/myfolder/.ssh/id_rsa")
  }
}
ssh.run {
  session(remotes.dev) {
    forwardLocalPort port: 5432, hostPort: 5432
    execute('date')
    liquibase {
      activities {
        main {
          //changeLogFile changeLog
          url 'jdbc:postgresql://xxxx.rds.amazonaws.com:5432/postgres'
          username feedSqlUserDev
          password feedSqlUserPasswordDev
          logLevel 'debug'
        }
      }
    }
  }
}
Could you please help me with it, what am I doing wrong?
I also had to connect to an SSH bastion host before running Liquibase updates. My solution is based on the answer by the plugin author in https://github.com/int128/gradle-ssh-plugin/issues/246.
Here is my setup:
// java.util.concurrent is not among Groovy's default imports
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit

ssh.settings {
  knownHosts = allowAnyHosts
  logging = 'stdout'
  identity = file("${System.properties['user.home']}/.ssh/id_rsa")
}
remotes {
  bastion {
    host = '<hostname>'
    user = '<username>'
  }
}
liquibase {
  activities {
    main {
      changeLogFile '...'
      url 'jdbc:postgresql://localhost:5438/***'
      username '***'
      password '***'
      driver 'org.postgresql.Driver'
    }
  }
}
task('sshTunnelStart') {
  doFirst {
    project.ext.ready = new CountDownLatch(1)
    project.ext.done = new CountDownLatch(1)
    Thread.start {
      ssh.run {
        session(remotes.bastion) {
          forwardLocalPort port: 5438,
                           host: '<real db hostname>',
                           hostPort: 5432
          project.ready.countDown()
          project.done.await(5, TimeUnit.MINUTES) // liquibase update timeout
        }
      }
    }
    ready.await(10, TimeUnit.SECONDS) // start tunnel timeout
  }
}
task('sshTunnelStop') {
  doLast {
    // teardown tunnel
    project.done.countDown()
  }
}
update.dependsOn(sshTunnelStart)
update.finalizedBy(sshTunnelStop)
Note that in the liquibase config I use localhost:5438, since that is the local port forwarded to the remote. The same port is later used in forwardLocalPort as the 'port' parameter; the 'host' parameter is set to the remote database host, and 'hostPort' is accordingly the database port. The last part of the config adds dependencies between the tasks, so that liquibase update starts the tunnel first and stops it afterwards.

typeorm:migration create on New Project Does Not Recognize Entities - "No changes in database schema were found - cannot generate a migration."

I'm having trouble creating the initial migration for a nestjs-typeorm-mongo project.
I have cloned this sample project from nestjs that uses typeorm with mongodb. The project does work: when I run it locally, after putting a "Photo" document into my local mongo (db named "test", collection "photos"), I can call localhost:3000/photo and receive the photo documents.
Now I'm trying to create migrations with the typeorm cli using this command:
./node_modules/.bin/ts-node ./node_modules/typeorm/cli.js migration:generate -n initial
...but it's not working. I am not able to create an initial migration; even after setting "synchronize: false" in my app.module.ts file I always get the error:
No changes in database schema were found - cannot generate a migration. To create a new empty migration use "typeorm migration:create" command
when trying to generate a migration... 🤔
Other than changing synchronize to false, the only other change I made was adding an ormconfig.json file in the project root by running typeorm init --database mongodb:
{
  "type": "mongodb",
  "database": "test",
  "synchronize": true,
  "logging": false,
  "entities": [
    "src/**/*.entity.ts"
  ],
  "migrations": [
    "src/migration/**/*.ts"
  ],
  "subscribers": [
    "src/subscriber/**/*.ts"
  ],
  "cli": {
    "entitiesDir": "src",
    "migrationsDir": "src/migration",
    "subscribersDir": "src/subscriber"
  }
}
Since you are using MongoDB, you don't have tables and have no need to create your collections ahead of time: MongoDB schemas are essentially created on the fly!
Under the hood, if the driver is MongoDB, the typeorm migration:create command is bypassed, so it is useless in this case. You can check PR #3304 and Issue #2867 yourself.
However, there is an alternative called migrate-mongo, which provides an incremental, reversible, and version-controlled way to apply schema and data changes. It's well documented and actively developed.
migrate-mongo example
Run npm install -g migrate-mongo to install it.
Run migrate-mongo init to initialize the migrations tool. This will create a migrate-mongo-config.js configuration file and a migrations folder at the root of your project:
|_ src/
|_ migrations/
|  |- 20200606204524-migration-1.js
|  |- 20200608124524-migration-2.js
|  |- 20200808114324-migration-3.js
|- migrate-mongo-config.js
|- package.json
|- package-lock.json
Your migrate-mongo-config.js configuration file may look like:
// In this file you can configure migrate-mongo
const env = require('./server/config');

const config = {
  mongodb: {
    // TODO Change (or review) the url to your MongoDB:
    url: env.mongo.url || "mongodb://localhost:27017",
    // TODO Change this to your database name:
    databaseName: env.mongo.dbname || "YOURDATABASENAME",
    options: {
      useNewUrlParser: true,    // removes a deprecation warning when connecting
      useUnifiedTopology: true, // removes a deprecation warning when connecting
      // connectTimeoutMS: 3600000, // increase connection timeout up to 1 hour
      // socketTimeoutMS: 3600000,  // increase socket timeout up to 1 hour
    }
  },
  // The migrations dir can be a relative or absolute path. Only edit this when really necessary.
  migrationsDir: "migrations",
  // The MongoDB collection where the applied changes are stored. Only edit this when really necessary.
  changelogCollectionName: "changelog"
};

module.exports = config;
Run migrate-mongo create name-of-my-script to add a new migration script. A new file will be created with a corresponding timestamp.
/*
|_ migrations/
   |- 20210108114324-name-of-my-script.js
*/
module.exports = {
  up(db) {
    return db.collection('products').updateMany({}, { $set: { quantity: 10 } });
  },
  down(db) {
    return db.collection('products').updateMany({}, { $unset: { quantity: null } });
  }
};
The database changelog: in order to know the current database version and which migration should be applied next, there is a special collection that stores the database changelog, with information such as which migrations were applied and when they were applied.
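For illustration, an entry in that changelog collection looks roughly like this (field names as documented in the migrate-mongo README; the id and timestamp are placeholders):

{
  "_id": ObjectId("..."),
  "fileName": "20210108114324-name-of-my-script.js",
  "appliedAt": ISODate("2021-01-08T11:45:00.000Z")
}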
To run your migrations, simply run the command: migrate-mongo up
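If you prefer to trigger migrations from code (for example on application startup) rather than from the CLI, migrate-mongo also exposes a programmatic API. A minimal sketch, assuming a recent migrate-mongo version in which the MongoClient is passed alongside the db handle:

// run-migrations.js - a sketch using migrate-mongo's programmatic API
const { database, up } = require('migrate-mongo');

async function runMigrations() {
  // Connects using the settings from migrate-mongo-config.js
  const { db, client } = await database.connect();
  const migratedFiles = await up(db, client); // applies all pending migrations
  migratedFiles.forEach(file => console.log('Migrated:', file));
  await client.close();
}

runMigrations().catch(err => {
  console.error(err);
  process.exit(1);
});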
You can find a full example in the article MongoDB Schema Migrations in Node.js.

how to avoid having mongodb as default datasource when working with multiple datasources in grails 3

I have my application.groovy set up as:
environments {
  development {
    mongo {
      host = 'localhost'
      port = 27107
      username = dbusername
      password = dbpassword
      databaseName = dbname
    }
    dataSources {
      dataSource {
        pooled = true
        jmxExport = true
        driverClassName = 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
        dbCreate = ''
        username = dbusername
        password = dbpassword
        // double quotes needed here for Groovy string interpolation
        url = "jdbc:sqlserver://${dbserver}:${dbport};databaseName=${dbname}"
      }
    }
  }
}
But now it seems like all of my domains' data sources point to the mongodb, so I can no longer query the domains that are linked to the MSSQL db. How can I avoid this?
A secondary question, though not as important: the mongodb plugin documentation says to put the connection config within environments -> development. I wonder why we can't put it within dataSources so it's much neater (in a domain I could then just point to the dataSource). I tried to move the config within dataSources and it didn't work!
In the debugger, if I run MyDomain.list() I get:
result = {MongoQuery$MongoResultList#12334} size = 0
Any help will be much appreciated, thanks in advance
Dee
I was trying to use the "mongodb" plugin; I am not sure if it's supported in Grails 3. I have things working with gmongo. I added these two dependencies in my build.gradle:
compile "org.mongodb:mongo-java-driver:3.0.2"
compile "com.gmongo:gmongo:1.5"
and removed this mongo config block:
environments {
  development {
    mongo {
      host = 'localhost'
      port = 27107
      username = dbusername
      password = dbpassword
      databaseName = dbname
    }
    ....
  }
}
gmongo seems to pick up the default database credentials. This is how I created a db instance to work off of it:
def mongo = new GMongo()
def db = mongo.getDB("dbName")
Hope this helps someone facing similar problem.

Fiware Orion - pepProxy

I'm part of a team that is developing an application that uses the Fiware GEs as part of the Smart-AgriFood accelerator.
We are using the Orion Context Broker for gathering the data provided by the sensor network, and we intend to use the PEP Proxy to authenticate the sensor nodes for access to the Orion instance. We have tried the following PEP proxies:
https://github.com/telefonicaid/fiware-orion-pep
https://github.com/ging/fi-ware-pep-proxy
We have only had success with the second (fi-ware-pep-proxy) implementation of the proxy. With fiware-orion-pep we haven't been able to connect to the global Keystone instance (account.lab.fi-ware.org); we have tried both account.lab... and cloud.lab... My questions are:
1) Is the Keystone (IDM) instance for authentication account.lab or cloud.lab, and which ports or addresses should be used?
2) Is fiware-orion-pep prepared to authenticate at account.lab.fi-ware.org? Here is why I ask:
This payload works with the curl command at cloud.lab.fiware.org:4730/v2.0/tokens:
{
  "auth": {
    "passwordCredentials": {
      "username": "<my_user>",
      "password": "<my_password>"
    }
  }
}
This one doesn't work with the curl command at account.lab.fi-ware.org:5000/v3/auth/tokens:
{
  "auth": {
    "identity": {
      "methods": [
        "password"
      ],
      "password": {
        "user": {
          "domain": {
            "name": "<my_domain>"
          },
          "name": "<my_user>",
          "password": "<my_password>"
        }
      }
    }
  }
}
3) Which implementation should I be using to authenticate the devices or other calls to the Orion instance?
Here is the configuration that I used:
fiware-orion-pep
config.authentication = {
  checkHeaders: true,
  module: 'keystone',
  user: '<my_user>',
  password: '<my_password>',
  domainName: '<my_domain>',
  retries: 3,
  cacheTTLs: {
    users: 1000,
    projectIds: 1000,
    roles: 60
  },
  options: {
    protocol: 'http',
    host: 'account.lab.fiware.org',
    port: 5000,
    path: '/v3/role_assignments',
    authPath: '/v3/auth/tokens'
  }
};
fi-ware-pep-proxy (this one works); I have set the listening port to 1026 in the source code:
var config = {};
config.account_host = 'https://account.lab.fiware.org';
config.keystone_host = 'cloud.lab.fiware.org';
config.keystone_port = 4731;
config.app_host = 'localhost';
config.app_port = '10026';
config.username = 'pepProxy';
config.password = 'pepProxy';
// in seconds
config.chache_time = 300;
config.check_permissions = false;
config.magic_key = undefined;
module.exports = config;
Thanks in advance for the time ... :)
There are currently some differences in how the two PEP proxies authenticate and validate against the global instances, so they do not behave in exactly the same way.
The one in telefonicaid/fiware-orion-pep was developed to fulfill the PEP Proxy requirements (authentication and validation against a Keystone and Access Control) in individual projects with their own Keystone and Keypass (a flavour of Access Control) installations, and so it evolved faster than the one in ging/fi-ware-pep-proxy and in a slightly different direction. As an example, the former supports multitenancy using the fiware-service and fiware-servicepath headers, while the latter is transparent to those mechanisms. This development direction meant also that the functionality slightly differs from time to time from the one in the global instance.
That being said, the concrete answers:
- Both PEP proxies should be able to contact the global instance. If one doesn't, please file a bug in the issues of the GitHub repository and we will fix it as soon as possible.
- The ging/fi-ware-pep-proxy was specifically designed for accessing the global instance, so you should be able to use it as expected.
If you try to proceed with telefonicaid/fiware-orion-pep, please also take note that:
- the configuration flag authentication.checkHeaders should be false, as the global instance does not currently support multitenancy (see the sketch below);
- the current stable release (0.5.0) is about to change to the next version (probably today), so maybe some of the problems will be solved by the update.
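A minimal sketch of the corresponding fragment of config.js under that advice (only the flag discussed above changes; everything else stays as in your existing configuration):

// config.js (fragment)
config.authentication = {
  checkHeaders: false, // the global instance does not support multitenancy
  module: 'keystone',
  // ... user, password, domainName, retries, cacheTTLs and options unchanged
};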
Hope this clarifies some of your doubts.
[EDIT]
1) I have already installed telefonicaid/fiware-orion-pep (v0.6.0) from sources and from the RPM package created following the tutorial available on GitHub. When creating the RPM package, it is created with the following name: pep-proxy-0.4.0_next-0.noarch.rpm.
2) Here is the configuration that I used:
/opt/fiware-orion-pep/config.js
var config = {};
config.resource = {
  original: {
    host: 'localhost',
    port: 10026
  },
  proxy: {
    port: 1026,
    adminPort: 11211
  }
};
config.authentication = {
  checkHeaders: false,
  module: 'keystone',
  user: '<##################>',
  password: '<###################>',
  domainName: 'admin_domain',
  retries: 3,
  cacheTTLs: {
    users: 1000,
    projectIds: 1000,
    roles: 60
  },
  options: {
    protocol: 'http',
    host: 'cloud.lab.fiware.org',
    port: 4730,
    path: '/v3/role_assignments',
    authPath: '/v3/auth/tokens'
  }
};
config.ssl = {
  active: false,
  keyFile: '',
  certFile: ''
};
config.logLevel = 'DEBUG';
config.middlewares = {
  require: 'lib/plugins/orionPlugin',
  functions: [
    'extractCBAction'
  ]
};
config.componentName = 'orion';
config.resourceNamePrefix = 'fiware:';
config.bypass = false;
config.bypassRoleId = '';
module.exports = config;
/etc/sysconfig/pepProxy
# General Configuration
############################################################################
# Port where the proxy will listen for requests
PROXY_PORT=1026
# User to execute the PEP Proxy with
PROXY_USER=pepproxy
# Host where the target Context Broker is located
# TARGET_HOST=localhost
# Port where the target Context Broker is listening
# TARGET_PORT=10026
# Maximum level of logs to show (FATAL, ERROR, WARNING, INFO, DEBUG)
LOG_LEVEL=DEBUG
# Indicates what component plugin should be loaded with this PEP: orion, keypass, perseo
COMPONENT_PLUGIN=orion
#
# Access Control Configuration
############################################################################
# Host where the Access Control (the component who knows the policies for the incoming requests) is located
# ACCESS_HOST=
# Port where the Access Control is listening
# ACCESS_PORT=
# Host where the authentication authority for the Access Control is located
# AUTHENTICATION_HOST=
# Port where the authentication authority is listening
# AUTHENTICATION_PORT=
# User name of the PEP Proxy in the authentication authority
PROXY_USERNAME=XXXXXXXXXXXXX
# Password of the PEP Proxy in the Authentication authority
PROXY_PASSWORD=XXXXXXXXXXXXX
In the files above I have tried the following parameters:
Keystone instance: account.lab.fiware.org or cloud.lab.fiware.org
User: pep or pepProxy or "user from fiware account"
Pass: pep or pepProxy or "user password from account"
Port: 4730, 4731, 5000
The result is the same as before... telefonicaid/fiware-orion-pep is unable to authenticate:
Log file at /var/log/pepProxy/pepProxy:
time=2015-04-13T14:49:24.718Z | lvl=ERROR | corr=71a34c8b-10b3-40a3-be85-71bd3ce34c8a | trans=71a34c8b-10b3-40a3-be85-71bd3ce34c8a | op=/v1/updateContext | msg=VALIDATION-GEN-003] Error connecting to Keystone authentication: KEYSTONE_AUTHENTICATION_ERROR: There was a connection error while authenticating to Keystone: 500
time=2015-04-13T14:49:24.721Z | lvl=DEBUG | corr=71a34c8b-10b3-40a3-be85-71bd3ce34c8a | trans=71a34c8b-10b3-40a3-be85-71bd3ce34c8a | op=/v1/updateContext | msg=response-time: 50745 statusCode: 500
Result from the client console:
{
  "message": "There was a connection error while authenticating to Keystone: 500",
  "name": "KEYSTONE_AUTHENTICATION_ERROR"
}
Am I doing something wrong here?

grunt-sftp-deploy unable to connect to server

I am a noob to grunt and would like to start using it.
Here is my gruntfile:
module.exports = function(grunt) {
  // Project configuration.
  grunt.initConfig({
    pkg: grunt.file.readJSON('package.json'),
    devDir: 'dev/dir',
    prodDir: 'prod/dir',
    'sftp-deploy': {
      prod: {
        auth: {
          host: 'server.com',
          port: 22,
          authKey: {
            "username": "username1",
            "password": "password2"
          }
        },
        src: '<%=devDir%>',
        dest: '/test/env/',
        concurrency: 4,
        progress: true
      }
    }
  });

  // load modules
  grunt.loadNpmTasks('grunt-sftp-deploy');

  // Default task(s).
  grunt.registerTask('default', ['sftp-deploy']);
};
I am getting this error when I run 'grunt' in PowerShell:
Running "sftp-deploy:prod" (sftp-deploy) task
Logging in with username username1
Concurrency : 4
Fatal error: Connection :: error
What am I doing wrong?
thanks!
Ok, a few things to try... (sorry - a month late!)
run:
grunt sftp-deploy --verbose
This will give you a little more info regarding your error.
I solved my error after realising I couldn't create folders on my server, only upload files. So it might be worth testing that you can accomplish manually what you're asking grunt to do.
Lastly, try moving your username/password into a .ftppass file; see the package page: https://www.npmjs.com/package/grunt-sftp-deploy
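For reference, .ftppass is a JSON file in the project root whose entries the Gruntfile references by key. A minimal sketch based on the grunt-sftp-deploy README ('key1' is an arbitrary key name; the credentials are the placeholders from the question). In .ftppass:

{
  "key1": {
    "username": "username1",
    "password": "password2"
  }
}

and the auth block in the Gruntfile then becomes:

auth: {
  host: 'server.com',
  port: 22,
  authKey: 'key1'
}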