How to get data from Amazon RDS using a Lambda function in Node.js?

I have a database in Aurora PostgreSQL and I'm using API Gateway to invoke the Lambda functions written in Node.js for the APIs. Here is my code for a simple GET request with no URL parameters:
var pg = require("pg");

exports.handler = function(event, context) {
    var conn = "//Connection string";
    var client = new pg.Client(conn);
    client.connect();
    //var id = event.id;
    console.log('Connected to PostgreSQL database');
    var query = client.query("SELECT * from USERS;");
    query.on("row", function (row, result) {
        result.addRow(row);
    });
    query.on("end", function (result) {
        var jsonString = JSON.stringify(result.rows);
        var jsonObj = JSON.parse(jsonString);
        console.log(jsonString);
        client.end();
        context.succeed(jsonObj);
    });
};
I am able to get all the records from this table successfully. What changes must I make to the code, and to the API Gateway itself, to support a GET request that takes a username parameter for a WHERE clause to select a specific user, and a POST request that inserts new users into the table?

With Lambda proxy integration, all the GET and POST parameters submitted to the API Gateway are available in the event object. You therefore have to read the values for the WHERE clause and the INSERT from the event.
The event structure is described in:
Input format of a Lambda function for proxy integration
You will also need to return data from the Lambda in the proper format:
Output format of a Lambda function for proxy integration
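To make both requests concrete, here is a minimal sketch of a proxy-integration handler. The users table comes from the question; the email column, and the use of node-postgres parameterized queries, are assumptions:

const { Client } = require("pg");

exports.handler = async (event) => {
    const client = new Client("//Connection string"); // same placeholder as above
    await client.connect();
    try {
        if (event.httpMethod === "GET") {
            // e.g. GET /users?username=alice
            const username = (event.queryStringParameters || {}).username;
            // parameterized query ($1) avoids SQL injection
            const result = await client.query(
                "SELECT * FROM users WHERE username = $1", [username]);
            return { statusCode: 200, body: JSON.stringify(result.rows) };
        }
        if (event.httpMethod === "POST") {
            // e.g. POST /users with body {"username": "alice", "email": "..."}
            const user = JSON.parse(event.body);
            await client.query(
                "INSERT INTO users (username, email) VALUES ($1, $2)",
                [user.username, user.email]);
            return { statusCode: 201, body: JSON.stringify({ created: user.username }) };
        }
        return { statusCode: 405, body: "Method not allowed" };
    } finally {
        await client.end();
    }
};

With proxy integration you don't have to map parameters in API Gateway itself: create the GET and POST methods on the resource with "Use Lambda Proxy integration" checked, and the query string and request body arrive in the event automatically.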

You can code your Lambda function to connect to an RDS database. When you do this, there are some things to consider.
You need to configure your Lambda function to use the same VPC that RDS runs in, with a security group that can reach the database. This is discussed here: https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html.
Next, from your Lambda function, you still have to open a connection to the database. How this is done depends on the programming language you are using. For example, if you are using the Lambda Java runtime API, you can use a JDBC connection. See below.
Once you do this, you can connect to RDS and perform SQL statements. This makes it possible to write Lambda functions that query data from RDS and then use those functions within a larger cloud-based workflow using AWS Step Functions.
Code:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class ConnectionHelper {

    private String url;
    private static ConnectionHelper instance;

    private ConnectionHelper() {
        url = "jdbc:mysql://formit.xxxxxxshym6k.us-west-2.rds.amazonaws.com:3306/mydb?useSSL=false";
    }

    public static Connection getConnection() throws SQLException {
        if (instance == null) {
            instance = new ConnectionHelper();
        }
        try {
            Class.forName("com.mysql.jdbc.Driver").newInstance();
            return DriverManager.getConnection(instance.url, "root", "root1234");
        } catch (SQLException | ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            e.printStackTrace();
        }
        return null;
    }

    public static void close(Connection connection) {
        try {
            if (connection != null) {
                connection.close();
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}

Accessing table data from RDS using a Lambda function with a KMS-encrypted key and environment variables
Step 1: Create and enable a key in KMS (Key Management Service).
Review your key policy, and the KMS key creation is done. An example key policy:
{
    "Id": "key-consolepolicy-3",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "Enable IAM User Permissions",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::163806924483:root"
            },
            "Action": "kms:*",
            "Resource": "*"
        },
        {
            "Sid": "Allow access for Key Administrators",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::163806924483:user/User1@gmail.com"
            },
            "Action": [
                "kms:Create*",
                "kms:Describe*",
                "kms:Enable*",
                "kms:List*",
                "kms:Put*",
                "kms:Update*",
                "kms:Revoke*",
                "kms:Disable*",
                "kms:Get*",
                "kms:Delete*",
                "kms:TagResource",
                "kms:UntagResource",
                "kms:ScheduleKeyDeletion",
                "kms:CancelKeyDeletion"
            ],
            "Resource": "*"
        },
        {
            "Sid": "Allow use of the key",
            "Effect": "Allow",
            "Principal": {
                "AWS": [
                    "arn:aws:iam::163806924483:user/User1@gmail.com",
                    "arn:aws:iam::163806924483:user/User2@gmail.com",
                    "arn:aws:iam::163806924483:user/User3@gmail.com"
                ]
            },
            "Action": [
                "kms:Encrypt",
                "kms:Decrypt",
                "kms:ReEncrypt*",
                "kms:GenerateDataKey*",
                "kms:DescribeKey"
            ],
            "Resource": "*"
        },
        {
            "Sid": "Allow attachment of persistent resources",
            "Effect": "Allow",
            "Principal": {
                "AWS": [
                    "arn:aws:iam::163806924483:user/User1.dilip@gmail.com",
                    "arn:aws:iam::163806924483:user/User2@gmail.com",
                    "arn:aws:iam::163806924483:user/User3@gmail.com"
                ]
            },
            "Action": [
                "kms:CreateGrant",
                "kms:ListGrants",
                "kms:RevokeGrant"
            ],
            "Resource": "*",
            "Condition": {
                "Bool": {
                    "kms:GrantIsForAWSResource": "true"
                }
            }
        }
    ]
}
Step 2: Create an IAM policy for KMS and attach it to each of your Lambda functions. The policy condition restricts decryption to the named functions:
"StringEquals": {
"kms:EncryptionContext:LambdaFunctionName": [
"LambdaFunction-1",
"LambdaFunction-2",
"LambdaFunction-3"
]
}
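For orientation, a sketch of the complete IAM policy statement that condition belongs to (the key ARN is a placeholder, not from the original post):

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "kms:Decrypt",
            "Resource": "arn:aws:kms:us-east-1:123456789012:key/your-key-id",
            "Condition": {
                "StringEquals": {
                    "kms:EncryptionContext:LambdaFunctionName": [
                        "LambdaFunction-1",
                        "LambdaFunction-2",
                        "LambdaFunction-3"
                    ]
                }
            }
        }
    ]
}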
Step 3: Attach the policy created in Step 2 to your default Lambda role (the first Lambda function has to be created before the default Lambda role exists).
Step 4: Create the Lambda function.
Node.js code for the Lambda function:
const mysql = require('mysql');
const aws = require("aws-sdk");

const functionName = process.env.AWS_LAMBDA_FUNCTION_NAME;
let res;
let response = {};

exports.handler = async (event) => {
    reset_globals();
    // load and decrypt the credentials from environment variables
    const rds_user = await kms_decrypt(process.env.RDS_USERNAME);
    const rds_pwd = await kms_decrypt(process.env.RDS_PASSWORD);
    // set up the RDS connection (createConnection is synchronous)
    var db_connection = mysql.createConnection({
        host: process.env.RDS_HOSTNAME,
        user: rds_user,
        password: rds_pwd,
        port: process.env.RDS_PORT,
        database: process.env.RDS_DATABASE
    });
    var sqlQuery = `SELECT doc_id from documents`;
    await getValues(db_connection, sqlQuery);
    db_connection.end();
    return response;
};

async function getValues(db_connection, sql) {
    await new Promise((resolve, reject) => {
        db_connection.query(sql, function (err, result) {
            if (err) {
                response = { statusCode: 500, body: { message: "Database Connection Failed", error: err } };
                console.log(response);
                resolve();
            } else {
                console.log("Records retrieved: " + JSON.stringify(result));
                res = result;
                response = { statusCode: 200, body: JSON.stringify(result) };
                resolve();
            }
        });
    });
}

async function kms_decrypt(encrypted) {
    const kms = new aws.KMS();
    const req = {
        CiphertextBlob: Buffer.from(encrypted, 'base64'),
        EncryptionContext: { LambdaFunctionName: functionName }
    };
    const decrypted = await kms.decrypt(req).promise();
    let cred = decrypted.Plaintext.toString('ascii');
    return cred;
}

function reset_globals() {
    res = undefined;
    response = {};
}
Now you should see KMS available to the Lambda function.
Step 5: Set the environment variables and encrypt them.
Lambda -> Functions -> Configuration -> Environment variables -> Edit
RDS_DATABASE = docrds
RDS_HOSTNAME = docrds-library.c1k3kcldebmp.us-east-1.rds.amazonaws.com
RDS_PASSWORD = root123
RDS_PORT     = 3306
RDS_USERNAME = admin
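The console's "Encrypt" helper produces the ciphertext for you, but if you want to generate it from code, here is a sketch that mirrors the decryption helper's EncryptionContext (the key alias is an assumption; this is a hypothetical one-off script, not part of the original post):

const aws = require("aws-sdk");

// Encrypts a secret so it can be pasted into an environment variable
// and later decrypted by kms_decrypt() above.
async function kms_encrypt(plaintext, lambdaFunctionName) {
    const kms = new aws.KMS();
    const req = {
        KeyId: "alias/my-lambda-key",  // assumption: your key's alias
        Plaintext: plaintext,
        EncryptionContext: { LambdaFunctionName: lambdaFunctionName }
    };
    const encrypted = await kms.encrypt(req).promise();
    // base64-encode the ciphertext for storage in an environment variable
    return encrypted.CiphertextBlob.toString('base64');
}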
In the Lambda function, use the code below to decrypt the encrypted environment variables:
async function kms_decrypt(encrypted) {
    const kms = new aws.KMS();
    const req = {
        CiphertextBlob: Buffer.from(encrypted, 'base64'),
        EncryptionContext: { LambdaFunctionName: functionName }
    };
    const decrypted = await kms.decrypt(req).promise();
    let cred = decrypted.Plaintext.toString('ascii');
    return cred;
}
My RDS documents table has a doc_id column, which I access using sqlQuery in the Lambda function:
var sqlQuery = `SELECT doc_id from documents`;
After testing the Lambda function, I get the doc_id values in the output.
If you get a module import error, you must add a layer:
errorType": "Runtime.ImportModuleError",
"errorMessage": "Error: Cannot find module 'mysql'\nRequire stack:\n-
/var/task/index.js\n- /var/runtime/UserFunction.js\n- /var/runtime/index.js",
"trace": [
"Runtime.ImportModuleError: Error: Cannot find module 'mysql'",
You can configure your Lambda function to use additional code and
content in the form of layers. A layer is a ZIP archive that contains
libraries, a custom runtime, or other dependencies. With layers, you
can use libraries in your function without needing to include them in
your deployment package.
To include libraries in a layer, place them in the directory structure
that corresponds to your programming language.
Node.js – nodejs/node_modules
Python – python
Ruby – ruby/gems/2.5.0
Java – java/lib
First create a ZIP archive that contains the mysql module:
Create a folder with the same name as the Lambda function, for example: mkdir lambda-function
Then, in a terminal inside that folder, run npm init
Then run npm install mysql
You should see a node_modules folder created.
Create another folder named nodejs inside the lambda-function folder (mkdir nodejs)
Move node_modules inside the nodejs folder
Zip the nodejs folder (nodejs.zip) and upload it as a layer.
Then go to Lambda -> Layers -> Create layer.

Related

Strapi email designer plugin reference template to record

I'm currently developing a multi-tenant API with Strapi. For one part I use the Strapi email designer plugin, because I want to send emails that are custom designed for each tenant. The problem is that the plugin's table is not accessible in the Strapi content manager, so I can only hard-code a template to a specific endpoint. Is there a way to expose the plugin's table in the content manager, or to reference it from a content manager table, something like:
(table)tenant -> (field)templateId => (ref-table)plugin-email-designer -> (ref-field)templateId
you know, so I can switch and set templates dynamically from the Strapi panel rather than with hard-coded endpoints?
I've checked your issue briefly, and there is an option you are going to like, but it involves using patch-package...
So, let's assume that you have a Strapi project created, you have added strapi-plugin-email-designer, and you are using yarn v1.x.x:
yarn add patch-package postinstall-postinstall
Go to node_modules/strapi-plugin-email-designer/server/content-types/email-template/schema.json and change the following fields:
{
    ...
    "pluginOptions": {
        "content-manager": {
            "visible": true
        },
        "content-type-builder": {
            "visible": true
        }
    },
    ...
}
now run
yarn patch-package strapi-plugin-email-designer
now open your project's package.json and add to scripts:
{
    "scripts": {
        ...
        "postinstall": "patch-package"
    }
}
run
yarn build
yarn develop
head to the admin UI; you should see the new collection.
So now you can do the following:
Sending Email
Let's assume you added a has-one relation called email_template to your model.
Next we need to add a custom route, so in /src/api/tenant/routes/ create a file called routes.js
/src/api/tenant/routes/routes.js
module.exports = {
    routes: [
        {
            method: 'POST',
            path: `/tenants/:id/send`,
            handler: `tenant.send`
        }
    ]
};
now, we need to add a handler to the controller:
/src/api/tenant/controllers/tenant.js
"use strict";
/**
* tenant controller
*/
const { createCoreController } = require("#strapi/strapi").factories;
module.exports = createCoreController("api::tenant.tenant", ({ strapi }) => ({
async send(ctx) {
const { id } = ctx.params;
const { data } = ctx.request.body;
// notice, if you need extra validation you add it here
// if (!data) return ctx.badRequest("no data was provided");
const { to, subject } = data;
const { email_template, ...tenant } = await strapi.db
.query("api::tenant.tenant")
// if you have extra relations it's better to populate them directly here
.findOne({ where: { id }, populate: ["email_template"] });
console.log(email_template);
try {
await strapi
.plugin("email-designer")
.service("email")
.sendTemplatedEmail(
{
to,
//from, < should be set in /config/plugins.js email.settings.defaultFrom
//replayTo < should be set in /config/plugins.js email.settings.defaultReplyTo
},
{
templateReferenceId: email_template.templateReferenceId,
subject,
},
{
...tenant,
// this equals to apply all the data you have in tenant
// this may need to be aligned between your tenant and template
}
);
return { success: `Message sent to ${to}` };
} catch (e) {
strapi.log.debug("📺: ", e);
return ctx.badRequest(null, e);
}
},
}));
Don't forget to enable access to /api/tenants/:id/send in the admin panel, under Settings -> Roles.
POST http://localhost:1337/api/tenants/1/send
{
    "data": {
        "to": "email@example.com",
        "subject": "Hello World"
    }
}
response:
{
    "success": "Message sent to email@example.com"
}
Please note there is no template validation; if you give it a wrong template reference, the call will fail.

Hardhat: how to deploy using a custom signer

Normally, to deploy contracts to a network, we specify the private keys in the accounts section of the network config, like below, and these accounts get used to sign the transactions.
module.exports = {
    defaultNetwork: "rinkeby",
    networks: {
        hardhat: {
        },
        rinkeby: {
            url: "https://eth-rinkeby.alchemyapi.io/v2/1234",
            accounts: [privateKey1, privateKey2, ...]
        }
    },
But we need to use a custom signer that signs the transactions instead. All transactions that are part of the deployment process need to be signed by this custom signer.
How do I do this using Hardhat/ethers.js?
You have to modify the deployment script to attach your custom signer to the contract factory (https://docs.ethers.io/v4/api-contract.html):
async function main() {
    let HelloBar = await ethers.getContractFactory("HelloBar");
    const signer = createYourCustomSigner();
    // attach the signer to the factory
    HelloBar = HelloBar.connect(signer);
    const hellobar = await HelloBar.deploy();
    await hellobar.deployed();
    console.log("Address:", hellobar.address);
}

main()
    .then(() => process.exit(0))
    .catch((error) => {
        console.error(error);
        process.exit(1);
    });
Then run the script with:
npx hardhat run --network localhost scripts/deploy.js
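What createYourCustomSigner returns depends on your setup; any ethers Signer works. As a hypothetical example (the DEPLOYER_KEY env var is an assumption), a Wallet built from a private key and connected to Hardhat's provider:

function createYourCustomSigner() {
    // any ethers.Signer implementation (e.g. a hardware-wallet signer)
    // can be returned here instead
    return new ethers.Wallet(process.env.DEPLOYER_KEY, ethers.provider);
}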

After successfully deploying a Next.js app on AWS Amplify, https://www.example.com/api/any-route shows the below error in the console

The app is deployed successfully, but the API routes (/pages/api) are not working as expected, showing the error below in the console.
The build succeeds and deploys on aws-amplify, and I have added the environment variables correctly; I don't know why this is happening.
Does aws-amplify not support serverless functions written inside the /api folder?
{
    "error": {
        "message": "connect ECONNREFUSED 127.0.0.1:80",
        "name": "Error",
        "stack": "Error: connect ECONNREFUSED 127.0.0.1:80\n    at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1148:16)",
        "config": {
            "url": "undefined/campaigns",
            "method": "get",
            "headers": {
                "Accept": "application/json, text/plain, */*",
                "User-Agent": "axios/0.21.4"
            },
            "auth": {},
            "transformRequest": [null],
            "transformResponse": [null],
            "timeout": 0,
            "xsrfCookieName": "XSRF-TOKEN",
            "xsrfHeaderName": "X-XSRF-TOKEN",
            "maxContentLength": -1,
            "maxBodyLength": -1,
            "transitional": {
                "silentJSONParsing": true,
                "forcedJSONParsing": true,
                "clarifyTimeoutError": false
            }
        },
        "code": "ECONNREFUSED"
    }
}
Here is the code
import axios from 'axios';
import Cors from 'cors';
import rateLimit from '../../../utils/rate-limit';
import initMiddleware from '../../../lib/init-middleware';

// Initialize the cors middleware
const cors = initMiddleware(
    Cors({
        methods: ['POST'],
        origin: ['https://www.example.com', /\.example\.com$/],
    })
);

const limiter = rateLimit({
    interval: 60 * 1000, // 60 seconds
    uniqueTokenPerInterval: 500, // Max 500 users per second
});

const handler = async (req, res) => {
    await cors(req, res);
    if (req.method === 'GET') {
        try {
            await limiter.check(res, 50, 'CACHE_TOKEN');
            const { data } = await axios.get(`${process.env.BASE_URL}/campaigns`, {
                auth: {
                    username: process.env.MAIL_SERVER_USERNAME,
                    password: process.env.MAIL_SERVER_PASSWORD,
                },
            });
            return res.status(200).json(data);
        } catch (error) {
            return res.status(429).json({ error });
        }
    } else {
        try {
            await limiter.check(res, 10, 'CACHE_TOKEN'); // 10 requests per minute
            return res.status(200).json('not allowed');
        } catch (err) {
            return res.status(429).json({ error: 'Rate limit exceeded' });
        }
    }
};

export default handler;
I figured out that the environment variables were not getting reflected, so I did some googling, found this solution, and it worked for me.
Add your desired environment variable in the Amplify Console like normal (steps)
Update (or create) your next.config.js file with the environment variable you added in the Amplify Console. E.g. if you created an environment variable named MY_ENV_VAR in the console in step 1 above, then you would add the following:
module.exports = {
    env: {
        MY_ENV_VAR: process.env.MY_ENV_VAR
    }
};
Now after your next build you will be able to reference your environment variable (process.env.MY_ENV_VAR) in your SSR lambdas!
Here is the link: Github
Ran into the same problem. Amplify only supports Next.js 11 at the moment. If you go with the default settings it will use the latest Next.js 12 and the /api routes won't work; they return a 503 with a CloudFormation permission error.
Specify next 11.1.3 in your package.json, as in the sketch below.
https://aws.amazon.com/about-aws/whats-new/2021/08/aws-amplify-hosting-support-next-js-version-11/
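A minimal package.json sketch pinning the version (the react entries are illustrative, not from the original answer):

{
    "dependencies": {
        "next": "11.1.3",
        "react": "17.0.2",
        "react-dom": "17.0.2"
    }
}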
I also faced that problem. Amplify does not support v12 of Next.js. Simply downgrade the next version in your package.json to v11.1.3 and your routes will work as normal.
Best regards,
Artem Meshkov

Firebase rules test does not pass even when using lockdown

I'm trying to test my Firebase rules, but they do not pass even when I use lockdown mode. I followed the guide at https://firebase.google.com/docs/firestore/security/test-rules-emulator
const firebase = require('@firebase/rules-unit-testing');
const fs = require('fs');

const projectId = 'test-judge';

function getAuthedFirestore(auth) {
    return firebase.initializeAdminApp({
        projectId: projectId,
        auth: auth
    }).firestore();
}

beforeEach(async () => {
    await firebase.clearFirestoreData({ projectId });
});

before(async () => {
    const rules = fs.readFileSync('firestore.rules', 'utf8');
    await firebase.loadFirestoreRules({
        projectId: projectId,
        rules: rules
    });
});

after(async () => {
    await Promise.all(firebase.apps().map(app => app.delete()));
});

describe('locked down', () => {
    it("require users to log in before creating a profile", async () => {
        const db = getAuthedFirestore(null);
        const profile = db.collection("users").doc("alice");
        await firebase.assertFails(profile.set({ birthday: "January 1" }));
    });
});
here is my firebase.json
{
    "firestore": {
        "rules": "firestore.rules",
        "indexes": "firestore.indexes.json"
    }
}
and my package.json
{
    "devDependencies": {
        "firebase-admin": "^9.11.0",
        "@firebase/app": "^0.6.29",
        "@firebase/rules-unit-testing": "^1.3.12",
        "mocha": "^9.0.3",
        "fs-extra": "^10.0.0"
    },
    "scripts": {
        "test": "mocha"
    }
}
and here is firestore.rules
rules_version = '2';
service cloud.firestore {
    match /databases/{database}/documents {
        match /{document=**} {
            allow read, write: if false;
        }
    }
}
It doesn't seem to me like I'm doing anything wrong, but if I run npm test the test fails. I expect it to pass, since assertFails is used and the rules always return false: I should not be able to set the value, so the test should pass.
here is my output
Warning: FIRESTORE_EMULATOR_HOST not set, using default value localhost:8080
  locked down
    1) require users to log in before creating a profile
  0 passing (324ms)
  1 failing
  1) locked down
       require users to log in before creating a profile:
     Error: Expected request to fail, but it succeeded.
      at C:\Users\Moneer\Desktop\judge_rules\node_modules\@firebase\rules-unit-testing\dist\index.cjs.js:581:31
      at async Context.<anonymous> (test\test.js:33:9)
npm ERR! Test failed. See above for more details.
This is expected behavior: Security Rules are what protect your backend services from malicious client requests, while the "AdminApp" relates to the Admin SDK, a service tool that interacts with the Firebase services directly, behind the Security Rules.
For reference, you will notice that the Admin SDK would normally require Service Account credentials, which allow the SDK to authenticate with the GCP IAM services.
Just realized I should be using initializeTestApp, not initializeAdminApp.
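For clarity, a sketch of the corrected helper under that fix (same v1 rules-unit-testing API as the question): initializeTestApp simulates a client SDK, so requests go through Security Rules, the locked-down write is rejected, and assertFails passes.

function getAuthedFirestore(auth) {
    // initializeTestApp creates a client that is subject to Security Rules;
    // initializeAdminApp bypasses rules entirely, which made the set() succeed
    return firebase.initializeTestApp({
        projectId: projectId,
        auth: auth
    }).firestore();
}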

Using Grunt to Mock Endpoints

I'm using Yeoman, Grunt, and Bower to construct a platform for building a frontend independently of a backend. The idea is that all of my (AngularJS) controllers, services, factories, etc. live in this project and get injected afterwards into my server-side codebase based off the result of grunt build.
My question is:
How can I mock endpoints so that the Grunt server responds to the same endpoints as my (Rails) App will?
At the moment I am using:
angular.module('myApp', ['ngResource'])
    .run(['$rootScope', function ($rootScope) {
        $rootScope.testState = 'test';
    }]);

And then in each of my individual services:

mockJSON = {'foo': 'myMockJSON'}

And on every method:

if ($rootScope.testState == 'test') {
    return mockJSON;
} else {
    // real service logic with $q/$http goes here
}
Then after grunt build, testState = 'test' gets removed.
This is clearly a relatively janky architecture. How can I avoid it? How can I have Grunt respond to the same endpoints as my app (some of which have dynamic params) apply some logic (if necessary), and serve out a json file (possibly dependent on path params)?
I've fixed this issue by using express to write a server that responds with static json.
First I created a directory in my project called 'api'. Within that directory I have the following files:
package.json:
{
    "name": "mockAPI",
    "version": "0.0.0",
    "dependencies": {
        "express": "~3.3.4"
    }
}
Then I run npm install in this directory.
index.js:
module.exports = require('./lib/server');
lib/server.js:
var express = require('express');
var app = express();

app.get('/my/endpoint', function (req, res) {
    res.json({'foo': 'myMockJSON'});
});

module.exports = app;
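Since some of the endpoints in the question take dynamic params, Express route parameters cover that too; a small sketch (the /users/:id route is illustrative, not from the original answer):

// e.g. GET /users/42 responds with JSON that echoes the path parameter
app.get('/users/:id', function (req, res) {
    res.json({'id': req.params.id, 'foo': 'myMockJSON'});
});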
and finally in my global Gruntfile.js:
connect: {
    options: {
        port: 9000,
        hostname: 'localhost',
    },
    livereload: {
        options: {
            middleware: function (connect, options) {
                return [
                    lrSnippet,
                    mountFolder(connect, '.tmp'),
                    mountFolder(connect, yeomanConfig.app),
                    require('./api')
                ];
            }
        }
    }
},
Then the services make the requests, and the express server serves the correct JSON.
After grunt build, the express server is simply replaced by a rails server.
As of grunt-contrib-connect v.0.7.0 you can also just add your custom middleware to the existing middleware stack without having to manually rebuild the existing middleware stack.
livereload: {
    options: {
        open: true,
        base: [
            '.tmp',
            '<%= config.app %>'
        ],
        middleware: function (connect, options, middlewares) {
            // inject a custom middleware into the array of default middlewares
            middlewares.push(function (req, res, next) {
                if (req.url !== '/my/endpoint') {
                    return next();
                }
                res.writeHead(200, {'Content-Type': 'application/json'});
                res.end(JSON.stringify({'foo': 'myMockJSON'}));
            });
            return middlewares;
        }
    }
},
See https://github.com/gruntjs/grunt-contrib-connect#middleware for the official documentation.
Alternatively you can use grunt-connect-proxy to proxy everything that is missing in your test server to an actual backend.
It's quite easy to install; the one thing to remember when adding the proxy to your livereload connect middleware is to add it last, like this:
middleware: function (connect) {
    return [
        lrSnippet,
        mountFolder(connect, '.tmp'),
        mountFolder(connect, yeomanConfig.app),
        proxySnippet
    ];
}
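For completeness, a sketch of the matching proxies block in the connect config (the /service context and port are assumptions borrowed from the Apache example further down; if memory serves from the plugin's README, proxySnippet above is require('grunt-connect-proxy/lib/utils').proxyRequest):

connect: {
    options: {
        port: 9000,
        hostname: 'localhost'
    },
    // forward anything under /service to the real REST backend
    proxies: [
        {
            context: '/service',
            host: 'localhost',
            port: 3000
        }
    ]
}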
grunt-connect-prism is similar to the Ruby project VCR. It provides an easy way for front end developers to record HTTP responses returned by their API (or some other remote source) and replay them later. It's basically an HTTP cache, but for developers working on a Single Page Application (SPA). You can also generate stubs for API calls that don't exist, and populate them the way you want.
It's useful for mocking complex & high latency API calls during development. It's also useful when writing e2e tests for your SPA only, removing the server from the equation. This results in much faster execution of your e2e test suite.
Prism works by adding a custom connect middleware to the connect server provided by the grunt-contrib-connect plugin. While in 'record' mode it will generate a file per response on the filesystem with content like the following:
{
    "requestUrl": "/api/ponies",
    "contentType": "application/json",
    "statusCode": 200,
    "data": {
        "text": "my little ponies"
    }
}
DISCLAIMER: I'm the author of this project.
You can use an Apache proxy to connect your REST server with gruntjs.
Apache would do this:
proxy / -> gruntjs
proxy /service -> REST server
You would point your application at Apache, and the angular.js application would think it is talking to itself, so there is no cross-domain problem.
Here is a great tutorial on how to set this up:
http://alfrescoblog.com/2014/06/14/angular-js-activiti-webapp-with-activiti-rest/
Just my alternative way, based on Abraham P's answer. It does not require installing express within the 'api' folder, and I can separate the mock services into individual files. For example, my 'api' folder contains 3 files:
api/
    index.js    // assigns all the "modules" so we can simply require the folder
    user.js     // all mocking for user
    product.js  // all mocking for product
file user.js

var user = function (req, res, next) {
    if (req.method === 'POST' && req.url.indexOf('/user') === 0) {
        res.end(
            JSON.stringify({
                'id': '5463c277-87c4-4f1d-8f95-7d895304de12',
                'role': 'admin'
            })
        );
    } else {
        next();
    }
};

module.exports = user;
file product.js

var product = function (req, res, next) {
    if (req.method === 'POST' && req.url.indexOf('/product') === 0) {
        res.end(
            JSON.stringify({
                'id': '5463c277-87c4-4f1d-8f95-7d895304de12',
                'name': 'test',
                'category': 'test'
            })
        );
    } else {
        next();
    }
};

module.exports = product;
index.js just assigns all the "modules" and we simply require that.

module.exports = {
    product: require('./product.js'),
    user: require('./user.js')
};
My Gruntfile.js file

connect: {
    options: {
        port: 9000,
        // Change this to '0.0.0.0' to access the server from outside.
        hostname: 'localhost',
        livereload: 35729
    },
    livereload: {
        options: {
            open: true,
            middleware: function (connect) {
                return [
                    connect.static('.tmp'),
                    connect().use(
                        '/bower_components',
                        connect.static('./bower_components')
                    ),
                    connect.static(appConfig.app),
                    require('./api').user,
                    require('./api').product
                ];
            }
        }
    }
}