How do I set the Skipper body limit in Sails.js v1.0?

I am currently having an issue when uploading files bigger than 10 MB. I am uploading them to an S3 bucket.
I have tried to set the limit within the Skipper instance that gets built into the middleware as the body parser.
order: [
  'cookieParser',
  'session',
  'myRequestLogger',
  'bodyParser',
  'compress',
  'poweredBy',
  'router',
  'www',
  'favicon',
],

myRequestLogger: function (req, res, next) {
  sails.log("Requested :: ", req.method, req.url);
  return next();
},

/***************************************************************************
 *                                                                         *
 * The body parser that will handle incoming multipart HTTP requests.     *
 *                                                                         *
 * https://sailsjs.com/config/http#?customizing-the-body-parser           *
 *                                                                         *
 ***************************************************************************/
bodyParser: (function _configureBodyParser() {
  var skipper = require('skipper');
  var middlewareFn = skipper({
    strict: true,
    limit: '50mb'
  });
  return middlewareFn;
})(),
This doesn't seem to use the limit property at all.
Any advice on this would help.

I'm not entirely sure where you found the limit option for Skipper, but limiting the file size of an upload is loosely documented between skipper-s3 and Skipper itself.
Specifying the maxBytes option when receiving the upload in your action/controller should work.
If you're going to be uploading files in multiple actions/controllers, I'd keep the max file size somewhere like sails.config.custom.maxUploadFilesize so there's a single place to configure it; I couldn't find any global Skipper option for this, but I could have missed it. A sketch of that approach follows the example below.
const MAX_UPLOAD_BYTES = 10 * 1024 * 1024;

req.file('avatar')
  .upload({
    // Required
    adapter: require('skipper-s3'),
    key: 'thekyehthethaeiaghadkthtekey',
    secret: 'AB2g1939eaGAdesoccertournament',
    bucket: 'my_stuff',
    maxBytes: MAX_UPLOAD_BYTES
  }, function whenDone(err, uploadedFiles) {
    if (err) {
      return res.serverError(err);
    }
    return res.ok({
      files: uploadedFiles,
      textParams: req.params.all()
    });
  });
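For example, a minimal sketch of that single-place configuration (the file and key names are just the suggestion above, not Sails built-ins):

// config/custom.js
module.exports.custom = {
  maxUploadFilesize: 10 * 1024 * 1024 // 10 MB
};

// in an action/controller, assuming the same skipper-s3 options as above
req.file('avatar').upload({
  adapter: require('skipper-s3'),
  key: 'thekyehthethaeiaghadkthtekey',
  secret: 'AB2g1939eaGAdesoccertournament',
  bucket: 'my_stuff',
  maxBytes: sails.config.custom.maxUploadFilesize
}, function (err, uploadedFiles) {
  if (err) { return res.serverError(err); }
  return res.ok({ files: uploadedFiles });
});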


Store cookie information with express-rate-limit

Is there a way I can store user cookies (JWT) in my MongoDB database with the express-rate-limit and rate-limit-mongo packages?
Code that I am currently using:
var limiter = new RateLimit({
  store: new MongoStore({
    uri: process.env.MONGO_URI,
    expireTimeMs: 60 * 1000 * 60,
  }),
  max: 150,
  windowMs: 10 * 60 * 1000,
  message: "Too many requests in a short duration, IP Banned for an hour.",
});
I want to know the JWT cookie (if it exists) associated with the request too, so that I can know who the culprit was.
Basically, how can I access the request object and store information from it in the rate-limiting database?
I was able to do this with the onLimitReached function available in express-rate-limit like so:
var queslimiter = new RateLimit({
  store: new MongoStore({
    uri: process.env.MONGO_URI,
    expireTimeMs: 60 * 1000 * 60,
    collectionName: "ansForce",
  }),
  max: 30,
  message: "Too many requests in a short duration, IP Banned for an hour.",
  onLimitReached: async function (req, res) {
    try {
      const blacklist = new Blacklist({
        team: req.cookies.team,
      });
      await blacklist.save();
    } catch (e) {
      console.log(e);
    }
  },
});
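For reference, Blacklist here is an ordinary Mongoose model; a minimal sketch of its assumed shape (only the team field appears in the snippet above, the rest is guesswork):

// models/Blacklist.js -- hypothetical; adjust fields to whatever you actually store
const mongoose = require("mongoose");

const blacklistSchema = new mongoose.Schema(
  { team: String },
  { timestamps: true } // records when the offending request was logged
);

module.exports = mongoose.model("Blacklist", blacklistSchema);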

Where is the real Quasar SSR Express server?

I am building a Quasar and Vue.js app and I want to add a MongoDB API with an Express server. There is that src-ssr/ dir where there is an index.js file with basic Express app routing:
/*
* This file runs in a Node context (it's NOT transpiled by Babel), so use only
* the ES6 features that are supported by your Node version. https://node.green/
*
* WARNING!
* If you import anything from node_modules, then make sure that the package is specified
* in package.json > dependencies and NOT in devDependencies
*
* Note: This file is used only for PRODUCTION. It is not picked up while in dev mode.
* If you are looking to add common DEV & PROD logic to the express app, then use
* "src-ssr/extension.js"
*/
console.log("got here!") // I added
const express = require("express"),
  compression = require("compression");
const ssr = require("quasar-ssr"),
  extension = require("./extension"),
  app = express(),
  port = process.env.PORT || 3000;

const serve = (path, cache) =>
  express.static(ssr.resolveWWW(path), {
    maxAge: cache ? 1000 * 60 * 60 * 24 * 30 : 0
  });
// gzip
app.use(compression({ threshold: 0 }));
// serve this with no cache, if built with PWA:
if (ssr.settings.pwa) {
  app.use("/service-worker.js", serve("service-worker.js"));
}
// serve "www" folder
app.use("/", serve(".", true));
// we extend the custom common dev & prod parts here
extension.extendApp({ app, ssr });
// this should be last get(), rendering with SSR
app.get("*", (req, res) => {
res.setHeader("Content-Type", "text/html");
// SECURITY HEADERS
// read more about headers here: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers
// the following headers help protect your site from common XSS attacks in browsers that respect headers
// you will probably want to use .env variables to drop in appropriate URLs below,
// and potentially look here for inspiration:
// https://ponyfoo.com/articles/content-security-policy-in-express-apps
// https://developer.mozilla.org/en-us/docs/Web/HTTP/Headers/X-Frame-Options
// res.setHeader('X-frame-options', 'SAMEORIGIN') // one of DENY | SAMEORIGIN | ALLOW-FROM https://example.com
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection
// res.setHeader('X-XSS-Protection', 1)
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options
// res.setHeader('X-Content-Type-Options', 'nosniff')
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin
// res.setHeader('Access-Control-Allow-Origin', '*') // one of '*', '<origin>' where origin is one SINGLE origin
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-DNS-Prefetch-Control
// res.setHeader('X-DNS-Prefetch-Control', 'off') // may be slower, but stops some leaks
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy
// res.setHeader('Content-Security-Policy', 'default-src https:')
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/sandbox
// res.setHeader('Content-Security-Policy', 'sandbox') // this will lockdown your server!!!
// here are a few that you might like to consider adding to your CSP
// object-src, media-src, script-src, frame-src, unsafe-inline
ssr.renderToString({ req, res }, (err, html) => {
if (err) {
if (err.url) {
res.redirect(err.url);
} else if (err.code === 404) {
console.log(404,'!!')
res.status(404).send("404 | Page Not Found foo bar"); // I added foo bar
} else {
// Render Error Page or Redirect
res.status(500).send("500 | Internal Server Error");
if (ssr.settings.debug) {
console.error(`500 on ${req.url}`);
console.error(err);
console.error(err.stack);
}
}
} else {
res.send(html);
}
});
});
app.listen(port, () => {
console.log(`Server listening at port ${port}`);
});
but none of my logs or changes show up when I run $ quasar dev -m ssr.
Also the "Server listening at port ${port}" log is not showing.
Need your help!
quasar version 1.0.7
debian 10
src-ssr/index.js
From the comment notes, it seems this file is used for production only and is not picked up in dev mode:
"Note: This file is used only for PRODUCTION"
You may want to use src-ssr/extension.js instead, as sketched below.
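A minimal sketch of what that could look like (the /api/ping route is made up; the extendApp shape matches the extension.extendApp({ app, ssr }) call in index.js above):

// src-ssr/extension.js
// runs in BOTH dev and prod, so routes added here are picked up
// by `quasar dev -m ssr` as well
module.exports.extendApp = function ({ app, ssr }) {
  app.get("/api/ping", (req, res) => {
    res.json({ ok: true }); // replace with your MongoDB-backed API routes
  });
};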

Image returned from REST API always displays broken

I am building a content management system for an art portfolio app with React. The client POSTs to the API, which uses Mongoose to insert into MongoDB. The API then queries the DB for the newly inserted image and returns it to the client.
Here's my code to connect to MongoDB using Mongoose:
mongoose.connect('mongodb://localhost/test')
  .then(() => console.log('connected to db'))
  .catch(err => console.log(err))

mongoose.Promise = global.Promise

const db = mongoose.connection
db.on('error', console.error.bind(console, 'MongoDB connection error:'))

const Schema = mongoose.Schema;
const ImgSchema = new Schema({
  img: { data: Buffer, contentType: String }
})

const Img = mongoose.model('Img', ImgSchema)
I am using multer and fs to handle the image file. My POST endpoint looks like this:
router.post('/', upload.single('image'), (req, res) => {
  if (!req.file) {
    res.send('no file')
  } else {
    const imgItem = new Img()
    imgItem.img.data = fs.readFileSync(req.file.path)
    imgItem.contentType = 'image/png'
    imgItem
      .save()
      .then(data =>
        Img.findById(data, (err, findImg) => {
          console.log(findImg.img)
          fs.writeFileSync('api/uploads/image.png', findImg.img.data)
          res.sendFile(__dirname + '/uploads/image.png')
        }))
  }
})
I can see in the file structure that writeFileSync is writing the image to the disk. res.sendFile grabs it and sends it down to the client.
Client side code looks like this:
handleSubmit = e => {
  e.preventDefault()
  const img = new FormData()
  img.append('image', this.state.file, this.state.file.name)
  axios
    .post('http://localhost:8000/api/gallery', img, {
      onUploadProgress: progressEvent => {
        console.log(progressEvent.loaded / progressEvent.total)
      }
    })
    .then(res => {
      console.log('responsed')
      console.log(res)
      const returnedFile = new File([res.data], 'image.png', { type: 'image/png' })
      const reader = new FileReader()
      reader.onloadend = () => {
        this.setState({ returnedFile, returned: reader.result })
      }
      reader.readAsDataURL(returnedFile)
    })
    .catch(err => console.log(err))
}
This does successfully place both the returned file and the image data URL on state. However, in my application the image always displays broken.
Here are some screenshots: [screenshots omitted]
How to fix this?
Avoid sending back base64-encoded images (multiple images + large files + large encoded strings = very slow performance). I'd highly recommend creating a microservice that only handles image uploads and any other image-related GET/POST/PUT/DELETE requests. Separate it from your main application.
For example:
I use multer to create an image buffer
Then use sharp or fs to save the image (depending upon file type)
Then I send the filepath to my controller to be saved to my DB
Then, the front-end does a GET request when it tries to access: http://localhost:4000/uploads/timestamp-randomstring-originalname.fileext
In simple terms, my microservice acts like a CDN solely for images.
For example, a user sends a post request to http://localhost:4000/api/avatar/create with some FormData:
It first passes through some Express middlewares:
libs/middlewares.js
...
app.use(cors({credentials: true, origin: "http://localhost:3000" })) // allows receiving of cookies from front-end
app.use(morgan(`tiny`)); // logging framework
app.use(multer({
  limits: {
    fileSize: 10240000,
    files: 1,
    fields: 1
  },
  fileFilter: (req, file, next) => {
    if (!/\.(jpe?g|png|gif|bmp)$/i.test(file.originalname)) {
      req.err = `That file extension is not accepted!`
      return next(null, false) // return here so next(null, true) below doesn't also run
    }
    next(null, true);
  }
}).single(`file`))
app.use(bodyParser.json()); // parses header requests (req.body)
app.use(bodyParser.urlencoded({ limit: `10mb`, extended: true })); // allows objects and arrays to be URL-encoded
...etc
Then, hits the avatars route:
routes/avatars.js
app.post(`/api/avatar/create`, requireAuth, saveImage, create);
It then passes through some user authentication, then goes through my saveImage middleware:
services/saveImage.js
const createRandomString = require('../shared/helpers');
const fs = require("fs");
const sharp = require("sharp");

// (wrapped as an Express middleware; the original excerpt omitted the signature)
module.exports = (req, res, next) => {
  if (req.err || !req.file) {
    return res.status(500).json({ err: req.err || `Unable to locate the requested file to be saved` });
  }

  const randomString = createRandomString();
  const filename = `${Date.now()}-${randomString}-${req.file.originalname}`;
  const filepath = `uploads/${filename}`;
  const setFilePath = () => { req.file.path = filepath; return next(); };

  /\.(gif|bmp)$/i.test(req.file.originalname)
    ? fs.writeFile(filepath, req.file.buffer, (err) => {
        if (err) {
          return res.status(500).json({ err: `There was a problem saving the image.` });
        }
        setFilePath();
      })
    : sharp(req.file.buffer).resize(256, 256).max().withoutEnlargement().toFile(filepath).then(() => setFilePath());
};
If the file is saved, it then sends req.file.path to my create controller. This gets saved to my DB as a file path and as an image URL (the avatarFilePath, i.e. /uploads/imagefile.ext, is kept for removal purposes, and the avatarURL, i.e. [http://localhost:4000]/uploads/imagefile.ext, is what the front-end uses for its GET request):
controllers/avatars.js (I'm using Postgres, but you can substitute for Mongo)
create: async (req, res, done) => {
  try {
    const avatarurl = `${apiURL}/${req.file.path}`;
    await db.result("INSERT INTO avatars(userid, avatarURL, avatarFilePath) VALUES ($1, $2, $3)", [req.session.id, avatarurl, req.file.path]);
    res.status(201).json({ avatarurl });
  } catch (err) {
    return res.status(500).json({ err: err.toString() });
  }
}
Then when the front-end tries to access the uploads folder via <img src={avatarURL} alt="image" /> or <img src="[http://localhost:4000]/uploads/imagefile.ext" alt="image" />, it gets served up by the microservice:
libs/server.js
const express = require("express");
const app = express(); // the original excerpt referenced app before creating it
const PORT = 4000;

//============================================================//
// EXPRESS SERVE AVATAR IMAGES
//============================================================//
app.use(`/uploads`, express.static(`uploads`));

//============================================================//
/* CREATE EXPRESS SERVER */
//============================================================//
app.listen(PORT);
What it looks like when logging requests:
19:17:54 INSERT INTO avatars(userid, avatarURL, avatarFilePath) VALUES ('08861626-b6d0-11e8-9047-672b670fe126', 'http://localhost:4000/uploads/1536891474536-k9c7OdimjEWYXbjTIs9J4S3lh2ldrzV8-android.png', 'uploads/1536891474536-k9c7OdimjEWYXbjTIs9J4S3lh2ldrzV8-android.png')
POST /api/avatar/create 201 109 - 61.614 ms
GET /uploads/1536891474536-k9c7OdimjEWYXbjTIs9J4S3lh2ldrzV8-android.png 200 3027 - 3.877 ms
What the user sees upon a successful GET request: [screenshot omitted]

Updating AWS API Gateway Body Mapping Templates at API level with JavaScript SDK

What is the right JavaScript API to update the body mapping templates at the API level?
updateIntegrationResponse cannot do it at this level.
We ended up using the API below. The tricky part is that you need to replace the slash in the content type with ~1 (the JSON Pointer escape for /) to make it work.
const params = {
  responseType: response.type.toString(), /* required */
  restApiId: apiId, /* required */
  patchOperations: [{
    op: 'add',
    path: '/responseTemplates/' + response.bodyMappingTemplate.contentType.replace("/", "~1"),
    value: response.bodyMappingTemplate.content
  }]
};

this.apiGatewaySDK.updateGatewayResponse(params, (err, data) => {
  if (err) {
    reject(err);
  } else {
    this.serverless.cli.log('API Gateway Configuring: Body mapping templates are set correctly');
    resolve(`Body Mapping Templates set successfully: ${response.type}`);
  }
});
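For context, resolve and reject imply the snippet runs inside a Promise executor; a minimal sketch of such a wrapper (the helper name is hypothetical; it assumes an instantiated AWS.APIGateway client from the AWS SDK for JavaScript v2):

// hypothetical helper -- shows where resolve/reject come from
function updateBodyMappingTemplate(apiGatewaySDK, params) {
  return new Promise((resolve, reject) => {
    apiGatewaySDK.updateGatewayResponse(params, (err, data) => {
      if (err) { return reject(err); }
      resolve(data);
    });
  });
}

// usage: await updateBodyMappingTemplate(this.apiGatewaySDK, params);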

Problems with security policy for Filepicker Convert?

I use Filepicker to "read" and then "store" an image from the client's computer. Now I want to resize the image using Filepicker but always get a 403 error:
POST https://www.filepicker.io/api/file/w11b6aScR1WRXKFbcXON/convert?_cacheBust=1380818787693 403 (FORBIDDEN)
I am using the same security policy and signature for the "read", "store", and "convert" calls. Is this wrong? Because when "read" and "store" are called there is no file handle yet (i.e. the last string segment in the InkBlob.url). But it seems the "convert" policy/signature must be generated using the file handle returned with the "store" InkBlob? And if this is the case, what's a more convenient way to do this in JavaScript? Because in "convert" I have no access to the Python function that generates security policies unless I write an API call for that.
My code snippet is below (initialFpSecurityObj was pre-generated in Python using an empty handle):
filepicker.store(thumbFile, {
  policy: initialFpSecurityObj.policy,
  signature: initialFpSecurityObj.signature,
  location: "S3",
  path: 'thumbs/' + initialFpSecurityObj.uniqueName + '/',
}, function(InkBlob) {
  console.log("Store successful:", JSON.stringify(InkBlob));
  processThumb(InkBlob);
}, function(FPError) {
  console.error(FPError.toString());
});

var processThumb = function(InkBlob) {
  filepicker.convert(InkBlob, {
    width: 800,
    height: 600,
    format: "jpg",
    policy: initialFpSecurityObj.policy,
    signature: initialFpSecurityObj.signature,
  }, function(InkBlob) {
    console.log("thumbnail converted and stored at:", InkBlob);
  }, function(FPError) {
    console.error(FPError);
  }); // note: the original snippet was missing this closing paren
};
Thanks a lot for the help.
--- EDIT ---
Below is the snippet for the Python code that generates initialFpSecurityObj
def generateFpSecurityOptions(handle, userId, policyLife=DEFAULT_POLICY_LIFE):
    expiry = int(time() + policyLife)
    json_policy = json.dumps({'handle': handle, 'expiry': expiry})
    policy = base64.urlsafe_b64encode(json_policy)
    secret = 'XXXXXXXXXXXXXX'
    signature = hmac.new(secret, policy, hashlib.sha256).hexdigest()
    uniqueName = hashlib.md5()
    uniqueName.update(signature + repr(time()))
    uniqueName = uniqueName.hexdigest() + str(userId)
    return {'policy': policy, 'signature': signature, 'expiry': expiry, 'uniqueName': uniqueName}

fp_security_options = generateFpSecurityOptions(None, request.user.id)
Then in the Django template fp_security_options is retrieved:
var initialFpSecurityObj = {{fp_security_options|as_json|safe}};
The way fp_security_options is generated seems suspicious to me (it's a former colleague's code) because the handle is None.
My recommendation would be to create two policies: one that is handle-bound and allows storing of the file, and another that is not handle-bound for the convert. In this case, you can set a shorter expiry time to increase the level of security, given that you are not specifying a handle.
Your problem is probably that your policy does not contain any "call" specifications. I suggest:
json_policy = json.dumps({'handle': handle, 'expiry': expiry, 'call':['pick','store','read','convert']})
but as our (very busy ;) brettcvz suggests, for conversion only, this is already enough:
json_policy = json.dumps({'handle': handle, 'expiry': expiry, 'call':'convert'})
You can find this in the security docs https://developers.inkfilepicker.com/docs/security/
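If you also need the non-handle-bound convert policy on the Node side, a rough JavaScript equivalent of the Python generator above (same secret; urlsafe base64 plus hex HMAC-SHA256) could look like this sketch:

// sketch only -- mirrors the Python generateFpSecurityOptions for a convert-only policy
const crypto = require("crypto");

function generateConvertPolicy(secret, policyLifeSecs) {
  const expiry = Math.floor(Date.now() / 1000) + policyLifeSecs;
  const jsonPolicy = JSON.stringify({ expiry: expiry, call: ["convert"] });
  // urlsafe base64, like Python's base64.urlsafe_b64encode
  const policy = Buffer.from(jsonPolicy).toString("base64")
    .replace(/\+/g, "-")
    .replace(/\//g, "_");
  const signature = crypto.createHmac("sha256", secret)
    .update(policy)
    .digest("hex");
  return { policy: policy, signature: signature, expiry: expiry };
}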
If you still have issues, use a REST call; it's free. The following method is JavaScript and returns a URL to the REST endpoint of Filepicker which can be used to retrieve the converted image. The _options object looks like this:
var myOptions = {
  w: 150,
  h: 150,
  fit: "crop",
  align: "faces",
  format: "jpg",
  quality: 86
};
and will work with all parameters specified in Filepicker's REST API (check out https://developers.inkfilepicker.com/docs/web/#inkblob-images).
function getConvertedURL(_handle, _options, _policy, _signature) {
  // basic url piece
  var url = "https://www.filepicker.io/api/file/" + _handle + "/convert?";
  // appending options
  for (var option in _options) {
    if (_options.hasOwnProperty(option)) {
      url += option + "=" + _options[option] + "&";
    }
  }
  // appending signed policy
  url += "signature=" + _signature + "&policy=" + _policy;
  return url;
}
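For illustration, a hypothetical call combining it with the myOptions object above (the handle is the one from the question's error message):

var convertUrl = getConvertedURL(
  "w11b6aScR1WRXKFbcXON", // file handle, i.e. the last segment of the InkBlob url
  myOptions,
  initialFpSecurityObj.policy,
  initialFpSecurityObj.signature
);
// the result can be dropped straight into an <img> tag
document.getElementById("thumb").src = convertUrl;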
So I finally figured it out myself, although I saw brettcvz's suggestion afterwards. The key is that for "convert" to work, I have to specify the exact handle of the uploaded file (i.e. the last segment of the url property of the InkBlob returned from the "store" or "pickAndStore" call).
First thing I did was to edit the Python function generating the security policy and signature:
def generateFpSecurityOptions(handle, userId, policyLife=DEFAULT_POLICY_LIFE):
    expiry = int(time() + policyLife)
    json_policy = json.dumps({'handle': handle, 'expiry': expiry})
    policy = base64.urlsafe_b64encode(json_policy)
    secret = 'XXXXXXXXXXXXXX'
    signature = hmac.new(secret, policy, hashlib.sha256).hexdigest()
    if handle is not None:
        uniqueName = handle
    else:
        uniqueName = hashlib.md5()
        uniqueName.update(signature + repr(time()))
        uniqueName = uniqueName.hexdigest() + str(userId)
    return {'policy': policy, 'signature': signature, 'expiry': expiry, 'uniqueName': uniqueName}

fp_security_options = generateFpSecurityOptions(None, request.user.id)
Then I had to establish an API call in our Django framework to get this security policy object dynamically via AJAX. Fortunately a colleague had previously written one, so I just call the API function in JavaScript to retrieve the file-specific security policy object:
var initialFpSecurityObj = {{fp_security_options|as_json|safe}};
filepicker.store(thumbFile, {
  policy: initialFpSecurityObj.policy,
  signature: initialFpSecurityObj.signature,
  access: "public"
}, function(InkBlob) {
  processThumb(InkBlob);
}, function(FPError) {
  console.error(FPError.toString());
}, function(progress) {
  console.log("Loading: " + progress + "%");
});

var processThumb = function(InkBlob) {
  var fpHandle = InkBlob.url.split('/').pop();
  $.ajax({
    url: API_BASE + 'file_picker_policy',
    type: 'GET',
    data: {
      'filename': fpHandle
    },
    dataType: 'json',
    success: function(data) {
      var newFpSecurityObj = data.data;
      filepicker.convert(InkBlob, {
        width: 800,
        height: 600,
        format: "jpg",
        policy: newFpSecurityObj.policy,
        signature: newFpSecurityObj.signature,
      }, {
        location: "S3",
        path: THUMB_FOLDER + '/' + newFpSecurityObj.uniqueName + '/',
      }, function(fp) { // onSuccess
        console.log("successfully converted and stored!");
        // do what you want with the converted file
      }, function(FPError) { // onError
        console.error(FPError);
      });
    },
    failure: function() {
      alert("There was an error converting the thumbnail! Please try again.");
    }
  });
};