Send message and leave server on ready event if not whitelisted (Discord.JS + MongoDB)

I'm coding a whitelist system for my Discord bot that, on the ready event (and after a 3-second delay), checks whether every server it is in has its ID added to the whitelist database on MongoDB. If not, the bot sends an embed and leaves the server. I managed to get it working on the guildCreate event, but on the ready event it performs the message and leave actions on every single server without filtering, even though those servers are added to the list. I cannot figure out why. Also, I'm still new to JavaScript, so it could be just a minor mistake.
//VARIABLES
const { Client, MessageEmbed } = require("discord.js")
const config = require('../../Files/Configuration/config.json');
const DB = require("../../Schemas/WhitelistDB");

//READY EVENT
module.exports = {
    name: "ready",
    once: false,
    async execute(client) {
        //[ ... ] <--- OTHER UNNECESSARY CODE IN BETWEEN
        setTimeout(function() { // <--- 3 SECONDS DELAY
            client.guilds.cache.forEach(async (guild) => { // <--- CHECK EVERY SERVER
                await DB.find({}).then(whitelistServers => { // <--- CHECK MONGODB ID LIST
                    if(!whitelistServers.includes(guild.id)) {
                        const channel = guild.channels.cache.filter(c => c.type === 'GUILD_TEXT').random(1)[0]; // <--- SEND MESSAGE TO RANDOM TEXT CHANNEL (It is sending to every server, when it should be sending only to the not whitelisted ones)
                        if(channel) {
                            const WhitelistEmbed = new MessageEmbed()
                            WhitelistEmbed.setColor(config.colors.RED)
                            WhitelistEmbed.setDescription(`${config.symbols.ERROR} ${config.messages.SERVER_NOT_WHITELISTED}`)
                            channel.send({embeds: [WhitelistEmbed]});
                        }
                        client.guilds.cache.get(guild.id).leave(); // <--- LEAVE SERVER (It is leaving every server, when it should be leaving only the not whitelisted ones)
                    } else { return }
                });
            });
        }, 1000 * 3);
    }
}

I found the solution myself!
Instead of fetching the whole array of whitelisted IDs for each guild, fetch one document at a time, and instead of checking the contents of the array, check whether the document exists. This is the updated code:
//WHITELIST
setTimeout(function() {
    client.guilds.cache.forEach(async (guild) => {
        await DB.findOne({ GuildID: guild.id }).then(whitelistServers => {
            if(!whitelistServers) {
                const channel = guild.channels.cache.filter(c => c.type === 'GUILD_TEXT').random(1)[0];
                if(channel) {
                    const WhitelistEmbed = new MessageEmbed()
                    WhitelistEmbed.setColor(config.colors.RED)
                    WhitelistEmbed.setDescription(`${config.symbols.ERROR} ${config.messages.SERVER_NOT_WHITELISTED}`)
                    channel.send({embeds: [WhitelistEmbed]});
                }
                client.guilds.cache.get(guild.id).leave();
            } else { return }
        });
    });
}, 1000 * 3);
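For anyone wondering why the original version misbehaved: DB.find({}) resolves to an array of documents, not an array of ID strings, so whitelistServers.includes(guild.id) was always false and every guild looked unwhitelisted. If you would rather keep a single query instead of one findOne per guild, a minimal sketch along these lines should also work (assuming each whitelist document stores the server ID in a GuildID field, as in the fix above):

// Minimal sketch (inside the async ready handler), assuming documents
// look like { GuildID: '...' } as in the updated code above:
const docs = await DB.find({});
const whitelisted = new Set(docs.map(doc => doc.GuildID)); // IDs, not documents
client.guilds.cache.forEach(guild => {
    if (!whitelisted.has(guild.id)) {
        // ...send the embed and leave, exactly as above
        guild.leave();
    }
});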

Related

Amplify Datastore subscription cost

I am trying to understand the cost of DataStore. It seems that it subscribes to all mutations, so if there are 50 users, each message will be sent 50 times, even when it is not required.
As each real-time mutation costs money, we will be paying unnecessarily 49 times for this real-time message mutation.
Also, it seems to me that SyncExpression doesn't have any effect on this subscription.
I am really stuck here. It would be great if someone could clarify.
Amplify generates the datastore boilerplate code for you, but you still need to call it. You won't pay for every user and every mutation.
You will only subscribe to a mutation (explicitly call the code to listen for changes) on a per-user basis, for things that user is interested in. E.g. if you are viewing a TODO item, you'd subscribe the user to that item and they'll immediately see if someone else modifies it on another device.
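As a sketch of what that per-item subscription can look like with DataStore (Todo here is a hypothetical generated model, not from the question):

import { DataStore } from '@aws-amplify/datastore';
import { Todo } from './models'; // hypothetical generated model

// Subscribe only to changes on the one item this user is viewing.
const subscription = DataStore.observe(Todo, todo.id).subscribe(msg => {
    console.log(msg.opType, msg.element); // INSERT / UPDATE / DELETE
});

// Later, when the view goes away:
subscription.unsubscribe();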
UPDATE
Long story... I was triggering back-end computation via GraphQL through a Lambda resolver. The computation took too long and the GQL call would time out. I updated the code so the GQL call invoked itself asynchronously (re-triggering the Lambda) and returned immediately. Then, when the long-running task completed in the spun-up Lambda, I updated a record in the database.
I update the record using AppSync instead of direct GQL so it triggers mutations, and in the React client I listen to a mutation for the specific record that will be updated. This way there is just one user listening (the one who triggered the long-running action), and that user is only notified about changes to the single DB record they're interested in, not other users' updates.
I don't know if all this is applicable to your situation. The code snippets below may help you, but they're somewhat out of context.
// In amplify/backend/api/projectname/schema.graphql
type Subscription {
    onCouponWithIdUpdated(id: ID!): Coupon @aws_subscribe(mutations: ["updateCoupon"])
}
// In my useSendCoupon hook...
// Subscribe to coupon updates
useEffect(() => {
    if (0 === couponId) {
        return
    }
    console.log(`subscribe to coupon updates for couponId:`, couponId)
    const onCouponWithIdUpdated = /* GraphQL */ `
        subscription OnCouponWithIdUpdated($id: ID!) {
            onCouponWithIdUpdated(id: $id) {
                id
                proofLink
                owner
            }
        }
    `
    const subscription = API
        .graphql(graphqlOperation(onCouponWithIdUpdated, { id: couponId }))
        .subscribe({
            next: ({ provider, value }) => {
                const coupon = value.data.onCouponWithIdUpdated
                //console.log(`Proof Link:`, coupon.proofLink)
                setProofLinks([coupon.proofLink])
                setSendCouponState(COUPON_STATE_PREVIEW_SUCCESS)
            },
            error: error => console.warn(error)
        })
    console.log('subscribed: ', subscription)
    return () => {
        console.log(`unsubscribe to coupon updates`)
        subscription.unsubscribe()
    }
}, [couponId])
// inside a lambda...
// (imports needed by this snippet, from aws-appsync and graphql-tag)
const AWSAppSyncClient = require('aws-appsync').default
const { AUTH_TYPE } = require('aws-appsync')
const gql = require('graphql-tag')

const updateCouponWithProof = async (authorization, couponId, proofLink) => {
    const initializeClient = () => new AWSAppSyncClient({
        url: process.env.API_XXXX_GRAPHQLAPIENDPOINTOUTPUT,
        region: process.env.REGION,
        auth: {
            type: AUTH_TYPE.AMAZON_COGNITO_USER_POOLS,
            jwtToken: authorization
        },
        disableOffline: true,
    })
    const executeMutation = async (mutation, operationName, variables) => {
        const client = initializeClient()
        try {
            const response = await client.mutate({
                mutation: gql(mutation),
                variables,
                fetchPolicy: "no-cache",
            })
            return response.data[operationName]
        } catch (err) {
            console.log("Error while trying to mutate data", err)
            throw JSON.stringify(err)
        }
    }
    const updateCoupon = /* GraphQL */ `
        mutation UpdateCoupon(
            $input: UpdateCouponInput!
            $condition: ModelCouponConditionInput
        ) {
            updateCoupon(input: $input, condition: $condition) {
                id
                proofLink
                owner
            }
        }
    `
    const variables = { input: { id: couponId, proofLink } }
    try {
        return await executeMutation(updateCoupon, 'updateCoupon', variables)
    } catch (error) {
        console.log(`executeMutation error`, error)
    }
}

Google Action Webhook Inline Editor Returns Before the API call

This is my first Google Action project. I have a simple slot after the invocation. The user enters a value at the prompt, and the slot invokes the webhook, which makes a call to an API using the user input. All works fine. However, the webhook returns to the user even before the API call finishes processing and returns the value (line 1, conv.add). I do see in the logs that everything from the API is logged fine after the webhook returns to the user. Below is the code I am using (in the inline editor). What am I missing? Thanks for the help in advance.
const { conversation } = require('@assistant/conversation');
const functions = require('firebase-functions');
var https = require('https');
const fetch = require('node-fetch');

const app = conversation({debug: true});

app.handle('SearchData', conv => {
    const body = JSON.stringify({
        val: "this is my body"
    });
    // prepare the header
    var postheaders = {
        'Content-Type' : 'application/json',
        'Auth' : 'MyAuthCreds'
    };
    fetch('https://host.domain.com/data', {
        method: 'post',
        body: body,
        headers: postheaders,
    })
    .then(res => res.json())
    .then(d => {
        console.log(d);
        var profile = d; //JSON.parse(d);
        console.log(d.entries);
        console.log("Length: " + d.entries.length);
        if (d.entries.length > 0) {
            console.log("Data found");
            conv.add("Data found"); // line 1
        } else {
            console.log("no data found");
            conv.add("no data found"); // line 1
        }
    })
    .catch(function (err) {
        // POST failed...
        console.log(err);
    });
});

exports.ActionsOnGoogleFulfillment = functions.https.onRequest(app);
Your issue is that your handler is making API calls which are asynchronous, but the Assistant Conversation library doesn't know that you're doing so. As soon as the handler finishes, it tries to send back a response, but your asynchronous callbacks (the stuff in the then() blocks) haven't executed yet.
To address this, you need to return a Promise object so the library knows to wait until the Promise is fulfilled before it sends the response.
Fortunately, in your case, this is straightforward: fetch and all the .then() blocks return a Promise, so all you need to do is add a return statement in front of the call to fetch. Something like this:
return fetch('https://host.domain.com/data', {
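Put back into the handler, the whole thing would look roughly like this (a condensed sketch of the handler above, with only the return added):

app.handle('SearchData', conv => {
    const body = JSON.stringify({ val: "this is my body" });
    var postheaders = {
        'Content-Type': 'application/json',
        'Auth': 'MyAuthCreds'
    };
    // Returning the Promise tells the library to wait for the fetch chain
    // to resolve before sending the response back to the user.
    return fetch('https://host.domain.com/data', {
        method: 'post',
        body: body,
        headers: postheaders,
    })
    .then(res => res.json())
    .then(d => {
        conv.add(d.entries.length > 0 ? "Data found" : "no data found");
    })
    .catch(err => console.log(err));
});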

Firestore simple collectionGroup makes the function timeout

I have a cloud function that is scheduled to run every 2 minutes. I am currently testing it via an HTTP-triggered function, trying to run a simple collectionGroup query as follows.
Update 1
I updated my code to handle offset and limit, but now it works fine for the first record and then hangs while fetching the second one.
Please check the snippet:
/* eslint-disable promise/no-nesting */
/**
 * @description
 * This module will send out emails to different users who have alerts not sent out yet
 * @algorithm
 * 1 - Get all alerts where sent = false
 * 2 - Send out those alerts via the email module.
 */
const functions = require("firebase-functions");
const { db } = require("../helpers/firestore");
// console.log('admin: ', admin);

let _allUserAlerts = [];
let counter = 0;

exports.sendEmailsToUsers = functions.pubsub
    .schedule("every 2 minutes")
    .onRun(context => {
        return sendEmailsToUsers(context);
    });

exports.testSendEmailsToUsers = functions.https.onRequest((req, res) => {
    return sendEmailsToUsers("hello");
});

async function sendEmailsToUsers(context) {
    try {
        console.log("function execution")
        let alerts = await db
            .collectionGroup("alerts")
            .where("sent", "==", false)
            .limit(1)
            .offset(counter)
            .get();
        alerts.forEach(_alert => {
            if (_alert.exists) {
                console.log("alert exists")
                _allUserAlerts.push(_alert.data());
                counter++;
                return sendEmailsToUsers()
            } else {
                console.log("alert ends")
                sendAlertsToEmail(_allUserAlerts)
            }
        })
    } catch (err) {
        console.log(err)
    }
}

async function sendAlertsToEmail(alerts) {
    console.log('alerts: ', alerts);
}
Old code
/* eslint-disable promise/no-nesting */
/**
 * @description
 * This module will send out emails to different users who have alerts not sent out yet
 * @algorithm
 * 1 - Get all alerts where sent = false
 * 2 - Send out those alerts via the email module.
 */
const functions = require("firebase-functions");
const { db } = require("../helpers/firestore");
const { Email } = require("../EmailModule");
const { groupBy } = require("../helpers/groupBy");

exports.sendEmailsToUsers = functions.pubsub
    .schedule("every 2 minutes")
    .onRun(context => {
        return sendEmailsToUsers(context);
    });

async function sendEmailsToUsers(context) {
    try {
        console.log("function execution")
        let alerts = await db
            .collectionGroup("alerts")
            .where("sent", "==", false)
            .get();
        console.log('alerts: ', alerts);
    } catch (err) {
        console.log(err)
    }
}
This is literally the entire code in the function, but the function times out and I never get to the log() of alerts.
I don't understand what the problem could be here. Is there anything I might be missing or not getting? Any help would be appreciated.
Thanks
Maybe the process simply takes more than 60 seconds. You can increase the timeout of the cloud function; you can raise it up to 9 minutes and it should work.
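With the Firebase SDK, raising the timeout looks like this (a sketch applied to the scheduled export above; 540 seconds is the 9-minute maximum):

exports.sendEmailsToUsers = functions
    .runWith({ timeoutSeconds: 540 }) // raise from the 60s default
    .pubsub.schedule("every 2 minutes")
    .onRun(context => sendEmailsToUsers(context));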
Otherwise, the problem does not seem to be in the function itself. The issue may be somewhere here: const { db } = require("../helpers/firestore");. If "db" is not coming through, that's why your code stops at let alerts = await db.
It sounds like you might be requesting too much data to transmit and store in memory without problems. I suggest paginating your data with limit() instead so that you can batch process results without exceeding the limits of Cloud Functions server instances (which are quite limited). Start small and work your way up.
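A minimal sketch of that batching, assuming the same db handle and a sendAlertsToEmail helper like the one above (using query cursors via startAfter, which avoids the module-level counter entirely):

async function processAlertsInBatches(batchSize = 100) {
    let last = null;
    for (;;) {
        let query = db
            .collectionGroup("alerts")
            .where("sent", "==", false)
            .limit(batchSize);
        if (last) query = query.startAfter(last); // resume after the previous batch
        const snapshot = await query.get();
        if (snapshot.empty) break;
        await sendAlertsToEmail(snapshot.docs.map(d => d.data()));
        last = snapshot.docs[snapshot.size - 1];
    }
}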
The solution to this problem was to upgrade the Node version.
By default, Cloud Functions ran on Node 8, which is no longer supported by the official Node.js team. When I upgraded to Node 10, it started working perfectly again!
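For anyone hitting the same thing: with the Firebase CLI, the runtime is pinned via the engines field in functions/package.json, for example:

"engines": {
    "node": "10"
}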

Editing My HTTP Call to Use Sockets (socket.io) to Receive Data via an Observable in my Angular 2 App

Right now I have an HTTP GET call handling data coming from an API into my Angular 2 app. Now we're switching to using sockets via socket.io. I have been using an observable to get the data, and I know I can continue to do that while using socket.io sockets. But I'm having difficulty figuring out exactly what it should look like, i.e., how I need to edit my getByCategory function to receive the data via a socket connection. This is what my getByCategory function currently looks like in my client-side Angular service:
private _url: string = 'https://api.someurl';

getByCategory() {
    return this._http.get(this._url)
        .map((response:Response) => response.json())
        .catch(this._errorsHandler);
}

_errorsHandler(error: Response) {
    console.error(error);
    return Observable.throw(error || "Server Error");
}
And, on the server side, this is what my function export looks like in our mongoDB setup (already set up to use sockets via socket.io):
exports.getByCategory = function(req, res, next) {
    let skip, limit, stage, ioOnly = false;
    let role = 'office_default';
    if (_.isUndefined(req.params)) {
        stage = req.stage;
        skip = parseInt(req.skip) || 0;
        limit = parseInt(req.limit) || 0;
        role = req.role;
        ioOnly = true;
    } else {
        stage = req.params.stage;
        skip = parseInt(req.query.skip) || 0;
        limit = parseInt(req.query.limit) || 0;
        role = req.query.role;
    }
    console.log(role);
    Category[role].find({'services.workflow.status': stage}).skip(skip).limit(limit).exec(function(err, doc) {
        if (err) { if (!ioOnly) { return next(err) } else { return res(err) } }
        else if (doc) ((!ioOnly) ? res.json(doc) : res(doc));
        else ((!ioOnly) ? res.sendStatus(204) : res(doc));
    });
};
How should I edit my getByCategory function to use socket.io instead of HTTP in my service? Do I need an emit function coming from my API to act on in my Angular 2 service, or can I just adjust my current getByCategory function to use sockets within the existing observable instead?
I thought about editing the function to look something like this:
getByStage() {
    this.socket.on('getByCategory')
        .map((response:Response) => response.json())
        .catch(this._errorsHandler);
}
... but to do that I'd need the server function export to make it available via an "emit" or something similar, wouldn't I? Would it work if I did that? Am I missing something here?
If you need to work with a socket connection (like socket.io), you should depend on callbacks.
So you should set up callback functions to work with them.
A demo is given here:
import { Subject } from 'rxjs/Subject';
import { Observable } from 'rxjs/Observable';
import * as io from 'socket.io-client';

export class ChatService {
    private url = 'http://localhost:5000';
    private socket;

    sendMessage(message) {
        this.socket.emit('add-message', message);
    }

    getMessages() {
        let observable = new Observable(observer => {
            this.socket = io(this.url);
            this.socket.on('message', (data) => {
                observer.next(data);
            });
            return () => {
                this.socket.disconnect();
            };
        })
        return observable;
    }
}
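Consuming it from a component then looks like any other observable (a sketch, assuming the service is injected as chatService and the component keeps a messages array):

// e.g. in ngOnInit:
this.connection = this.chatService.getMessages().subscribe(message => {
    this.messages.push(message);
});

// and in ngOnDestroy, which also disconnects the socket via the teardown above:
this.connection.unsubscribe();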
A complete tutorial on using Angular 2 with socket.io is given here.
Hope you have your answer.

Meteor Collections.count() too slow on UI

I have set up a simple publish/subscribe in my Meteor project, and whenever a user lands on the home page I have to show the count of total users, which is around 15,000. In the template helper I have written this code:
CLIENT-SIDE
Template.voters.helpers({
    voters: function() {
        return voters.find({});
    },
    count: voterscount
});
then on SERVER-SIDE
voters = new Mongo.Collection("voters");

voterscount = function() {
    return voters.find({}).count();
}

Meteor.publish('voters', function() {
    return voters.find({});
});

Meteor.publish('voterscount', function() {
    return voterscount;
});
The output I get is that the count rolls up from 0 to 15,000 on the UI, which is irritating.
I don't want the digits rolling up on the UI; it should show the static count of 15,000 whenever the page is refreshed.
Why is this so slow? In production I will have around 10 million documents in the collection, so this is a big drawback. Any help, please?
This is a good use case for a universal publication, i.e. one that is automatically sent to all clients.
Server:
stats = new Mongo.Collection('stats'); // define a new collection

function upsertVoterCount() {
    stats.upsert('numberOfVoters', { numberOfVoters: voters.find().count() });
}
upsertVoterCount();

Meteor.publish(null, function() { // null name means send to all clients
    return stats.find();
});

var voterCursor = voters.find();
voterCursor.observe({
    added: upsertVoterCount,
    removed: upsertVoterCount
});
Then on the client you can get the voter count anytime with:
var nVoters = stats.findOne('numberOfVoters').numberOfVoters;
SERVER-publish.js
function upsertVoterCount() {
    voterscount.upsert('numberOfVoters', {
        numberOfVoters: voters.find().count()
    });
}

// null name means send to all clients
Meteor.publish(null, function() {
    upsertVoterCount();
    return voterscount.find();
});

var voterCursor = voters.find();
voterCursor.observe({
    added: upsertVoterCount,
    removed: upsertVoterCount
});
LIB-collection.js
// define a new collection
voterscount = new Mongo.Collection('voterscount');
CLIENT-home.js
Template.voter.helpers({
    count: function() {
        return voterscount.findOne().numberOfVoters;
    }
});