Only 2 intents work, 'MAIN' and 'TEXT' - actions-on-google

I'm building my first app with actions-on-google / google-assistant-sdk. I want to start with 3 intents: MAIN, one that responds to TEXT input, and HELP, which the user can call at any time.
The action.json is:
{
  "actions": [
    {
      "description": "Default Welcome Intent",
      "name": "MAIN",
      "fulfillment": {
        "conversationName": "conversation_1"
      },
      "intent": {
        "name": "actions.intent.MAIN"
      }
    },
    {
      "description": "Help Intent",
      "name": "Help",
      "fulfillment": {
        "conversationName": "conversation_1"
      },
      "intent": {
        "name": "app.StandardIntents.HELP",
        "trigger": {
          "queryPatterns": [
            "Help",
            "HELP",
            "help"
          ]
        }
      }
    }
  ],
  "conversations": {
    "conversation_1": {
      "name": "conversation_1",
      "url": "https://us-central1-sillytest-16570.cloudfunctions.net/sayNumber",
      "fulfillmentApiVersion": 2
    }
  }
}
The index.js:
'use strict';

process.env.DEBUG = 'actions-on-google:*';
const ActionsSdkApp = require('actions-on-google').ActionsSdkApp;
const functions = require('firebase-functions');

const NO_INPUTS = [
  'I didn\'t hear that.',
  'If you\'re still there, say that again.',
  'We can stop here. See you soon.'
];

exports.sayNumber = functions.https.onRequest((request, response) => {
  const app = new ActionsSdkApp({request, response});

  function mainIntent (app) {
    console.log('mainIntent');
    let inputPrompt = app.buildInputPrompt(true, '<speak>Hi! <break time="1"/> ' +
      'I can read out an ordinal like ' +
      '<say-as interpret-as="ordinal">123</say-as>. Say a number.</speak>', NO_INPUTS);
    app.ask(inputPrompt);
  }

  function rawInput (app) {
    console.log('rawInput');
    if (app.getRawInput() === 'bye') {
      app.tell('Goodbye!');
    } else {
      let inputPrompt = app.buildInputPrompt(true, '<speak>You said, <say-as interpret-as="ordinal">' +
        app.getRawInput() + '</say-as></speak>', NO_INPUTS);
      app.ask(inputPrompt);
    }
  }

  function helpHandler (app) {
    console.log('helpHandler');
    app.ask('<speak>What kind of help do you need?</speak>');
  }

  let actionMap = new Map();
  actionMap.set(app.StandardIntents.MAIN, mainIntent);
  actionMap.set(app.StandardIntents.TEXT, rawInput);
  actionMap.set(app.StandardIntents.HELP, helpHandler);
  app.handleRequest(actionMap);
});
I deployed the Firebase function with:
firebase deploy --only functions
And pushed the Google Actions package with:
gactions update --action_package action.json --project <YOUR_PROJECT_ID>
While testing in the Actions simulator, it starts well: it repeats each number I enter, waits for another number, and so on. But when I enter "help" it terminates and stops responding!
UPDATE
I tried the following, but it did not work:
actionMap.set("app.StandardIntents.HELP", helpHandler);
I expect the app to answer "What kind of help do you need?" when I enter/say "Help", but instead it just reads it back, the same way it does with any other number.

Non-built-in intents are only supported for the first message in a conversation. After that, while you can still use them for speech biasing, you will only get a built-in intent such as the TEXT intent.
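In practice this means a mid-conversation "help" arrives as a TEXT intent, so it has to be caught in the raw-input handler. A minimal sketch based on the question's own rawInput function:
function rawInput (app) {
  console.log('rawInput');
  const input = app.getRawInput().toLowerCase();
  if (input === 'bye') {
    app.tell('Goodbye!');
  } else if (input === 'help') {
    // Custom intents never match after the first message, so "help" lands here.
    app.ask('<speak>What kind of help do you need?</speak>');
  } else {
    let inputPrompt = app.buildInputPrompt(true, '<speak>You said, <say-as interpret-as="ordinal">' +
      app.getRawInput() + '</say-as></speak>', NO_INPUTS);
    app.ask(inputPrompt);
  }
}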

Your actionMap is looking for app.StandardIntents.HELP, but that constant doesn't exist. You can view all of the standard intents in the GitHub repo.
app.StandardIntents.MAIN returns a string corresponding to 'actions.intent.MAIN'. The library does not read your action.json and generate new constants, so app.StandardIntents.HELP actually returns undefined, and your handler is never registered.
Your map should use a string key for your help intent, since it is not available as a constant on the app object:
actionMap.set("app.StandardIntents.HELP", helpHandler);
This should resolve your issue. Let me know if it does not.
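For clarity, the full registration block would then look like this sketch; note that, per the other answer, the custom HELP intent will still only match when it is the very first message of the conversation:
let actionMap = new Map();
actionMap.set(app.StandardIntents.MAIN, mainIntent);
actionMap.set(app.StandardIntents.TEXT, rawInput);
// A plain string: it must match the intent name declared in action.json.
actionMap.set('app.StandardIntents.HELP', helpHandler);
app.handleRequest(actionMap);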

Related

Why is my webhook call fulfillment deleting my parameter in Dialogflow?

I'm working on a Dialogflow chatbot. It retrieves the user's name and email at the start of the conversation, and I have a session-vars output context with a lifespan of 60 to keep these parameters throughout the conversation. I know they are kept, both by checking the diagnostic info and by referring to them in chatbot replies as #context.parameter.
Near the end of my conversation path there is an intent called 110checklistemail.sendemail where my chatbot asks the user if they want the information emailed to them or sent in the chat. If the user says "Email it to me", webhook fulfillment is enabled and the request is handled by the code pasted below.
I followed a guide to integrate the chatbot with SendGrid, and the email is sent and does work if I prompt for the email in that specific intent. However, if I don't prompt for the email address (e.g. the user just says "email it to me"), the agent cannot send the email because the email parameter is now blank, despite having been populated earlier in a context with a long lifespan.
Fulfillment code is the following:
'use strict';

const functions = require('firebase-functions');
const {WebhookClient} = require('dialogflow-fulfillment');
const {Card, Suggestion} = require('dialogflow-fulfillment');
const sgMail = require('@sendgrid/mail');

process.env.DEBUG = 'dialogflow:debug'; // enables lib debugging statements
process.env.SENDGRID_API_KEY = 'SG._APIKEYHERE';

exports.dialogflowFirebaseFulfillment = functions.https.onRequest((request, response) => {
  const agent = new WebhookClient({ request, response });
  console.log('Dialogflow Request headers: ' + JSON.stringify(request.headers));
  console.log('Dialogflow Request body: ' + JSON.stringify(request.body));

  function sendEmail(agent) {
    sgMail.setApiKey(process.env.SENDGRID_API_KEY);
    const emailParam = agent.parameters.email;
    const msg = {
      to: emailParam,
      from: 'tyler@mailfence.com',
      subject: 'Just a quick note',
      text: 'Just saying Hi ${agent.parameters.given-name} from Dialogflow...',
      html: 'Just saying <strong>Hi Hi ${agent.parameters.given-name} from Dialogflow</strong>...',
    };
    console.log(msg);
    sgMail.send(msg);
    agent.add(`What a beauty!`);
  }

  // Run the proper function handler based on the matched Dialogflow intent name
  let intentMap = new Map();
  intentMap.set('110checklistemail.sendemail', sendEmail);
  // intentMap.set('your intent name here', yourFunctionHandler);
  // intentMap.set('your intent name here', googleAssistantHandler);
  agent.handleRequest(intentMap);
});
Snippet from Diagnostic info:
{
  "responseId": "130742d9-7453-41c6-8b27-ee8f91d9d02d-5a74d3f9",
  "queryResult": {
    "queryText": "Email it to me",
    "parameters": {
      "email": ""
    },
    "allRequiredParamsPresent": true,
    "fulfillmentText": "What a beauty!",
    "fulfillmentMessages": [
      {
        "text": {
          "text": [
            "What a beauty!"
          ]
        }
      }
    ],
    "outputContexts": [
      {
        "name": "projects/tyler-vhyo/locations/global/agent/sessions/aef22896f9/contexts/1await_checklist_type",
        "lifespanCount": 4,
        "parameters": {
          "Jobtype.original": "custom shower",
          "email.original": "",
          "Jobtype": "custom shower",
          "email": ""
        }
      },
      {
        "name": "projects/tyler-vhyo/locations/global/agent/sessions/aef3d36-18d696f9/contexts/session-vars",
        "lifespanCount": 54,
        "parameters": {
          "last-name.original": "",
          "email.original": "",
          "email": "",
          "given-name.original": "Tim",
          "Jobtype": "custom shower",
          "Jobtype.original": "custom shower",
          "given-name": "Tim",
          "last-name": ""
        }
I was wondering why it wasn't working unless I prompted for the email in that specific intent, and I realised it's because the email parameters are now all blank (and they were definitely populated before!). I don't want to re-prompt for the email when the user already has to input it at the start of the conversation.
How can I solve this issue and send the email using the pre-existing email parameter?
Also, how can I get the ${agent.parameters.given-name} expression to reference the name parameter correctly in the email body? It hasn't been working for me, and I don't know if there is a better way to pull parameters into the email.
I'm an extreme novice at coding, and the code above is something I made following a guide. I really have no idea what is happening, so any advice is very welcome and appreciated. Thank you in advance!
Managed to solve the issue. Steps that I think led to it working:
Added all the parameters to the 'action and parameters' section of the intent
Checkmarked 'Is list'
Added a default value for each parameter referring to itself in the earlier context, in the form #session-vars.parametername
I don't understand 100% why it wouldn't work without the above steps, as the intent shouldn't modify or clear the parameters, but it seems to have done the trick.
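An alternative is to read the values from the long-lived context inside the fulfillment itself, so the intent's own (blank) parameter no longer matters. A sketch, assuming the session-vars context still carries the values and that your dialogflow-fulfillment version exposes agent.context.get (older versions use agent.getContext); it also shows the backticks and bracket access that ${agent.parameters.given-name} needs:
function sendEmail(agent) {
  sgMail.setApiKey(process.env.SENDGRID_API_KEY);
  // Fall back to the long-lived context when the intent's own parameter is blank.
  const sessionVars = agent.context.get('session-vars'); // or agent.getContext('session-vars')
  const params = sessionVars ? sessionVars.parameters : {};
  const email = agent.parameters.email || params.email;
  const givenName = params['given-name'] || ''; // hyphenated keys need bracket access
  const msg = {
    to: email,
    from: 'tyler@mailfence.com',
    subject: 'Just a quick note',
    // Interpolation only works inside backtick template literals, not single quotes.
    text: `Just saying Hi ${givenName} from Dialogflow...`,
    html: `Just saying <strong>Hi ${givenName} from Dialogflow</strong>...`,
  };
  console.log(msg);
  return sgMail.send(msg).then(() => agent.add('What a beauty!'));
}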

Calling OpenWeather API from Watson Assistant: "Direct CloudFunctions call was not successful"

I am trying to use the OpenWeatherMap API with Watson Assistant, but I am getting "Webhook call was not successful. Response code is [404]. (and there is 1 more error in the log)."
(I am working from the book by Sabharwal, et al., with my own improvisations for the obsolete elements, like #sys-location.)
I created a Cloud Functions Action called "https://us-south.functions.appdomain.cloud/api/v1/web/my-account-email%40dev/default/Weather-Connection" and checked Enable as Web Action. The action code was imported from the git repo for the book:
let rp = require('request-promise')

function main(params) {
  const options = {
    uri: "http://api.openweathermap.org/data/2.5/weather?q=" +
      encodeURIComponent(params.object_of_interest) + "&units=metric&APPID=19e8588cb3d7d0623e3a5a8ec529232f",
    json: true
  }
  return rp(options)
    .then(res => {
      const WeatherReport = "Current Temperature : " + res.main.temp + ", Pressure : " + res.main.pressure +
        ", Humidity : " + res.main.humidity + ", temp min : " + res.main.temp_min + ", temp max : " + res.main.temp_max
      return { WeatherReport }
    })
}
In the Assistant Options the webhook URI is set to
https://us-south.functions.appdomain.cloud/api/v1/web/my-account-email%40dev/default/Weather-Connection.json.
The "Assistant responds" JSON is
The "Assistant responds" JSON is
{
  "output": {
    "text": {
      "values": [],
      "selection_policy": "sequential"
    }
  },
  "actions": [
    {
      "name": "/my-account-email%40dev/default/Weather-Connection.json",
      "type": "cloud_function",
      "parameters": {
        "object_of_interest": "$location"
      },
      "credentials": "$credentials",
      "result_variable": "$response"
    }
  ],
  "context": {
    "credentials": {
      "api_key": "[my-openweathermap-api-key]"
    },
    "object_of_interest": "@object_of_interest"
  }
}
For debugging, I included a dialog node that displays the value of $location, and it is okay (e.g. "London").
The "Try it out" pane prints {"cloud_functions_call_error":"The requested resource does not exist."} When I click on the Error icon I get a Runtime error pop-up saying, Direct CloudFunctions call was not successful. Http response code is [404]. (and there is 1 more error in the log).
I am not getting any output from running the CLI command ibmcloud fn activation list(I'm not sure that's the right way to check the logs).
I have tested the Weather-Connection function by invoking the Action with parameter {"object_of_interest": "London"}, and it works.
Everything is deployed in the same region (us-south) and namespace.
I can't think of anything else to try.
I just cracked it. I was trying to show the result using the dialog text "response is <? $webhook_result_1.response ?>" when it should just have been "response is <? $webhook_result_1 ?>" (presumably because the function's return value has no response property).

How to handle different languages with Google Actions and DialogflowApp with Firebase functions

I have configured multiple languages in my Dialogflow agent. I cannot figure out how to detect the language of the request in my Firebase function in order to answer in the right language. Is there a standard approach to handling this? I don't see any function to detect the language in https://github.com/actions-on-google/actions-on-google-nodejs
I would expect to be able to do something like this:
const app = new DialogflowApp({request: request, response: response});
if (app.getLang == 'en') {
  // Do something in English
} else if (app.getLang == 'es') {
  // Do something in Spanish
}
There is a public sample on the AoG GitHub for Number Genie, which is available in both French and English.
In this sample they define JSON objects for English and French locales:
{
  "images": {
    "cold": {
      "url": "COLD.gif",
      "altText": "cold genie",
      "cardText": [
        "Freezing like an ice cave in Antarctica?",
        "I can't feel my face anymore",
        "Hurry, before I turn into an icicle"
      ]
    },
...
{
  "images": {
    "cold": {
      "url": "COLD.gif",
      "altText": "Génie froid",
      "cardText": [
        "Je me gèle comme un glaçon en Antartique",
        "Je ne sens plus mon visage",
        "Dépêchez-vous avant que je ne me transforme en glaçon"
      ]
    },
...
Then a central strings.js file pulls the correct string for that locale.
const i18n = require("i18n");

i18n.configure({
  "directory": __dirname + "/locales",
  "objectNotation": true,
  "fallbacks": {
    "fr-FR": "fr",
    "fr-CA": "fr"
  }
});

const prompts = () => ({
  "welcome": {
    "visual": {
      "elements": [
        [i18n.__("variants.greeting"), i18n.__("variants.invocation")],
        i18n.__("variants.invocationGuess"),
        i18n.__("images.intro")
      ],
      "suggestions": onlyNumberSuggestions
    }
  },
...
Which is then used to map to each intent:
[Actions.GENERATE_ANSWER] () {
  this.data.answer = strings.getRandomNumber(strings.numbers.min,
    strings.numbers.max);
  this.data.guessCount = 0;
  this.data.fallbackCount = 0;
  this.data.steamSoundCount = 0;
  this.ask(strings.prompts.welcome, strings.numbers.min, strings.numbers.max);
}
The locale is set from the app.getUserLocale() method:
/**
 * Get the Dialogflow intent and handle it using the appropriate method
 */
run () {
  strings.setLocale(this.app.getUserLocale());
  /** @type {*} */
  const map = this;
  const action = this.app.getIntent();
  console.log(action);
  if (!action) {
    return this.app.ask(`I didn't hear a number. What's your guess?`);
  }
  map[action]();
}
There's definitely a lot here, and you don't need to do it exactly the same way. app.getUserLocale() should return the current locale, which you can then use in any way you want to build the response.
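A minimal sketch of that idea outside of the Number Genie structure (the 'welcome' key, the locales directory layout, and the intent name are assumptions for illustration):
const functions = require('firebase-functions');
const DialogflowApp = require('actions-on-google').DialogflowApp;
const i18n = require('i18n');

i18n.configure({
  directory: __dirname + '/locales', // e.g. locales/en.json and locales/es.json
  objectNotation: true,
  fallbacks: { 'en-US': 'en', 'en-GB': 'en', 'es-ES': 'es', 'es-419': 'es' }
});

exports.myAgent = functions.https.onRequest((request, response) => {
  const app = new DialogflowApp({request, response});

  function welcome (app) {
    // Pick the strings file matching the request's locale, e.g. 'en-US' or 'es-ES'.
    i18n.setLocale(app.getUserLocale() || 'en-US');
    app.ask(i18n.__('welcome'));
  }

  app.handleRequest(new Map().set('input.welcome', welcome));
});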

Having an issue with Dialogflow API WebhookResponse V2 for Actions for Google

I'm testing Actions on Google, so I created a simple Sinatra application which looks something like this:
require 'sinatra'
require 'json'

post '/google_assistant_api' do
  content_type :json
  case intent_name
  when "input_welcome"
    decorated_response
  when "Recipe name"
    basic_card
  end
end

private

def decorated_response
  {
    source: "test source",
    speech: "speech",
    display_text: "something"
  }.to_json
end

def intent_name
  parsed_request["queryResult"]["intent"]["displayName"]
end

def parsed_request
  @parsed_request ||= JSON.parse(request.body.read)
end

def basic_card
  {
    "fulfillmentText": "ACTIONS_ON_GOOGLE",
    "fulfillmentMessages": [
      {
        "platform": "PLATFORM_UNSPECIFIED",
        "text": {
          "text": [
            "string text"
          ]
        },
        "image": {
          "imageUri": "https://avatars3.githubusercontent.com/u/119195?s=400&v=4"
        },
        "basicCard": {
          "title": "title string",
          "subtitle": "subtitle",
          "formattedText": "formatted text",
          "image": {
            "imageUri": "https://avatars3.githubusercontent.com/u/119195"
          },
          "buttons": []
        }
      }
    ],
    "source": "source string"
  }.to_json
end
Please note that I'm using V2 of the API and testing with the Google Assistant.
I tried many other response formats based on https://gist.github.com/manniru/f52af230669bd3ed2e69ffe4a76ab309 with no luck. I keep getting:
Sorry! there was no response from the Agent. Please try later.
Has anyone had luck with a non-Node.js response? I would appreciate any sample, as the simple response seems to be working, but for the basic card I'm having no luck.
Dialogflow's v2 API uses a different format for webhook requests and responses which is documented here:
Dialogflow v2 Webhook Request
Dialogflow v2 Webhook Response
It appears that your code is using the old format.
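For reference, a minimal response in the v2 format (a sketch assembled from the documentation linked above, not taken from the asker's app) looks like this; Assistant-specific rich responses such as basic cards go under a payload.google object instead:
{
  "fulfillmentText": "This is a text response",
  "fulfillmentMessages": [
    {
      "text": {
        "text": ["This is a text response"]
      }
    }
  ]
}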

How to ask permission in Actions on Google without the SDK?

I would like to know the name of the user; however, I cannot use the Node.js SDK since I use another language.
How can I ask for permission?
I would prefer a way using the normal JSON responses.
I hacked together this minimal script to get the JSON response which the Node.js SDK would return:
gaction.js:
const DialogflowApp = require('actions-on-google').DialogflowApp;

const app = new DialogflowApp({
  request: {
    body: {
      result: {
        action: 'Test',
        contexts: []
      }
    },
    get: (h) => h
  },
  response: {
    append: (h, v) => console.log(`${h}: ${v}`),
    status: (code) => {
      return {send: (resp) => console.log(JSON.stringify(resp, null, 2))};
    }
  }
});

function testCode(app) {
  app.askForPermission('To locate you', app.SupportedPermissions.DEVICE_PRECISE_LOCATION);
}

app.handleRequest(new Map().set('Test', testCode));
I'm still no Node.js expert, so this might not be an optimal solution. Once you have installed Node and run the command npm install actions-on-google, the necessary dependencies will be installed.
When done, you just need to run node gaction, which will produce this output:
Google-Assistant-API-Version: Google-Assistant-API-Version
Content-Type: application/json
{
  "speech": "PLACEHOLDER_FOR_PERMISSION",
  "contextOut": [
    {
      "name": "_actions_on_google_",
      "lifespan": 100,
      "parameters": {}
    }
  ],
  "data": {
    "google": {
      "expect_user_response": true,
      "no_input_prompts": [],
      "is_ssml": false,
      "system_intent": {
        "intent": "assistant.intent.action.PERMISSION",
        "spec": {
          "permission_value_spec": {
            "opt_context": "To locate you",
            "permissions": [
              "DEVICE_PRECISE_LOCATION"
            ]
          }
        }
      }
    }
  }
}
If you now send the JSON above, Google Home will ask the user for the permission. Have fun!
The request/response JSON formats for the API.AI webhooks with Actions are documented at https://developers.google.com/actions/apiai/webhook
As you've discovered, the data.google.permissions_request attribute contains two fields regarding the request:
opt_context contains a string which is read to give some context about why you're asking for the information.
permissions is an array of strings specifying what information you're requesting. The strings can have the following values (a minimal response using them is sketched after the list):
NAME
DEVICE_COARSE_LOCATION
DEVICE_PRECISE_LOCATION
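Putting those fields together, a minimal API.AI-format response asking for the precise location might look like the following sketch (field names follow the webhook documentation above; the speech placeholder matches the SDK output shown earlier):
{
  "speech": "PLACEHOLDER_FOR_PERMISSION",
  "data": {
    "google": {
      "expect_user_response": true,
      "permissions_request": {
        "opt_context": "To locate you",
        "permissions": ["DEVICE_PRECISE_LOCATION"]
      }
    }
  }
}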
If you are using Java or Kotlin, there is an unofficial SDK. It matches the official SDK API nearly exactly.
https://github.com/TicketmasterMobileStudio/actions-on-google-kotlin