I'm following the sample code for Actions on Google responses at the following link:
https://developers.google.com/actions/assistant/responses
I want the list response to appear when the user initiates the text intent, but all I get is "Your App isn’t responding right now. Try again soon." Here is the code that I'm using (it's mostly copied and pasted from the link):
function textIntent(app) {
  app.askWithList(app.buildRichResponse()
    .addSimpleResponse('Alright')
    .addSuggestions(
      ['Basic Card', 'List', 'Carousel', 'Suggestions']),
    // Build a list
    app.buildList('Things to learn about')
      // Add the first item to the list
      .addItems(app.buildOptionItem('MATH_AND_PRIME',
        ['math', 'math and prime', 'prime numbers', 'prime'])
        .setTitle('Math & prime numbers')
        .setDescription('42 is an abundant number because the sum of its ' +
          'proper divisors 54 is greater…')
      )
      // Add the second item to the list
      .addItems(app.buildOptionItem('EGYPT',
        ['religion', 'egypt', 'ancient egyptian'])
        .setTitle('Ancient Egyptian religion')
        .setDescription('42 gods who ruled on the fate of the dead in the ' +
          'afterworld. Throughout the under…')
      )
      // Add third item to the list
      .addItems(app.buildOptionItem('RECIPES',
        ['recipes', 'recipe', '42 recipes'])
        .setTitle('42 recipes with 42 ingredients')
        .setDescription('Here\'s a beautifully simple recipe that\'s full ' +
          'of flavor! All you need is some ginger and…')
      )
  );
}

let actionMap = new Map();
actionMap.set(app.StandardIntents.MAIN, mainIntent);
actionMap.set(app.StandardIntents.TEXT, textIntent);
app.handleRequest(actionMap);
Here is my action.json:
{
  "actions": [
    {
      "description": "Default Welcome Intent",
      "name": "MAIN",
      "fulfillment": {
        "conversationName": "welcome"
      },
      "intent": {
        "name": "actions.intent.MAIN"
      }
    }
  ],
  "conversations": {
    "welcome": {
      "name": "welcome",
      "url": "https://example.com"
    }
  }
}
Any help would be much appreciated! Thank you in advance.
You're using Actions version 2 features, but your actions.json package doesn't specify a version, so it defaults to version 1.
The "conversations" section of actions.json should look something like:
"conversations": {
"welcome": {
"name": "welcome",
"url": "https://example.com",
"fulfillmentApiVersion": 2
},
}
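For completeness, the whole action.json with the version added would look something like this (a sketch that keeps the question's placeholder example.com URL):
{
  "actions": [
    {
      "description": "Default Welcome Intent",
      "name": "MAIN",
      "fulfillment": {
        "conversationName": "welcome"
      },
      "intent": {
        "name": "actions.intent.MAIN"
      }
    }
  ],
  "conversations": {
    "welcome": {
      "name": "welcome",
      "url": "https://example.com",
      "fulfillmentApiVersion": 2
    }
  }
}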
I am using the Actions SDK to build fulfillments, hosted on Google Cloud Functions. I have the following in my action.json:
{
  "actions": [
    {
      "description": "Default Welcome Intent",
      "name": "MAIN",
      "fulfillment": {
        "conversationName": "App"
      },
      "intent": {
        "name": "actions.intent.MAIN",
        "trigger": {
          "queryPatterns": [
            . . .
          ]
        }
      }
    }
  ],
  "conversations": {
    "App": {
      "name": " ... ",
      "url": " ...",
      "fulfillmentApiVersion": 2
    }
  },
  "locale": "en"
}
In the function code, I notice that the handler for actions.intent.CANCEL is not getting called when the user says or types "exit" or "Goodbye". In the emulator, only the <earcon> appears. The JS code is as below:
app.intent('actions.intent.MAIN', (conv) => {
  conv.ask('Welcome to ...');
});

app.intent('actions.intent.TEXT', (conv, input) => {
  // the main logic of the application is here
});

app.intent('actions.intent.CANCEL', (conv) => {
  conv.close(`Okay, let's try this again later.`);
  // this code does not get called
});
Does something need to be set in action.json for the cancel intent to work?
Yes, you need to add something to your action.json for it to send you the CANCEL Intent. In your existing conversations object, add an inDialogIntents attribute with an array of objects giving the name of the CANCEL Intent. Something like this:
"conversations": {
"App": {
"name": "...",
"url": "...",
"fulfillmentApiVersion": 2
"inDialogIntents": [
{
"name": "actions.intent.CANCEL"
}
]
}
}
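Folded into the full action.json from the question, that would look roughly like this (keeping the elided name, URL, and query patterns as placeholders):
{
  "actions": [
    {
      "description": "Default Welcome Intent",
      "name": "MAIN",
      "fulfillment": {
        "conversationName": "App"
      },
      "intent": {
        "name": "actions.intent.MAIN",
        "trigger": {
          "queryPatterns": [
            . . .
          ]
        }
      }
    }
  ],
  "conversations": {
    "App": {
      "name": " ... ",
      "url": " ...",
      "fulfillmentApiVersion": 2,
      "inDialogIntents": [
        {
          "name": "actions.intent.CANCEL"
        }
      ]
    }
  },
  "locale": "en"
}
Once the action package has been updated and redeployed, saying "exit" or "goodbye" should be delivered to the actions.intent.CANCEL handler shown above.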
I decided to upgrade my Google Assistant action to use the "Dialogflow V2 API", and my webhook returns an object like this:
{
  "fulfillmentText": "Testing",
  "fulfillmentMessages": [
    {
      "text": {
        "text": [
          "fulfillmentMessages text attribute"
        ]
      }
    }
  ],
  "payload": {
    "google": {
      "richResponse": {
        "items": [
          {
            "mediaResponse": {
              "mediaType": "AUDIO",
              "mediaObjects": [
                {
                  "name": "mediaResponse name",
                  "description": "mediaResponse description",
                  "largeImage": {
                    "url": "https://.../640x480.jpg"
                  },
                  "contentUrl": "https://.../20183832714.mp3"
                }
              ]
            },
            "simpleResponse": {
              "textToSpeech": "simpleResponse: testing",
              "ssml": "simpleResponse: ssml",
              "displayText": "simpleResponse displayText"
            }
          }
        ]
      }
    }
  },
  "source": "webhook-play-sample"
}
But I get an error message saying my action is not available. Is mediaResponse supported by V2? Should I format my object differently? Also, when I remove the "mediaResponse" object, it works just fine and the Assistant speaks the simpleResponse part.
This action was re-created in mid-March 2018, and I read about the May deadline, which is why I decided to upgrade to V2. Do you think I should go back to V1? I know I would have to delete and re-create it, but that is fine. This is a link to the JSON object I see in the debug tab. Thanks once again.
I set "API V2" in my action dialogFlow console, this is a screenshot of that setting
Here is an screenshoot of my action's integration -> Google Assistant
Thanks Allen, Yes I do have "expectUserResponse": false, I added the suggestion object you recommended but, unfortunately nothing changed, I am still getting this error
Simulator debug tag details
First of all, this is not a problem with Dialogflow V2. You also seem to be confusing the sunset of Actions on Google V1 with the release of Dialogflow V2; they are two completely different creatures. If your project were using AoG V1, there would be a setting on the Actions integration screen, and there isn't.
It is fine if you want to move to Dialogflow V2, but it isn't required. Media definitely works under Dialogflow V2.
The array of items must include a simpleResponse item first, before any of the other items in the RichResponse. (You also shouldn't include both ssml and textToSpeech; use just one of them.) You don't need the fulfillmentText and fulfillmentMessages components either, since those are provided by the richResponse.
You also need to include suggestion chips unless you have set expectUserResponse to false. Somewhere in the simulator debug output there is probably a block that says:
{
  "name": "MalformedResponse",
  "debugInfo": "expected_inputs[0].input_prompt.rich_initial_prompt: Suggestions must be provided if media_response is used..",
  "subDebugEntryList": []
}
So something more like this should work:
{
  "payload": {
    "google": {
      "richResponse": {
        "items": [
          {
            "simpleResponse": {
              "textToSpeech": "simpleResponse: testing",
              "displayText": "simpleResponse displayText"
            }
          },
          {
            "mediaResponse": {
              "mediaType": "AUDIO",
              "mediaObjects": [
                {
                  "name": "mediaResponse name",
                  "description": "mediaResponse description",
                  "largeImage": {
                    "url": "https://.../640x480.jpg"
                  },
                  "contentUrl": "https://.../20183832714.mp3"
                }
              ]
            }
          }
        ],
        "suggestions": [
          {
            "title": "This"
          },
          {
            "title": "That"
          }
        ]
      }
    }
  },
  "source": "webhook-play-sample"
}
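If you would rather not build this JSON by hand, the actions-on-google v2 client library can produce the same structure. Here is a minimal sketch under that assumption; the intent name 'play.sample' and the image alt text are made-up placeholders, and the URLs are the same truncated ones from the question:
// Sketch of the same response using the actions-on-google v2 client library.
const { dialogflow, MediaObject, Image, Suggestions } = require('actions-on-google');

const app = dialogflow();

// 'play.sample' is a hypothetical Dialogflow intent name.
app.intent('play.sample', (conv) => {
  // A SimpleResponse has to come before the media item.
  conv.ask('simpleResponse: testing');
  conv.ask(new MediaObject({
    name: 'mediaResponse name',
    description: 'mediaResponse description',
    url: 'https://.../20183832714.mp3',   // truncated placeholder from the question
    image: new Image({
      url: 'https://.../640x480.jpg',     // truncated placeholder from the question
      alt: 'Cover image',                 // placeholder alt text
    }),
  }));
  // Suggestion chips are required alongside a media response
  // unless expectUserResponse is false.
  conv.ask(new Suggestions('This', 'That'));
});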
I am using the Actions SDK and I have tried different configurations to get the simulator to invoke my custom intent. It seems that the simulator refuses to trigger any intent other than MAIN, not even TEXT. Below is my action.json:
{
  "actions": [
    {
      "description": "Default Welcome Intent",
      "name": "MAIN",
      "fulfillment": {
        "conversationName": "SHOPPING"
      },
      "intent": {
        "name": "actions.intent.MAIN",
        "trigger": {
          "queryPatterns": [
            "Talk to stroller shopping expert"
          ]
        }
      }
    },
    {
      "description": "Listing strollers for a specified age group",
      "name": "SHOPPING",
      "fulfillment": {
        "conversationName": "SHOPPING"
      },
      "intent": {
        "name": "SHOPPING",
        "trigger": {
          "queryPatterns": [
            "I am looking for a jogging stroller",
            "I am shopping for a jogging stroller"
          ]
        }
      }
    }
  ],
  "conversations": {
    "SHOPPING": {
      "name": "SHOPPING",
      "url": "SOME_URL (I have a valid URL BTW)",
      "fulfillmentApiVersion": 2
    }
  }
}
I am using Firebase, and in the Firebase log I cannot see any logs from my custom or TEXT intent. Here is part of my index.js code:
'use strict';

process.env.DEBUG = 'actions-on-google:*';
const ActionsSdkApp = require('actions-on-google').ActionsSdkApp;
const functions = require('firebase-functions');

const NO_INPUTS = [
  'Pardon me, I didn\'t hear that.',
  'If you\'re still there, would you please say that again.',
  'We can stop here. Good luck with your shopping.'
];

const SHOPPING_INTENT = 'SHOPPING';

exports.shopStrollers = functions.https.onRequest((request, response) => {
  const app = new ActionsSdkApp({request, response});

  function handleMainInput(app) {
    console.log('mainIntent is invoked!');
    console.log("The input is %s", app.getRawInput());
    console.log("It seems that %s is never invoked!", app.StandardIntents.TEXT);
    let inputPrompt = app.buildInputPrompt(true, '<speak>Hi! <break time="1"/> ' +
      'I can help with finding strollers. How old is your baby?</speak>', NO_INPUTS);
    app.ask(inputPrompt);
  }

  function handleTextInput(app) {
    console.log('TEXT is invoked!');
    console.log("The input is %s", app.getRawInput());
    console.log("Finally TEXT HANDLER got invoked");
    if (app.getRawInput() === 'bye') {
      app.tell('Hope you found the service helpful and best of luck with your shopping, please come back again, goodbye!');
    } else {
      let inputPrompt = app.buildInputPrompt(true, '<speak>Here is a list of top' +
        ' <say-as interpret-as="ordinal">10</say-as> strollers' +
        ', say next for the next batch</speak>', NO_INPUTS);
      app.ask(inputPrompt);
    }
  }

  let actionMap = new Map();
  actionMap.set(SHOPPING_INTENT, handleTextInput);
  actionMap.set(app.StandardIntents.MAIN, handleMainInput);
  actionMap.set(app.StandardIntents.TEXT, handleTextInput);
  app.handleRequest(actionMap);
});
Does anybody have a clue what might be wrong? I would appreciate any help.
Problem solved. I had to enable web history.
I was using a business email, and I had to go through a convoluted configuration process to "turn on web history"! Now everything works fine through ngrok and my Express app. I would imagine it should work on Firebase as well. So action.json and index.js were fine; the issue was the incomplete configuration for the business email.
Here are the steps I followed: https://productforums.google.com/forum/#!msg/apps/-52VibOcvrY/wUow1QOJ3VQJ
I have a problem where the Google Home/Assistant Actions console simulator's Response and Request tabs are not working, at least when I am using a custom action.json.
I am not sure whether everyone who uses a custom Actions SDK project has this problem or only some people, or whether it only happens because something in my action.json is not configured 100% correctly.
Here is the action.json:
{
  "actions": [
    {
      "description": "Default Welcome Intent",
      "name": "MAIN",
      "fulfillment": {
        "conversationName": "testApp"
      },
      "intent": {
        "name": "actions.intent.MAIN",
        "trigger": {
          "queryPatterns": [
            "open special manager",
            "open s p m"
          ]
        }
      }
    }
  ],
  "types": [],
  "conversations": {
    "testApp": {
      "name": "testApp",
      "url": "https://572e66a2.ngrok.io/",
      "fulfillmentApiVersion": 2,
      "in_dialog_intents": [
        {
          "name": "actions.intent.NO_INPUT"
        },
        {
          "name": "actions.intent.SIGN_IN"
        }
      ]
    }
  }
}
Here is a picture of the request:
As you can see, the Request tab shows only dummy content, even though the chat is working.
The Response tab is completely empty, but the messages and voice work correctly, including on my Google Home.
Does anybody have an idea? Of course I will add more debug information if necessary. Could it be a problem with the request or response messages from my server?
But the messages themselves are actually working...
I have been playing around with the Actions SDK and it seems to work, but only for my main intent. I added a second intent and it never triggers.
Here is my action.json:
{
"actions": [
{
"description": "Default Welcome Intent",
"name": "MAIN",
"fulfillment": {
"conversationName": "conversation_1"
},
"intent": {
"name": "actions.intent.MAIN"
}
},
{
"name": "add",
"intent": {
"name": "myintent.ADD",
"parameters": [
{
"name": "somenumber",
"type": "SchemaOrg_Number"
}
],
"trigger": {
"queryPatterns": [
"add $SchemaOrg_Number:somenumber",
"add"
]
}
},
"fulfillment": {
"conversationName": "add"
}
}
],
"conversations": {
"conversation_1": {
"name": "conversation_1",
"url": "https://myaddress/sayNumber",
"fulfillmentApiVersion": 2
},
"add": {
"name": "add",
"url": "https://myaddress/sayNumber",
"fulfillmentApiVersion": 2
}
}
}
And here is my index.js:
'use strict';

process.env.DEBUG = 'actions-on-google:*';
const ActionsSdkApp = require('actions-on-google').ActionsSdkApp;
const functions = require('firebase-functions');

const NO_INPUTS = [
  'I didn\'t hear that.',
  'If you\'re still there, say that again.',
  'We can stop here. See you soon.'
];

exports.sayNumber = functions.https.onRequest((request, response) => {
  const app = new ActionsSdkApp({request, response});

  function mainIntent (app) {
    console.log('mainIntent');
    let inputPrompt = app.buildInputPrompt(true, '<speak>Hi! <break time="1"/> ' +
      'I can read out an ordinal like ' +
      '<say-as interpret-as="ordinal">123</say-as>. Say a number.</speak>', NO_INPUTS);
    app.ask(inputPrompt);
  }

  function addIntent (app) {
    console.log('addIntent');
    let inputPrompt = app.buildInputPrompt(true, '<speak>Hi! <break time="1"/> ' +
      'I can add.</speak>', NO_INPUTS);
    app.ask(inputPrompt);
  }

  function rawInput (app) {
    console.log('rawInput');
    if (app.getRawInput() === 'bye') {
      app.tell('Goodbye!');
    } else {
      let inputPrompt = app.buildInputPrompt(true, '<speak>You said, <say-as interpret-as="ordinal">' +
        app.getRawInput() + '</say-as> ' + app.getIntent() + '</speak>', NO_INPUTS);
      app.ask(inputPrompt);
    }
  }

  let actionMap = new Map();
  actionMap.set(app.StandardIntents.MAIN, mainIntent);
  actionMap.set(app.StandardIntents.TEXT, rawInput);
  actionMap.set("myintent.ADD", addIntent);
  app.handleRequest(actionMap);
});
I can say "talk to my action name", and then everything I say after that gets handled as raw input, even if I use the "add" keywords. What am I doing wrong?
That is correct. The actions.json package only defines how users can start a conversation with your Action. Once the conversation has started, you are passed TEXT (or OPTION) intents, and you are expected to handle the natural language processing yourself. Additional intents can be used for speech biasing, but they aren't used to parse the response.
This is different from how some other voice agents handle language parsing. The Actions SDK is primarily intended for the case where you already have your own NLP.
If you don't, you are probably better off using something like Dialogflow or Converse.AI.
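If you do want to stay with the Actions SDK, that parsing has to happen inside your TEXT handler. A rough sketch using the sayNumber example above (the regex and the routing here are just an illustration, not the only way to do it):
// Sketch: doing your own parsing inside the TEXT handler of the sayNumber example.
function rawInput (app) {
  const text = app.getRawInput();

  // Crude stand-in for real language understanding: look for "add ...".
  if (/^add\b/i.test(text)) {
    return addIntent(app);   // route to the add handler yourself
  }

  if (text === 'bye') {
    return app.tell('Goodbye!');
  }

  app.ask(app.buildInputPrompt(false,
    'Say "add" followed by a number, or "bye" to quit.', NO_INPUTS));
}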