Storing data "across conversations" in Google Action - actions-on-google

I'm a bit confused by the Google Actions documentation about storing data and hope someone can help clarify...
The docs state that data in the conv.user.storage object will be saved "across conversations". I took this to mean that if the user exited the conversation these values would be persisted and available the next time they interact with my action. Is that understanding correct?
The reason I ask is that I can't get this behaviour to work in my action.
I have built a simple action fulfilment service (using Actions on Google NodeJS library v2.4.0 and Koa v2.5.3). The fulfilment is triggered from an intent defined in Dialogflow (after an account has been linked with Google Sign In) and stores a value in conversation storage. The code is as follows:
server.js (base server - loads actions dynamically from the local ./actions/ dir)
/* Load the environment */
const dotenv = require('dotenv');
const path = require('path');
const packageJson = require('./package.json');
dotenv.config({
silent: true,
path: process.env.ENV_FILE!=undefined && process.env.ENV_FILE.trim()!='' ? path.normalize(process.env.ENV_FILE) : path.join(__dirname, './.env')
});
const SERVER_NAME = process.env.NAME || packageJson.name;
const SERVER_PORT = process.env.PORT||'8080';
const SERVER_HOST = process.env.HOST||'0.0.0.0';
const HANDLERS_PATH = './actions/';
/* Load the dependencies */
const logger = require('utils-general').logger('google-server');
const Koa = require('koa');
const KoaBody = require('koa-body');
const KoaActionsOnGoogle = require('koa-aog');
const fs = require('fs');
const { dialogflow } = require('actions-on-google');
/* Load and initialise the Google Assistant actions */
//Initialise DialogFlow
const action = dialogflow({ debug: process.env.ACTIONS_DEBUG==='true', clientId: process.env.GOOGLE_CLIENT_ID });
//Load the action intent handlers
const handlers = [];
let handlerFiles = fs.readdirSync(HANDLERS_PATH);
handlerFiles.forEach(function loadHandlers(file) {
let handlerImpl = require(HANDLERS_PATH+file);
let handler = {};
handler[handlerImpl.intent] = handlerImpl.action;
handlers.push(handler);
});
//Add the actions intent handlers to DialogFlow
handlers.forEach(item => {
let key = Object.keys(item)[0];
logger.info(`Adding handler for action intent ${key}`);
action.intent(key, item[key]);
});
/* Create the application server to handle fulfilment requests */
logger.info(`Initialising the ${SERVER_NAME} server (port: ${SERVER_PORT}, host: ${SERVER_HOST})`);
//Create the server
const app = new Koa();
//Add a default error handler (listens for app-level 'error' events)
app.on('error', function handleAppError(err) {
logger.error(`Unhandled ${err.name||'Error'}: ${err.message || JSON.stringify(err)}`);
});
//Add body parsing middleware
app.use(KoaBody({ jsonLimit: '50kb' }));
//Log the request/ response
app.use(async (ctx, next) => {
logger.trace(`REQUEST ${ctx.method} ${ctx.path} ${JSON.stringify(ctx.request.body)}`);
await next();
logger.trace(`RESPONSE (${ctx.response.status}) ${ctx.response.body ? JSON.stringify(ctx.response.body) : ''}`);
});
//Make the action fulfilment endpoint available on the server
app.use(KoaActionsOnGoogle({ action: action }));
/* Start server on the specified port */
app.listen(SERVER_PORT, SERVER_HOST, function () {
logger.info(`${SERVER_NAME} server started at ${new Date().toISOString()} and listening for requests on port ${SERVER_PORT}`);
});
module.exports = app;
storage-read.js (fulfilment for the "STORAGE_READ" intent - reads stored uuid from conversation storage):
const logger = require('utils-general').logger('google-action-storage-read');
const { SimpleResponse } = require('actions-on-google');
const { getUserId } = require('../utils/assistant-util');
const _get = require('lodash.get');
module.exports = {
intent: 'STORAGE_READ',
action: async function (conv, input) {
logger.debug(`Processing STORAGE_READ intent request: ${JSON.stringify(conv)}`, { traceid: getUserId(conv) });
let storedId = _get(conv, 'user.storage.uuid', undefined);
logger.debug(`User storage UUID is ${storedId}`);
conv.close(new SimpleResponse((storedId!=undefined ? `This conversation contains stored data` : `There is no stored data for this conversation`)));
}
}
storage-write.js (fulfils the "STORAGE_WRITE" intent - writes a UUID to conversation storage):
const logger = require('utils-general').logger('google-action-storage-write');
const { SimpleResponse } = require('actions-on-google');
const { getUserId } = require('../utils/assistant-util');
const _set = require('lodash.set');
const uuid = require('uuid/v4');
module.exports = {
intent: 'STORAGE_WRITE',
action: async function (conv, input) {
logger.debug(`Processing STORAGE_WRITE intent request`, { traceid: getUserId(conv) });
let newId = uuid();
logger.debug(`Writing new UUID to conversation storage: ${newId}`);
_set(conv, 'user.storage.uuid', newId);
conv.close(new SimpleResponse(`OK, I've written a new UUID to conversation storage`));
}
}
This "STORAGE_WRITE" fulfilment stores the data and makes it available between turns in the same conversation (i.e. another intent triggered in the same conversation can read the stored data). However, when the conversation is closed, subsequent (new) conversations with the same user are unable to read the data (i.e. when the "STORAGE_READ" intent is fulfilled) - the conv.user.storage object is always empty.
I have Voice Match set up on the Google account/Home Mini I'm using, but I can't see how to determine in the action whether the voice was matched (it seems to be, since my linked account is used when I start a new conversation). I'm also getting the same behaviour in the simulator.
Sample requests/responses (when using the simulator) are as follows:
STORAGE_WRITE request:
{
"user": {
"userId": "AB_Hidden_EWVzx3q",
"locale": "en-US",
"lastSeen": "2018-10-18T12:52:01Z",
"idToken": "eyMyHiddenTokenId"
},
"conversation": {
"conversationId": "ABwppHFrP5DIKzykGIfK5mNS42yVzuunzOfFUhyPctG0h0xM8p6u0E9suX8OIvaaGdlYydTl60ih-WJ5kkqV4acS5Zd1OkRJ5pnE",
"type": "NEW"
},
"inputs": [
{
"intent": "actions.intent.MAIN",
"rawInputs": [
{
"inputType": "KEYBOARD",
"query": "ask my pathfinder to write something to conversation storage"
}
],
"arguments": [
{
"name": "trigger_query",
"rawText": "write something to conversation storage",
"textValue": "write something to conversation storage"
}
]
}
],
"surface": {
"capabilities": [
{
"name": "actions.capability.WEB_BROWSER"
},
{
"name": "actions.capability.AUDIO_OUTPUT"
},
{
"name": "actions.capability.SCREEN_OUTPUT"
},
{
"name": "actions.capability.MEDIA_RESPONSE_AUDIO"
}
]
},
"isInSandbox": true,
"availableSurfaces": [
{
"capabilities": [
{
"name": "actions.capability.WEB_BROWSER"
},
{
"name": "actions.capability.AUDIO_OUTPUT"
},
{
"name": "actions.capability.SCREEN_OUTPUT"
}
]
}
],
"requestType": "SIMULATOR"
}
STORAGE_WRITE response:
{
"conversationToken": "[]",
"finalResponse": {
"richResponse": {
"items": [
{
"simpleResponse": {
"textToSpeech": "OK, I've written a new UUID to conversation storage"
}
}
]
}
},
"responseMetadata": {
"status": {
"message": "Success (200)"
},
"queryMatchInfo": {
"queryMatched": true,
"intent": "a7e54fcf-8ff1-4690-a311-e4c6a8d1bfd7"
}
},
"userStorage": "{\"data\":{\"uuid\":\"7dc835fa-0470-4028-b8ed-3374ed65ac7c\"}}"
}
Subsequent STORAGE_READ request:
{
"user": {
"userId": "AB_Hidden_EWVzx3q",
"locale": "en-US",
"lastSeen": "2018-10-18T12:52:47Z",
"idToken": "eyMyHiddenTokenId"
},
"conversation": {
"conversationId": "ABwppHHVvp810VEfa4BhBJPf1NIfKUGzyvw9JCw7kKq9YBd_F8w0VYjJiSuzGLrHcXHGc9pC6ukuMB62XVkzkZOaC24pEbXWLQX5",
"type": "NEW"
},
"inputs": [
{
"intent": "STORAGE_READ",
"rawInputs": [
{
"inputType": "KEYBOARD",
"query": "ask my pathfinder what is in conversation storage"
}
],
"arguments": [
{
"name": "trigger_query",
"rawText": "what is in conversation storage",
"textValue": "what is in conversation storage"
}
]
}
],
"surface": {
"capabilities": [
{
"name": "actions.capability.WEB_BROWSER"
},
{
"name": "actions.capability.AUDIO_OUTPUT"
},
{
"name": "actions.capability.SCREEN_OUTPUT"
},
{
"name": "actions.capability.MEDIA_RESPONSE_AUDIO"
}
]
},
"isInSandbox": true,
"availableSurfaces": [
{
"capabilities": [
{
"name": "actions.capability.WEB_BROWSER"
},
{
"name": "actions.capability.AUDIO_OUTPUT"
},
{
"name": "actions.capability.SCREEN_OUTPUT"
}
]
}
],
"requestType": "SIMULATOR"
}
STORAGE_READ response:
{
"conversationToken": "[]",
"finalResponse": {
"richResponse": {
"items": [
{
"simpleResponse": {
"textToSpeech": "There is no stored data for this conversation"
}
}
]
}
},
"responseMetadata": {
"status": {
"message": "Success (200)"
},
"queryMatchInfo": {
"queryMatched": true,
"intent": "368d08d3-fe0c-4481-aa8e-b0bdfa659eeb"
}
}
}
Can someone straighten me out: am I misinterpreting the docs, or do I have a bug somewhere?
Thanks!

My suspicion is that personal results are turned off in your case.
You mentioned you're testing on a Home Mini, and Prisoner was able to reproduce this on a device (in the comments).
Shared devices like smart speakers (Home, Mini) and Smart Displays have personal results disabled by default; check this documentation to enable them:
Open Settings on your Android phone
Under "Assistant devices," select your device (e.g. Mini)
Turn Personal results on
Beware that this means personal results like Calendar entries can be accessed through the device.
To check whether userStorage will persist, you can use the GUEST/VERIFIED verification status; see the documentation here.
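For what it's worth, the v2 Node.js library exposes that status on the conversation object, so the action itself can check whether storage will persist. A minimal sketch, reusing the intent name from the question:
const { dialogflow } = require('actions-on-google');
const app = dialogflow();
app.intent('STORAGE_WRITE', (conv) => {
    //conv.user.verification is 'VERIFIED' or 'GUEST'; userStorage is
    //only persisted across conversations for verified users
    if (conv.user.verification === 'VERIFIED') {
        conv.user.storage.uuid = 'some-generated-uuid';
        conv.close(`OK, I've written a new UUID to conversation storage`);
    } else {
        conv.close(`I can't remember things for guest users`);
    }
});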

Related

"How to get username by userID in google assistant action?"

I connected my chatbot to a Google Assistant action. It only gives the userID; how can I get the username using this userID?
You can get the username without knowing the userID by requesting the NAME permission (see the permissions document here). You can take a look at this sample code.
Or you can use the account linking feature.
Tip: for the userID itself, you can check out this doc.
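For Node.js, a minimal sketch using the library's Permission helper (the intent names here are hypothetical; the second intent must be attached to the actions_intent_PERMISSION event in Dialogflow):
const { dialogflow, Permission } = require('actions-on-google');
const app = dialogflow();
//Ask the user for permission to read their display name
app.intent('ask_for_name', (conv) => {
    conv.ask(new Permission({
        context: 'To address you by name',
        permissions: 'NAME',
    }));
});
//Dialogflow intent triggered by the actions_intent_PERMISSION event
app.intent('handle_permission', (conv, params, granted) => {
    if (granted) {
        conv.close(`Nice to meet you, ${conv.user.name.display}!`);
    } else {
        conv.close('No problem, I can carry on without your name.');
    }
});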
For Python:
There is no official library for developing a Google action in Python, but
you can add the permission intent to the possibleIntents array, so your Action SDK JSON will be:
{
"expectUserResponse": true,
"expectedInputs": [
{
"inputPrompt": {
"richInitialPrompt": {
"items": [
{
"simpleResponse": {
"textToSpeech": "PLACEHOLDER"
}
}
]
}
},
"possibleIntents": [
{
"intent": "actions.intent.PERMISSION",
"inputValueData": {
"#type": "type.googleapis.com/google.actions.v2.PermissionValueSpec",
"optContext": "To address you by name and know your location",
"permissions": [
"NAME",
"DEVICE_PRECISE_LOCATION"
]
}
}
]
}
],
"conversationToken": "{\"data\":{}}",
"userStorage": "{\"data\":{}}"
}
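After the user answers the permission prompt, the Assistant's next request to your webhook should carry the result as an argument named PERMISSION in inputs[0].arguments, with the granted name/location data available elsewhere in the request, so your back-end can read it from there.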
{
"actions": [
{
"description": "Default Welcome Intent",
"name": "MAIN",
"fulfillment": {
"conversationName": "welcome"
},
"intent": {
"name": "actions.intent.MAIN",
"trigger": {
"queryPatterns":["talk to Mr Bot"]
}
}
},
{
"description": "Rasa Intent",
"name": "TEXT",
"fulfillment": {
"conversationName": "rasa_intent"
},
"intent": {
"name": "actions.intent.TEXT",
"trigger": {
"queryPatterns":[]
}
}
}],
"conversations": {
"welcome": {
"name": "welcome",
"url": "https://ac752bb0.ngrok.io/webhooks/google_home/webhook",
"fulfillmentApiVersion": 2
},
"rasa_intent": {
"name": "rasa_intent",
"url": "https://ac752bb0.ngrok.io/webhooks/google_home/webhook",
"fulfillmentApiVersion": 2
}
} }
This is my action.json.
# Imports assumed for this snippet (adjust to your Rasa version):
# from flask import Blueprint, request, jsonify
# import json
# from rasa_core.channels.channel import InputChannel, UserMessage, CollectingOutputChannel

class GoogleConnector(InputChannel):
    @classmethod
    def name(cls):
        return "google_home"

    # def __init__(self):
    #     self.out_channel = CustomOutput(url, access_token)

    def blueprint(self, on_new_message):
        google_webhook = Blueprint('google_webhook', __name__)

        @google_webhook.route("/", methods=['GET'])
        def health():
            return jsonify({"status": "ok"})

        @google_webhook.route("/webhook", methods=['POST'])
        def receive():
            payload = json.loads(request.data)
            sender_id = payload['user']['userId']
            intent = payload['inputs'][0]['intent']
            text = payload['inputs'][0]['rawInputs'][0]['query']
            if intent == 'actions.intent.MAIN':
                message = "<speak>Hello! <break time=\"1\"/> Welcome to the Rasa-powered Google Assistant skill. You can start by saying hi.</speak>"
            else:
                out = CollectingOutputChannel()
                on_new_message(UserMessage(text, out, sender_id))
                responses = [m["text"] for m in out.messages]
                message = responses[0]
            r = json.dumps(
                {
                    "conversationToken": "{\"state\":null,\"data\":{}}",
                    "expectUserResponse": True,
                    "expectedInputs": [
                        {
                            "inputPrompt": {
                                "initialPrompts": [
                                    {
                                        "ssml": message
                                    }
                                ]
                            },
                            "possibleIntents": [
                                {
                                    "intent": "actions.intent.TEXT"
                                }
                            ]
                        }
                    ]
                })
            return r
        return google_webhook
This is my Google connector Python code.
How do I modify it to support account sign-in?

google actions: conversation exit using actions sdk does not invoke actions.intent.CANCEL

I am using the Actions SDK to build fulfilments, with Google Cloud Functions. I have the following in action.json:
{
"actions": [
{
"description": "Default Welcome Intent",
"name": "MAIN",
"fulfillment": {
"conversationName": "App"
},
"intent": {
"name": "actions.intent.MAIN",
"trigger": {
"queryPatterns": [
. . .
]
}
}
}
],
"conversations": {
"App": {
"name": " ... ",
"url": " ...",
"fulfillmentApiVersion": 2
}
},
"locale": "en"
}
In the function code I notice that the custom intent handler for actions.intent.CANCEL is not getting called when the user says/types "exit" or "Goodbye". In the emulator, only the <earcon> appears. The JS code is as below:
app.intent('actions.intent.MAIN', (conv) => {
conv.ask('Welcome to ...');
});
app.intent('actions.intent.TEXT', (conv, input) => {
// the main logic of the application is here
});
app.intent('actions.intent.CANCEL', (conv) => {
conv.close(`Okay, let's try this again later.`);
// this code does not get called
});
Does something need to be set in action.json for the cancel intent to work?
Yes, you need to add something to your action.json for it to send you the CANCEL Intent. In your existing conversations object, add an inDialogIntents attribute with an array of objects giving the name of the CANCEL Intent. Something like this:
"conversations": {
"App": {
"name": "...",
"url": "...",
"fulfillmentApiVersion": 2
"inDialogIntents": [
{
"name": "actions.intent.CANCEL"
}
]
}
}
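With that in place, saying "exit" or "goodbye" should be routed to your actions.intent.CANCEL handler. Note that a cancel handler has to end the conversation (conv.close()); it can't ask the user another question.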

How can I trigger a `action.intent.INTENT_NAME` intent from my webhook?

I want to create a chatbot with Dialogflow and Google Assistant along with Google Transactions API for enabling a user to order a chocolate box. For now my agent contains the following four intents:
Default Welcome Intent (text response: Hello, do you want to buy a chocolate box?)
Default Fallback Intent
Int1 (training phrase: Yes, I want, fulfilment: enabled webhook call)
Int2 (event: actions_intent_TRANSACTION_REQUIREMENTS_CHECK)
I am using Dialogflow JSON instead of Node.js to connect my agent to the Transactions API. I want to test that the user meets the transaction requirements (when ordering the chocolate box) by using the actions.intent.TRANSACTION_REQUIREMENTS_CHECK action of Google Actions. For this reason, following the Google docs, when Int1 is triggered I use a webhook which connects Google Assistant to the following Python script (back-end):
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
import requests

app = Flask(__name__)
CORS(app)

@app.route("/", methods=['POST'])
def index():
    data = request.get_json()
    intent = data["queryResult"]["intent"]["displayName"]
    if intent == 'Int1':
        return jsonify({"data": {
            "google": {
                "expectUserResponse": True,
                "isSsml": False,
                "noInputPrompts": [],
                "systemIntent": {
                    "data": {
                        "@type": "type.googleapis.com/google.actions.v2.TransactionRequirementsCheckSpec",
                        "paymentOptions": {
                            "actionProvidedOptions": {
                                "displayName": "VISA-1234",
                                "paymentType": "PAYMENT_CARD"
                            }
                        }
                    },
                    "intent": "actions.intent.TRANSACTION_REQUIREMENTS_CHECK"
                }
            }
        }})
    else:
        return jsonify({'message': 'HERE'})

if __name__ == "__main__":
    app.run(debug=True)
The json which I return above when intent = 'Int1' is the one specified at Google docs for "Checking requirements with your own payment method".
According to Google docs, this must be done next:
Receiving the result of a requirements check
After the Assistant fulfills the intent, it sends your fulfillment a
request with the actions.intent.TRANSACTION_REQUIREMENTS_CHECK intent
with the result of the check.
To properly handle this request, declare a Dialogflow intent that's
triggered by the actions_intent_TRANSACTION_REQUIREMENTS_CHECK event.
For this reason, I defined Int2 with actions_intent_TRANSACTION_REQUIREMENTS_CHECK as its event.
However, I do not receive anything at my back-end (such as the result of the check), and therefore I do not know whether actions.intent.TRANSACTION_REQUIREMENTS_CHECK is really triggered. Why is this happening?
In general, how can I trigger one actions.intent.INTENT_NAME intent from my webhook/back-end?
When I am using the v2 version of Dialogflow, I am getting the following info/message about the webhook on Dialogflow when Int1 is triggered:
"webhookStatus": {
"code": 3,
"message": "Webhook call failed. Error: Failed to parse webhook JSON response: Cannot find field: data in message google.cloud.dialogflow.v2.WebhookResponse."
}
In the same case, I am getting the following info/message about the webhook on Google Assistant simulator when Int1 is triggered:
"responseMetadata": {
"status": {
"code": 14,
"message": "Webhook error (206)"
}
Finally, let me mention that I am testing all this with Python and ngrok on my local computer, so perhaps that poses a problem, because the Google docs mention the following at the beginning:
Warning: The Actions Web Simulator should not be used to test an app
with transactions. Please use an Assistant-enabled Android or iOS
device to accurately test your app during development.
I finally solved this problem.
I had to replace the key "data" in the JSON which I was sending back when Int1 was triggered with the key "payload". In other words, I had to adjust my fulfilment response to the v2 version of Dialogflow.
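For anyone hitting the same error, a minimal sketch of the corrected v2 response body (same values as in the question; note the intent also belongs inside systemIntent, as the answer below points out):
{
    "payload": {
        "google": {
            "expectUserResponse": true,
            "isSsml": false,
            "noInputPrompts": [],
            "systemIntent": {
                "intent": "actions.intent.TRANSACTION_REQUIREMENTS_CHECK",
                "data": {
                    "@type": "type.googleapis.com/google.actions.v2.TransactionRequirementsCheckSpec",
                    "paymentOptions": {
                        "actionProvidedOptions": {
                            "displayName": "VISA-1234",
                            "paymentType": "PAYMENT_CARD"
                        }
                    }
                }
            }
        }
    }
}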
Therefore, now I do get a second post request at my back-end which is sent because of the trigger of actions.intent.TRANSACTION_REQUIREMENTS_CHECK and of Int2.
Specifically, I get the following:
{
"responseId": "*****************************",
"queryResult": {
"queryText": "actions_intent_TRANSACTION_REQUIREMENTS_CHECK",
"parameters": {},
"allRequiredParamsPresent": true,
"fulfillmentText": "HERE",
"fulfillmentMessages": [
{
"text": {
"text": [
"HERE"
]
}
}
],
"outputContexts": [
{
"name": "*****************************"
},
{
"name": "*****************************"
},
{
"name": "*****************************"
},
{
"name": "*****************************"
},
{
"name": "*****************************"
},
{
"name": "*****************************",
"parameters": {
"TRANSACTION_REQUIREMENTS_CHECK_RESULT": {
"#type": "type.googleapis.com/google.actions.v2.TransactionRequirementsCheckResult",
"resultType": "OK"
}
}
}
],
"intent": {
"name": "*****************************",
"displayName": "Int2"
},
"intentDetectionConfidence": 1,
"diagnosticInfo": {},
"languageCode": "en-us"
},
"originalDetectIntentRequest": {
"source": "google",
"version": "2",
"payload": {
"isInSandbox": true,
"surface": {
"capabilities": [
{
"name": "actions.capability.WEB_BROWSER"
},
{
"name": "actions.capability.MEDIA_RESPONSE_AUDIO"
},
{
"name": "actions.capability.SCREEN_OUTPUT"
},
{
"name": "actions.capability.AUDIO_OUTPUT"
}
]
},
"inputs": [
{
"rawInputs": [
{
"inputType": "KEYBOARD"
}
],
"arguments": [
{
"extension": {
"#type": "type.googleapis.com/google.actions.v2.TransactionRequirementsCheckResult",
"resultType": "OK"
},
"name": "TRANSACTION_REQUIREMENTS_CHECK_RESULT"
}
],
"intent": "actions.intent.TRANSACTION_REQUIREMENTS_CHECK"
}
],
"user": {
"lastSeen": "2018-05-16T11:15:14Z",
"locale": "en-US",
"userId": "*****************************"
},
"conversation": {
"conversationId": "1526470000479",
"type": "ACTIVE",
"conversationToken": "[]"
},
"availableSurfaces": [
{
"capabilities": [
{
"name": "actions.capability.SCREEN_OUTPUT"
},
{
"name": "actions.capability.AUDIO_OUTPUT"
}
]
}
]
}
},
"session": "*****************************"
}
I think your response object is incorrect: the intent attribute should be inside the systemIntent object.
"data": {
"google": {
"expectUserResponse": true,
"isSsml": false,
"noInputPrompts": [],
"systemIntent": {
"intent": "actions.intent.TRANSACTION_REQUIREMENTS_CHECK",
"data": {
"#type": "type.googleapis.com/google.actions.v2.TransactionRequirementsCheckSpec",
"paymentOptions": {
"actionProvidedOptions": {
"displayName": "VISA-1234",
"paymentType": "PAYMENT_CARD"
}
}
}
}
}
}

Actions-On-Google[Permission Intent] Get User Location / Name

const {WebhookClient} = require('dialogflow-fulfillment');
const {Text, Card, Image, Suggestion, Payload} = require('dialogflow-fulfillment');
const agent = new WebhookClient({request, response});
let payload = {
"systemIntent": {
"intent": "actions.intent.PERMISSION",
"data": {
"#type": "type.googleapis.com/google.actions.v2.PermissionValueSpec",
"optContext": "To deliver your order",
"permissions": [
"NAME",
"DEVICE_PRECISE_LOCATION"
]
}
}
};
agent.add('PLACEHOLDER_FOR_PERMISSION');
agent.add(new Payload(PLATFORMS.ACTIONS_ON_GOOGLE, payload));
This is a simple payload to get the user's location and name using the PERMISSION intent.
The response I get to the above is:
To deliver your order, I'll need to get your name and street address
from Google. Is that ok?
A follow-up intent to this one is also set up, with the event actions_intent_PERMISSION attached.
I have been trying to solve this for two days, trying to fire actions_intent_PERMISSION using suggestion chips etc., but nothing happens after this.
I can't work out where I'm going wrong. There must be some silly mistake somewhere; if someone can point it out, that would help a lot.
Thanks
EDIT: images attached for the intents:
permissions intent
permissions response with event actions_intent_PERMISSION
(I can't embed the images directly because of reputation points; the links above are there. Thanks.)
EDIT: request/response JSON
When the permissions intent is triggered, the request below is sent:
{
"responseId": "54a4be35-3d0b-4cc8-b036-46fab0d09361",
"queryResult": {
"queryText": "permissions",
"action": "permissions",
"parameters": {},
"allRequiredParamsPresent": true,
"fulfillmentMessages": [
{
"text": {
"text": [
""
]
}
}
],
"intent": {
"name": "projects/projectid/agent/intents/95237653-0af0-4d0c-9101-0cd8ee0db186",
"displayName": "permissions"
},
"intentDetectionConfidence": 1,
"diagnosticInfo": {},
"languageCode": "en"
},
"originalDetectIntentRequest": {
"payload": {}
},
"session": "projects/projectid/agent/sessions/13213e7f-dba5-4d0c-979a-f626f7ac4691"
}
The fulfillment response:
{
"conversationToken": "[]",
"expectUserResponse": true,
"expectedInputs": [
{
"inputPrompt": {},
"possibleIntents": [
{
"intent": "actions.intent.PERMISSION",
"inputValueData": {
"#type": "type.googleapis.com/google.actions.v2.PermissionValueSpec",
"permissions": [
"NAME",
"DEVICE_PRECISE_LOCATION"
],
"optContext": "To locate you"
}
}
],
"speechBiasingHints": [
"$name-type",
"$sports",
"$gender"
]
}
],
"responseMetadata": {
"status": {
"message": "Success (200)"
},
"queryMatchInfo": {
"queryMatched": true,
"intent": "95237653-0af0-4d0c-9101-0cd8ee0db186"
}
}
}
That is the response from the simulator for the permissions intent.
The issue has been resolved. Points to note:
The Actions on Google simulator works too unreliably to be trusted as an indicator of whether your webhook is working or not.
Promise resolution was the issue: the agent was not waiting for a promise to be resolved before the response was passed on to the next step (see the sketch below).
The correct way to test your bot is to publish it in ALPHA on the Assistant directory rather than testing in the simulator, which is very unstable: you can never predict its behavior, it will never show you the correct error to debug, and it will abruptly stop working for no reason whatsoever.
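On the promise point: with dialogflow-fulfillment, the library only waits for asynchronous work if the intent handler returns the promise. A minimal sketch of the pattern (fetchDeliveryEstimate is a hypothetical async helper, not part of the library):
function permissionsHandler (agent) {
    //Returning the promise is what makes handleRequest wait for the
    //async work to finish before the response is serialised and sent
    return fetchDeliveryEstimate().then((days) => {
        agent.add(`Your order should arrive in ${days} days.`);
    });
}
const intentMap = new Map();
intentMap.set('permissions', permissionsHandler);
agent.handleRequest(intentMap);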

I can't trigger my second action on google assistant

I have been playing around with the Actions SDK and it seems to work, but only for my main intent. I added a second intent and it never triggers.
Here is my action.json:
{
"actions": [
{
"description": "Default Welcome Intent",
"name": "MAIN",
"fulfillment": {
"conversationName": "conversation_1"
},
"intent": {
"name": "actions.intent.MAIN"
}
},
{
"name": "add",
"intent": {
"name": "myintent.ADD",
"parameters": [
{
"name": "somenumber",
"type": "SchemaOrg_Number"
}
],
"trigger": {
"queryPatterns": [
"add $SchemaOrg_Number:somenumber",
"add"
]
}
},
"fulfillment": {
"conversationName": "add"
}
}
],
"conversations": {
"conversation_1": {
"name": "conversation_1",
"url": "https://myaddress/sayNumber",
"fulfillmentApiVersion": 2
},
"add": {
"name": "add",
"url": "https://myaddress/sayNumber",
"fulfillmentApiVersion": 2
}
}
}
And here is my index.js:
'use strict';
process.env.DEBUG = 'actions-on-google:*';
const ActionsSdkApp = require('actions-on-google').ActionsSdkApp;
const functions = require('firebase-functions');
const NO_INPUTS = [
'I didn\'t hear that.',
'If you\'re still there, say that again.',
'We can stop here. See you soon.'
];
exports.sayNumber = functions.https.onRequest((request, response) => {
const app = new ActionsSdkApp({request, response});
function mainIntent (app) {
console.log('mainIntent');
let inputPrompt = app.buildInputPrompt(true, '<speak>Hi! <break time="1"/> ' +
'I can read out an ordinal like ' +
'<say-as interpret-as="ordinal">123</say-as>. Say a number.</speak>', NO_INPUTS);
app.ask(inputPrompt);
}
function addIntent (app) {
console.log('addIntent');
let inputPrompt = app.buildInputPrompt(true, '<speak>Hi! <break time="1"/> ' +
'I can add.</speak>', NO_INPUTS);
app.ask(inputPrompt);
}
function rawInput (app) {
console.log('rawInput');
if (app.getRawInput() === 'bye') {
app.tell('Goodbye!');
} else {
let inputPrompt = app.buildInputPrompt(true, '<speak>You said, <say-as interpret-as="ordinal">' +
app.getRawInput() + '</say-as>'+app.getIntent()+'</speak>', NO_INPUTS);
app.ask(inputPrompt);
}
}
let actionMap = new Map();
actionMap.set(app.StandardIntents.MAIN, mainIntent);
actionMap.set(app.StandardIntents.TEXT, rawInput);
actionMap.set("myintent.ADD", addIntent);
app.handleRequest(actionMap);
});
I can say "talk to my action name", and then everything I say after that gets handled as raw input, even if I use the "add" keywords. What am I doing wrong?
That is correct. The action.json file only defines how users can start a conversation with your Action. Once the conversation has started, you are passed TEXT (or OPTION) intents, and you are expected to handle the natural-language processing yourself. Additional intents can be used for speech biasing, but they aren't used to parse the response.
This is different from how some other voice agents handle language parsing. The Actions SDK is primarily intended for the case where you already have your own NLP.
If you don't, you are probably better off using something like Dialogflow or Converse.AI.
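As an illustration, the add command could be routed inside the existing rawInput handler from the question; the regex here is just a stand-in for whatever parsing or NLP you supply yourself:
function rawInput (app) {
    const text = app.getRawInput();
    //Every mid-conversation utterance arrives as the TEXT intent, so
    //the raw text has to be interpreted here rather than by a trigger
    const addMatch = text.match(/^add\s+(\d+)/i);
    if (addMatch) {
        app.ask(app.buildInputPrompt(false, `Adding ${addMatch[1]}.`, NO_INPUTS));
    } else if (text === 'bye') {
        app.tell('Goodbye!');
    } else {
        app.ask(app.buildInputPrompt(false, 'Say a number, or say add followed by a number.', NO_INPUTS));
    }
}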