diff --git a/dialogflow/detect-intent-TTS-response.v2.js b/dialogflow/detect-intent-TTS-response.v2.js
new file mode 100644
index 00000000000..f7a66175d9c
--- /dev/null
+++ b/dialogflow/detect-intent-TTS-response.v2.js
@@ -0,0 +1,70 @@
+/**
+ * Copyright 2018, Google, LLC.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+async function main(
+  projectId = 'YOUR_PROJECT_ID',
+  sessionId = 'YOUR_SESSION_ID',
+  query = 'YOUR_QUERY',
+  languageCode = 'YOUR_LANGUAGE_CODE',
+  outputFile = 'YOUR_OUTPUT_FILE'
+) {
+  // [START dialogflow_detect_intent_with_texttospeech_response]
+  // Imports the Dialogflow client library
+  const dialogflow = require('dialogflow').v2;
+
+  // Instantiate a DialogFlow client.
+  const sessionClient = new dialogflow.SessionsClient();
+
+  /**
+   * TODO(developer): Uncomment the following lines before running the sample.
+   */
+  // const projectId = 'ID of GCP project associated with your Dialogflow agent';
+  // const sessionId = `user specific ID of session, e.g. 12345`;
+  // const query = `phrase(s) to pass to detect, e.g. I'd like to reserve a room for six people`;
+  // const languageCode = 'BCP-47 language code, e.g. en-US';
+  // const outputFile = `path for audio output file, e.g. ./resources/myOutput.wav`;
+
+  // Define session path
+  const sessionPath = sessionClient.sessionPath(projectId, sessionId);
+  const fs = require(`fs`);
+  const util = require(`util`);
+
+  async function detectIntentwithTTSResponse() {
+    // The audio query request
+    const request = {
+      session: sessionPath,
+      queryInput: {
+        text: {
+          text: query,
+          languageCode: languageCode,
+        },
+      },
+      outputAudioConfig: {
+        audioEncoding: `OUTPUT_AUDIO_ENCODING_LINEAR_16`,
+      },
+    };
+    sessionClient.detectIntent(request).then(responses => {
+      console.log('Detected intent:');
+      const audioFile = responses[0].outputAudio;
+      util.promisify(fs.writeFile)(outputFile, audioFile, 'binary');
+      console.log(`Audio content written to file: ${outputFile}`);
+    });
+  }
+  detectIntentwithTTSResponse();
+  // [END dialogflow_detect_intent_with_texttospeech_response]
+}
+
+main(...process.argv.slice(2));
diff --git a/dialogflow/detect-intent-sentiment.v2.js b/dialogflow/detect-intent-sentiment.v2.js
new file mode 100644
index 00000000000..ad9eaf1ec36
--- /dev/null
+++ b/dialogflow/detect-intent-sentiment.v2.js
@@ -0,0 +1,86 @@
+/**
+ * Copyright 2018, Google, LLC.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+async function main(
+  projectId = 'YOUR_PROJECT_ID',
+  sessionId = 'YOUR_SESSION_ID',
+  query = 'YOUR_QUERY',
+  languageCode = 'YOUR_LANGUAGE_CODE'
+) {
+  // [START dialogflow_detect_intent_with_sentiment_analysis]
+  // Imports the Dialogflow client library
+  const dialogflow = require('dialogflow').v2;
+
+  // Instantiate a DialogFlow client.
+  const sessionClient = new dialogflow.SessionsClient();
+
+  /**
+   * TODO(developer): Uncomment the following lines before running the sample.
+   */
+  // const projectId = 'ID of GCP project associated with your Dialogflow agent';
+  // const sessionId = `user specific ID of session, e.g. 12345`;
+  // const query = `phrase(s) to pass to detect, e.g. I'd like to reserve a room for six people`;
+  // const languageCode = 'BCP-47 language code, e.g. en-US';
+
+  // Define session path
+  const sessionPath = sessionClient.sessionPath(projectId, sessionId);
+
+  async function detectIntentandSentiment() {
+    // The text query request.
+    const request = {
+      session: sessionPath,
+      queryInput: {
+        text: {
+          text: query,
+          languageCode: languageCode,
+        },
+      },
+      queryParams: {
+        sentimentAnalysisRequestConfig: {
+          analyzeQueryTextSentiment: true,
+        },
+      },
+    };
+
+    // Send request and log result
+    const responses = await sessionClient.detectIntent(request);
+    console.log('Detected intent');
+    const result = responses[0].queryResult;
+    console.log(`  Query: ${result.queryText}`);
+    console.log(`  Response: ${result.fulfillmentText}`);
+    if (result.intent) {
+      console.log(`  Intent: ${result.intent.displayName}`);
+    } else {
+      console.log(`  No intent matched.`);
+    }
+    if (result.sentimentAnalysisResult) {
+      console.log(`Detected sentiment`);
+      console.log(
+        `  Score: ${result.sentimentAnalysisResult.queryTextSentiment.score}`
+      );
+      console.log(
+        `  Magnitude: ${
+          result.sentimentAnalysisResult.queryTextSentiment.magnitude
+        }`
+      );
+    } else {
+      console.log(`No sentiment Analysis Found`);
+    }
+    // [END dialogflow_detect_intent_with_sentiment_analysis]
+  }
+  detectIntentandSentiment();
+}
+main(...process.argv.slice(2));
diff --git a/dialogflow/system-test/detect.test.js b/dialogflow/system-test/detect.test.js
index 817dc28a577..d3a30ac43aa 100644
--- a/dialogflow/system-test/detect.test.js
+++ b/dialogflow/system-test/detect.test.js
@@ -20,7 +20,12 @@ const {assert} = require('chai');
 const execa = require('execa');
 
 const cmd = 'node detect.js';
+const cmd_tts = 'node detect-intent-TTS-response.v2.js';
+const cmd_sentiment = 'node detect-intent-sentiment.v2.js';
 const cwd = path.join(__dirname, '..');
+const projectId =
+  process.env.GCLOUD_PROJECT || process.env.GOOGLE_CLOUD_PROJECT;
+const testQuery = 'Where is my data stored?';
 
 const audioFilepathBookARoom = path
   .join(__dirname, '../resources/book_a_room.wav')
@@ -52,4 +57,23 @@
     );
     assert.include(stdout, 'Detected intent');
   });
+
+  it('should detect Intent with Text to Speech Response', async () => {
+    const {stdout} = await execa.shell(
+      `${cmd_tts} ${projectId} 'SESSION_ID' '${testQuery}' 'en-US' './resources/output.wav'`,
+      {cwd}
+    );
+    assert.include(
+      stdout,
+      'Audio content written to file: ./resources/output.wav'
+    );
+  });
+
+  it('should detect sentiment with intent', async () => {
+    const {stdout} = await execa.shell(
+      `${cmd_sentiment} ${projectId} 'SESSION_ID' '${testQuery}' 'en-US'`,
+      {cwd}
+    );
+    assert.include(stdout, 'Detected sentiment');
+  });
 });