diff --git a/voice_to_text/__manifest__.py b/voice_to_text/__manifest__.py
index 3349a6ef6..b74f3a1ec 100644
--- a/voice_to_text/__manifest__.py
+++ b/voice_to_text/__manifest__.py
@@ -51,6 +51,6 @@
         ],
     },
     'external_dependencies': {
-        'python': ['speech_recognition']
+        'python': ['Speechrecognition','pyaudio']
     },
 }
diff --git a/voice_to_text/doc/RELEASE_NOTES.md b/voice_to_text/doc/RELEASE_NOTES.md
index ad0936eea..81b04fc0c 100644
--- a/voice_to_text/doc/RELEASE_NOTES.md
+++ b/voice_to_text/doc/RELEASE_NOTES.md
@@ -5,3 +5,10 @@
 #### ADD
 - Initial Commit for
 - Voice To Text
+
+
+#### 01.10.2024
+#### Version 16.0.1.0.0
+##### BUGFIX
+- Optimized and added notifications for recording.
+- Added required external dependencies
diff --git a/voice_to_text/models/res_config_settings.py b/voice_to_text/models/res_config_settings.py
index f1154f95f..a67abc9cb 100644
--- a/voice_to_text/models/res_config_settings.py
+++ b/voice_to_text/models/res_config_settings.py
@@ -26,8 +26,9 @@ class ResConfigSettings(models.TransientModel):
     """Used to add new fields and sets and gets the value for that fields"""
     _inherit = 'res.config.settings'
 
-    select_fastest_methode = fields.Selection(
+    select_fastest_method = fields.Selection(
         [('chrome', 'Google Chrome(Fastest Way)'),
          ('all_browser', 'All Browser(May occur buffering)')],
+        default='all_browser',
         string="Select the Method", help="The fastest method",
         config_parameter='voice_to_text.select_fastest_method')
diff --git a/voice_to_text/models/voice_recognition.py b/voice_to_text/models/voice_recognition.py
index bd1fdfc49..c17ffc0bc 100644
--- a/voice_to_text/models/voice_recognition.py
+++ b/voice_to_text/models/voice_recognition.py
@@ -20,7 +20,7 @@
 #
 #############################################################################
 import speech_recognition as sr
-from odoo import models, _
+from odoo import api,models, _
 
 
 class VoiceRecognition(models.Model):
@@ -31,9 +31,10 @@ class VoiceRecognition(models.Model):
     def get_the_browser(self):
         """Used to retrieve the browser/fastest method tht the user choose."""
         methode_browser = self.env['ir.config_parameter'].sudo().get_param(
-            'voice_to_text.select_fastest_methode')
+            'voice_to_text.select_fastest_method')
         return methode_browser
 
+    @api.model
     def recognize_speech(self):
         """This is used to recognizes the voice"""
         r = sr.Recognizer()
@@ -45,6 +46,7 @@ class VoiceRecognition(models.Model):
         except sr.UnknownValueError:
             return 0
 
+    @api.model
     def update_field(self, field, model, script, id):
         """This is used to write the voice text into the field"""
         if script:
diff --git a/voice_to_text/static/src/js/command_palette.js b/voice_to_text/static/src/js/command_palette.js
index f6c00df44..45ff3ecf5 100644
--- a/voice_to_text/static/src/js/command_palette.js
+++ b/voice_to_text/static/src/js/command_palette.js
@@ -1,24 +1,41 @@
 /** @odoo-module **/
-import { CommandPalette } from "@web/core/commands/command_palette";
-import { patch } from "@web/core/utils/patch";
+import {CommandPalette} from "@web/core/commands/command_palette";
+import {patch} from "@web/core/utils/patch";
+import {useService} from "@web/core/utils/hooks";
 import rpc from 'web.rpc';
+
 patch(CommandPalette.prototype, '@web/core/commands/command_palette', {
     setup() {
         this._super.apply();
+        this.notification = useService("notification");
     },
     //This function is used to recognize the voice
     async recordVoice(event) {
-        if (location.href.includes('http:')){
-            var response = await rpc.query({
-                model: 'voice.recognition',
-                method: 'recognize_speech',
-                args: [,],
-            })
-            if (response){
+        if (location.href.includes('http:')) {
+            const closeFun = this.notification.add(
+                "Recording....",
+                {
+                    title: "Recording",
+                    type: "success",
+                    sticky: true
+                },
+            );
+            setTimeout(() => closeFun(), 15000)
+            var response = await rpc.query({
+                model: 'voice.recognition',
+                method: 'recognize_speech',
+                args: [],
+            })
+            if (response) {
                 this.state.searchValue = response
-            }
-            else{
-                this.state.searchValue = "Your voice could not be recognizing......"
+            } else {
+                this.notification.add(
+                    "Couldn't recognize the voice, please try again.",
+                    {
+                        title: "Recording",
+                        type: "info",
+                    },
+                )
             }
         }
     },
diff --git a/voice_to_text/static/src/js/search_bar_voice.js b/voice_to_text/static/src/js/search_bar_voice.js
index d481ac2a9..50a83045f 100644
--- a/voice_to_text/static/src/js/search_bar_voice.js
+++ b/voice_to_text/static/src/js/search_bar_voice.js
@@ -1,45 +1,73 @@
 /** @odoo-module **/
-import { SearchBar } from "@web/search/search_bar/search_bar";
-import { patch } from "@web/core/utils/patch";
+import {SearchBar} from "@web/search/search_bar/search_bar";
+import {patch} from "@web/core/utils/patch";
 import rpc from 'web.rpc';
-var microphone = false
+import {useService} from "@web/core/utils/hooks";
+
+const {useEffect, useState} = owl;
+
 patch(SearchBar.prototype, '@web/search/search_bar/search_bar', {
     setup() {
         this._super(...arguments);
+        this.microphone = false;
+        this.data = useState({
+            response: false
+        });
+        this.notification = useService("notification");
+        useEffect(() => {
+            this.onSearchInput();
+        }, () => [this.data.response]);
     },
-    //This function is used to recognize the voice in the search bar
-    async recordVoiceBar(event) {
-        this.microphone = true
-        if (location.href.includes('http:')){
-            var response = await rpc.query({
-                model: 'voice.recognition',
-                method: 'recognize_speech',
-                args: [,],
-            })
-            if (response){
-                this.response = response
-            }
-            else{
-                this.response= "False"
-                var w_response = confirm("can't recognize try again....")
+
+    // Function to recognize the voice in the search bar
+    async recordVoiceBar() {
+        this.microphone = true;
+
+        if (location.href.includes('http:')) {
+            const closeFun = this.notification.add(
+                "Recording....",
+                {
+                    title: "Recording",
+                    type: "success",
+                    sticky: true
+                },
+            );
+            setTimeout(() => closeFun(), 15000)
+            try {
+                const response = await rpc.query({
+                    model: 'voice.recognition',
+                    method: 'recognize_speech',
+                    args: [],
+                });
+                this.data.response = response || false;
+                if (!response) {
+                    this.notification.add(
+                        "Couldn't recognize the voice, please try again.",
+                        {
+                            title: "Recording",
+                            type: "info",
+                        },
+                    )
+                }
+            } catch (error) {
+                console.error("RPC error: ", error);
             }
         }
     },
+
     onSearchInput(ev) {
-        if (this.microphone == true){
-            if(this.response != "False"){
-                ev.target.value = this.response;
-            }
-            else{
-                ev.target.value = "Your Voice can't recognize please try again.";
-            }
+        let query = ev?.target?.value?.trim();
+
+        if (this.microphone && this.data.response) {
+            query = this.data.response;
+            this.microphone = false;
         }
-        const query = ev.target.value
-        if (query.trim()) {
-            this.computeState({ query, expanded: [], focusedIndex: 0, subItems: [] });
+
+        if (query) {
+            this.computeState({query, expanded: [], focusedIndex: 0, subItems: []});
         } else if (this.items.length) {
             this.resetState();
         }
     }
-})
+});
diff --git a/voice_to_text/static/src/js/text_field.js b/voice_to_text/static/src/js/text_field.js
index 631a2ed11..548558b29 100644
--- a/voice_to_text/static/src/js/text_field.js
+++ b/voice_to_text/static/src/js/text_field.js
@@ -1,63 +1,88 @@
 /** @odoo-module **/
-import { TextField } from "@web/views/fields/text/text_field";
-import { patch } from "@web/core/utils/patch";
+import {TextField} from "@web/views/fields/text/text_field";
+import {patch} from "@web/core/utils/patch";
+import {useService} from "@web/core/utils/hooks";
 import rpc from 'web.rpc';
-patch(TextField.prototype,'@web/views/fields/text/text_field',{
+
+patch(TextField.prototype, '@web/views/fields/text/text_field', {
     setup() {
-        this._super.apply();
+        this._super.apply();
+        this.notification = useService("notification");
    },
-    // This function is used to recognize voice on the text fields
-    async recordText(ev){
+    // This function is used to recognize voice on the text fields
+    async recordText(ev) {
        var self = this
        var browser = await rpc.query({
-            model: 'voice.recognition',
-            method: 'get_the_browser',
-            args: [,],
-        }).then(function (data) {
-            if (data =='chrome'){
+            model: 'voice.recognition',
+            method: 'get_the_browser',
+            args: [,],
+        }).then((data) => {
+            console.log(this.notification);
+            const closeFun = this.notification.add(
+                "Recording....",
+                {
+                    title: "Recording",
+                    type: "success",
+                    sticky: true
+                },
+            );
+            setTimeout(() => closeFun(), 15000)
+            if (data === 'chrome') {
                 let final_transcript = "";
+                let interim_transcript = ""; // Define this variable for interim results
+
                 if ("webkitSpeechRecognition" in window) {
                     let speechRecognition = new webkitSpeechRecognition();
-                    if(speechRecognition){
+
+                    if (speechRecognition) {
                         speechRecognition.continuous = true;
-                        navigator.mediaDevices.getUserMedia({
-                            audio: true}).then(
-                            speechRecognition.start())
-                        speechRecognition.onresult = (e) => {
+
+                        navigator.mediaDevices.getUserMedia({audio: true}).then(() => {
+                            speechRecognition.start();
+                        });
+
+                        speechRecognition.onresult = (e) => {
                             for (let i = e.resultIndex; i < e.results.length; ++i) {
                                 if (e.results[i].isFinal) {
                                     final_transcript += e.results[i][0].transcript;
                                 } else {
-                                    interim_consttranscript += event.results[i][0].transcript;
+                                    interim_transcript += e.results[i][0].transcript;
                                 }
                             }
-                            if(final_transcript){
-                                var field = this.__owl__.bdom.parentEl.attributes.name.nodeValue
-                                var model = this.props.record.resModel
-                                var browser = rpc.query({
+
+                            if (final_transcript) {
+                                const field = this.__owl__.bdom.parentEl.attributes.name.nodeValue;
+                                const model = this.props.record.resModel;
+                                const id = this.env.model.__bm_load_params__.res_id;
+                                console.log(id)
+                                console.log(final_transcript)
+                                rpc.query({
                                     model: 'voice.recognition',
                                     method: 'update_field',
-                                    args: [,field,model,final_transcript],
-                                })
-
+                                    args: [field, model, final_transcript,id],
+                                }).then(() => {
+                                    this.env.searchModel._notify();
+                                });
                             }
-
-                        }
+                        };
                     }
-                }
+                }
             }
-            else if(data=='all_browser') {
-                var field = self.__owl__.bdom.parentEl.attributes.name.nodeValue
-                var model = self.props.record.resModel
-                var id = self.env.model.__bm_load_params__.res_id
-                var browser = rpc.query({
-                    model: 'voice.recognition',
-                    method: 'update_field',
-                    args: [,field,model,false,id],
-                })
+            else if (data === 'all_browser') {
+                const field = this.__owl__.bdom.parentEl.attributes.name.nodeValue;
+                const model = this.props.record.resModel;
+                const id = this.env.model.__bm_load_params__.res_id;
+                rpc.query({
+                    model: 'voice.recognition',
+                    method: 'update_field',
+                    args: [field, model, false, id],
+                }).then(() => {
+                    this.env.searchModel._notify();
+                });
             }
-        })
+        });
+    }
 })
diff --git a/voice_to_text/static/src/xml/text_field.xml b/voice_to_text/static/src/xml/text_field.xml
index f1ed94299..343c1820a 100644
--- a/voice_to_text/static/src/xml/text_field.xml
+++ b/voice_to_text/static/src/xml/text_field.xml
@@ -4,11 +4,11 @@
-
+
diff --git a/voice_to_text/views/res_config_setting_views.xml b/voice_to_text/views/res_config_setting_views.xml
index ba0f86562..3293e9f29 100644
--- a/voice_to_text/views/res_config_setting_views.xml
+++ b/voice_to_text/views/res_config_setting_views.xml
@@ -15,14 +15,14 @@
-