
Oct 03: [FIX] Bug fixed 'voice_to_text'

pull/337/head
Cybrosys Technologies 7 months ago
commit 603ccc0709
  1. voice_to_text/__manifest__.py (2 lines changed)
  2. voice_to_text/doc/RELEASE_NOTES.md (7 lines changed)
  3. voice_to_text/models/res_config_settings.py (3 lines changed)
  4. voice_to_text/models/voice_recognition.py (6 lines changed)
  5. voice_to_text/static/src/js/command_palette.js (41 lines changed)
  6. voice_to_text/static/src/js/search_bar_voice.js (86 lines changed)
  7. voice_to_text/static/src/js/text_field.js (99 lines changed)
  8. voice_to_text/static/src/xml/text_field.xml (4 lines changed)
  9. voice_to_text/views/res_config_setting_views.xml (4 lines changed)

voice_to_text/__manifest__.py (2 lines changed)

@@ -51,6 +51,6 @@
         ],
     },
     'external_dependencies': {
-        'python': ['speech_recognition']
+        'python': ['Speechrecognition','pyaudio']
    },
 }
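Note on the dependency change: the PyPI distribution is published as SpeechRecognition (imported as speech_recognition), and PyAudio is what sr.Microphone() uses to open the input device. A minimal sketch of how the two declared dependencies are exercised together; this is an illustration only, not code from this commit:

    # Illustration only: checks that both external dependencies work together.
    # Assumes `pip install SpeechRecognition pyaudio` has been run.
    import speech_recognition as sr   # import name differs from the PyPI name
    import pyaudio                    # backend used by sr.Microphone

    recognizer = sr.Recognizer()
    with sr.Microphone() as source:   # raises OSError if PyAudio or an input device is missing
        recognizer.adjust_for_ambient_noise(source, duration=0.5)
        print("speech_recognition and pyaudio are ready")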

voice_to_text/doc/RELEASE_NOTES.md (7 lines changed)

@@ -5,3 +5,10 @@
 #### ADD
 - Initial Commit for - Voice To Text
+#### 01.10.2024
+#### Version 16.0.1.0.0
+##### BUGFIX
+- Optimized and added notifications for recording.
+- Added required external dependencies

voice_to_text/models/res_config_settings.py (3 lines changed)

@@ -26,8 +26,9 @@ class ResConfigSettings(models.TransientModel):
     """Used to add new fields and sets and gets the value for that fields"""
     _inherit = 'res.config.settings'
-    select_fastest_methode = fields.Selection(
+    select_fastest_method = fields.Selection(
         [('chrome', 'Google Chrome(Fastest Way)'),
          ('all_browser', 'All Browser(May occur buffering)')],
+        default='all_browser',
         string="Select the Method", help="The fastest method",
         config_parameter='voice_to_text.select_fastest_method')
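For context, the value of this field is persisted in ir.config_parameter under the key given by config_parameter='voice_to_text.select_fastest_method'. Before this commit the settings form wrote that key while get_the_browser read 'voice_to_text.select_fastest_methode', so the chosen method was never picked up. A short sketch of the read side under the corrected key; the helper name is hypothetical, not part of the module:

    # Hypothetical helper for illustration; reads the same key the settings
    # form now writes, falling back to the new 'all_browser' default.
    from odoo import models

    class ResConfigSettingsSketch(models.TransientModel):
        _inherit = 'res.config.settings'

        def _get_voice_method(self):
            return self.env['ir.config_parameter'].sudo().get_param(
                'voice_to_text.select_fastest_method', 'all_browser')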

voice_to_text/models/voice_recognition.py (6 lines changed)

@@ -20,7 +20,7 @@
 #
 #############################################################################
 import speech_recognition as sr
-from odoo import models, _
+from odoo import api,models, _
 class VoiceRecognition(models.Model):
@@ -31,9 +31,10 @@ class VoiceRecognition(models.Model):
     def get_the_browser(self):
         """Used to retrieve the browser/fastest method tht the user choose."""
         methode_browser = self.env['ir.config_parameter'].sudo().get_param(
-            'voice_to_text.select_fastest_methode')
+            'voice_to_text.select_fastest_method')
         return methode_browser

+    @api.model
     def recognize_speech(self):
         """This is used to recognizes the voice"""
         r = sr.Recognizer()
@@ -45,6 +46,7 @@ class VoiceRecognition(models.Model):
         except sr.UnknownValueError:
             return 0

+    @api.model
     def update_field(self, field, model, script, id):
         """This is used to write the voice text into the field"""
         if script:
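The two new @api.model decorators are what allow the frontend to call these methods through rpc.query with args: [] (no record ids). The hunks only show fragments of the method bodies, so here is a hedged sketch of the server-side flow they imply; only the signatures, the decorators, the sr.Recognizer() line, and the UnknownValueError/if script fragments come from the diff, the rest is illustrative:

    # Illustrative sketch, not the module's exact implementation.
    import speech_recognition as sr
    from odoo import api, models

    class VoiceRecognition(models.Model):
        _inherit = 'voice.recognition'   # model name used by the JS rpc.query calls

        @api.model
        def recognize_speech(self):
            """Listen on the server microphone and return the transcript (or 0)."""
            r = sr.Recognizer()
            with sr.Microphone() as source:          # needs the new pyaudio dependency
                audio = r.listen(source, phrase_time_limit=10)
            try:
                return r.recognize_google(audio)     # free Google Web Speech endpoint
            except sr.UnknownValueError:
                return 0                             # the JS treats a falsy result as "not recognized"

        @api.model
        def update_field(self, field, model, script, id):
            """Write the recognized text into `field` of record `id` on `model`."""
            if script:
                self.env[model].browse(id).write({field: script})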

voice_to_text/static/src/js/command_palette.js (41 lines changed)

@@ -1,24 +1,41 @@
 /** @odoo-module **/
-import { CommandPalette } from "@web/core/commands/command_palette";
-import { patch } from "@web/core/utils/patch";
+import {CommandPalette} from "@web/core/commands/command_palette";
+import {patch} from "@web/core/utils/patch";
+import {useService} from "@web/core/utils/hooks";
 import rpc from 'web.rpc';
 patch(CommandPalette.prototype, '@web/core/commands/command_palette', {
     setup() {
         this._super.apply();
+        this.notification = useService("notification");
     },
     //This function is used to recognize the voice
     async recordVoice(event) {
-        if (location.href.includes('http:')){
+        if (location.href.includes('http:')) {
+            const closeFun = this.notification.add(
+                "Recording....",
+                {
+                    title: "Recording",
+                    type: "success",
+                    sticky: true
+                },
+            );
+            setTimeout(() => closeFun(), 15000)
             var response = await rpc.query({
                 model: 'voice.recognition',
                 method: 'recognize_speech',
-                args: [,],
+                args: [],
             })
-            if (response){
+            if (response) {
                 this.state.searchValue = response
-            }
-            else{
-                this.state.searchValue = "Your voice could not be recognizing......"
+            } else {
+                this.notification.add(
+                    "Couldn't recognize the voice, please try again.",
+                    {
+                        title: "Recording",
+                        type: "info",
+                    },
+                )
             }
         }
     },

voice_to_text/static/src/js/search_bar_voice.js (86 lines changed)

@@ -1,45 +1,73 @@
 /** @odoo-module **/
-import { SearchBar } from "@web/search/search_bar/search_bar";
-import { patch } from "@web/core/utils/patch";
+import {SearchBar} from "@web/search/search_bar/search_bar";
+import {patch} from "@web/core/utils/patch";
 import rpc from 'web.rpc';
-var microphone = false
+import {useService} from "@web/core/utils/hooks";
+const {useEffect, useState} = owl;
 patch(SearchBar.prototype, '@web/search/search_bar/search_bar', {
     setup() {
         this._super(...arguments);
+        this.microphone = false;
+        this.data = useState({
+            response: false
+        });
+        this.notification = useService("notification");
+        useEffect(() => {
+            this.onSearchInput();
+        }, () => [this.data.response]);
     },
-    //This function is used to recognize the voice in the search bar
-    async recordVoiceBar(event) {
-        this.microphone = true
-        if (location.href.includes('http:')){
-            var response = await rpc.query({
-                model: 'voice.recognition',
-                method: 'recognize_speech',
-                args: [,],
-            })
-            if (response){
-                this.response = response
-            }
-            else{
-                this.response= "False"
-                var w_response = confirm("can't recognize try again....")
+    // Function to recognize the voice in the search bar
+    async recordVoiceBar() {
+        this.microphone = true;
+        if (location.href.includes('http:')) {
+            const closeFun = this.notification.add(
+                "Recording....",
+                {
+                    title: "Recording",
+                    type: "success",
+                    sticky: true
+                },
+            );
+            setTimeout(() => closeFun(), 15000)
+            try {
+                const response = await rpc.query({
+                    model: 'voice.recognition',
+                    method: 'recognize_speech',
+                    args: [],
+                });
+                this.data.response = response || false;
+                if (!response) {
+                    this.notification.add(
+                        "Couldn't recognize the voice, please try again.",
+                        {
+                            title: "Recording",
+                            type: "info",
+                        },
+                    )
+                }
+            } catch (error) {
+                console.error("RPC error: ", error);
             }
         }
     },
     onSearchInput(ev) {
-        if (this.microphone == true){
-            if(this.response != "False"){
-                ev.target.value = this.response;
-            }
-            else{
-                ev.target.value = "Your Voice can't recognize please try again.";
-            }
+        let query = ev?.target?.value?.trim();
+        if (this.microphone && this.data.response) {
+            query = this.data.response;
+            this.microphone = false;
         }
-        const query = ev.target.value
-        if (query.trim()) {
-            this.computeState({ query, expanded: [], focusedIndex: 0, subItems: [] });
+        if (query) {
+            this.computeState({query, expanded: [], focusedIndex: 0, subItems: []});
         } else if (this.items.length) {
             this.resetState();
         }
     }
-})
+});

voice_to_text/static/src/js/text_field.js (99 lines changed)

@@ -1,63 +1,88 @@
 /** @odoo-module **/
-import { TextField } from "@web/views/fields/text/text_field";
-import { patch } from "@web/core/utils/patch";
+import {TextField} from "@web/views/fields/text/text_field";
+import {patch} from "@web/core/utils/patch";
+import {useService} from "@web/core/utils/hooks";
 import rpc from 'web.rpc';
-patch(TextField.prototype,'@web/views/fields/text/text_field',{
+patch(TextField.prototype, '@web/views/fields/text/text_field', {
     setup() {
         this._super.apply();
+        this.notification = useService("notification");
     },
     // This function is used to recognize voice on the text fields
-    async recordText(ev){
+    async recordText(ev) {
         var self = this
         var browser = await rpc.query({
             model: 'voice.recognition',
             method: 'get_the_browser',
             args: [,],
-        }).then(function (data) {
-            if (data =='chrome'){
+        }).then((data) => {
+            console.log(this.notification);
+            const closeFun = this.notification.add(
+                "Recording....",
+                {
+                    title: "Recording",
+                    type: "success",
+                    sticky: true
+                },
+            );
+            setTimeout(() => closeFun(), 15000)
+            if (data === 'chrome') {
                 let final_transcript = "";
+                let interim_transcript = ""; // Define this variable for interim results
                 if ("webkitSpeechRecognition" in window) {
                     let speechRecognition = new webkitSpeechRecognition();
-                    if(speechRecognition){
+                    if (speechRecognition) {
                         speechRecognition.continuous = true;
-                        navigator.mediaDevices.getUserMedia({
-                            audio: true}).then(
-                            speechRecognition.start())
+                        navigator.mediaDevices.getUserMedia({audio: true}).then(() => {
+                            speechRecognition.start();
+                        });
                         speechRecognition.onresult = (e) => {
                             for (let i = e.resultIndex; i < e.results.length; ++i) {
                                 if (e.results[i].isFinal) {
                                     final_transcript += e.results[i][0].transcript;
                                 } else {
-                                    interim_consttranscript += event.results[i][0].transcript;
+                                    interim_transcript += e.results[i][0].transcript;
                                 }
                             }
-                            if(final_transcript){
-                                var field = this.__owl__.bdom.parentEl.attributes.name.nodeValue
-                                var model = this.props.record.resModel
-                                var browser = rpc.query({
+                            if (final_transcript) {
+                                const field = this.__owl__.bdom.parentEl.attributes.name.nodeValue;
+                                const model = this.props.record.resModel;
+                                const id = this.env.model.__bm_load_params__.res_id;
+                                console.log(id)
+                                console.log(final_transcript)
+                                rpc.query({
                                     model: 'voice.recognition',
                                     method: 'update_field',
-                                    args: [,field,model,final_transcript],
-                                })
+                                    args: [field, model, final_transcript,id],
+                                }).then(() => {
+                                    this.env.searchModel._notify();
+                                });
                             }
+                        };
+                    }
                     }
                 }
             }
-            else if(data=='all_browser') {
-                var field = self.__owl__.bdom.parentEl.attributes.name.nodeValue
-                var model = self.props.record.resModel
-                var id = self.env.model.__bm_load_params__.res_id
-                var browser = rpc.query({
+            else if (data === 'all_browser') {
+                const field = this.__owl__.bdom.parentEl.attributes.name.nodeValue;
+                const model = this.props.record.resModel;
+                const id = this.env.model.__bm_load_params__.res_id;
+                rpc.query({
                     model: 'voice.recognition',
                     method: 'update_field',
-                    args: [,field,model,false,id],
-                })
+                    args: [field, model, false, id],
+                }).then(() => {
+                    this.env.searchModel._notify();
+                });
             }
-        })
+        });
     }
 })

voice_to_text/static/src/xml/text_field.xml (4 lines changed)

@@ -4,11 +4,11 @@
     <t t-name="voiceText" t-inherit="web.TextField" t-inherit-mode="extension"
        owl="1">
         <xpath expr="." position="inside">
-            <div style="margin-left: 150px;margin-top: -55px;">
+            <div>
                 <button class="o_Composer_button o_Composer_toolButton btn btn-light fa fa-microphone border-0 rounded-pill mx-1"
                         id="record_voice" title="Voice" aria-label="Voice"
                         type="button"
-                        t-on-click="recordText">
+                        t-on-click="recordText" t-ref="voice_record_button">
                 </button>
             </div>
         </xpath>

voice_to_text/views/res_config_setting_views.xml (4 lines changed)

@@ -15,14 +15,14 @@
     <div class="row mt16 o_settings_container">
         <div class="col-12 col-lg-6 o_setting_box">
             <div class="o_setting_right_pane">
-                <label for="select_fastest_methode"
+                <label for="select_fastest_method"
                        string="Choose your way"/>
                 <div class="text-muted">
                     Depending on the browser the speech to text
                     conversion speed may change.Choose Your
                     browser.
                 </div>
-                <field name='select_fastest_methode'
+                <field name='select_fastest_method'
                     class="w-auto ps-3 fw-bold"
                     widget="radio"/>
             </div>
