Commit 80048fd1 authored by user

tts

parent c4a0447e
#!/bin/bash
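# Start the JACK audio stack and the rest of the streaming setup
# (HexChat, Liquidsoap, MOC, Ardour, OBS and the Sopel bots) as systemd user services.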
systemctl --user start jack.service
systemctl --user start jack-plumbing.service
systemctl --user start hexchat.service
systemctl --user start liquidsoap.service
systemctl --user start mocp.service
systemctl --user start ardour.service
systemctl --user start obs.service
systemctl --user start obs-tablet-remote.service
systemctl --user start shoutloop.service
systemctl --user start sopel_leftover_ifm.service
systemctl --user start sopel_leftover_indy.service
systemctl --user start sopel_leftover.service
#!/bin/bash
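# Report the status of the same systemd user services started by the script above.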
systemctl --user status jack.service
systemctl --user status ardour.service
systemctl --user status jack-plumbing.service
systemctl --user status hexchat.service
systemctl --user status liquidsoap.service
systemctl --user status mocp.service
systemctl --user status obs.service
systemctl --user status obs-tablet-remote.service
systemctl --user status shoutloop.service
systemctl --user status sopel_leftover_ifm.service
systemctl --user status sopel_leftover_indy.service
systemctl --user status sopel_leftover.service
#!/usr/bin/env python3
# Small espeak/GStreamer text-to-speech helper module.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)

# Global speech parameters; the helpers below clamp pitch and rate to [-99, 99].
voice = 'default'
pitch = 0
rate = -20
highlight_cb = None  # optional callback(word_index), fired on espeak mark messages
def _create_pipe():
    # espeak feeds the audiomixer alongside a quiet test tone; the mix goes out via JACK.
    pipeline = 'audiotestsrc freq=100 ! audiomixer name=mix ! jackaudiosink espeak name=source ! mix.'
    pipe = Gst.parse_launch(pipeline)

    def stop_cb(bus, message):
        pipe.set_state(Gst.State.NULL)

    def mark_cb(bus, message):
        structure = message.get_structure()
        if structure.get_name() == 'espeak-mark' and highlight_cb:
            mark = structure.get_value('mark')
            highlight_cb(int(mark))

    bus = pipe.get_bus()
    bus.add_signal_watch()
    bus.connect('message::eos', stop_cb)
    bus.connect('message::error', stop_cb)
    bus.connect('message::element', mark_cb)
    return (pipe.get_by_name('source'), pipe)
def _speech(source, pipe, words):
    source.props.pitch = pitch
    source.props.rate = rate
    source.props.voice = voice
    source.props.text = words
    pipe.set_state(Gst.State.PLAYING)

info_source, info_pipe = _create_pipe()
play_source, play_pipe = _create_pipe()

# track for marks
play_source.props.track = 2
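# Two pipelines are kept: `info_pipe` backs say() for quick announcements,
# while `play_pipe` backs play()/pause()/stop() and the word-mark highlighting.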
def voices():
    return info_source.props.voices

def say(words):
    _speech(info_source, info_pipe, words)
    print(words)

def play(words):
    _speech(play_source, play_pipe, words)

def is_stopped():
    # get_state() needs a timeout and returns (result, current state, pending state)
    for i in play_pipe.get_state(Gst.CLOCK_TIME_NONE):
        if isinstance(i, Gst.State) and i == Gst.State.NULL:
            return True
    return False
def stop():
    play_pipe.set_state(Gst.State.NULL)

def is_paused():
    for i in play_pipe.get_state(Gst.CLOCK_TIME_NONE):
        if isinstance(i, Gst.State) and i == Gst.State.PAUSED:
            return True
    return False

def pause():
    play_pipe.set_state(Gst.State.PAUSED)

def rate_up():
    global rate
    rate = min(99, rate + 10)

def rate_down():
    global rate
    rate = max(-99, rate - 10)

def pitch_up():
    global pitch
    pitch = min(99, pitch + 10)

def pitch_down():
    global pitch
    pitch = max(-99, pitch - 10)
def prepare_highlighting(label_text):
    i = 0
    j = 0
    word_begin = 0
    word_end = 0
    current_word = 0
    word_tuples = []
    omitted = [' ', '\n', '\r', '_', '[', '{', ']',
               '}', '|', '<', '>', '*', '+', '/', '\\']
    omitted_chars = set(omitted)
    while i < len(label_text):
        if label_text[i] not in omitted_chars:
            word_begin = i
            j = i
            while j < len(label_text) and \
                    label_text[j] not in omitted_chars:
                j = j + 1
            word_end = j
            i = j
            word_t = (word_begin, word_end,
                      label_text[word_begin:word_end].strip())
            if word_t[2] != '\r':
                word_tuples.append(word_t)
        i = i + 1
    return word_tuples
def add_word_marks(word_tuples):
    """Adds a mark between each word of text."""
    i = 0
    marked_up_text = '<speak> '
    while i < len(word_tuples):
        word_t = word_tuples[i]
        marked_up_text = marked_up_text + \
            '<mark name="' + str(i) + '"/>' + word_t[2]
        i = i + 1
    return marked_up_text + '</speak>'

say('lala')
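# A minimal usage sketch (the module name `tts` here is hypothetical, not part
# of this commit):
#
#   import tts
#   tts.highlight_cb = lambda index: print('speaking word', index)
#   tuples = tts.prepare_highlighting('hello marked up world')
#   tts.play(tts.add_word_marks(tuples))
#
# Each <mark name="N"/> produced by add_word_marks() comes back as an
# 'espeak-mark' element message on the bus, which _create_pipe() routes to
# highlight_cb.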
#!/usr/bin/env python3
# Stand-alone test: speak a fixed sentence through espeak/JACK and run a
# small line-oriented socket server on the GLib main loop.
import socket

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib, GObject

Gst.init(None)

voice = 'default'
pitch = 0
rate = -20
words = 'hallo dit is een testje'  # Dutch: "hello, this is a little test"

# espeak feeds the audiomixer alongside a live test tone; the mix goes out via JACK.
pipeline = 'audiotestsrc freq=100 is-live=true ! audiomixer name=mix ! jackaudiosink espeak name=source ! mix.'
pipe = Gst.parse_launch(pipeline)
source = pipe.get_by_name('source')
source.props.pitch = pitch
source.props.rate = rate
source.props.voice = voice
source.props.text = words
pipe.set_state(Gst.State.PLAYING)
def server(host, port):
    '''Initialize server and start listening.'''
    sock = socket.socket()
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, port))
    sock.listen(1)
    print("Listening...")
    GObject.io_add_watch(sock, GObject.IO_IN, listener)

def listener(sock, *args):
    '''Asynchronous connection listener. Starts a handler for each connection.'''
    conn, addr = sock.accept()
    print("Connected")
    GObject.io_add_watch(conn, GObject.IO_IN, handler)
    return True

def handler(conn, *args):
    '''Asynchronous connection handler. Processes each line from the socket.'''
    line = conn.recv(4096)
    if not len(line):
        print("Connection closed.")
        return False
    else:
        print(line)
        return True
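# Start the socket server on port 8080; the handler simply prints each received
# chunk to stdout, so it can be exercised with e.g. `nc localhost 8080` while
# the script is running.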
server('', 8080)
loop = GLib.MainLoop()
loop.run()
#!/usr/bin/env python
# Copyright (c) 2008 Carnegie Mellon University.
#
# You may modify and redistribute this file under the same terms as
# the CMU Sphinx system. See LICENSE for more information.
from gi import pygtkcompat
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
GObject.threads_init()
Gst.init(None)
gst = Gst
print("Using pygtkcompat and Gst from gi")
pygtkcompat.enable()
pygtkcompat.enable_gtk(version='3.0')
import gtk
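# Continuous speech recognition demo: the pocketsphinx GStreamer element posts
# element messages on the bus with partial and final hypotheses, which the
# handlers below render into the GTK text buffer. Requires the pocketsphinx
# GStreamer plugin to be installed.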
class DemoApp(object):
    """GStreamer/PocketSphinx Demo Application"""

    def __init__(self):
        """Initialize a DemoApp object"""
        self.init_gui()
        self.init_gst()

    def init_gui(self):
        """Initialize the GUI components"""
        self.window = gtk.Window()
        self.window.connect("delete-event", gtk.main_quit)
        self.window.set_default_size(400, 200)
        self.window.set_border_width(10)
        vbox = gtk.VBox()
        self.textbuf = gtk.TextBuffer()
        self.text = gtk.TextView(buffer=self.textbuf)
        self.text.set_wrap_mode(gtk.WRAP_WORD)
        vbox.pack_start(self.text)
        self.button = gtk.ToggleButton("Speak")
        self.button.connect('clicked', self.button_clicked)
        vbox.pack_start(self.button, False, False, 5)
        self.window.add(vbox)
        self.window.show_all()
    def init_gst(self):
        """Initialize the speech components"""
        self.pipeline = gst.parse_launch('autoaudiosrc ! audioconvert ! audioresample '
                                         + '! pocketsphinx ! fakesink')
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message::element', self.element_message)
        self.pipeline.set_state(gst.State.PAUSED)

    def element_message(self, bus, msg):
        """Receive element messages from the bus."""
        msgtype = msg.get_structure().get_name()
        if msgtype != 'pocketsphinx':
            return
        if msg.get_structure().get_value('final'):
            self.final_result(msg.get_structure().get_value('hypothesis'),
                              msg.get_structure().get_value('confidence'))
            self.pipeline.set_state(gst.State.PAUSED)
            self.button.set_active(False)
        elif msg.get_structure().get_value('hypothesis'):
            self.partial_result(msg.get_structure().get_value('hypothesis'))
    def partial_result(self, hyp):
        """Delete any previous selection, insert text and select it."""
        # All this stuff appears as one single action
        self.textbuf.begin_user_action()
        self.textbuf.delete_selection(True, self.text.get_editable())
        self.textbuf.insert_at_cursor(hyp)
        ins = self.textbuf.get_insert()
        iter = self.textbuf.get_iter_at_mark(ins)
        iter.backward_chars(len(hyp))
        self.textbuf.move_mark(ins, iter)
        self.textbuf.end_user_action()

    def final_result(self, hyp, confidence):
        """Insert the final result."""
        # All this stuff appears as one single action
        self.textbuf.begin_user_action()
        self.textbuf.delete_selection(True, self.text.get_editable())
        self.textbuf.insert_at_cursor(hyp)
        self.textbuf.end_user_action()
    def button_clicked(self, button):
        """Handle button presses."""
        if button.get_active():
            button.set_label("Stop")
            self.pipeline.set_state(gst.State.PLAYING)
        else:
            button.set_label("Speak")
            self.pipeline.set_state(gst.State.PAUSED)

app = DemoApp()
gtk.main()