Compare commits
10 Commits
32b1c54fbc
...
d5a4bbb7e2
Author | SHA1 | Date |
---|---|---|
Martin Bauer | d5a4bbb7e2 | |
Martin Bauer | 6128d8706d | |
Martin Bauer | 6850c9646e | |
Martin Bauer | 828d1b3360 | |
Martin Bauer | 804940bfac | |
Martin Bauer | a67c6078c5 | |
Martin Bauer | 42f0701c0b | |
Martin Bauer | 4c5db49c61 | |
Martin Bauer | 1458793d74 | |
Martin Bauer | 1a17d49599 |
|
@ -3,3 +3,4 @@ venv
|
|||
build
|
||||
*.FCStd1
|
||||
*.blend1
|
||||
__pycache__
|
||||
|
|
Binary file not shown.
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 1.3 MiB |
|
@ -3,5 +3,8 @@
|
|||
// for the documentation about the extensions.json format
|
||||
"recommendations": [
|
||||
"platformio.platformio-ide"
|
||||
],
|
||||
"unwantedRecommendations": [
|
||||
"ms-vscode.cpptools-extension-pack"
|
||||
]
|
||||
}
|
||||
|
|
|
@ -43,5 +43,15 @@
|
|||
"cinttypes": "cpp",
|
||||
"utility": "cpp",
|
||||
"typeinfo": "cpp"
|
||||
}
|
||||
},
|
||||
"vsmqtt.brokerProfiles": [
|
||||
{
|
||||
"name": "homeassistant",
|
||||
"host": "homeassistant",
|
||||
"port": 1883,
|
||||
"username": "musicmouse",
|
||||
"clientId": "vsmqtt_client",
|
||||
"password": "KNLEFLZF94yA6Zhj141",
|
||||
}
|
||||
]
|
||||
}
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,434 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<div>\n",
|
||||
"<a href=\"http://www.music-processing.de/\"><img style=\"float:left;\" src=\"../data/FMP_Teaser_Cover.png\" width=40% alt=\"FMP\"></a>\n",
|
||||
"<a href=\"https://www.audiolabs-erlangen.de\"><img src=\"../data/Logo_AudioLabs_Long.png\" width=59% style=\"float: right;\" alt=\"AudioLabs\"></a>\n",
|
||||
"</div>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<div>\n",
|
||||
"<a href=\"../C5/C5.html\"><img src=\"../data/C5_nav.png\" width=\"100\" style=\"float:right;\" alt=\"C5\"></a>\n",
|
||||
"<h1>Hidden Markov Model (HMM)</h1> \n",
|
||||
"</div>\n",
|
||||
"\n",
|
||||
"<br/>\n",
|
||||
"\n",
|
||||
"<p>\n",
|
||||
"Motivated by the chord recognition problem, we give in this notebook an overview of hidden Markov models (HMMs) and introduce three famous algorithmic problems related with HMMs following Section 5.3 of <a href=\"http://www.music-processing.de/\">[Müller, FMP, Springer 2015]</a>. For a detailed introduction of HMMs, we refer to the famous tutorial paper by Rabiner.\n",
|
||||
"\n",
|
||||
"<ul>\n",
|
||||
"<li><span style=\"color:black\">\n",
|
||||
"Lawrence R. Rabiner: <strong>A Tutorial on Hidden Markov Models and Selected Applications in Speech Recognition.</strong> Proceedings of the IEEE, 77 (1989), pp. 257–286. \n",
|
||||
"<br> \n",
|
||||
"<a type=\"button\" class=\"btn btn-default btn-xs\" target=\"_blank\" href=\"../data/bibtex/FMP_bibtex_Rabiner89_HMM_IEEE.txt\"> Bibtex </a>\n",
|
||||
"</span></li>\n",
|
||||
"</ul>\n",
|
||||
"</p> "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Markov Chains\n",
|
||||
"\n",
|
||||
"Certain transitions from one chord to another are more likely than others. To capture such likelihoods, one can employ a concept called **Markov chains**. Abstracting from our chord recognition scenario, we assume that the chord types to be considered are represented by a set \n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
" \\mathcal{A}:=\\{\\alpha_{1},\\alpha_{2},\\ldots,\\alpha_{I}\\}\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"of size $I\\in\\mathbb{N}$. The elements $\\alpha_{i}$ for $i\\in[1:I]$ are referred to as **states**. A progression of chords is realized by a system that can be described at any time instance $n=1,2,3,\\ldots$ as being in some state $s_{n}\\in\\mathcal{A}$. The change from one state to another is specified according to a set of probabilities associated with each state. In general, a probabilistic description of such a system can be quite complex. To simplify the model, one often makes the assumption that the probability of a change from the current state $s_{n}$ to the next state $s_{n+1}$ only depends on the current state, and not on the events that preceded it. In terms of conditional probabilities, this property is expressed by\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
" P[s_{n+1}=\\alpha_{j}|s_{n}=\\alpha_{i},s_{n-1}=\\alpha_{k},\\ldots]\n",
|
||||
" = P[s_{n+1}=\\alpha_{j}|s_{n}=\\alpha_{i}].\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"The specific kind of \"amnesia\" is called the **Markov property**. Besides this property, one also often assumes that the system is **invariant under time shifts**, which means by definition that the following coefficients become independent of the index $n$:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
" a_{ij} := P[s_{n+1}=\\alpha_{j} | s_{n}=\\alpha_{i}] \\in [0,1]\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"for $i,j\\in[1:I]$. These coefficients are also called **state transition probabilities**. They obey the standard stochastic constraint $\\sum_{j=1}^{I} a_{ij} = 1$ and can be expressed by an $(I\\times I)$ matrix, which we denote by $A$. A system that satisfies these properties is also called a (discrete-time) **Markov chain**. The following figure illustrates these definitions. It defines a Markov chain that consists of $I=3$ states $\\alpha_{1}$, $\\alpha_{2}$, and $\\alpha_{3}$, which correspond to the major chords $\\mathbf{C}$, $\\mathbf{G}$, and $\\mathbf{F}$, respectively. In the graph representation, the states correspond to the nodes, the transitions to the edges, and the transition probabilities to the labels attached to the edges. For example, the transition probability to remain in the state $\\alpha_{1}=\\mathbf{C}$ is $a_{11}=0.8$, whereas the transition probability of changing from $\\alpha_{1}=\\mathbf{C}$ to $\\alpha_{2}=\\mathbf{G}$ is $a_{12}=0.1$.\n",
|
||||
"\n",
|
||||
"<img src=\"../data/C5/FMP_C5_F24.png\" width=\"550px\" align=\"middle\" alt=\"FMP_C5_F24\">\n",
|
||||
"\n",
|
||||
"The model expresses the probability of all possible chord changes. To compute the probability of a given chord progression, one also needs the information on how the model gets started. This information is specified\n",
|
||||
"by additional model parameters referred to as **initial state probabilities**. For a general Markov chain, these probabilities are specified by the numbers\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
" c_{i} := P[s_{1}=\\alpha_{i}] \\in [0,1]\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"for $i\\in[1:I]$. These coefficients, which sum up to one, can be expressed by a vector of length $I$ denoted by $C$."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Hidden Markov Models\n",
|
||||
"\n",
|
||||
"Based on a Markov chain, one can compute a probability for a given observation consisting of a sequence of states or chord types. In our [chord recognition scenario](../C5/C5S2_ChordRec_Templates.html), however, this is not what we need. Rather than observing a sequence of chord types, we observe a **sequence of chroma vectors** that are somehow related to the chord types. In other words, the state sequence is not directly visible, but only a fuzzier observation sequence that is generated based on the state sequence. This leads to an extension of Markov chains to a statistical model referred to as a **hidden Markov model** (HMM). The idea is to represent the relation between the observed feature vectors and the chord types (the states) using a probabilistic framework. Each state is equipped with a probability function that expresses the likelihood for a given chord type to output or emit a certain feature vector. As a result, we obtain a two-layered process consisting of a **hidden layer** and an **observable layer**. The hidden layer produces a state sequence that is not observable (\"hidden\"), but generates the observation sequence on the basis of the state-dependent probability functions.\n",
|
||||
"\n",
|
||||
"The **first layer** of an HMM is a **Markov chain** as introduced above. To define the second layer of an HMM, we need to specify a space of possible output values and a probability function for each state. In general, the output space can be any set including the real numbers, a vector space, or any kind of feature space. For example, in the case of chord recognition, this space may be modeled as the feature space $\\mathcal{F}=\\mathbb{R}^{12}$ consisting of all possible $12$-dimensional chroma vectors. For the sake of simplicity, we only consider the case of a **discrete HMM**, where the output space is assumed to be discrete and even finite. In this case, the space can be modeled as a finite set \n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
" \\mathcal{B} = \\{\\beta_{1},\\beta_{2},\\ldots,\\beta_{K}\\} \n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"of size $K\\in\\mathbb{N}$ consisting of distinct output elements $\\beta_{k}$, $k\\in[1:K]$, which are also referred to as **observation symbols**. An HMM associates with each state a probability function, which is also referred to as the **emission probability** or **output probability**. In the discrete case, the emission probabilities are specified by coefficients\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
" b_{ik}\\in[0,1]\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"for $i\\in[1:I]$ and $k\\in[1:K]$. Each coefficient $b_{ik}$ expresses the probability of the system to output the observation symbol $\\beta_{k}$ when in state $\\alpha_{i}$. Similarly to the state transition probabilities, the emission probabilities are required to satisfy the stochastic constraint $\\sum_{k=1}^{K} \\beta_{ik} = 1$ for $i\\in[1:I]$ (thus forming a probability distribution for each state). The coefficients can be expressed by an $(I\\times K)$ matrix, which we denote by $B$. In summary, an HMM is specified by a tuple\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
" \\Theta:=(\\mathcal{A},A,C,\\mathcal{B},B).\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"The sets $\\mathcal{A}$ and $\\mathcal{B}$ are usually considered to be fixed components of the model, while the probability values specified by $A$, $B$, and $C$ are the free parameters to be determined. This can be done explicitly by an expert based on his or her musical knowledge or by employing a learning procedure based on suitably labeled training data. Continuing the above example, the following figure illustrates a hidden Markov model, where the state-dependent emission probabilities are indicated by the labels of the dashed arrows.\n",
|
||||
"\n",
|
||||
"<img src=\"../data/C5/FMP_C5_F25.png\" width=\"400px\" align=\"middle\" alt=\"FMP_C5_F25\">"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In the following code cell, we define the state transition probability matrix $A$ and the output probability $B$ as specified by the figure. \n",
|
||||
"\n",
|
||||
"* Here, we assume that $\\alpha_{1}=\\mathbf{C}$, $\\alpha_{2}=\\mathbf{G}$, and $\\alpha_{3}=\\mathbf{F}$. \n",
|
||||
"* Furthermore, the elements of the output space $\\mathcal{B} = \\{\\beta_{1},\\beta_{2},\\beta_{3}\\}$ represent the three chroma vectors ordered from left to right. \n",
|
||||
"* Finally, we assume that the initial state probability vector $C$ is given by the values $c_{1}=0.6$, $c_{2}=0.2$, $c_{3}=0.2$."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"from sklearn.preprocessing import normalize \n",
|
||||
"\n",
|
||||
"A = np.array([[0.8, 0.1, 0.1], \n",
|
||||
" [0.2, 0.7, 0.1], \n",
|
||||
" [0.1, 0.3, 0.6]])\n",
|
||||
"\n",
|
||||
"C = np.array([0.6, 0.2, 0.2])\n",
|
||||
"\n",
|
||||
"B = np.array([[0.7, 0.0, 0.3], \n",
|
||||
" [0.1, 0.9, 0.0], \n",
|
||||
" [0.0, 0.2, 0.8]])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## HMM-Based Sequence Generation \n",
|
||||
"\n",
|
||||
"Once an HMM is specified by $\\Theta:=(\\mathcal{A},A,C,\\mathcal{B},B)$, it can be used for various analysis and synthesis applications. Since it is very instructive, we now discuss how to (artificially) generate, on the basis of a given HMM, an observation sequence $O=(o_{1},o_{2},\\ldots,o_{N})$ of length $N\\in\\mathbb{N}$ with $o_n\\in \\mathcal{B}$, $n\\in[1:N]$. The generation procedure is as follows:\n",
|
||||
"\n",
|
||||
"1. Set $n=1$ and choose an initial state $s_n=\\alpha_i$ for some $i\\in[1:I]$ according to the initial state distribution $C$.\n",
|
||||
"2. Generate an observation $o_n=\\beta_k$ for some $k\\in[1:K]$ according to the emission probability in state $s_n=\\alpha_i$ (specified by the $i^{\\mathrm{th}}$ row of $B$).\n",
|
||||
"3. If $n=N$ then terminate the process. Otherwise, if $n<N$, transit to the new state $s_{n+1}=\\alpha_{j}$ according to the state transition probability at state $s_n=\\alpha_i$ (specified by the $i^{\\mathrm{th}}$ row of $A$). Then increase $n$ by one and return to step 2.\n",
|
||||
"\n",
|
||||
"In the next code cell, we implement this procedure and apply it to the example HMM specified above. Note that, due to Python conventions, we start in our implementation with index `0`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"n = 0, S[0] = 0, O[0] = 0\n",
|
||||
"n = 1, S[1] = 0, O[1] = 0\n",
|
||||
"n = 2, S[2] = 0, O[2] = 2\n",
|
||||
"n = 3, S[3] = 0, O[3] = 0\n",
|
||||
"n = 4, S[4] = 0, O[4] = 0\n",
|
||||
"n = 5, S[5] = 0, O[5] = 0\n",
|
||||
"n = 6, S[6] = 0, O[6] = 2\n",
|
||||
"n = 7, S[7] = 0, O[7] = 0\n",
|
||||
"n = 8, S[8] = 0, O[8] = 0\n",
|
||||
"n = 9, S[9] = 0, O[9] = 0\n",
|
||||
"State sequence S: [0 0 0 0 0 0 0 0 0 0]\n",
|
||||
"Observation sequence O: [0 0 2 0 0 0 2 0 0 0]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def generate_sequence_hmm(N, A, C, B, details=False):\n",
|
||||
" \"\"\"Generate observation and state sequence from given HMM\n",
|
||||
"\n",
|
||||
" Notebook: C5/C5S3_HiddenMarkovModel.ipynb\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" N (int): Number of observations to be generated\n",
|
||||
" A (np.ndarray): State transition probability matrix of dimension I x I\n",
|
||||
" C (np.ndarray): Initial state distribution of dimension I\n",
|
||||
" B (np.ndarray): Output probability matrix of dimension I x K\n",
|
||||
" details (bool): If \"True\" then shows details (Default value = False)\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" O (np.ndarray): Observation sequence of length N\n",
|
||||
" S (np.ndarray): State sequence of length N\n",
|
||||
" \"\"\"\n",
|
||||
" assert N > 0, \"N should be at least one\"\n",
|
||||
" I = A.shape[1]\n",
|
||||
" K = B.shape[1]\n",
|
||||
" assert I == A.shape[0], \"A should be an I-square matrix\"\n",
|
||||
" assert I == C.shape[0], \"Dimension of C should be I\"\n",
|
||||
" assert I == B.shape[0], \"Column-dimension of B should be I\"\n",
|
||||
"\n",
|
||||
" O = np.zeros(N, int)\n",
|
||||
" S = np.zeros(N, int)\n",
|
||||
" for n in range(N):\n",
|
||||
" if n == 0:\n",
|
||||
" i = np.random.choice(np.arange(I), p=C)\n",
|
||||
" else:\n",
|
||||
" i = np.random.choice(np.arange(I), p=A[i, :])\n",
|
||||
" k = np.random.choice(np.arange(K), p=B[i, :])\n",
|
||||
" S[n] = i\n",
|
||||
" O[n] = k\n",
|
||||
" if details:\n",
|
||||
" print('n = %d, S[%d] = %d, O[%d] = %d' % (n, n, S[n], n, O[n]))\n",
|
||||
" return O, S\n",
|
||||
"\n",
|
||||
"N = 10\n",
|
||||
"O, S = generate_sequence_hmm(N, A, C, B, details=True)\n",
|
||||
"print('State sequence S: ', S)\n",
|
||||
"print('Observation sequence O:', O)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As a sanity check for the plausibility of our sequence generation approach, we now check if the generated sequences reflect well the probabilities of our HMM. To this end, we estimate the original transition probability matrix $A$ and the output probability matrix $B$ from a generated observation sequence $O$ and state sequence $S$.\n",
|
||||
"\n",
|
||||
"* To obtain an estimate of the entry $a_{ij}$ of $A$, we count all transitions from $n$ to $n+1$ with $S(n)=\\alpha_i$ and $S(n+1)=\\alpha_j$ and then divide this number by the total number of transitions starting with $\\alpha_i$.\n",
|
||||
"\n",
|
||||
"* Similarly, to obtain an estimate of the entry $b_{ik}$ of $B$, we count the number of occurrences $n$ with $S(n)=\\alpha_i$ and $O(n)=\\beta_k$ and divide this number by the total number of occurrences of $\\alpha_i$ in $S$.\n",
|
||||
"\n",
|
||||
"When generating longer sequences by increasing the number $N$, the resulting estimates should approach the original values in $A$ and $B$. This is demonstrated by the subsequent experiment. \n",
|
||||
"\n",
|
||||
"<div class=\"alert alert-block alert-warning\">\n",
|
||||
"Note: In practice, when estimating HMM model parameters from training data, only <strong>observation sequences</strong> are typically available, and the state sequences (that reflect the hidden generation process) are generally not known. Learning parameters only from observation sequences leads to much harder estimation problems as discussed below. \n",
|
||||
"</div> "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"======== Estimation results when using N = 100 ========\n",
|
||||
"A =\n",
|
||||
"[[ 0.800 0.100 0.100]\n",
|
||||
" [ 0.200 0.700 0.100]\n",
|
||||
" [ 0.100 0.300 0.600]]\n",
|
||||
"A_est =\n",
|
||||
"[[ 0.795 0.091 0.114]\n",
|
||||
" [ 0.172 0.655 0.172]\n",
|
||||
" [ 0.154 0.269 0.577]]\n",
|
||||
"B =\n",
|
||||
"[[ 0.700 0.000 0.300]\n",
|
||||
" [ 0.100 0.900 0.000]\n",
|
||||
" [ 0.000 0.200 0.800]]\n",
|
||||
"B_est =\n",
|
||||
"[[ 0.705 0.000 0.295]\n",
|
||||
" [ 0.167 0.833 0.000]\n",
|
||||
" [ 0.000 0.423 0.577]]\n",
|
||||
"======== Estimation results when using N = 10000 ========\n",
|
||||
"A =\n",
|
||||
"[[ 0.800 0.100 0.100]\n",
|
||||
" [ 0.200 0.700 0.100]\n",
|
||||
" [ 0.100 0.300 0.600]]\n",
|
||||
"A_est =\n",
|
||||
"[[ 0.799 0.097 0.104]\n",
|
||||
" [ 0.198 0.696 0.106]\n",
|
||||
" [ 0.097 0.306 0.597]]\n",
|
||||
"B =\n",
|
||||
"[[ 0.700 0.000 0.300]\n",
|
||||
" [ 0.100 0.900 0.000]\n",
|
||||
" [ 0.000 0.200 0.800]]\n",
|
||||
"B_est =\n",
|
||||
"[[ 0.708 0.000 0.292]\n",
|
||||
" [ 0.103 0.897 0.000]\n",
|
||||
" [ 0.000 0.205 0.795]]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def estimate_hmm_from_o_s(O, S, I, K):\n",
|
||||
" \"\"\"Estimate the state transition and output probability matrices from\n",
|
||||
" a given observation and state sequence\n",
|
||||
"\n",
|
||||
" Notebook: C5/C5S3_HiddenMarkovModel.ipynb\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" O (np.ndarray): Observation sequence of length N\n",
|
||||
" S (np.ndarray): State sequence of length N\n",
|
||||
" I (int): Number of states\n",
|
||||
" K (int): Number of observation symbols\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" A_est (np.ndarray): State transition probability matrix of dimension I x I\n",
|
||||
" B_est (np.ndarray): Output probability matrix of dimension I x K\n",
|
||||
" \"\"\"\n",
|
||||
" # Estimate A\n",
|
||||
" A_est = np.zeros([I, I])\n",
|
||||
" N = len(S)\n",
|
||||
" for n in range(N-1):\n",
|
||||
" i = S[n]\n",
|
||||
" j = S[n+1]\n",
|
||||
" A_est[i, j] += 1\n",
|
||||
" A_est = normalize(A_est, axis=1, norm='l1')\n",
|
||||
"\n",
|
||||
" # Estimate B\n",
|
||||
" B_est = np.zeros([I, K])\n",
|
||||
" for i in range(I):\n",
|
||||
" for k in range(K):\n",
|
||||
" B_est[i, k] = np.sum(np.logical_and(S == i, O == k))\n",
|
||||
" B_est = normalize(B_est, axis=1, norm='l1')\n",
|
||||
" return A_est, B_est\n",
|
||||
"\n",
|
||||
"N = 100\n",
|
||||
"print('======== Estimation results when using N = %d ========' % N)\n",
|
||||
"O, S = generate_sequence_hmm(N, A, C, B, details=False)\n",
|
||||
"A_est, B_est = estimate_hmm_from_o_s(O, S, A.shape[1], B.shape[1])\n",
|
||||
"np.set_printoptions(formatter={'float': \"{: 7.3f}\".format})\n",
|
||||
"print('A =', A, sep='\\n')\n",
|
||||
"print('A_est =', A_est, sep='\\n')\n",
|
||||
"print('B =', B, sep='\\n')\n",
|
||||
"print('B_est =', B_est, sep='\\n')\n",
|
||||
"\n",
|
||||
"N = 10000\n",
|
||||
"print('======== Estimation results when using N = %d ========' % N)\n",
|
||||
"O, S = generate_sequence_hmm(N, A, C, B, details=False)\n",
|
||||
"A_est, B_est = estimate_hmm_from_o_s(O, S, A.shape[1], B.shape[1])\n",
|
||||
"np.set_printoptions(formatter={'float': \"{: 7.3f}\".format})\n",
|
||||
"print('A =', A, sep='\\n')\n",
|
||||
"print('A_est =', A_est, sep='\\n')\n",
|
||||
"print('B =', B, sep='\\n')\n",
|
||||
"print('B_est =', B_est, sep='\\n')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"## Three Problems for HMMs\n",
|
||||
"\n",
|
||||
"We have seen how a given HMM can be used to generate an observation sequence. We will now look at three famous algorithmic problems for HMMs that concern the specification of the free model parameters and the evaluation of observation sequences. \n",
|
||||
"\n",
|
||||
"### 1. Evaluation Problem\n",
|
||||
"\n",
|
||||
"The first problem is known as **evaluation problem**. Given an HMM specified by $\\Theta=(\\mathcal{A},A,C,\\mathcal{B},B)$ and an observation sequence $O=(o_{1},o_{2},\\ldots,o_{N})$, the task is to compute the probability \n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
" P[O|\\Theta]\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"of the observation sequence given the model. From a slightly different viewpoint, this probability can be regarded as a score value that expresses how well a given model matches a given observation sequence. This interpretation becomes useful in the case where one is trying to choose among several competing models. The solution would then be to choose the model which best matches the observation sequence. To compute $P[O|\\Theta]$, we first consider a fixed state sequence $S=(s_1,s_2,\\ldots,s_N)$ of length $N$ with $s_n=\\alpha_{i_n}\\in\\mathcal{A}$ for some suitable $i_n\\in[1:I]$, $n\\in[1:N]$. The probability $P[O,S|\\Theta]$ for generating the state sequence $S$ as well as the observation sequence $O$ is given by \n",
|
||||
"\n",
|
||||
"$$\n",
|
||||
"P[O,S|\\Theta] = c_{i_1}\\cdot b_{i_1k_1} \\cdot a_{i_1i_2}\\cdot b_{i_2k_2} \\cdot ...\\cdot a_{i_{N-1}i_N}\\cdot b_{i_Nk_N}\n",
|
||||
"$$\n",
|
||||
"\n",
|
||||
"Next, to obtain the overall probability $P[O|\\Theta]$, one needs to sum up all these probabilities considering all possible state sequences $S$ of length $|S|=N$:\n",
|
||||
"\n",
|
||||
"$$\n",
|
||||
"P[O|\\Theta] = \\sum_{S: |S|=N}P[O,S|\\Theta]\n",
|
||||
"= \\sum_{i_1=1}^I \\sum_{i_2=1}^I \\ldots \\sum_{i_N=1}^I\n",
|
||||
"c_{i_1}\\cdot b_{i_1k_1} \\cdot a_{i_1i_2}\\cdot b_{i_2k_2} \\cdot ...\\cdot a_{i_{N-1}i_N}\\cdot b_{i_Nk_N}\n",
|
||||
"$$\n",
|
||||
"\n",
|
||||
"This leads to $I^N$ summands, a number that is exponential in the length $N$ of the observation sequence. Therefore, in practice, this brute-force calculation is computationally infeasible even for a small $N$. The good news is that there is a more efficient way to compute $P[O|\\Theta]$ using an algorithm that is based on the dynamic programming paradigm. This procedure, which is known as [**Forward–Backward Algorithm**](https://en.wikipedia.org/wiki/Forward%E2%80%93backward_algorithm), requires a number of operations on the order of $I^2N$ (instead of $I^N$). For a detailed description of this algorithm, we refer to the article by [Rabiner](https://ieeexplore.ieee.org/document/18626).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### 2. Uncovering Problem\n",
|
||||
"\n",
|
||||
"The second problem is the so-called **uncovering problem**. Again, we are given an HMM specified by $\\Theta=(\\mathcal{A},A,C,\\mathcal{B},B)$ and an observation sequence $O=(o_{1},o_{2},\\ldots,o_{N})$. Instead of finding the overall probability $P[O|\\Theta]$ for $O$, where one needs to consider **all** possible state sequences, the goal of the uncovering problem is to find the **single** state sequence $S=(s_{1},s_{2},\\ldots,s_{N})$ that \"best explains\" the observation sequence. The uncovering problem stated so far is not well defined since, in general, there is not a single \"correct\" state sequence generating the observation sequence. Indeed, one needs a kind of optimization criterion that specifies what is meant when talking about a best possible explanation. There are several reasonable choices for such a criterion, and the actual choice will depend on the intended application. In the [FMP notebook on the Viterbi algorithm](../C5/C5S3_Viterbi.html), we will discuss one possible choice as well as an efficient algorithm (called **Viterbi algorithm**). This algorithm, which can be thought of as a kind of context-sensitive smoothing procedure, will apply in the [FMP notebook on HMM-based chord recognition](../C5/C5S3_ChordRec_HMM.html). \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### 3. Estimation Problem\n",
|
||||
"\n",
|
||||
"Besides the evaluation and uncovering problems, the third basic problem for HMMs is referred to as the **estimation problem**. Given an observation sequence $O$, the objective is to determine the free model parameters of $\\Theta$ (specified by by $A$, $C$, and $B$) that maximize the probability $P[O|\\Theta]$. In other words, the free model parameters are to be estimated so as to best describe the observation sequence. This is a typical instance of an **optimization problem** where a set of observation sequences serves as **training material** for adjusting or learning the HMM parameters. The estimation problem is by far the most difficult problem of HMMs. In fact, there is no known way to explicitly solve the given optimization problem. However, iterative procedures that find locally optimal solutions have been suggested. One of these procedures is known as the [**Baum–Welch Algorithm**](https://en.wikipedia.org/wiki/Baum%E2%80%93Welch_algorithm). Again, we refer to the article by [Rabiner](https://ieeexplore.ieee.org/document/18626) for more details. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<div class=\"alert\" style=\"background-color:#F5F5F5; border-color:#C8C8C8\">\n",
|
||||
"<strong>Acknowledgment:</strong> This notebook was created by <a href=\"https://www.audiolabs-erlangen.de/fau/professor/mueller\">Meinard Müller</a>.\n",
|
||||
"</div> "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<table style=\"border:none\">\n",
|
||||
"<tr style=\"border:none\">\n",
|
||||
" <td style=\"min-width:50px; border:none\" bgcolor=\"white\"><a href=\"../C0/C0.html\"><img src=\"../data/C0_nav.png\" style=\"height:50px\" alt=\"C0\"></a></td>\n",
|
||||
" <td style=\"min-width:50px; border:none\" bgcolor=\"white\"><a href=\"../C1/C1.html\"><img src=\"../data/C1_nav.png\" style=\"height:50px\" alt=\"C1\"></a></td>\n",
|
||||
" <td style=\"min-width:50px; border:none\" bgcolor=\"white\"><a href=\"../C2/C2.html\"><img src=\"../data/C2_nav.png\" style=\"height:50px\" alt=\"C2\"></a></td>\n",
|
||||
" <td style=\"min-width:50px; border:none\" bgcolor=\"white\"><a href=\"../C3/C3.html\"><img src=\"../data/C3_nav.png\" style=\"height:50px\" alt=\"C3\"></a></td>\n",
|
||||
" <td style=\"min-width:50px; border:none\" bgcolor=\"white\"><a href=\"../C4/C4.html\"><img src=\"../data/C4_nav.png\" style=\"height:50px\" alt=\"C4\"></a></td>\n",
|
||||
" <td style=\"min-width:50px; border:none\" bgcolor=\"white\"><a href=\"../C5/C5.html\"><img src=\"../data/C5_nav.png\" style=\"height:50px\" alt=\"C5\"></a></td>\n",
|
||||
" <td style=\"min-width:50px; border:none\" bgcolor=\"white\"><a href=\"../C6/C6.html\"><img src=\"../data/C6_nav.png\" style=\"height:50px\" alt=\"C6\"></a></td>\n",
|
||||
" <td style=\"min-width:50px; border:none\" bgcolor=\"white\"><a href=\"../C7/C7.html\"><img src=\"../data/C7_nav.png\" style=\"height:50px\" alt=\"C7\"></a></td>\n",
|
||||
" <td style=\"min-width:50px; border:none\" bgcolor=\"white\"><a href=\"../C8/C8.html\"><img src=\"../data/C8_nav.png\" style=\"height:50px\" alt=\"C8\"></a></td>\n",
|
||||
"</tr>\n",
|
||||
"</table>"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"anaconda-cloud": {},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,47 @@
|
|||
"""Some simple tests/examples for the Home Assistant client."""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from hass_client import HomeAssistantClient
|
||||
|
||||
LOGGER = logging.getLogger()
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
logformat = logging.Formatter(
|
||||
"%(asctime)-15s %(levelname)-5s %(name)s.%(module)s -- %(message)s")
|
||||
consolehandler = logging.StreamHandler()
|
||||
consolehandler.setFormatter(logformat)
|
||||
LOGGER.addHandler(consolehandler)
|
||||
LOGGER.setLevel(logging.DEBUG)
|
||||
|
||||
if len(sys.argv) < 3:
|
||||
LOGGER.error("usage: test.py <url> <token>")
|
||||
sys.exit()
|
||||
|
||||
url = sys.argv[1]
|
||||
token = sys.argv[2]
|
||||
loop = asyncio.get_event_loop()
|
||||
hass = HomeAssistantClient(url, token)
|
||||
|
||||
async def hass_event(event, event_details):
|
||||
"""Handle hass event callback."""
|
||||
LOGGER.info("received event %s --> %s\n", event, event_details)
|
||||
|
||||
hass.register_event_callback(hass_event)
|
||||
|
||||
async def run():
|
||||
"""Run tests."""
|
||||
await hass.async_connect()
|
||||
await asyncio.sleep(10)
|
||||
await hass.async_close()
|
||||
loop.stop()
|
||||
|
||||
try:
|
||||
loop.create_task(run())
|
||||
loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
loop.stop()
|
||||
loop.close()
|
|
@ -39,6 +39,15 @@ mouse_led_effect_to_message_id = {
|
|||
EffectReverseSwipe: 10,
|
||||
}
|
||||
|
||||
shelve_led_effect_to_message_id = {
|
||||
EffectStaticConfig: 15,
|
||||
EffectCircularConfig: 16,
|
||||
EffectRandomTwoColorInterpolationConfig: 17,
|
||||
EffectSwipeAndChange: 18,
|
||||
EffectReverseSwipe: 19,
|
||||
EffectStaticDetailedConfig: 20,
|
||||
}
|
||||
|
||||
mouse_leds_index_ranges = {
|
||||
TouchButton.RIGHT_FOOT: (0, 6),
|
||||
TouchButton.LEFT_FOOT: (6, 6 + 6),
|
||||
|
@ -46,8 +55,8 @@ mouse_leds_index_ranges = {
|
|||
TouchButton.RIGHT_EAR: (6 + 6 + 16, 6 + 6 + 16 + 17),
|
||||
}
|
||||
|
||||
PREV_BUTTON_LED_MSG = 20
|
||||
NEXT_BUTTON_LED_MSG = 21
|
||||
PREV_BUTTON_LED_MSG = 21
|
||||
NEXT_BUTTON_LED_MSG = 22
|
||||
|
||||
|
||||
class RfidTokenRead:
|
||||
|
@ -126,17 +135,20 @@ class MusicMouseProtocol(asyncio.Protocol):
|
|||
self.transport = transport
|
||||
self.in_buff = bytes()
|
||||
|
||||
def led_ring_effect(self, effect_cfg):
|
||||
def __led_effect(self, effect_cfg, msg_dict):
|
||||
msg_content = effect_cfg.as_bytes()
|
||||
header = struct.pack("<IBH", MAGIC_TOKEN_HOST_TO_FW,
|
||||
led_ring_effect_to_message_id[type(effect_cfg)], len(msg_content))
|
||||
header = struct.pack("<IBH", MAGIC_TOKEN_HOST_TO_FW, msg_dict[type(effect_cfg)],
|
||||
len(msg_content))
|
||||
self.transport.write(header + msg_content)
|
||||
|
||||
def led_ring_effect(self, effect_cfg):
|
||||
self.__led_effect(effect_cfg, led_ring_effect_to_message_id)
|
||||
|
||||
def mouse_led_effect(self, effect_cfg):
|
||||
msg_content = effect_cfg.as_bytes()
|
||||
header = struct.pack("<IBH", MAGIC_TOKEN_HOST_TO_FW,
|
||||
mouse_led_effect_to_message_id[type(effect_cfg)], len(msg_content))
|
||||
self.transport.write(header + msg_content)
|
||||
self.__led_effect(effect_cfg, mouse_led_effect_to_message_id)
|
||||
|
||||
def shelve_led_effect(self, effect_cfg):
|
||||
self.__led_effect(effect_cfg, shelve_led_effect_to_message_id)
|
||||
|
||||
def button_background_led_prev(self, val):
|
||||
msg_content = struct.pack("<f", val)
|
||||
|
|
|
@ -22,6 +22,19 @@ class ColorRGBW:
|
|||
vals = (self.r, self.g, self.b, self.w)
|
||||
return all(0 <= v <= 1 for v in vals)
|
||||
|
||||
def __mul__(self, other:float):
|
||||
assert 0<= other <= 1
|
||||
return ColorRGBW(self.r * other, self.g * other, self.b * other, self.w * other)
|
||||
|
||||
def __eq__(self, other:'ColorRGBW'):
|
||||
return self.r == other.r and self.g == other.g and self.b == other.b and self.w == other.w
|
||||
|
||||
def __neq__(self, other:'ColorRGBW'):
|
||||
return not self == other
|
||||
|
||||
def without_white_channel(self, scale=1):
|
||||
args = (min(1, e + self.w) for e in (self.r, self.g, self.b) )
|
||||
return ColorRGBW(*args, 0)
|
||||
|
||||
@dataclass
|
||||
class ColorHSV:
|
||||
|
@ -63,6 +76,20 @@ class EffectStaticConfig:
|
|||
return self.color.as_bytes() + struct.pack("<HH", self.begin, self.end)
|
||||
|
||||
|
||||
@dataclass
|
||||
class EffectStaticDetailedConfig:
|
||||
color: ColorRGBW
|
||||
increment: int = 1
|
||||
begin: float = 0.0
|
||||
end: float = 1.0
|
||||
transition_time_in_ms : float = 500
|
||||
|
||||
def __repr__(self):
|
||||
return f"EffectStaticDetailedConfig {str(self.color)}, beg: {self.begin}, end {self.end}, incr {self.increment}, transition in ms {self.transition_time_in_ms}"
|
||||
|
||||
def as_bytes(self) -> bytes:
|
||||
return self.color.as_bytes() + struct.pack("<Hfff", self.increment, self.begin, self.end, self.transition_time_in_ms)
|
||||
|
||||
@dataclass
|
||||
class EffectAlexaSwipeConfig:
|
||||
primary_color_width: float = 20 # in degrees
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
import asyncio
|
||||
import sys
|
||||
import serial_asyncio
|
||||
from led_cmds import (ColorRGBW, ColorHSV, EffectStaticConfig,
|
||||
from led_cmds import (ColorRGBW, ColorHSV, EffectCircularConfig, EffectStaticConfig,
|
||||
EffectRandomTwoColorInterpolationConfig, EffectAlexaSwipeConfig,
|
||||
EffectSwipeAndChange, EffectReverseSwipe)
|
||||
from host_driver import MusicMouseProtocol, RfidTokenRead, RotaryEncoderEvent, ButtonEvent, TouchButton, TouchButtonPress, TouchButtonRelease, mouse_leds_index_ranges
|
||||
|
@ -16,6 +16,8 @@ import argparse
|
|||
from ruamel.yaml import YAML
|
||||
import warnings
|
||||
from pprint import pprint
|
||||
from typing import Optional
|
||||
from mqtt_json import start_mqtt
|
||||
|
||||
yaml = YAML(typ='safe')
|
||||
|
||||
|
@ -50,8 +52,11 @@ def hass_service(hass, domain, service, **kwargs):
|
|||
|
||||
class MusicMouseState:
|
||||
def __init__(self, protocol: MusicMouseProtocol):
|
||||
self.current_figure: str = None
|
||||
self.last_figure: str = None
|
||||
self.active_figure: Optional[
|
||||
str] = None # None if no figure is placed on the reader, or the name of the figure
|
||||
self.last_partially_played_figure: Optional[
|
||||
str] = None # figure whose playlist wasn't played completely and was removed
|
||||
|
||||
self.current_mouse_led_effect = None
|
||||
self.current_led_ring_effect = None
|
||||
self.protocol: MusicMouseProtocol = protocol
|
||||
|
@ -64,6 +69,7 @@ class MusicMouseState:
|
|||
def led_ring_effect(self, effect_cfg):
|
||||
self.current_led_ring_effect = effect_cfg
|
||||
self.protocol.led_ring_effect(effect_cfg)
|
||||
self.protocol.shelve_led_effect(effect_cfg)
|
||||
|
||||
def button_leds(self, brightness):
|
||||
assert 0 <= brightness <= 1
|
||||
|
@ -75,13 +81,6 @@ class MusicMouseState:
|
|||
self.mouse_led_effect(EffectStaticConfig(OFF_COLOR))
|
||||
self.led_ring_effect(EffectStaticConfig(OFF_COLOR))
|
||||
|
||||
def figure_placed(self, figure_state):
|
||||
self.last_figure = self.current_figure
|
||||
self.current_figure = figure_state
|
||||
|
||||
def figure_removed(self):
|
||||
self.last_figure = self.current_figure
|
||||
|
||||
|
||||
class Controller:
|
||||
def __init__(self, protocol, hass, cfg):
|
||||
|
@ -97,7 +96,7 @@ class Controller:
|
|||
self.audio_player.set_volume_limits(vol_min, vol_max)
|
||||
protocol.register_message_callback(self.on_firmware_msg)
|
||||
|
||||
self.audio_player.on_playlist_end_callback = self._run_off_animation
|
||||
self.audio_player.on_playlist_end_callback = self._on_playlist_end
|
||||
self.playlists = {
|
||||
fig: self.audio_player.create_playlist(fig_cfg['media_files'])
|
||||
for fig, fig_cfg in cfg['figures'].items()
|
||||
|
@ -107,27 +106,49 @@ class Controller:
|
|||
for figure_name, figure_cfg in cfg["figures"].items()
|
||||
}
|
||||
|
||||
self.protocol.shelve_led_effect(EffectStaticConfig(ColorRGBW(0, 0, 0.1, 0)))
|
||||
shelf_eff = EffectCircularConfig()
|
||||
shelf_eff.color = ColorRGBW(0, 0, 0.4, 0)
|
||||
shelf_eff = EffectStaticConfig(ColorRGBW(0, 0, 0, 0))
|
||||
self.protocol.shelve_led_effect(shelf_eff)
|
||||
|
||||
def _on_playlist_end(self):
|
||||
if not self.audio_player.is_playing():
|
||||
self.mmstate.last_partially_played_figure = None
|
||||
self._run_off_animation()
|
||||
else:
|
||||
print("Playlist end was called, even if player remains playing?!")
|
||||
|
||||
def handle_rfid_event(self, tagid):
|
||||
if tagid == bytes.fromhex("0000000000"):
|
||||
if self.audio_player.is_playing():
|
||||
print("Got 000 rfid -> playing off animation")
|
||||
self._run_off_animation()
|
||||
self.audio_player.pause()
|
||||
self.mmstate.figure_removed()
|
||||
self.mmstate.last_partially_played_figure = self.mmstate.active_figure
|
||||
else:
|
||||
self.mmstate.last_partially_played_figure = None
|
||||
|
||||
self.mmstate.active_figure = None
|
||||
elif tagid in self._rfid_to_figure_name:
|
||||
figure = self._rfid_to_figure_name[tagid]
|
||||
primary_color, secondary_color, *rest = self.cfg["figures"][figure]["colors"]
|
||||
newly_placed_figure = self._rfid_to_figure_name[tagid]
|
||||
primary_color, secondary_color, *rest = self.cfg["figures"][newly_placed_figure][
|
||||
"colors"]
|
||||
self._start_animation(primary_color, secondary_color)
|
||||
self.mmstate.button_leds(self.cfg["general"].get("button_leds_brightness", 0.5))
|
||||
|
||||
if figure in self.playlists:
|
||||
self.audio_player.set_playlist(self.playlists[figure])
|
||||
if self.mmstate.last_figure == figure:
|
||||
if newly_placed_figure in self.cfg['figures']:
|
||||
if self.mmstate.last_partially_played_figure == newly_placed_figure:
|
||||
print("Continuing playlist")
|
||||
self.audio_player.play()
|
||||
else:
|
||||
print("Restarting playlist")
|
||||
self.audio_player.set_playlist(
|
||||
self.audio_player.create_playlist(
|
||||
self.cfg['figures'][newly_placed_figure]['media_files']))
|
||||
self.audio_player.play_from_start()
|
||||
|
||||
self.mmstate.figure_placed(figure)
|
||||
self.mmstate.active_figure = newly_placed_figure
|
||||
else:
|
||||
warnings.warn(f"Unknown figure/tag with id {tagid}")
|
||||
|
||||
|
@ -145,15 +166,13 @@ class Controller:
|
|||
elif isinstance(message, ButtonEvent):
|
||||
btn = message.button
|
||||
if btn == "left" and message.event == "pressed" and self.audio_player.is_playing():
|
||||
res = self.audio_player.previous()
|
||||
print(f"Prev {res}")
|
||||
self.audio_player.previous()
|
||||
elif btn == "right" and message.event == "pressed" and self.audio_player.is_playing():
|
||||
res = self.audio_player.nex()
|
||||
print(f"Next {res}")
|
||||
self.audio_player.next()
|
||||
elif message.button == "rotary" and message.event == "pressed":
|
||||
hass_service(self.hass, "light", "toggle", entity_id="light.kinderzimmer_fluter")
|
||||
elif isinstance(message, TouchButtonPress):
|
||||
figure = self.mmstate.current_figure
|
||||
figure = self.mmstate.active_figure
|
||||
if figure and self.audio_player.is_playing():
|
||||
primary_color, secondary_color, bg, accent = self.cfg["figures"][figure]["colors"]
|
||||
self.protocol.mouse_led_effect(
|
||||
|
@ -173,14 +192,15 @@ class Controller:
|
|||
'rgb_color': [255, 74, 254]
|
||||
},
|
||||
}
|
||||
hass_service(self.hass,
|
||||
"light",
|
||||
"turn_on",
|
||||
entity_id="light.kinderzimmer_fluter",
|
||||
**colors[message.touch_button])
|
||||
hass_service(
|
||||
self.hass,
|
||||
"light",
|
||||
"turn_on",
|
||||
entity_id=["light.kinderzimmer_fluter", "light.music_mouse_regal_licht"],
|
||||
**colors[message.touch_button])
|
||||
|
||||
elif isinstance(message, TouchButtonRelease):
|
||||
figure = self.mmstate.current_figure
|
||||
figure = self.mmstate.active_figure
|
||||
eff_change = EffectRandomTwoColorInterpolationConfig()
|
||||
eff_static = EffectStaticConfig(ColorRGBW(0, 0, 0, 0),
|
||||
*mouse_leds_index_ranges[message.touch_button])
|
||||
|
@ -235,6 +255,8 @@ def main(config_path):
|
|||
baudrate=115200)
|
||||
transport, protocol = loop.run_until_complete(coro)
|
||||
controller = Controller(protocol, hass, cfg)
|
||||
mqtt_cfg = cfg["general"]["mqtt"]
|
||||
loop.create_task(start_mqtt(protocol, mqtt_cfg["server"], mqtt_cfg["user"],mqtt_cfg["password"] ))
|
||||
loop.create_task(hass.connect())
|
||||
return controller, loop
|
||||
|
||||
|
|
|
@ -0,0 +1,155 @@
|
|||
from led_cmds import ColorRGBW, EffectStaticConfig, EffectCircularConfig, EffectRandomTwoColorInterpolationConfig, EffectAlexaSwipeConfig, EffectSwipeAndChange
|
||||
import asyncio
|
||||
import asyncio_mqtt
|
||||
import json
|
||||
|
||||
BRIGHTNESS_SCALE = 1 # power supply is a bit weak -> scale brightness down globally
|
||||
|
||||
|
||||
class ShelveLightMqtt:
|
||||
def __init__(self, protocol, client):
|
||||
self._brightness = 100
|
||||
self._protocol = protocol
|
||||
self._mqtt_client = client
|
||||
|
||||
self._brightness = 0
|
||||
self._color = ColorRGBW(0.5, 0.5, 0.5, 0)
|
||||
self._last_color = ColorRGBW(0.5, 0.5, 0.5, 0)
|
||||
self._original_color_repr = (127, 127, 127)
|
||||
self._effect = "static"
|
||||
|
||||
self._discovery_spec = self._create_discovery_msg_light()
|
||||
|
||||
async def init(self):
|
||||
self._protocol.shelve_led_effect(EffectStaticConfig(ColorRGBW(0, 0, 0, 0)))
|
||||
await self._notify_mqtt_brightness()
|
||||
await self._notify_mqtt_state()
|
||||
await self._notify_mqtt_rgb()
|
||||
await self._send_autodiscovery_msg()
|
||||
|
||||
async def _send_autodiscovery_msg(self):
|
||||
topic = f"homeassistant/light/{self._discovery_spec['unique_id']}/config"
|
||||
await self._mqtt_client.publish(topic, json.dumps(self._discovery_spec).encode())
|
||||
|
||||
async def _notify_mqtt_rgb(self):
|
||||
rgb_payload = ",".join(str(e) for e in self._original_color_repr)
|
||||
print("OUT ", "rgb", rgb_payload)
|
||||
await self._mqtt_client.publish(self._discovery_spec['rgb_state_topic'],
|
||||
rgb_payload.encode())
|
||||
|
||||
async def _notify_mqtt_state(self):
|
||||
state_payload = "ON" if self._brightness > 0 else "OFF"
|
||||
print("OUT ", "state", state_payload)
|
||||
await self._mqtt_client.publish(self._discovery_spec['state_topic'], state_payload.encode())
|
||||
|
||||
async def _notify_mqtt_brightness(self):
|
||||
brightness_payload = str(int(self._brightness * 255))
|
||||
print("OUT ", "brightness", brightness_payload)
|
||||
await self._mqtt_client.publish(self._discovery_spec['brightness_state_topic'],
|
||||
brightness_payload.encode())
|
||||
|
||||
async def _notify_mqtt_shelve_effect(self, effect):
|
||||
await self._mqtt_client.publish(self._discovery_spec['effect_state_topic'], effect.encode())
|
||||
|
||||
def _set_rgb_color(self, color: ColorRGBW):
|
||||
if self._color != self._last_color: # mqtt sends color multiple times, we want to remember last distinct color as second color for effects
|
||||
self._last_color = self._color
|
||||
self._color = color * BRIGHTNESS_SCALE * self._brightness
|
||||
|
||||
def _update_device(self):
|
||||
if self._effect == "static":
|
||||
eff = EffectStaticConfig(self._color)
|
||||
elif self._effect == "circular":
|
||||
eff = EffectCircularConfig()
|
||||
eff.color = self._color
|
||||
elif self._effect == "wipeup":
|
||||
eff = EffectSwipeAndChange()
|
||||
eff.swipe.secondary_color = self._color
|
||||
eff.swipe.primary_color = self._last_color
|
||||
eff.swipe.bell_curve_width_in_leds = 5
|
||||
eff.swipe.swipe_speed = 180
|
||||
eff.change.color1 = self._color
|
||||
eff.change.color2 = self._last_color
|
||||
elif self._effect == "twocolor":
|
||||
eff = EffectRandomTwoColorInterpolationConfig()
|
||||
eff.color1 = self._color
|
||||
eff.color2 = self._last_color
|
||||
eff.start_with_existing = True
|
||||
elif self._effect == "twocolorrandom":
|
||||
eff = EffectRandomTwoColorInterpolationConfig()
|
||||
eff.color1 = self._color
|
||||
eff.color2 = self._last_color
|
||||
eff.hue1_random = True
|
||||
eff.hue2_random = True
|
||||
eff.start_with_existing = True
|
||||
else:
|
||||
print(f"Unknown effect {self._effect}")
|
||||
eff = EffectStaticConfig(ColorRGBW(0, 0, 0, 0))
|
||||
self._protocol.shelve_led_effect(eff)
|
||||
|
||||
async def handle_light_message(self, msg):
|
||||
prefix = 'musicmouse/lights_shelve'
|
||||
if not msg.topic.startswith(prefix):
|
||||
return False
|
||||
cmd = msg.topic.split("/")[-1]
|
||||
payload = msg.payload.decode()
|
||||
|
||||
print("IN ", cmd, payload)
|
||||
if cmd == "rgb":
|
||||
r, g, b = tuple(int(i) for i in payload.split(","))
|
||||
self._original_color_repr = (r, g, b)
|
||||
self._set_rgb_color(ColorRGBW(r / 255, g / 255, b / 255, 0))
|
||||
await self._notify_mqtt_rgb()
|
||||
elif cmd == "switch":
|
||||
if payload == "ON" and self._brightness == 0:
|
||||
self._brightness = 1
|
||||
elif payload == "OFF":
|
||||
self._color = ColorRGBW(0, 0, 0, 0)
|
||||
self._brightness = 0
|
||||
self._update_device()
|
||||
await self._notify_mqtt_rgb()
|
||||
await self._notify_mqtt_brightness()
|
||||
await self._notify_mqtt_state()
|
||||
elif cmd == "brightness":
|
||||
self._brightness = int(payload) / 255
|
||||
self._set_rgb_color(self._color)
|
||||
await self._notify_mqtt_brightness()
|
||||
elif cmd == "effect":
|
||||
self._effect = payload
|
||||
|
||||
@staticmethod
|
||||
def _create_discovery_msg_light(base_name="musicmouse", display_name="Music Mouse Regal Licht"):
|
||||
id = "shelve"
|
||||
return {
|
||||
'name': display_name,
|
||||
'unique_id': f'{base_name}_{id}',
|
||||
'command_topic': f'{base_name}/lights_{id}/switch',
|
||||
'state_topic': f'{base_name}/lights_{id}/switch_state',
|
||||
'brightness_command_topic': f'{base_name}/lights_{id}/brightness',
|
||||
'brightness_state_topic': f'{base_name}/lights_{id}/brightness_state',
|
||||
'rgb_command_topic': f'{base_name}/lights_{id}/rgb',
|
||||
'rgb_state_topic': f'{base_name}/lights_{id}/rgb_state',
|
||||
'effect_command_topic': f'{base_name}/lights_{id}/effect',
|
||||
'effect_state_topic': f'{base_name}/lights_{id}/effect_state',
|
||||
'effect_list': ['static', 'circular', 'wipeup', 'twocolor', 'twocolorrandom'],
|
||||
}
|
||||
|
||||
|
||||
async def start_mqtt(music_mouse_protocol, server, username, password):
|
||||
async with asyncio_mqtt.Client(hostname=server, username=username, password=password) as client:
|
||||
shelve_light = ShelveLightMqtt(music_mouse_protocol, client)
|
||||
await shelve_light.init()
|
||||
async with client.filtered_messages("musicmouse/#") as messages:
|
||||
await client.subscribe("musicmouse/#")
|
||||
async for message in messages:
|
||||
await shelve_light.handle_light_message(message)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
class DummyProtocol:
|
||||
def shelve_led_effect(self, effect):
|
||||
print("EFF ", repr(effect))
|
||||
|
||||
password = "KNLEFLZF94yA6Zhj141"
|
||||
asyncio.run(start_mqtt(DummyProtocol(), "homeassistant", "musicmouse", password))
|
|
@ -0,0 +1,187 @@
|
|||
from led_cmds import ColorRGBW, EffectStaticConfig, EffectStaticDetailedConfig, EffectCircularConfig, EffectRandomTwoColorInterpolationConfig, EffectAlexaSwipeConfig, EffectSwipeAndChange
|
||||
import asyncio
|
||||
import asyncio_mqtt
|
||||
import json
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
class ShelveLightMqtt:
|
||||
def __init__(self, protocol, client: asyncio_mqtt.Client):
|
||||
self._protocol = protocol
|
||||
self._mqtt_client = client
|
||||
|
||||
self._state = {
|
||||
"state": "OFF",
|
||||
"color": {
|
||||
"r": 255,
|
||||
"g": 255,
|
||||
"b": 255,
|
||||
"w": 0,
|
||||
},
|
||||
"color_mode": "rgbw",
|
||||
"brightness": 30,
|
||||
"effect": "static",
|
||||
}
|
||||
self._last_color = ColorRGBW(0.5, 0.5, 0.5, 0)
|
||||
|
||||
self._discovery_spec = self._create_discovery_msg_light()
|
||||
|
||||
async def init(self):
|
||||
"""Init method, because constructor can't be async"""
|
||||
self._protocol.shelve_led_effect(EffectStaticConfig(ColorRGBW(0, 0, 0, 0)))
|
||||
await self._send_autodiscovery_msg()
|
||||
await self._notify_mqtt_state({"state": "OFF"})
|
||||
|
||||
async def handle_light_message(self, msg):
|
||||
if msg.topic == self._discovery_spec['command_topic']:
|
||||
payload = msg.payload.decode()
|
||||
new_state = json.loads(payload)
|
||||
print("IN ", new_state)
|
||||
await self._update_state(new_state)
|
||||
await self._notify_mqtt_state(new_state)
|
||||
|
||||
async def _update_state(self, new_state):
|
||||
"""Merges current state with new state, updates device"""
|
||||
|
||||
# memorize last color - this is used for effects that need 2 colors
|
||||
if 'color' in new_state:
|
||||
brightness = new_state.get('brightness', self._state['brightness'])
|
||||
new_color = self._color_from_json(new_state['color'], brightness)
|
||||
current_color = self._color_from_json(self._state['color'])
|
||||
if new_color != current_color:
|
||||
self._last_color = current_color
|
||||
print("last color", self._last_color)
|
||||
|
||||
self._state.update(new_state)
|
||||
self._update_device()
|
||||
|
||||
@staticmethod
|
||||
def _color_from_json(json_color, brightness=255):
|
||||
args = ((json_color[e] / 255) * (brightness / 255) for e in ('r', 'g', 'b', 'w'))
|
||||
return ColorRGBW(*args)
|
||||
|
||||
def _update_device(self):
|
||||
s = self._state
|
||||
current_color = self._color_from_json(s['color'], brightness=s["brightness"])
|
||||
transition = s.get("transition", 0.0) * 1000
|
||||
print(f"Effect {s['effect']} Transition {transition}")
|
||||
|
||||
if s['state'] == "OFF":
|
||||
if transition > 0:
|
||||
eff = EffectStaticDetailedConfig(ColorRGBW(0,0,0,0), transition_tim_in_ms=transition)
|
||||
else:
|
||||
eff = EffectStaticConfig(ColorRGBW(0, 0, 0, 0))
|
||||
elif s['effect'] == 'static':
|
||||
if transition > 0:
|
||||
eff = EffectStaticDetailedConfig(current_color, transition_time_in_ms=transition)
|
||||
else:
|
||||
eff = EffectStaticConfig(current_color)
|
||||
elif s['effect'] == 'circular':
|
||||
eff = EffectCircularConfig(speed=180, width=90, color=current_color)
|
||||
elif s['effect'] == 'wipeup':
|
||||
eff = EffectSwipeAndChange()
|
||||
eff.swipe.secondary_color = current_color
|
||||
eff.swipe.primary_color = self._last_color
|
||||
eff.swipe.bell_curve_width_in_leds = 10
|
||||
eff.swipe.transition_width = 30
|
||||
eff.swipe.start_position = 0
|
||||
eff.swipe.swipe_speed = 260
|
||||
eff.change.color1 = current_color
|
||||
eff.change.color2 = self._last_color
|
||||
elif s['effect'] == "twocolor":
|
||||
eff = EffectRandomTwoColorInterpolationConfig()
|
||||
eff.color1 = current_color
|
||||
eff.color2 = self._last_color
|
||||
eff.start_with_existing = True
|
||||
elif s['effect'] == "twocolorrandom":
|
||||
eff = EffectRandomTwoColorInterpolationConfig()
|
||||
eff.color1 = current_color
|
||||
eff.color2 = self._last_color
|
||||
eff.hue1_random = True
|
||||
eff.hue2_random = True
|
||||
eff.start_with_existing = True
|
||||
elif s['effect'] == "side_0.2":
|
||||
eff = EffectStaticDetailedConfig(current_color, begin=0.9, end=0.1, increment=1, transition_time_in_ms=transition)
|
||||
elif s['effect'] == "side_0.2_inc4":
|
||||
eff = EffectStaticDetailedConfig(current_color, begin=0.9, end=0.1, increment=4, transition_time_in_ms=transition)
|
||||
elif s['effect'] == "side_0.5":
|
||||
eff = EffectStaticDetailedConfig(current_color, begin=0.75, end=0.25, increment=1, transition_time_in_ms=transition)
|
||||
elif s['effect'] == "side_0.5_inc4":
|
||||
eff = EffectStaticDetailedConfig(current_color, begin=0.75, end=0.25, increment=4, transition_time_in_ms=transition)
|
||||
else:
|
||||
print(f"Unknown effect {s['effect']}")
|
||||
eff = EffectStaticConfig(ColorRGBW(0, 0, 0, 0))
|
||||
self._protocol.shelve_led_effect(eff)
|
||||
|
||||
@staticmethod
|
||||
def _create_discovery_msg_light(base_name="musicmouse_json",
|
||||
display_name="Music Mouse Regal Licht"):
|
||||
id = "shelve"
|
||||
return {
|
||||
'platform': 'mqtt',
|
||||
'schema': 'json',
|
||||
'name': display_name,
|
||||
'unique_id': f'{base_name}_{id}',
|
||||
'command_topic': f'{base_name}/lights_{id}/command',
|
||||
'state_topic': f'{base_name}/lights_{id}/state',
|
||||
'color_mode': True,
|
||||
'brightness': True,
|
||||
#'device': {
|
||||
# 'manufacturer': 'bauer.tech',
|
||||
# 'model': "SK6812 LED strip",
|
||||
#},
|
||||
'effect': True,
|
||||
'effect_list': ['static', 'circular', 'wipeup', 'twocolor', 'twocolorrandom',
|
||||
"side_0.2", "side_0.5", "side_0.2_inc4", "side_0.5_inc4", "top_0.2"],
|
||||
'supported_color_modes': ['rgbw'],
|
||||
}
|
||||
|
||||
async def _send_autodiscovery_msg(self):
|
||||
topic = f"homeassistant/light/{self._discovery_spec['unique_id']}/config"
|
||||
await self._mqtt_client.publish(topic, json.dumps(self._discovery_spec).encode(), retain=True)
|
||||
|
||||
async def _notify_mqtt_state(self, state):
|
||||
state_payload = json.dumps(self._state)
|
||||
print("OUT ", state_payload)
|
||||
await self._mqtt_client.publish(self._discovery_spec['state_topic'], state_payload.encode())
|
||||
return
|
||||
|
||||
direct_ack = False
|
||||
if direct_ack == True:
|
||||
state_payload = json.dumps(state)
|
||||
else:
|
||||
s = deepcopy(self._state)
|
||||
if s['state'] == "OFF":
|
||||
state_payload = json.dumps({"state": "OFF"})
|
||||
else:
|
||||
s['color_mode'] = "rgbw"
|
||||
state_payload = json.dumps(s)
|
||||
|
||||
print("OUT ", state_payload)
|
||||
await self._mqtt_client.publish(self._discovery_spec['state_topic'], state_payload.encode())
|
||||
|
||||
|
||||
async def start_mqtt(music_mouse_protocol, server, username, password):
|
||||
reconnect_interval = 10 # [seconds]
|
||||
while True:
|
||||
try:
|
||||
async with asyncio_mqtt.Client(hostname=server, username=username, password=password) as client:
|
||||
shelve_light = ShelveLightMqtt(music_mouse_protocol, client)
|
||||
await shelve_light.init()
|
||||
async with client.filtered_messages("musicmouse_json/#") as messages:
|
||||
await client.subscribe("musicmouse_json/#")
|
||||
async for message in messages:
|
||||
await shelve_light.handle_light_message(message)
|
||||
except asyncio_mqtt.MqttError as error:
|
||||
print(f'Error "{error}". Reconnecting in {reconnect_interval} seconds')
|
||||
finally:
|
||||
await asyncio.sleep(reconnect_interval)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
class DummyProtocol:
|
||||
def shelve_led_effect(self, effect):
|
||||
print("EFF ", repr(effect))
|
||||
|
||||
password = ""
|
||||
asyncio.run(start_mqtt(DummyProtocol(), "homeassistant", "musicmouse", password))
|
|
@ -0,0 +1 @@
|
|||
**/*.mp3
|
|
@ -1,72 +1,5 @@
|
|||
import vlc
|
||||
|
||||
all_events = (
|
||||
vlc.EventType.MediaDiscovererEnded,
|
||||
vlc.EventType.MediaDiscovererStarted,
|
||||
vlc.EventType.MediaDurationChanged,
|
||||
vlc.EventType.MediaFreed,
|
||||
vlc.EventType.MediaListEndReached,
|
||||
vlc.EventType.MediaListItemAdded,
|
||||
vlc.EventType.MediaListItemDeleted,
|
||||
vlc.EventType.MediaListPlayerNextItemSet,
|
||||
vlc.EventType.MediaListPlayerPlayed,
|
||||
vlc.EventType.MediaListPlayerStopped,
|
||||
vlc.EventType.MediaListViewItemAdded,
|
||||
vlc.EventType.MediaListViewItemDeleted,
|
||||
vlc.EventType.MediaListViewWillAddItem,
|
||||
vlc.EventType.MediaListViewWillDeleteItem,
|
||||
vlc.EventType.MediaListWillAddItem,
|
||||
vlc.EventType.MediaListWillDeleteItem,
|
||||
vlc.EventType.MediaMetaChanged,
|
||||
vlc.EventType.MediaParsedChanged,
|
||||
vlc.EventType.MediaPlayerAudioDevice,
|
||||
vlc.EventType.MediaPlayerAudioVolume,
|
||||
vlc.EventType.MediaPlayerBackward,
|
||||
vlc.EventType.MediaPlayerBuffering,
|
||||
vlc.EventType.MediaPlayerChapterChanged,
|
||||
vlc.EventType.MediaPlayerCorked,
|
||||
vlc.EventType.MediaPlayerESAdded,
|
||||
vlc.EventType.MediaPlayerESDeleted,
|
||||
vlc.EventType.MediaPlayerESSelected,
|
||||
vlc.EventType.MediaPlayerEncounteredError,
|
||||
vlc.EventType.MediaPlayerEndReached,
|
||||
vlc.EventType.MediaPlayerForward,
|
||||
#vlc.EventType.MediaPlayerLengthChanged,
|
||||
vlc.EventType.MediaPlayerMediaChanged,
|
||||
vlc.EventType.MediaPlayerMuted,
|
||||
vlc.EventType.MediaPlayerNothingSpecial,
|
||||
vlc.EventType.MediaPlayerOpening,
|
||||
vlc.EventType.MediaPlayerPausableChanged,
|
||||
vlc.EventType.MediaPlayerPaused,
|
||||
vlc.EventType.MediaPlayerPlaying,
|
||||
#vlc.EventType.MediaPlayerPositionChanged,
|
||||
vlc.EventType.MediaPlayerScrambledChanged,
|
||||
vlc.EventType.MediaPlayerSeekableChanged,
|
||||
vlc.EventType.MediaPlayerSnapshotTaken,
|
||||
vlc.EventType.MediaPlayerStopped,
|
||||
#vlc.EventType.MediaPlayerTimeChanged,
|
||||
vlc.EventType.MediaPlayerTitleChanged,
|
||||
vlc.EventType.MediaPlayerUncorked,
|
||||
vlc.EventType.MediaPlayerUnmuted,
|
||||
vlc.EventType.MediaPlayerVout,
|
||||
vlc.EventType.MediaStateChanged,
|
||||
vlc.EventType.MediaSubItemAdded,
|
||||
vlc.EventType.MediaSubItemTreeAdded,
|
||||
vlc.EventType.RendererDiscovererItemAdded,
|
||||
vlc.EventType.RendererDiscovererItemDeleted,
|
||||
vlc.EventType.VlmMediaAdded,
|
||||
vlc.EventType.VlmMediaChanged,
|
||||
vlc.EventType.VlmMediaInstanceStarted,
|
||||
vlc.EventType.VlmMediaInstanceStatusEnd,
|
||||
vlc.EventType.VlmMediaInstanceStatusError,
|
||||
vlc.EventType.VlmMediaInstanceStatusInit,
|
||||
vlc.EventType.VlmMediaInstanceStatusOpening,
|
||||
vlc.EventType.VlmMediaInstanceStatusPause,
|
||||
vlc.EventType.VlmMediaInstanceStatusPlaying,
|
||||
vlc.EventType.VlmMediaInstanceStopped,
|
||||
vlc.EventType.VlmMediaRemoved,
|
||||
)
|
||||
|
||||
|
||||
class AudioPlayer:
|
||||
def __init__(self, alsa_device=None):
|
||||
|
@ -94,9 +27,9 @@ class AudioPlayer:
|
|||
|
||||
evm = result.event_manager()
|
||||
evm.event_attach(vlc.EventType.MediaListEndReached,
|
||||
lambda e: print("Ml CB", str(vlc.EventType(e.type))))
|
||||
lambda e: print("Ml CB", str(vlc.EventType(e.type))))
|
||||
evm.event_attach(vlc.EventType.MediaListItemAdded,
|
||||
lambda e: print("Ml ia CB", str(vlc.EventType(e.type))))
|
||||
lambda e: print("Ml ia CB", str(vlc.EventType(e.type))))
|
||||
|
||||
return result
|
||||
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
pyserial-asyncio==0.6
|
||||
python-vlc==3.0.12118
|
||||
hass-client==0.1.2
|
Binary file not shown.
|
@ -8,6 +8,7 @@ enum class EffectId
|
|||
RANDOM_TWO_COLOR_INTERPOLATION,
|
||||
SWIPE_AND_CHANGE, // combination of ALEXA_SWIPE and RANDOM_TWO_COLOR_INTERPOLATION
|
||||
REVERSE_SWIPE,
|
||||
STATIC_DETAILED,
|
||||
};
|
||||
|
||||
template <EffectId id>
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
#include "effects/Common.h"
|
||||
#include "helpers/ColorRGBW.h"
|
||||
|
||||
#pragma pack(push, 1)
|
||||
struct EffectStaticConfig
|
||||
{
|
||||
EffectStaticConfig(const ColorRGBW &c = ColorRGBW{0, 0, 0, 0}, uint16_t beg = 0, uint16_t en = 0)
|
||||
|
@ -12,12 +13,14 @@ struct EffectStaticConfig
|
|||
uint16_t begin = 0;
|
||||
uint16_t end = 0;
|
||||
};
|
||||
#pragma pack(pop)
|
||||
|
||||
template <typename TLedStrip>
|
||||
class EffectStatic
|
||||
{
|
||||
public:
|
||||
static constexpr auto NUM_LEDS = numLeds<TLedStrip>();
|
||||
using ConfigType = EffectStaticConfig;
|
||||
|
||||
EffectStatic(const EffectStaticConfig &cfg, TLedStrip &ledStrip)
|
||||
: config_(cfg),
|
||||
|
|
|
@ -0,0 +1,97 @@
|
|||
#pragma once
|
||||
|
||||
#include "effects/Common.h"
|
||||
#include "helpers/ColorRGBW.h"
|
||||
#include "helpers/ColorConversions.h"
|
||||
|
||||
|
||||
#pragma pack(push, 1)
|
||||
struct EffectStaticDetailedConfig
|
||||
{
|
||||
EffectStaticDetailedConfig(const ColorRGBW &c = ColorRGBW{0, 0, 0, 0}, uint16_t beg = 0, uint16_t en = 0)
|
||||
: color(c), begin(beg), end(en) {}
|
||||
|
||||
ColorRGBW color;
|
||||
uint16_t increment = 1;
|
||||
float begin = 0.0f;
|
||||
float end = 0.0f;
|
||||
float transition_time_in_ms = 0.0f;
|
||||
};
|
||||
#pragma pack(pop)
|
||||
|
||||
template <typename TLedStrip>
|
||||
class EffectStaticDetailed
|
||||
{
|
||||
public:
|
||||
static constexpr auto NUM_LEDS = numLeds<TLedStrip>();
|
||||
static constexpr int DELAY_MS = 10;
|
||||
using ConfigType = EffectStaticDetailedConfig;
|
||||
|
||||
EffectStaticDetailed(const EffectStaticDetailedConfig &cfg, TLedStrip &ledStrip)
|
||||
: config_(cfg),
|
||||
ledStrip_(ledStrip)
|
||||
{
|
||||
for (int i = 0; i < NUM_LEDS; ++i)
|
||||
state_[i] = getLedRGBW(ledStrip_, i);
|
||||
|
||||
beginIdx_ = constrain(static_cast<int>(cfg.begin * NUM_LEDS + 0.5f), 0, NUM_LEDS - 1);
|
||||
endIdx_ = constrain(static_cast<int>(cfg.end * NUM_LEDS + 0.5f), 0, NUM_LEDS - 1);
|
||||
}
|
||||
|
||||
bool finished() const { return finished_; }
|
||||
|
||||
int operator()()
|
||||
{
|
||||
if (finished_)
|
||||
return 1000000;
|
||||
|
||||
const float progress = static_cast<float>(DELAY_MS * calls_) / config_.transition_time_in_ms;
|
||||
|
||||
// Finished case
|
||||
if(progress > 1.0) {
|
||||
finished_ = true;
|
||||
return 10000000;
|
||||
}
|
||||
|
||||
// In-progress case
|
||||
clear(ledStrip_);
|
||||
for(int i = beginIdx_; i != endIdx_; i += config_.increment) {
|
||||
ColorRGBW newColor = hsv2rgb(interpolate(rgb2hsv(state_[i]), rgb2hsv(config_.color), progress));
|
||||
newColor.w = config_.color.w * progress + state_[i].w * (1 - progress);
|
||||
setLedRGBW(ledStrip_, i, newColor);
|
||||
if(i >= NUM_LEDS)
|
||||
i = 0;
|
||||
}
|
||||
|
||||
++calls_;
|
||||
return DELAY_MS;
|
||||
}
|
||||
|
||||
private:
|
||||
EffectStaticDetailedConfig config_;
|
||||
TLedStrip &ledStrip_;
|
||||
ColorRGBW state_[NUM_LEDS];
|
||||
int beginIdx_;
|
||||
int endIdx_;
|
||||
int calls_ = 0;
|
||||
bool finished_ = false;
|
||||
};
|
||||
|
||||
// Traits
|
||||
template <>
|
||||
struct EffectIdToConfig<EffectId::STATIC_DETAILED>
|
||||
{
|
||||
using type = EffectStaticDetailedConfig;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct EffectConfigToId<EffectStaticDetailedConfig>
|
||||
{
|
||||
static constexpr auto id = EffectId::STATIC_DETAILED;
|
||||
};
|
||||
|
||||
template <typename TLedStrip>
|
||||
struct EffectIdToClass<EffectId::STATIC_DETAILED, TLedStrip>
|
||||
{
|
||||
using type = EffectStaticDetailed<TLedStrip>;
|
||||
};
|
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,13 @@
|
|||
# Put this into /etc/systemd/system/musicmouse.service
|
||||
[Unit]
|
||||
Description=Music Mouse RFID Music Player
|
||||
After=multi-user.target
|
||||
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
Restart=always
|
||||
ExecStart=/opt/musicmouse/venv/bin/python /opt/musicmouse/espmusicmouse/host_driver/main.py /media/musicmouse/
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
|
@ -104,14 +104,22 @@ enum class MessageHostToFw : uint8_t
|
|||
LED_WHEEL_EFFECT_RANDOM_TWO_COLOR_INTERPOLATION = 3,
|
||||
LED_WHEEL_EFFECT_SWIPE_AND_CHANGE = 4,
|
||||
LED_WHEEL_EFFECT_REVERSE_SWIPE = 5,
|
||||
|
||||
MOUSE_LED_EFFECT_STATIC = 6,
|
||||
MOUSE_LED_EFFECT_CIRCULAR = 7,
|
||||
MOUSE_LED_EFFECT_RANDOM_TWO_COLOR_INTERPOLATION = 8,
|
||||
MOUSE_LED_EFFECT_SWIPE_AND_CHANGE = 9,
|
||||
MOUSE_LED_EFFECT_REVERSE_SWIPE = 10,
|
||||
|
||||
PREV_BUTTON_LED = 20,
|
||||
NEXT_BUTTON_LED = 21,
|
||||
SHELF_LED_EFFECT_STATIC = 15,
|
||||
SHELF_LED_EFFECT_CIRCULAR = 16,
|
||||
SHELF_LED_EFFECT_RANDOM_TWO_COLOR_INTERPOLATION = 17,
|
||||
SHELF_LED_EFFECT_SWIPE_AND_CHANGE = 18,
|
||||
SHELF_LED_EFFECT_REVERSE_SWIPE = 19,
|
||||
SHELF_LED_EFFECT_STATIC_DETAILED = 20,
|
||||
|
||||
PREV_BUTTON_LED = 21,
|
||||
NEXT_BUTTON_LED = 22,
|
||||
};
|
||||
|
||||
template <>
|
||||
|
@ -158,8 +166,21 @@ void sendMessageToHost(const TMessage &msg)
|
|||
Serial.write((uint8_t *)&msg, sizeof(msg));
|
||||
}
|
||||
|
||||
template <typename LedTask1, typename LedTask2>
|
||||
inline void handleIncomingMessagesFromHost(LedTask1 *ledTaskCircle, LedTask2 *ledTaskMouse, uint8_t ledChannelLeft, uint8_t ledChannelRight)
|
||||
template <typename TEffectConfig, typename TLedTask>
|
||||
inline bool handleLedEffect(TLedTask *ledTask, MessageHostToFw msgType, MessageHostToFw incomingMsgType, uint8_t *msgBuffer)
|
||||
{
|
||||
if (msgType == incomingMsgType)
|
||||
{
|
||||
auto cfg = reinterpret_cast<TEffectConfig *>(msgBuffer);
|
||||
ledTask->startEffect(*cfg);
|
||||
return true;
|
||||
}
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
template <typename LedTask1, typename LedTask2, typename LedTaskShelf>
|
||||
inline void handleIncomingMessagesFromHost(LedTask1 *ledTaskCircle, LedTask2 *ledTaskMouse, LedTaskShelf *ledTaskShelf, uint8_t ledChannelLeft, uint8_t ledChannelRight)
|
||||
{
|
||||
if (Serial.available() < sizeof(MAGIC_TOKEN_FW_TO_HOST) + sizeof(MessageHostToFw) + sizeof(uint16_t))
|
||||
return;
|
||||
|
@ -180,65 +201,36 @@ inline void handleIncomingMessagesFromHost(LedTask1 *ledTaskCircle, LedTask2 *le
|
|||
|
||||
static constexpr int maxIncomingBufferSize = 1024;
|
||||
static uint8_t msgBuffer[maxIncomingBufferSize];
|
||||
|
||||
if (msgSize < maxIncomingBufferSize)
|
||||
{
|
||||
Serial.readBytes(msgBuffer, msgSize);
|
||||
if (msgType == MessageHostToFw::LED_WHEEL_EFFECT_STATIC)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectStaticConfig *>(msgBuffer);
|
||||
ledTaskCircle->startEffect(*cfg);
|
||||
}
|
||||
else if (msgType == MessageHostToFw::LED_WHEEL_EFFECT_ALEXA_SWIPE)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectAlexaSwipeConfig *>(msgBuffer);
|
||||
ledTaskCircle->startEffect(*cfg);
|
||||
}
|
||||
else if (msgType == MessageHostToFw::LED_WHEEL_EFFECT_CIRCULAR)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectCircularConfig *>(msgBuffer);
|
||||
ledTaskCircle->startEffect(*cfg);
|
||||
}
|
||||
else if (msgType == MessageHostToFw::LED_WHEEL_EFFECT_RANDOM_TWO_COLOR_INTERPOLATION)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectRandomTwoColorInterpolationConfig *>(msgBuffer);
|
||||
ledTaskCircle->startEffect(*cfg);
|
||||
}
|
||||
else if (msgType == MessageHostToFw::LED_WHEEL_EFFECT_SWIPE_AND_CHANGE)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectSwipeAndChangeConfig *>(msgBuffer);
|
||||
ledTaskCircle->startEffect(*cfg);
|
||||
}
|
||||
else if (msgType == MessageHostToFw::LED_WHEEL_EFFECT_REVERSE_SWIPE)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectReverseSwipeConfig *>(msgBuffer);
|
||||
ledTaskCircle->startEffect(*cfg);
|
||||
}
|
||||
//
|
||||
else if (msgType == MessageHostToFw::MOUSE_LED_EFFECT_STATIC)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectStaticConfig *>(msgBuffer);
|
||||
ledTaskMouse->startEffect(*cfg);
|
||||
}
|
||||
else if (msgType == MessageHostToFw::MOUSE_LED_EFFECT_CIRCULAR)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectCircularConfig *>(msgBuffer);
|
||||
ledTaskMouse->startEffect(*cfg);
|
||||
}
|
||||
else if (msgType == MessageHostToFw::MOUSE_LED_EFFECT_RANDOM_TWO_COLOR_INTERPOLATION)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectRandomTwoColorInterpolationConfig *>(msgBuffer);
|
||||
ledTaskMouse->startEffect(*cfg);
|
||||
}
|
||||
else if (msgType == MessageHostToFw::MOUSE_LED_EFFECT_SWIPE_AND_CHANGE)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectSwipeAndChangeConfig *>(msgBuffer);
|
||||
ledTaskMouse->startEffect(*cfg);
|
||||
}
|
||||
else if (msgType == MessageHostToFw::MOUSE_LED_EFFECT_REVERSE_SWIPE)
|
||||
{
|
||||
auto cfg = reinterpret_cast<EffectReverseSwipeConfig *>(msgBuffer);
|
||||
ledTaskMouse->startEffect(*cfg);
|
||||
}
|
||||
|
||||
// clang-format off
|
||||
// LED Circle
|
||||
if(handleLedEffect<EffectStaticConfig >(ledTaskCircle, MessageHostToFw::LED_WHEEL_EFFECT_STATIC, msgType, msgBuffer)) {}
|
||||
else if(handleLedEffect<EffectAlexaSwipeConfig >(ledTaskCircle, MessageHostToFw::LED_WHEEL_EFFECT_ALEXA_SWIPE, msgType, msgBuffer)) {}
|
||||
else if(handleLedEffect<EffectCircularConfig >(ledTaskCircle, MessageHostToFw::LED_WHEEL_EFFECT_CIRCULAR, msgType, msgBuffer)) {}
|
||||
else if(handleLedEffect<EffectRandomTwoColorInterpolationConfig>(ledTaskCircle, MessageHostToFw::LED_WHEEL_EFFECT_RANDOM_TWO_COLOR_INTERPOLATION, msgType, msgBuffer)) {}
|
||||
else if(handleLedEffect<EffectSwipeAndChangeConfig >(ledTaskCircle, MessageHostToFw::LED_WHEEL_EFFECT_SWIPE_AND_CHANGE, msgType, msgBuffer)) {}
|
||||
else if(handleLedEffect<EffectReverseSwipeConfig >(ledTaskCircle, MessageHostToFw::LED_WHEEL_EFFECT_REVERSE_SWIPE, msgType, msgBuffer)) {}
|
||||
|
||||
// Mouse LEDs
|
||||
else if(handleLedEffect<EffectStaticConfig >(ledTaskMouse, MessageHostToFw::MOUSE_LED_EFFECT_STATIC, msgType, msgBuffer)) {}
|
||||
else if(handleLedEffect<EffectCircularConfig >(ledTaskMouse, MessageHostToFw::MOUSE_LED_EFFECT_CIRCULAR, msgType, msgBuffer)) {}
|
||||
else if(handleLedEffect<EffectRandomTwoColorInterpolationConfig>(ledTaskMouse, MessageHostToFw::MOUSE_LED_EFFECT_RANDOM_TWO_COLOR_INTERPOLATION, msgType, msgBuffer)) {}
|
||||
else if(handleLedEffect<EffectSwipeAndChangeConfig >(ledTaskMouse, MessageHostToFw::MOUSE_LED_EFFECT_SWIPE_AND_CHANGE, msgType, msgBuffer)) {}
|
||||
else if(handleLedEffect<EffectReverseSwipeConfig >(ledTaskMouse, MessageHostToFw::MOUSE_LED_EFFECT_REVERSE_SWIPE, msgType, msgBuffer)) {}
|
||||
|
||||
// Shelf LEDs
|
||||
else if (handleLedEffect<EffectStaticConfig >(ledTaskShelf, MessageHostToFw::SHELF_LED_EFFECT_STATIC, msgType, msgBuffer)) {}
|
||||
else if (handleLedEffect<EffectCircularConfig >(ledTaskShelf, MessageHostToFw::SHELF_LED_EFFECT_CIRCULAR, msgType, msgBuffer)) {}
|
||||
else if (handleLedEffect<EffectRandomTwoColorInterpolationConfig>(ledTaskShelf, MessageHostToFw::SHELF_LED_EFFECT_RANDOM_TWO_COLOR_INTERPOLATION, msgType, msgBuffer)) {}
|
||||
else if (handleLedEffect<EffectSwipeAndChangeConfig >(ledTaskShelf, MessageHostToFw::SHELF_LED_EFFECT_SWIPE_AND_CHANGE, msgType, msgBuffer)) {}
|
||||
else if (handleLedEffect<EffectReverseSwipeConfig >(ledTaskShelf, MessageHostToFw::SHELF_LED_EFFECT_REVERSE_SWIPE, msgType, msgBuffer)) {}
|
||||
else if (handleLedEffect<EffectStaticDetailedConfig >(ledTaskShelf, MessageHostToFw::SHELF_LED_EFFECT_STATIC_DETAILED, msgType, msgBuffer)) {}
|
||||
// clang-format on
|
||||
|
||||
else if (msgType == MessageHostToFw::PREV_BUTTON_LED)
|
||||
{
|
||||
float *val = reinterpret_cast<float *>(msgBuffer);
|
||||
|
|
|
@ -77,6 +77,7 @@ void _led_task_func(void *params)
|
|||
// clang-format off
|
||||
if (dispatchEffectId<EffectId::CIRCULAR >(id, effectFunction, ledStrip, msgBuffer, effectStorage)) {}
|
||||
else if (dispatchEffectId<EffectId::STATIC >(id, effectFunction, ledStrip, msgBuffer, effectStorage)) {}
|
||||
else if (dispatchEffectId<EffectId::STATIC_DETAILED >(id, effectFunction, ledStrip, msgBuffer, effectStorage)) {}
|
||||
else if (dispatchEffectId<EffectId::ALEXA_SWIPE >(id, effectFunction, ledStrip, msgBuffer, effectStorage)) {}
|
||||
else if (dispatchEffectId<EffectId::RANDOM_TWO_COLOR_INTERPOLATION>(id, effectFunction, ledStrip, msgBuffer, effectStorage)) {}
|
||||
else if (dispatchEffectId<EffectId::SWIPE_AND_CHANGE >(id, effectFunction, ledStrip, msgBuffer, effectStorage)) {}
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
#include "drivers/Esp32DriverRGBW.h"
|
||||
#include "effects/Circular.h"
|
||||
#include "effects/Static.h"
|
||||
#include "effects/StaticDetailed.h"
|
||||
#include "effects/AlexaSwipe.h"
|
||||
#include "effects/RandomTwoColorInterpolation.h"
|
||||
|
||||
|
@ -158,7 +159,7 @@ void setupMouseLeds()
|
|||
|
||||
// -------------------------------------------------- Shelf Leds -------------------------------------------
|
||||
|
||||
LedStripRGBW<250> ledStripShelf;
|
||||
LedStripRGBW<252> ledStripShelf;
|
||||
Esp32DriverRGBW ledDriverShelf;
|
||||
LedTask<decltype(ledStripShelf)> ledTaskShelf;
|
||||
|
||||
|
@ -166,7 +167,7 @@ void setupShelfLeds()
|
|||
{
|
||||
ledDriverShelf.begin(17, 2);
|
||||
ledTaskShelf.begin(ledStripShelf, ledDriverShelf);
|
||||
ledTaskShelf.startEffect(EffectStaticConfig{ColorRGBW{0, 0, 30}, 0, 0});
|
||||
ledTaskShelf.startEffect(EffectStaticConfig{ColorRGBW{0, 0, 0, 0}, 0, 0});
|
||||
}
|
||||
|
||||
// -------------------------------------------------- Touch Buttons ----------------------------------------
|
||||
|
@ -233,7 +234,7 @@ void setup()
|
|||
|
||||
void loop()
|
||||
{
|
||||
handleIncomingMessagesFromHost(&ledTaskCircle, &ledTaskMouse, 0, 1);
|
||||
handleIncomingMessagesFromHost(&ledTaskCircle, &ledTaskMouse, &ledTaskShelf, 0, 1);
|
||||
handleTouchInputs();
|
||||
handleRotaryEncoder();
|
||||
handleButtons();
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
|
||||
- button hintergrund beleuchtung [ok]
|
||||
|
||||
- playlisten
|
||||
- runterladen
|
||||
|
||||
- befestigung im regal
|
||||
- winkel
|
||||
- mehrfachsteckdose
|
||||
- lan kabel
|
||||
|
||||
- effekt kanal fuer audioeffekte
|
||||
- "boing" etc runterladen
|
||||
|
||||
- Fernbedienung wenn empfaenger da
|
||||
- HA regeln fuer standard
|
||||
|
||||
|
||||
- ansible cleanup
|
||||
- lirc
|
||||
- musicmouse kanal
|
||||
- musicmouse effect kanal
|
||||
|
||||
- home assistant anbindung
|
||||
- events an HA (figur, button press, ...)
|
||||
- mouse & ring leds von HA
|
||||
- HA device control (led fluter, rollos)
|
||||
- regal licht von HA aus
|
||||
|
||||
- Regal LEDs
|
||||
- kabel von musikmaus
|
||||
- Leisten zuschneiden
|
||||
- kabel auf richtige laenge zuschneiden
|
||||
- Kabel loeten
|
||||
- im Arbeitszimmer testen
|
||||
- Bonus: Ecken drucken
|
||||
|
||||
- Effekte Regal LEDs
|
||||
|
||||
- Musik-abhaengige Effekte
|
Binary file not shown.
File diff suppressed because it is too large
Load Diff
Binary file not shown.
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Binary file not shown.
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Binary file not shown.
File diff suppressed because it is too large
Load Diff
Binary file not shown.
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
musicmouse.FCStd
BIN
musicmouse.FCStd
Binary file not shown.
Binary file not shown.
|
@ -1,6 +0,0 @@
|
|||
cmake_minimum_required(VERSION 3.16)

# Standalone ALSA playback prototype (plays test.wav, pause/resume via stdin).
project("pyaudioplayeralsa")

# `play` links against ALSA (-lasound) and pthreads (for the stdin-listener
# thread started in main.cpp).
add_executable(play main.cpp src/WavFile.cpp)
target_link_libraries(play -lasound -pthread)
|
|
@ -1,177 +0,0 @@
|
|||
/*
|
||||
* Simple sound playback using ALSA API and libasound.
|
||||
*
|
||||
* Compile:
|
||||
* $ cc -o play sound_playback.c -lasound
|
||||
*
|
||||
 * Usage (note: this adapted copy ignores argv — it always plays ./test.wav,
 * and each line typed on stdin toggles pause/resume):
 * $ ./play <sample_rate> <channels> <seconds> < <file>
 *
 * Examples:
 * $ ./play 44100 2 5 < /dev/urandom
 * $ ./play 22050 1 8 < /path/to/file.wav
|
||||
*
|
||||
* Copyright (C) 2009 Alessandro Ghedini <al3xbio@gmail.com>
|
||||
* --------------------------------------------------------------
|
||||
* "THE BEER-WARE LICENSE" (Revision 42):
|
||||
* Alessandro Ghedini wrote this file. As long as you retain this
|
||||
* notice you can do whatever you want with this stuff. If we
|
||||
* meet some day, and you think this stuff is worth it, you can
|
||||
* buy me a beer in return.
|
||||
* --------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#include <alsa/asoundlib.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include <fstream>
|
||||
|
||||
#include "src/WavFile.h"
|
||||
|
||||
#include <future>
|
||||
#include <thread>
|
||||
#include <chrono>
|
||||
#include <queue>
|
||||
|
||||
#define PCM_DEVICE "default"
|
||||
|
||||
// Lines typed on stdin, waiting to be consumed by the playback loop in
// main(). Accessed from both the input thread and the main thread, always
// under queueMutex.
std::deque<std::string> queue;
std::mutex queueMutex;

// Thread body: blocks on stdin and pushes each complete line into `queue`.
// Runs forever; the declared std::string return value is never produced —
// the infinite loop makes the missing return statement unreachable.
static std::string getInput()
{

    std::cout << "starting input thread" << std::endl;
    while (true)
    {
        std::string result;
        std::getline(std::cin, result);
        {
            // Scope the lock to just the queue mutation.
            std::lock_guard<std::mutex> guard(queueMutex);
            std::cout << "adding to queue" << std::endl;
            queue.push_back(result);
        }
    }
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
std::ifstream wavFileStream("test.wav", std::ios::binary);
|
||||
WavFile wav(wavFileStream);
|
||||
wavFileStream.close();
|
||||
std::cout << "Wav samples " << wav.size() << std::endl;
|
||||
|
||||
unsigned int pcm, tmp, dir;
|
||||
snd_pcm_t *pcm_handle;
|
||||
snd_pcm_hw_params_t *params;
|
||||
snd_pcm_uframes_t frames;
|
||||
char *buff;
|
||||
int buff_size, loops;
|
||||
|
||||
/* Open the PCM device in playback mode */
|
||||
if (pcm = snd_pcm_open(&pcm_handle, PCM_DEVICE,
|
||||
SND_PCM_STREAM_PLAYBACK, 0) < 0)
|
||||
printf("ERROR: Can't open \"%s\" PCM device. %s\n",
|
||||
PCM_DEVICE, snd_strerror(pcm));
|
||||
|
||||
/* Allocate parameters object and fill it with default values*/
|
||||
snd_pcm_hw_params_alloca(¶ms);
|
||||
|
||||
snd_pcm_hw_params_any(pcm_handle, params);
|
||||
|
||||
/* Set parameters */
|
||||
if (pcm = snd_pcm_hw_params_set_access(pcm_handle, params,
|
||||
SND_PCM_ACCESS_RW_INTERLEAVED) < 0)
|
||||
printf("ERROR: Can't set interleaved mode. %s\n", snd_strerror(pcm));
|
||||
|
||||
if (pcm = snd_pcm_hw_params_set_format(pcm_handle, params,
|
||||
SND_PCM_FORMAT_S16_LE) < 0)
|
||||
printf("ERROR: Can't set format. %s\n", snd_strerror(pcm));
|
||||
|
||||
if (pcm = snd_pcm_hw_params_set_channels(pcm_handle, params, wav.channels()) < 0)
|
||||
printf("ERROR: Can't set channels number. %s\n", snd_strerror(pcm));
|
||||
|
||||
snd_pcm_hw_params_set_buffer_size(pcm_handle, params, 2 * 2048);
|
||||
|
||||
unsigned int rate = wav.sampleRate();
|
||||
if (pcm = snd_pcm_hw_params_set_rate_near(pcm_handle, params, &rate, 0) < 0)
|
||||
printf("ERROR: Can't set rate. %s\n", snd_strerror(pcm));
|
||||
|
||||
/* Write parameters */
|
||||
if (pcm = snd_pcm_hw_params(pcm_handle, params) < 0)
|
||||
printf("ERROR: Can't set harware parameters. %s\n", snd_strerror(pcm));
|
||||
|
||||
/* Resume information */
|
||||
printf("PCM name: '%s'\n", snd_pcm_name(pcm_handle));
|
||||
|
||||
printf("PCM state: %s\n", snd_pcm_state_name(snd_pcm_state(pcm_handle)));
|
||||
|
||||
snd_pcm_hw_params_get_channels(params, &tmp);
|
||||
|
||||
printf("channels: %i ", tmp);
|
||||
|
||||
if (tmp == 1)
|
||||
printf("(mono)\n");
|
||||
else if (tmp == 2)
|
||||
printf("(stereo)\n");
|
||||
|
||||
snd_pcm_hw_params_get_rate(params, &tmp, 0);
|
||||
printf("rate: %d bps\n", tmp);
|
||||
|
||||
/* Allocate buffer to hold single period */
|
||||
snd_pcm_hw_params_get_period_size(params, &frames, 0);
|
||||
|
||||
buff_size = frames * wav.channels() * 2 /* 2 -> sample size */;
|
||||
buff = (char *)malloc(buff_size);
|
||||
std::cout << "Buffer size " << buff_size << " frames " << frames << std::endl;
|
||||
|
||||
snd_pcm_hw_params_get_period_time(params, &tmp, NULL);
|
||||
|
||||
std::thread inputThread(getInput);
|
||||
bool playing = true;
|
||||
|
||||
size_t wavFilePosition = 0;
|
||||
while (wavFilePosition < wav.size())
|
||||
{
|
||||
{
|
||||
std::lock_guard<std::mutex> guard(queueMutex);
|
||||
if (queue.size() > 0)
|
||||
{
|
||||
queue.pop_back();
|
||||
std::cout << "Toggling" << std::endl;
|
||||
//snd_pcm_drain(pcm_handle);
|
||||
std::cout << "Done" << std::endl;
|
||||
playing = !playing;
|
||||
if (playing)
|
||||
{
|
||||
std::cout << "continuing at " << wavFilePosition << std::endl;
|
||||
snd_pcm_pause(pcm_handle, 0);
|
||||
//snd_pcm_prepare(pcm_handle);
|
||||
}
|
||||
else
|
||||
snd_pcm_pause(pcm_handle, 1);
|
||||
}
|
||||
}
|
||||
|
||||
if (!playing)
|
||||
continue;
|
||||
size_t dataToWrite = std::min(size_t(buff_size), wav.size() - wavFilePosition);
|
||||
int framesToWrite = dataToWrite / wav.channels() / 2;
|
||||
if (pcm = snd_pcm_writei(pcm_handle, wav[wavFilePosition], framesToWrite) == -EPIPE)
|
||||
{
|
||||
std::cout << "XRUN at wav position " << wavFilePosition << std::endl;
|
||||
snd_pcm_prepare(pcm_handle);
|
||||
}
|
||||
else if (pcm < 0)
|
||||
printf("ERROR. Can't write to PCM device. %s\n", snd_strerror(pcm));
|
||||
|
||||
wavFilePosition += dataToWrite;
|
||||
}
|
||||
|
||||
std::cout << "done filling buffer" << std::endl;
|
||||
snd_pcm_drain(pcm_handle);
|
||||
snd_pcm_close(pcm_handle);
|
||||
free(buff);
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -1,5 +0,0 @@
|
|||
- play wav file [ok]
|
||||
- read & parse wav file [ok]
|
||||
- stop wave file in the middle, wait 2 secs and continue
|
||||
- fade in/out
|
||||
- mix second wave file on top (some effect)
|
|
@ -1,61 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
|
||||
/// Read exactly `len` bytes from `str` and return them as a std::string
/// (embedded NUL bytes are preserved).
template <size_t len>
std::string readStr(std::istream &str)
{
    char buff[len];
    str.read(buff, len);
    return std::string(buff, len);
}

/// Read one POD value of type T from `str` as a raw byte copy. No endianness
/// conversion is performed — this matches the little-endian on-disk WAV
/// layout on LE hosts.
template <typename T>
T read(std::istream &str)
{
    T res;
    str.read((char *)&res, sizeof(res));
    return res;
}

/// Minimal PCM WAV loader: parses the RIFF / "fmt " / "data" chunks from a
/// stream and keeps the raw sample bytes in memory.
class WavFile
{
public:
    /// Parse a WAV file from `str`. Assumes uncompressed PCM. Header fields
    /// that are not needed (chunk sizes, byte rate, ...) are read and dropped.
    WavFile(std::istream &str)
    {
        auto chunkId = readStr<4>(str);
        auto chunkSize = read<uint32_t>(str);
        auto format = readStr<4>(str);

        auto subchunk1Id = readStr<4>(str);
        auto subchunk1Size = read<uint32_t>(str);

        auto audioFormat = read<uint16_t>(str);
        numChannels_ = read<uint16_t>(str);
        sampleRate_ = read<uint32_t>(str);
        auto byteRate = read<uint32_t>(str);

        auto blockAlign = read<uint16_t>(str);
        bitsPerSample_ = read<uint16_t>(str);

        // Some encoders write an extended fmt chunk (size 18 or 40). Skip
        // whatever follows the 16 standard bytes parsed above so the next
        // chunk header is read from the right position.
        if (subchunk1Size > 16)
            str.ignore(subchunk1Size - 16);

        // Skip any non-audio chunks (e.g. LIST/INFO) until "data" is found.
        auto subchunk2Id = readStr<4>(str);
        dataSize_ = read<uint32_t>(str);
        while (str && subchunk2Id != "data")
        {
            str.ignore(dataSize_);
            subchunk2Id = readStr<4>(str);
            dataSize_ = read<uint32_t>(str);
        }

        // unique_ptr<char[]> (array form) so destruction uses delete[] to
        // match the new[] below — the previous unique_ptr<char> called
        // plain delete on array storage, which is undefined behavior.
        data_ = std::unique_ptr<char[]>(new char[dataSize_]);
        str.read(data_.get(), dataSize_);
    }

    uint32_t sampleRate() const { return sampleRate_; }
    uint16_t channels() const { return numChannels_; }
    /// Size of the raw sample data in bytes (not frames or samples).
    uint32_t size() const { return dataSize_; }

    /// Pointer into the raw sample bytes, starting at byte `offset`.
    const char *operator[](int offset) const { return &data_.get()[offset]; }

private:
    std::unique_ptr<char[]> data_; // raw PCM bytes; delete[]'d on destruction
    uint32_t sampleRate_;
    uint16_t bitsPerSample_;
    uint32_t dataSize_;
    uint16_t numChannels_;
};
|
Loading…
Reference in New Issue