// 06_trigger.cpp
#include "al/core/app/al_App.hpp"
#include "al/core/graphics/al_Shapes.hpp"
#include "al/core/math/al_Random.hpp"
#include "al/util/ui/al_Parameter.hpp"
#include "al/util/ui/al_PresetSequencer.hpp"
#include "al/util/ui/al_ControlGUI.hpp"
#include "al/util/scene/al_SynthSequencer.hpp"
#include "Gamma/Oscillator.h"
#include "Gamma/Envelope.h"
#include "Gamma/Domain.h"
using namespace al;
/*
 * This tutorial shows how to define what happens when an event is triggered
 * and when it is terminated.
 *
 * When building a synthesizer you may want specific actions to be taken when
 * an event is triggered and when an event is terminated. Note that the
 * actual duration of the event may be longer than the time between
 * trigger and termination, to account for release envelopes for example.
 */
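/*
 * Rough life-cycle of a voice in this example (see MyApp below):
 *
 *   MyVoice *voice = mPolySynth.getVoice<MyVoice>(); // reuse a freed voice, or allocate one
 *   voice->set(...);                                 // freeze the current parameter values
 *   mPolySynth.triggerOn(voice, 0, id);              // calls the voice's onTriggerOn()
 *   // ... render(io) / render(g) drive the voice's onProcess() overrides ...
 *   mPolySynth.triggerOff(id);                       // calls the voice's onTriggerOff()
 *   // The voice calls free() once its envelope is done, returning it to the pool.
 */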
/*
 * Start by defining the behavior of a voice
 */
class MyVoice : public SynthVoice {
public:
  MyVoice() {
    addCone(mesh); // Prepare mesh to draw a cone
    mesh.primitive(Mesh::LINE_STRIP);
    // Attack and release segment lengths (seconds), breakpoint levels 0 -> 1 -> 0,
    // and a sustain point at breakpoint 1 so the envelope holds at the peak
    // until release() is called
    mEnvelope.lengths(0.1f, 0.5f);
    mEnvelope.levels(0, 1, 0);
    mEnvelope.sustainPoint(1);
  }
  virtual void onProcess(AudioIOData &io) override {
    while (io()) {
      // Multiply the envelope by the generator and accumulate (+=) into the
      // first output channel, scaled by 0.05, so simultaneous voices mix
      io.out(0) += mEnvelope() * mSource() * 0.05;
    }
    // It's very important to mark a voice as done to allow the PolySynth
    // to reuse it. If you don't do this, each new voice will allocate new
    // memory and you will fill up your system's memory after a while!
    if (mEnvelope.done()) {
      free();
    }
  }
  virtual void onProcess(Graphics &g) override {
    g.pushMatrix();
    // Place the cone using the values captured by set() below
    g.translate(mX, mY, 0);
    // We can use the audio envelope to affect the graphics.
    // We use the value() function to get its current value without
    // making it "tick", i.e. calculate the next value
    g.scale(mSize * mEnvelope.value());
    g.draw(mesh); // Draw the mesh
    g.popMatrix();
  }
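  /* set() is our own helper (not a SynthVoice override). The application calls
   * it just before triggering the voice, so the parameter values passed in are
   * "frozen" for the lifetime of this note.
   */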
  void set(float x, float y, float size, float frequency, float attackTime, float releaseTime) {
    mX = x;
    mY = y;
    mSize = size;
    mSource.freq(frequency);
    mEnvelope.lengths()[0] = attackTime;
    mEnvelope.lengths()[1] = releaseTime;
  }
  /* If we depend on triggered envelopes we need to override onTriggerOn()
   */
  virtual void onTriggerOn() override {
    // We want to reset the envelope:
    mEnvelope.reset();
  }
  /* If we rely on triggering a release portion of an envelope when triggered
   * off we need to override the onTriggerOff() function.
   * You can override one, both or none as needed.
   */
  virtual void onTriggerOff() override {
    // We want to force the envelope to release:
    mEnvelope.release();
  }
private:
  gam::Sine<> mSource; // Sine wave oscillator source
  gam::AD<> mEnvelope; // Attack/release envelope (sustains at the peak until released)
  Mesh mesh;           // The mesh now belongs to the voice
  float mX {0}, mY {0}, mSize {1.0}; // These are the internal parameters
};
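/*
 * Now define the application that drives the voices: it owns the PolySynth,
 * the GUI parameters and the keyboard handlers that trigger notes on and off.
 */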
class MyApp : public App
{
public:
  virtual void onCreate() override {
    nav().pos(Vec3d(0, 0, 8)); // Set the camera to view the scene
    gui << X << Y << Size << AttackTime << ReleaseTime; // Register the parameters with the GUI
    gui.init(); // Initialize GUI. Don't forget this!
    navControl().active(false); // Disable nav control (the keyboard is used to drive the synth)
  }
  virtual void onDraw(Graphics &g) override
  {
    g.clear();
    mPolySynth.render(g); // Call render for the PolySynth to generate its graphics output
    // Draw the GUI
    gui.draw(g);
  }
  virtual void onSound(AudioIOData &io) override {
    // You must also call render() for audio if you want to hear the voice's
    // audio output
    mPolySynth.render(io);
  }
  virtual void onKeyDown(const Keyboard& k) override
  {
    /*
     * First we need to get a free voice from the PolySynth.
     * If there are no free voices, one is allocated.
     */
    MyVoice *voice = mPolySynth.getVoice<MyVoice>();
    /*
     * Next, we configure the voice. Notice that we capture the current
     * values of the X, Y and Size parameters, and these become
     * "frozen" as the internal parameters for the voice.
     * For the frequency of the oscillator, we use the key pressed.
     */
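    // asciiToMIDI() maps the computer keyboard to MIDI note numbers; the note
    // is then converted to frequency with equal temperament, where A4 (MIDI
    // note 69) is 440 Hz and each semitone is a factor of 2^(1/12)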
    int midiNote = asciiToMIDI(k.key());
    float freq = 440.0f * powf(2, (midiNote - 69) / 12.0f);
    voice->set(X.get(), Y.get(), Size.get(), freq, AttackTime.get(), ReleaseTime.get());
    /*
     * After the voice is configured, it needs to be triggered in the
     * PolySynth. The second parameter is a time offset and the third
     * parameter is the note id. We need to pass this value to be able to
     * turn it off later.
     */
    mPolySynth.triggerOn(voice, 0, midiNote);
  }
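  /* When the key is released we pass the same note id to triggerOff(). The
   * PolySynth then calls onTriggerOff() on the matching voice, which starts the
   * envelope release; the voice frees itself once the envelope is done.
   */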
  virtual void onKeyUp(const Keyboard &k) override {
    int midiNote = asciiToMIDI(k.key());
    mPolySynth.triggerOff(midiNote);
  }
private:
  Light light;
  Parameter X {"X", "Position", 0.0, "", -1.0f, 1.0f};
  Parameter Y {"Y", "Position", 0.0, "", -1.0f, 1.0f};
  Parameter Size {"Scale", "Size", 1.0, "", 0.1f, 3.0f};
  Parameter AttackTime {"AttackTime", "Sound", 0.1, "", 0.001f, 2.0f};
  Parameter ReleaseTime {"ReleaseTime", "Sound", 1.0, "", 0.001f, 5.0f};
  ControlGUI gui;
  PolySynth mPolySynth;
};
int main(int argc, char *argv[])
{
  MyApp app;
  app.dimensions(800, 600);
  // Sample rate, block size, output channels, input channels
  app.initAudio(44100, 256, 2, 0);
  // Gamma's generators need to be told the sample rate; it must match the audio rate above
  gam::sampleRate(44100);
  app.start();
  return 0;
}