conformal_poly/
lib.rs

1#![doc = include_str!("../docs_boilerplate.md")]
2#![doc = include_str!("../README.md")]
3
4use self::state::State;
5use conformal_component::{
6    ProcessingEnvironment,
7    audio::{BufferMut, channels_mut},
8    events::{Data, Event as CEvent, NoteData},
9    parameters,
10};
11
// Element-wise `y[i] += x[i]` over the overlapping prefix of the two
// slices; any extra elements in the longer slice are left untouched.
fn add_in_place(x: &[f32], y: &mut [f32]) {
    let len = x.len().min(y.len());
    for i in 0..len {
        y[i] += x[i];
    }
}
17
// Scales every element of `y` by the constant `x`, in place.
fn mul_constant_in_place(x: f32, y: &mut [f32]) {
    y.iter_mut().for_each(|sample| *sample *= x);
}
23
/// The data associated with an event.
///
/// The payload type is [`conformal_component::events::NoteData`].
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum EventData {
    /// This event is sent when a note is started.
    NoteOn {
        /// The data associated with the note.
        data: NoteData,
    },
    /// This event is sent when a note is stopped.
    NoteOff {
        /// The data associated with the note.
        data: NoteData,
    },
}
38
/// An event sent to a voice at a particular time.
///
/// Timed events like this are delivered through [`Voice::process`];
/// outside of audio processing, only the untimed [`EventData`] is
/// delivered (via [`Voice::handle_event`]).
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Event {
    /// The offset relative to the start of the buffer in samples when the event occurred.
    pub sample_offset: usize,
    /// The data associated with the event.
    pub data: EventData,
}
47
/// The current state of all note expression controllers for a voice.
///
/// All fields default to `0.0` (neutral) via the derived [`Default`].
///
/// See the documentation for [`conformal_component::events::NoteExpression`] for
/// more information.
#[derive(Debug, Clone, Copy, PartialEq, Default)]
pub struct NoteExpressionState {
    /// The current value of the pitch bend for this voice in semitones away from the root note.
    pub pitch_bend: f32,

    /// The current value of the "timbre" controller for this voice.
    ///
    /// On many controllers, this represents the vertical or "y" position.
    /// This is referred to as "slide" in Ableton Live.
    ///
    /// This value varies from 0 to 1, with 0 being neutral.
    pub timbre: f32,

    /// The current value of the aftertouch controller for this voice.
    ///
    /// This value varies from 0 to 1, with 0 being neutral.
    pub aftertouch: f32,
}
70
/// A single point in a note expression curve.
///
/// The `state` takes effect at `sample_offset` and holds until the next
/// point in the curve - values are stepped, not interpolated (see
/// [`NoteExpressionCurve`]).
#[derive(Debug, Clone, PartialEq)]
pub struct NoteExpressionPoint {
    /// The time, expressed as samples relative to the start of the buffer.
    pub sample_offset: usize,

    /// The current value of the expression controllers for a voice.
    pub state: NoteExpressionState,
}
80
/// A note expression curve is a series of [`NoteExpressionPoint`]s over a buffer.
///
/// Note that the following invariants will hold:
///
///   - The number of points is at least 1
///   - The points are sorted by time
///   - The time of the first point is 0
///
/// Between points, the value the expression is constant - this makes it
/// different from [`conformal_component::parameters::PiecewiseLinearCurve`],
/// where the value is linearly interpolated between points.
#[derive(Debug, Clone)]
pub struct NoteExpressionCurve<I> {
    // Private so the invariants above can only be established through
    // `NoteExpressionCurve::new`, which validates them.
    points: I,
}
96
// Iterating a curve yields its points in ascending time order (per the
// invariants documented on `NoteExpressionCurve`).
impl<I: Iterator<Item = NoteExpressionPoint>> IntoIterator for NoteExpressionCurve<I> {
    type Item = NoteExpressionPoint;
    type IntoIter = I;

    fn into_iter(self) -> Self::IntoIter {
        // Simply hand back the wrapped iterator; no adaptation is needed.
        self.points
    }
}
105
106impl<I: Iterator<Item = NoteExpressionPoint> + Clone> NoteExpressionCurve<I> {
107    /// Creates an iterator that yields the note expression state for each sample
108    #[allow(clippy::missing_panics_doc)]
109    pub fn iter_by_sample(self) -> impl Iterator<Item = NoteExpressionState> + Clone {
110        let mut iter = self.points.peekable();
111        let mut last_state = None;
112        (0..).map(move |sample_index| {
113            while let Some(point) = iter.peek() {
114                if point.sample_offset > sample_index {
115                    break;
116                }
117                last_state = Some(point.state);
118                iter.next();
119            }
120            // Note that this will never panic, since the curve is guaranteed to have a point at time 0
121            last_state.unwrap()
122        })
123    }
124}
125
126/// Return a note expression curve that is constant, with all expressions set to zero.
127#[must_use]
128#[allow(clippy::missing_panics_doc)]
129pub fn default_note_expression_curve()
130-> NoteExpressionCurve<impl Iterator<Item = NoteExpressionPoint> + Clone> {
131    NoteExpressionCurve::new(std::iter::once(NoteExpressionPoint {
132        sample_offset: 0,
133        state: Default::default(),
134    }))
135    .unwrap()
136}
137
138impl<I: Iterator<Item = NoteExpressionPoint> + Clone> NoteExpressionCurve<I> {
139    /// Creates a new note expression curve from an iterator of points.
140    ///
141    /// Returns `None` if the curve does not satisfy the invariants described
142    /// in the documentation for [`NoteExpressionCurve`].
143    pub fn new(points: I) -> Option<Self> {
144        let points_iter = points.clone().peekable();
145        let mut contains_zero = false;
146        let mut last_time = None;
147        // Check invariants
148        for point in points_iter {
149            if !contains_zero {
150                if point.sample_offset != 0 {
151                    return None;
152                }
153                contains_zero = true;
154            } else if let Some(last_time) = last_time
155                && point.sample_offset < last_time
156            {
157                return None;
158            }
159            last_time = Some(point.sample_offset);
160        }
161        Some(Self { points })
162    }
163}
164
165// Optimization opportunity - allow `Voice` to indicate that not all output
166// was filled. This will let us skip rendering until a voice is playing
167// and also skip mixing silence.
168
/// A single voice in a polyphonic synth.
pub trait Voice {
    /// Data that is shared across all voices. This could include things like
    /// low frequency oscillators that are used by multiple voices.
    ///
    /// Must be cheap to clone - [`Poly::process`] clones it once per voice.
    type SharedData<'a>: Clone;

    /// Creates a new voice.
    ///
    /// `max_samples_per_process_call` is the largest `output` length a single
    /// [`process`](`Voice::process`) call will ask this voice to fill.
    fn new(max_samples_per_process_call: usize, sampling_rate: f32) -> Self;

    /// Handles a single event outside of audio processing.
    ///
    /// Note that events sent during a [`process`](`Voice::process`) call must be handled there.
    fn handle_event(&mut self, event: &EventData);

    /// Renders audio for this voice.
    ///
    /// Audio for the voice will be written into the `output` buffer, which will
    /// start out filled with silence.
    fn process(
        &mut self,
        events: impl IntoIterator<Item = Event>,
        params: &impl parameters::BufferStates,
        note_expressions: NoteExpressionCurve<impl Iterator<Item = NoteExpressionPoint> + Clone>,
        data: Self::SharedData<'_>,
        output: &mut [f32],
    );

    /// Returns whether this voice is currently quiescent, i.e. producing no audio.
    ///
    /// When this returns `true`, [`process`](`Voice::process`) will not be called for this
    /// voice again until a new note is started. This can improve performance by
    /// allowing voices to skip processing.
    #[must_use]
    fn quiescent(&self) -> bool;

    /// Called in lieu of [`process`](`Voice::process`) when the voice is quiescent.
    ///
    /// Voices can use this call to update internal state such as oscillator
    /// phase, to simulate the effect we'd get if we had processed `num_samples`
    /// of audio.
    fn skip_samples(&mut self, _num_samples: usize) {}

    /// Resets the voice to its initial state.
    fn reset(&mut self);
}
214
/// A helper struct for implementing polyphonic synths.
///
/// This struct handles common tasks such as routing events to voices, updating note expression curves,
/// and mixing the output of voices.
///
/// To use it, you must implement the [`Voice`] trait for your synth. Then, use the methods
/// on this struct to implement the required [`conformal_component::synth::Synth`] trait methods.
pub struct Poly<V> {
    // One `V` per voice slot, allocated up-front in `new` (`max_voices` total).
    voices: Vec<V>,
    // Event-routing / note-tracking bookkeeping (see the `state` module).
    state: State,
    // Mono scratch buffer a voice renders into before being mixed into the
    // output; sized to `max_samples_per_process_call`.
    voice_scratch_buffer: Vec<f32>,
}
227
228impl<V: std::fmt::Debug> std::fmt::Debug for Poly<V> {
229    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
230        f.debug_struct("Poly")
231            .field("voices", &self.voices)
232            .field("state", &self.state)
233            .finish_non_exhaustive()
234    }
235}
236
237mod state;
238
239impl<V: Voice> Poly<V> {
240    /// Creates a new [`Poly`] struct.
241    #[must_use]
242    pub fn new(environment: &ProcessingEnvironment, max_voices: usize) -> Self {
243        let voices = std::iter::repeat_with(|| {
244            V::new(
245                environment.max_samples_per_process_call,
246                environment.sampling_rate,
247            )
248        })
249        .take(max_voices)
250        .collect();
251        let state = State::new(max_voices);
252
253        Self {
254            voices,
255            state,
256            voice_scratch_buffer: vec![0f32; environment.max_samples_per_process_call],
257        }
258    }
259
260    /// Handles a set of events without rendering audio.
261    ///
262    /// This can be used to implement [`conformal_component::synth::Synth::handle_events`].
263    pub fn handle_events(&mut self, events: impl IntoIterator<Item = Data> + Clone) {
264        for (v, ev) in self
265            .state
266            .clone()
267            .dispatch_events(events.clone().into_iter().map(|data| CEvent {
268                sample_offset: 0,
269                data,
270            }))
271        {
272            self.voices[v].handle_event(&ev.data);
273        }
274
275        self.state.update(events.into_iter().map(|data| CEvent {
276            sample_offset: 0,
277            data,
278        }));
279    }
280
281    /// Renders the audio for the synth.
282    ///
283    /// This can be used to implement [`conformal_component::synth::Synth::process`].
284    /// For any voices with active notes, [`Voice::process`] will be called.
285    pub fn process(
286        &mut self,
287        events: impl Iterator<Item = CEvent> + Clone,
288        params: &impl parameters::BufferStates,
289        shared_data: &V::SharedData<'_>,
290        output: &mut impl BufferMut,
291    ) {
292        let buffer_size = output.num_frames();
293        #[allow(clippy::cast_precision_loss)]
294        let voice_scale = 1f32 / self.voices.len() as f32;
295        let mut cleared = false;
296        for (index, voice) in self.voices.iter_mut().enumerate() {
297            let voice_events = || {
298                self.state
299                    .clone()
300                    .dispatch_events(events.clone())
301                    .into_iter()
302                    .filter_map(|(i, event)| if i == index { Some(event) } else { None })
303            };
304            if voice_events().next().is_none() && voice.quiescent() {
305                voice.skip_samples(buffer_size);
306                continue;
307            }
308            voice.process(
309                voice_events(),
310                params,
311                self.state
312                    .clone()
313                    .note_expressions_for_voice(index, events.clone()),
314                shared_data.clone(),
315                &mut self.voice_scratch_buffer[0..output.num_frames()],
316            );
317            mul_constant_in_place(voice_scale, &mut self.voice_scratch_buffer);
318            if cleared {
319                for channel_mut in channels_mut(output) {
320                    add_in_place(&self.voice_scratch_buffer[0..buffer_size], channel_mut);
321                }
322            } else {
323                for channel_mut in channels_mut(output) {
324                    channel_mut.copy_from_slice(&self.voice_scratch_buffer[0..buffer_size]);
325                }
326                cleared = true;
327            }
328        }
329        if !cleared {
330            for channel_mut in channels_mut(output) {
331                channel_mut.fill(0f32);
332            }
333        }
334        self.state.update(events);
335    }
336
337    /// Resets the state of the polyphonic synth.
338    ///
339    /// This can be used to implement [`conformal_component::Processor::set_processing`].
340    pub fn reset(&mut self) {
341        for voice in &mut self.voices {
342            voice.reset();
343        }
344        self.state.reset();
345    }
346}