From 0f90d697616a4de829e8a1ce026bce90d8ab3328 Mon Sep 17 00:00:00 2001
From: Alexandre DuBreuil
Date: Fri, 11 Oct 2019 19:05:52 +0200
Subject: [PATCH] Cleanup chapter 4 and split with commons

---
 Chapter04/README.md                           |  53 +-
 Chapter04/__init__.py                         |   0
 Chapter04/all.py                              | 190 ----
 Chapter04/audio_to_drums.py                   | 822 ------------------
 Chapter04/chapter_04_example_01.py            |  91 +-
 Chapter04/chapter_04_example_02.py            |  75 +-
 Chapter04/chapter_04_example_03.py            |  69 +-
 Chapter04/drums.py                            | 266 ------
 Chapter04/primers/52_jazz_125_beat_4-4.mid    | Bin 28643 -> 0 bytes
 Chapter04/primers/classic_2_bars_piano_01.mid | Bin 248 -> 0 bytes
 .../primers/jazz_2_bars_clavichord_01.mid     | Bin 216 -> 0 bytes
 Chapter04/references/__init__.py              |   0
 Chapter04/{ => references}/append.py          |   0
 .../drumify.py}                               |  57 --
 Chapter04/{ => references}/groovae.py         |   0
 Chapter04/{ => references}/musicvae.py        |   0
 .../{ => references}/tensorboard_example.py   |   0
 Common/README.md                              |   4 +
 Common/utils.py                               |  68 ++
 19 files changed, 162 insertions(+), 1533 deletions(-)
 create mode 100644 Chapter04/__init__.py
 delete mode 100644 Chapter04/all.py
 delete mode 100644 Chapter04/audio_to_drums.py
 delete mode 100644 Chapter04/drums.py
 delete mode 100644 Chapter04/primers/52_jazz_125_beat_4-4.mid
 delete mode 100644 Chapter04/primers/classic_2_bars_piano_01.mid
 delete mode 100644 Chapter04/primers/jazz_2_bars_clavichord_01.mid
 create mode 100644 Chapter04/references/__init__.py
 rename Chapter04/{ => references}/append.py (100%)
 rename Chapter04/{chapter_04_example_04.py => references/drumify.py} (52%)
 rename Chapter04/{ => references}/groovae.py (100%)
 rename Chapter04/{ => references}/musicvae.py (100%)
 rename Chapter04/{ => references}/tensorboard_example.py (100%)
 create mode 100644 Common/README.md
 create mode 100644 Common/utils.py

diff --git a/Chapter04/README.md b/Chapter04/README.md
index 4f155d4..800de19 100644
--- a/Chapter04/README.md
+++ b/Chapter04/README.md
@@ -1,10 +1,57 @@
 # Chapter 04 - Latent space interpolation with Music VAE
 
-TODO
+In this chapter, we'll learn why the continuous latent space provided by
+Variational Autoencoders (VAEs) matters for music generation, compared
+with standard Autoencoders (AEs). We'll use the MusicVAE model, a
+hierarchical recurrent VAE from Magenta, to sample sequences and then
+interpolate between them, effectively morphing smoothly from one to
+another. We'll then see how to add groove, or humanization, to an
+existing sequence using the GrooVAE model. We'll finish by looking at
+the TensorFlow code used to build the VAE model.
 
-## Run
+## Code
 
-TODO
+### [Example 1](chapter_04_example_01.py) or [notebook](notebook.ipynb)
+
+This example shows how to sample, interpolate and humanize a drums sequence
+using MusicVAE and various configurations. For the Python script,
+while in the Magenta environment (`conda activate magenta`):
+
+```bash
+# Runs the example, the output files (plot, midi) will be in the "output" folder
+python chapter_04_example_01.py
+```
+
+For the Jupyter notebook:
+
+```bash
+jupyter notebook notebook.ipynb
+```
+
+### [Example 2](chapter_04_example_02.py)
+
+This example shows how to sample and interpolate a melody sequence
+using MusicVAE and various configurations.
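+
+The core of the script boils down to the following calls (a minimal
+sketch; it assumes the `cat-mel_2bar_big` checkpoint has already been
+downloaded to the "checkpoints" folder, which the example script does
+automatically):
+
+```python
+import os
+
+from magenta.models.music_vae import TrainedModel, configs
+
+model = TrainedModel(
+    configs.CONFIG_MAP["cat-mel_2bar_big"],
+    batch_size=8,
+    checkpoint_dir_or_path=os.path.join("checkpoints",
+                                        "cat-mel_2bar_big.tar"))
+
+# Samples 2 new sequences of 32 steps (2 bars) from the latent space
+sample_sequences = model.sample(n=2, length=32)
+
+# Interpolates between the 2 samples, returning 6 sequences
+# (the start and end sequences included)
+interpolate_sequences = model.interpolate(
+    start_sequence=sample_sequences[0],
+    end_sequence=sample_sequences[1],
+    num_steps=6,
+    length=32)
+```
+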
For the Python script, +while in the Magenta environment (`conda activate magenta`): + +```bash +# Runs the example, the output files (plot, midi) will be in the "output" folder +python chapter_04_example_02.py +``` + +### [Example 3](chapter_04_example_03.py) + +This example shows how to sample a trio (drums, melody, bass) sequence +using MusicVAE and various configurations. For the Python script, +while in the Magenta environment +(`conda activate magenta`): + +```bash +# Runs the example, the output files (plot, midi) will be in the "output" folder +python chapter_04_example_03.py +``` + +## TODO ```bash # On linux diff --git a/Chapter04/__init__.py b/Chapter04/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Chapter04/all.py b/Chapter04/all.py deleted file mode 100644 index f6f98e2..0000000 --- a/Chapter04/all.py +++ /dev/null @@ -1,190 +0,0 @@ -"""TODO all examples - -- TODO talk about the size in the output -- TODO talk about the sample and interpolate code -""" - -import os -import time -from typing import Tuple, List - -import magenta.music as mm -import pretty_midi -import tensorflow as tf -from magenta.models.music_vae import TrainedModel, configs -from magenta.protobuf.music_pb2 import NoteSequence -from six.moves import urllib -from visual_midi import Plotter - - -def download_checkpoint(model_name: str, - checkpoint_name: str, - target_dir: str): - """Downloads a Magenta checkpoint to target directory. - - Target directory target_dir will be created if it does not already exist. - - TODO extract? - - Args: - model_name: magenta model name to download - checkpoint_name: magenta checkpoint name to download. - target_dir: local directory in which to write the checkpoint. - """ - tf.gfile.MakeDirs(target_dir) - checkpoint_target = os.path.join(target_dir, checkpoint_name) - if not os.path.exists(checkpoint_target): - response = urllib.request.urlopen( - "https://storage.googleapis.com/magentadata/models/" - "%s/checkpoints/%s" % (model_name, checkpoint_name)) - data = response.read() - local_file = open(checkpoint_target, 'wb') - local_file.write(data) - local_file.close() - - -def get_model(name): - """TODO""" - checkpoint = name + ".tar" - download_checkpoint("music_vae", checkpoint, "bundles") - return TrainedModel( - # Removes the .lohl in some training checkpoint which shares the same config - configs.CONFIG_MAP[name.split(".")[0] if "." 
in name else name], - batch_size=8, - checkpoint_dir_or_path=os.path.join("bundles", checkpoint)) - - -def merge(sequences: List[NoteSequence]) -> NoteSequence: - merged = pretty_midi.PrettyMIDI() - for sequence in sequences: - sequence_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence) - for instrument in sequence_midi.instruments: - if instrument.notes: - merged.instruments.append(instrument) - return mm.midi_io.midi_to_note_sequence(merged) - - -def write_midis(dir: str, prefix: str, sequences: List[NoteSequence]): - # TODO Writes the resulting midi file to the output directory - if not os.path.exists(os.path.join("output", dir)): - os.mkdir(os.path.join("output", dir), ) - for (index, sequence) in enumerate(sequences): - date_and_time = time.strftime('%Y-%m-%d_%H%M%S') - midi_filename = "%s_%s_%s.mid" % (prefix, index, date_and_time) - midi_path = os.path.join("output", dir, midi_filename) - mm.midi_io.note_sequence_to_midi_file(sequence, midi_path) - print("Generated midi file: " + str(os.path.abspath(midi_path))) - - -def write_plots(dir: str, prefix: str, sequences: List[NoteSequence]): - # TODO Writes the resulting plot file to the output directory - if not os.path.exists(os.path.join("output", dir)): - os.mkdir(os.path.join("output", dir), ) - for (index, sequence) in enumerate(sequences): - date_and_time = time.strftime('%Y-%m-%d_%H%M%S') - plot_filename = "%s_%s_%s.html" % (prefix, index, date_and_time) - plot_path = os.path.join("output", dir, plot_filename) - pretty_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence) - plotter = Plotter(plot_max_length_time=64) - plotter.save(pretty_midi, plot_path) - print("Generated plot file: " + str(os.path.abspath(plot_path))) - - -def app(unused_argv): - """TODO split melody and drums""" - - num_outputs = 10 - - def sample() -> Tuple[List[NoteSequence], List[NoteSequence]]: - """ - TODO maybe use a primer to set the ending sample - - use cat-mel_2bar_big to sample 2 melodies - - use cat-drums_2bar_small.lokl to sample 2 drums sequences - """ - model_melody = get_model("cat-mel_2bar_big") - model_drums = get_model("cat-drums_2bar_small.lokl") - - # TODO explain steps and temperature - sample_melody_sequences = model_melody.sample(2, 32, 1) - sample_drums_sequences = model_drums.sample(2, 32, 1) - - # TODO output in folder - write_midis("sample", "music_vae-" + "cat-mel_2bar_big", - sample_melody_sequences) - write_plots("sample", "music_vae-" + "cat-mel_2bar_big", - sample_melody_sequences) - write_midis("sample", "music_vae-" + "cat-drums_2bar_small", - sample_drums_sequences) - write_plots("sample", "music_vae-" + "cat-drums_2bar_small", - sample_drums_sequences) - - return sample_drums_sequences, sample_melody_sequences - - def interpolate(sample_drums_sequences: List[NoteSequence], - sample_melody_sequences: List[NoteSequence]) \ - -> Tuple[NoteSequence, NoteSequence]: - """ - - use cat-mel_2bar_big to interpolate between the 2 melodies - - use cat-drums_2bar_small.hikl to interpolate between the 2 drum seq - """ - model_melody = get_model("cat-mel_2bar_big") - model_drums = get_model("cat-drums_2bar_small.hikl") - - # TODO explain num_outputs, length, temperature - sample_melody_sequences = model_melody.interpolate( - sample_melody_sequences[0], sample_melody_sequences[1], 10, 32, 1) - sample_drums_sequences = model_drums.interpolate( - sample_drums_sequences[0], sample_drums_sequences[1], 10, 32, 1) - - # TODO output in folder - write_midis("interpolate", "music_vae-" + "cat-mel_2bar_big", - sample_melody_sequences) - 
write_plots("interpolate", "music_vae-" + "cat-mel_2bar_big", - sample_melody_sequences) - write_midis("interpolate", "music_vae-" + "cat-drums_2bar_small", - sample_drums_sequences) - write_plots("interpolate", "music_vae-" + "cat-drums_2bar_small", - sample_drums_sequences) - - # TODO merge with mm libs - sample_melody_sequence = mm.sequences_lib.concatenate_sequences( - sample_melody_sequences) - sample_drums_sequence = mm.sequences_lib.concatenate_sequences( - sample_drums_sequences) - - write_midis("merge", "music_vae-" + "cat-mel_2bar_big", - [sample_melody_sequence]) - write_plots("merge", "music_vae-" + "cat-mel_2bar_big", - [sample_melody_sequence]) - write_midis("merge", "music_vae-" + "cat-drums_2bar_small", - [sample_drums_sequence]) - write_plots("merge", "music_vae-" + "cat-drums_2bar_small", - [sample_drums_sequence]) - - # TODO merge in two instruments - sample_sequence = merge([sample_melody_sequence, sample_drums_sequence]) - - write_midis("merge", "music_vae-" + "cat-mel_2bar_big" + "-merged", - [sample_sequence]) - write_plots("merge", "music_vae-" + "cat-mel_2bar_big" + "-merged", - [sample_sequence]) - - return sample_melody_sequence, sample_drums_sequence - - def groove(sample_drums_sequences: List[NoteSequence], - sample_melody_sequences: List[NoteSequence]): - """ - - use groovae_2bar_humanize to add groove to the 2 drums seq - - use groovae_2bar_add_closed_hh to add some high hats to the last seq - """ - pass - - sample_drums_sequences, sample_melody_sequences = sample() - interpolate(sample_drums_sequences, sample_melody_sequences) - groove(sample_drums_sequences, sample_melody_sequences) - - return 0 - - -if __name__ == "__main__": - tf.app.run(app) diff --git a/Chapter04/audio_to_drums.py b/Chapter04/audio_to_drums.py deleted file mode 100644 index 34a9666..0000000 --- a/Chapter04/audio_to_drums.py +++ /dev/null @@ -1,822 +0,0 @@ -# -*- coding: utf-8 -*- -"""GrooVAE.ipynb - -Automatically generated by Colaboratory. - -Original file is located at - https://colab.research.google.com/github/tensorflow/magenta-demos/blob/master/colab-notebooks/GrooVAE.ipynb - -# GrooVAE: Generating and Controlling Expressive Drum Performances -### ___Jon Gillick, Adam Roberts, Jesse Engel___ - -####To open this notebook in Colab visit https://goo.gl/magenta/groovae-colab - ---- - -This notebook demonstrates some applications of machine learning for generating and manipulating beats and drum performances. Additional details can be found in our [paper](https://goo.gl/magenta/groovae-paper) and [blog post](https://g.co/magenta/groovae). - -To make these experiments possible, we hired some talented professional drummers to record on an electronic drum kit (see the [Groove MIDI Dataset](https://g.co/magenta/groove-datasets) for more details), and then we trained our "GrooVAE" models on this data. - -
-
-One way to think about a MIDI drum beat, whether it is played live or electronically sequenced, is to break it down into 2 main components:
-
-[Figure: a drum beat broken down into its two components, the score and the groove]
-
- -One (simplified) view of a drum beat is that it is the combination of a score and a groove. Given one or the other, a good drummer knows how to fill in the rest to come up with a complete beat - in this project, we try to teach models to do this completion. - - - - - GrooVAE Figure - -# Environment Setup -""" - -# @title Setup Environment - -print('Installing dependencies...') - -!apt - get -update - qq & & apt - get -install - qq -libfluidsynth1 -fluid - soundfont - gm -build - essential -libasound2 - dev -libjack - dev -!pip -install - q -pyfluidsynth -!pip -install - U - q -magenta - -import tensorflow_datasets as tfds -import tensorflow as tf - -# Allow python to pick up the newly-installed fluidsynth lib. -# This is only needed for the hosted Colab environment. -import ctypes.util - -orig_ctypes_util_find_library = ctypes.util.find_library - - -def proxy_find_library(lib): - if lib == 'fluidsynth': - return 'libfluidsynth.so.1' - else: - return orig_ctypes_util_find_library(lib) - - -ctypes.util.find_library = proxy_find_library - -print('Importing software libraries...') - -import copy, warnings, librosa, numpy as np - -warnings.filterwarnings("ignore", category=DeprecationWarning) - -# Colab/Notebook specific stuff -import IPython.display -from IPython.display import Audio -from google.colab import files - -# Magenta specific stuff -from magenta.models.music_vae import configs -from magenta.models.music_vae.trained_model import TrainedModel -from magenta import music as mm -from magenta.music import midi_synth -from magenta.music.sequences_lib import concatenate_sequences -from magenta.models.music_vae import data -from magenta.protobuf import music_pb2 - - -# Define some functions - -# If a sequence has notes at time before 0.0, scootch them up to 0 -def start_notes_at_0(s): - for n in s.notes: - if n.start_time < 0: - n.end_time -= n.start_time - n.start_time = 0 - return s - - -def play(note_sequence, sf2_path='Standard_Drum_Kit.sf2'): - if sf2_path: - audio_seq = midi_synth.fluidsynth(start_notes_at_0(note_sequence), - sample_rate=44100, sf2_path=sf2_path) - IPython.display.display(IPython.display.Audio(audio_seq, rate=44100)) - else: - mm.play_sequence(start_notes_at_0(note_sequence), synth=mm.fluidsynth) - - -# Some midi files come by default from different instrument channels -# Quick and dirty way to set midi files to be recognized as drums -def set_to_drums(ns): - for n in ns.notes: - n.instrument = 9 - n.is_drum = True - - -def unset_to_drums(ns): - for note in ns.notes: - note.is_drum = False - note.instrument = 0 - return ns - - -# quickly change the tempo of a midi sequence and adjust all notes -def change_tempo(note_sequence, new_tempo): - new_sequence = copy.deepcopy(note_sequence) - ratio = note_sequence.tempos[0].qpm / new_tempo - for note in new_sequence.notes: - note.start_time = note.start_time * ratio - note.end_time = note.end_time * ratio - new_sequence.tempos[0].qpm = new_tempo - return new_sequence - - -def download(note_sequence, filename): - mm.sequence_proto_to_midi_file(note_sequence, filename) - files.download(filename) - - -def download_audio(audio_sequence, filename, sr): - librosa.output.write_wav(filename, audio_sequence, sr=sr, norm=True) - files.download(filename) - - -# Load some configs to be used later -dc_quantize = configs.CONFIG_MAP['groovae_2bar_humanize'].data_converter -dc_tap = configs.CONFIG_MAP['groovae_2bar_tap_fixed_velocity'].data_converter -dc_hihat = configs.CONFIG_MAP['groovae_2bar_add_closed_hh'].data_converter -dc_4bar = 
configs.CONFIG_MAP['groovae_4bar'].data_converter - - -# quick method for removing microtiming and velocity from a sequence -def get_quantized_2bar(s, velocity=0): - new_s = dc_quantize.to_notesequences(dc_quantize.to_tensors(s).inputs)[0] - new_s = change_tempo(new_s, s.tempos[0].qpm) - if velocity != 0: - for n in new_s.notes: - n.velocity = velocity - return new_s - - -# quick method for turning a drumbeat into a tapped rhythm -def get_tapped_2bar(s, velocity=85, ride=False): - new_s = dc_tap.to_notesequences(dc_tap.to_tensors(s).inputs)[0] - new_s = change_tempo(new_s, s.tempos[0].qpm) - if velocity != 0: - for n in new_s.notes: - n.velocity = velocity - if ride: - for n in new_s.notes: - n.pitch = 42 - return new_s - - -# quick method for removing hi-hats from a sequence -def get_hh_2bar(s): - new_s = dc_hihat.to_notesequences(dc_hihat.to_tensors(s).inputs)[0] - new_s = change_tempo(new_s, s.tempos[0].qpm) - return new_s - - -# Calculate quantization steps but do not remove microtiming -def quantize(s, steps_per_quarter=4): - return mm.sequences_lib.quantize_note_sequence(s, steps_per_quarter) - - -# Destructively quantize a midi sequence -def flatten_quantization(s): - beat_length = 60. / s.tempos[0].qpm - step_length = beat_length / 4 # s.quantization_info.steps_per_quarter - new_s = copy.deepcopy(s) - for note in new_s.notes: - note.start_time = step_length * note.quantized_start_step - note.end_time = step_length * note.quantized_end_step - return new_s - - -# Calculate how far off the beat a note is -def get_offset(s, note_index): - q_s = flatten_quantization(quantize(s)) - true_onset = s.notes[note_index].start_time - quantized_onset = q_s.notes[note_index].start_time - diff = quantized_onset - true_onset - beat_length = 60. / s.tempos[0].qpm - step_length = beat_length / 4 # q_s.quantization_info.steps_per_quarter - offset = diff / step_length - return offset - - -def is_4_4(s): - ts = s.time_signatures[0] - return (ts.numerator == 4 and ts.denominator == 4) - - -def preprocess_4bar(s): - return dc_4bar.to_notesequences(dc_4bar.to_tensors(s).outputs)[0] - - -def preprocess_2bar(s): - return dc_quantize.to_notesequences(dc_quantize.to_tensors(s).outputs)[0] - - -def _slerp(p0, p1, t): - """Spherical linear interpolation.""" - omega = np.arccos(np.dot(np.squeeze(p0 / np.linalg.norm(p0)), - np.squeeze(p1 / np.linalg.norm(p1)))) - so = np.sin(omega) - return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1 - - -print('Downloading drum samples...') -# Download a drum kit for playing drum midi -!gsutil - q - m -cp -gs: // magentadata / soundfonts / Standard_Drum_Kit.sf2. 
- -print("Download MIDI data...") - -# Load MIDI files from GMD with MIDI only (no audio) as a tf.data.Dataset -dataset_2bar = tfds.as_numpy(tfds.load( - name="groove/2bar-midionly", - split=tfds.Split.VALIDATION, - try_gcs=True)) - -dev_sequences = [quantize(mm.midi_to_note_sequence(features["midi"])) for - features in dataset_2bar] -_ = [set_to_drums(s) for s in dev_sequences] -dev_sequences = [s for s in dev_sequences if - is_4_4(s) and len(s.notes) > 0 and s.notes[ - -1].quantized_end_step > mm.steps_per_bar_in_quantized_sequence( - s)] - -dataset_4bar = tfds.as_numpy(tfds.load( - name="groove/4bar-midionly", - split=tfds.Split.VALIDATION, - try_gcs=True)) - -dev_sequences_4bar = [quantize(mm.midi_to_note_sequence(features["midi"])) for - features in dataset_4bar] -_ = [set_to_drums(s) for s in dev_sequences_4bar] -dev_sequences_4bar = [s for s in dev_sequences_4bar if - is_4_4(s) and len(s.notes) > 0 and s.notes[ - -1].quantized_end_step > mm.steps_per_bar_in_quantized_sequence( - s)] - -print("Loading model checkpoints...") - -# Download all the models -!gsutil - q - m -cp -gs: // magentadata / models / music_vae / checkpoints / groovae_ *.tar. -GROOVAE_4BAR = "groovae_4bar.tar" -GROOVAE_2BAR_HUMANIZE = "groovae_2bar_humanize.tar" -GROOVAE_2BAR_HUMANIZE_NOKL = "groovae_2bar_humanize_nokl.tar" -GROOVAE_2BAR_HITS_CONTROL = "groovae_2bar_hits_control.tar" -GROOVAE_2BAR_TAP_FIXED_VELOCITY = "groovae_2bar_tap_fixed_velocity.tar" -GROOVAE_2BAR_ADD_CLOSED_HH = "groovae_2bar_add_closed_hh.tar" -GROOVAE_2BAR_HITS_CONTROL_NOKL = "groovae_2bar_hits_control_nokl.tar" - -print("Downloading audio data...") -!gsutil - q - m -cp -gs: // magentadata / models / music_vae / groovae_colab / * wav. - -"""# Generate New Beats - -Before we get more specific, let's generate some beats from scratch. One of the powerful abilities of Variational Autoencoder models is to generate new datapoints similar to the ones they were trained on. Like [MusicVAE](g.co/magenta/music-vae), we can sample as many new beats from our latent space as we would like, but with GrooVAE, our latent space encodes not just the drum pattern but also the performances characteristics of the drummers who played them. We can also interpolate smoothly between different beats in our latent space. -""" - -# @title Load checkpoint - -config_4_bar = configs.CONFIG_MAP['groovae_4bar'] -groovae_4_bar = TrainedModel(config_4_bar, 2, - checkpoint_dir_or_path=GROOVAE_4BAR) - -# @title Generate Beats -temperature = 1. # @param {type:"slider", min:0.01, max:2.0, step:0.01} -tempo = 116 # @param {type:"slider", min:80, max:180, step:1} -samples = groovae_4_bar.sample(3, temperature=temperature, length=64) -samples = [change_tempo(start_notes_at_0(s), tempo) for s in samples] -for s in samples: - play(s) - -# @title Interpolate Between Beats -temperature = 1 # @param {type:"slider", min:0.01, max:2.0, step:0.01} -steps = 3 # @param {type:"slider", min:1, max:5, step:1} - -sequence_indices = np.random.randint(0, len(dev_sequences_4bar), 2) -beat_a = change_tempo(dev_sequences_4bar[sequence_indices[0]], 120) -beat_a = preprocess_4bar(beat_a) -beat_b = change_tempo(dev_sequences_4bar[sequence_indices[1]], 120) -beat_b = preprocess_4bar(beat_b) - -print("Playing Beat A") -play(beat_a) -print("Playing Beat B") -play(beat_b) - -seqs = groovae_4_bar.interpolate(beat_a, beat_b, steps + 2, length=64, - temperature=1.) 
- -individual_duration = 8.0 - -interp_seq = mm.sequences_lib.concatenate_sequences( - seqs, [individual_duration] * len(seqs)) - -print("Playing Interpolation from A to B") - -play(start_notes_at_0(interp_seq)) - -"""# Groove: Add some groove to a programmed beat - -Now let's see what it sounds like to add groove to a quantized beat. This is a function that's often used in music production to give drums more character. In the past, it's typically been done by randomizing note timings and velocities or by fixing all timings and velocities to specific values defined by a "swing" setting or a template. Here, instead we let the model predict what the groove characteristics should be, adapting the timing and velocities based on what the beat is. -""" - -# @title Load checkpoint - -config_2_bar_humanize = configs.CONFIG_MAP['groovae_2bar_humanize'] -groovae_2_bar_humanize = TrainedModel(config_2_bar_humanize, 1, - checkpoint_dir_or_path=GROOVAE_2BAR_HUMANIZE) - - -def humanize(s, model, temperature=1.0): - encoding, mu, sigma = model.encode([s]) - decoded = model.decode(encoding, length=32, temperature=1.)[0] - return change_tempo(decoded, s.tempos[0].qpm) - - -# @title Groove up the Beats - -sequence_indices = np.random.randint(0, len(dev_sequences), 3) -for i in sequence_indices: - s = start_notes_at_0(dev_sequences[i]) - s = get_quantized_2bar(s, velocity=85) - print("\nPlaying programmed beat: ") - play(s) - h = humanize(s, groovae_2_bar_humanize) - print("Playing humanized beat: ") - play(start_notes_at_0(h)) - -# @title (Optional) Upload your own quantized MIDI beats to Groove on - -uploaded = files.upload() - -uploaded_sequences = [mm.midi_file_to_note_sequence(fn) for fn in - uploaded.keys()] - -new_beats = [] - -for s in uploaded_sequences: - set_to_drums(s) - s = start_notes_at_0(s) - # s = get_quantized_2bar(s, velocity=85) - print("\nPlaying your beat: ") - play(s) - h = humanize(s, groovae_2_bar_humanize) - print("Playing humanized beat: ") - play(start_notes_at_0(h)) - new_beats.append(h) - -# @title (Optional) Save your GrooVAE beats -for i, beat in enumerate(new_beats): - download(beat, 'humanized_beat_%d.mid' % (i)) - -"""# Tap2Drum: Generate a beat from any rhythm - -While the Groove model works by removing the micro-timing and velocity information and learning to predict them from just the drum pattern, we can also go in the opposite direction. Here, we take a representation of a Groove as input (in the form of a rhythm that can have precise timing but where drum categories are ignored) - and then generate drum beats that match the groove implied by this rhythm. We trained this model by collapsing all drum hits from each beat in the training data to a single "tapped" rhythm, and then learning to decode full beats from that rhythm. This allows us to input any rhythm we like through the precise onset timings in a "tap" and let the model decode our rhythm into a beat. We can even simply record taps as audio, or extract them from a recording of another instrument, rather than needing a midi controller. 
-""" - -# @title Load checkpoint - -config_2bar_tap = configs.CONFIG_MAP['groovae_2bar_tap_fixed_velocity'] -groovae_2bar_tap = TrainedModel(config_2bar_tap, 1, - checkpoint_dir_or_path=GROOVAE_2BAR_TAP_FIXED_VELOCITY) - - -def mix_tracks(y1, y2, stereo=False): - l = max(len(y1), len(y2)) - y1 = librosa.util.fix_length(y1, l) - y2 = librosa.util.fix_length(y2, l) - - if stereo: - return np.vstack([y1, y2]) - else: - return y1 + y2 - - -def make_click_track(s): - last_note_time = max([n.start_time for n in s.notes]) - beat_length = 60. / s.tempos[0].qpm - i = 0 - times = [] - while i * beat_length < last_note_time: - times.append(i * beat_length) - i += 1 - return librosa.clicks(times) - - -def drumify(s, model, temperature=1.0): - encoding, mu, sigma = model.encode([s]) - decoded = model.decode(encoding, length=32, temperature=temperature) - return decoded[0] - - -def combine_sequences(seqs): - # assumes a list of 2 bar seqs with constant tempo - for i, seq in enumerate(seqs): - shift_amount = i * (60 / seqs[0].tempos[0].qpm * 4 * 2) - if shift_amount > 0: - seqs[i] = mm.sequences_lib.shift_sequence_times(seq, shift_amount) - return mm.sequences_lib.concatenate_sequences(seqs) - - -def combine_sequences_with_lengths(sequences, lengths): - seqs = copy.deepcopy(sequences) - total_shift_amount = 0 - for i, seq in enumerate(seqs): - if i == 0: - shift_amount = 0 - else: - shift_amount = lengths[i - 1] - total_shift_amount += shift_amount - if total_shift_amount > 0: - seqs[i] = mm.sequences_lib.shift_sequence_times(seq, total_shift_amount) - combined_seq = music_pb2.NoteSequence() - for i in range(len(seqs)): - tempo = combined_seq.tempos.add() - tempo.qpm = seqs[i].tempos[0].qpm - tempo.time = sum(lengths[0:i - 1]) - for note in seqs[i].notes: - combined_seq.notes.extend([copy.deepcopy(note)]) - return combined_seq - - -def get_audio_start_time(y, sr): - tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr) - beat_times = librosa.frames_to_time(beat_frames, sr=sr) - onset_times = librosa.onset.onset_detect(y, sr, units='time') - start_time = onset_times[0] - return start_time - - -def audio_tap_to_note_sequence(f, velocity_threshold=30): - y, sr = librosa.load(f) - # pad the beginning to avoid errors with onsets right at the start - y = np.concatenate([np.zeros(1000), y]) - tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr) - # try to guess reasonable tempo - beat_times = librosa.frames_to_time(beat_frames, sr=sr) - onset_frames = librosa.onset.onset_detect(y, sr, units='frames') - onset_times = librosa.onset.onset_detect(y, sr, units='time') - start_time = onset_times[0] - onset_strengths = librosa.onset.onset_strength(y, sr)[onset_frames] - normalized_onset_strengths = onset_strengths / np.max(onset_strengths) - onset_velocities = np.int32(normalized_onset_strengths * 127) - note_sequence = music_pb2.NoteSequence() - note_sequence.tempos.add(qpm=tempo) - for onset_vel, onset_time in zip(onset_velocities, onset_times): - if onset_vel > velocity_threshold and onset_time >= start_time: # filter quietest notes - note_sequence.notes.add( - instrument=9, pitch=42, is_drum=True, - velocity=onset_vel, # use fixed velocity here to avoid overfitting - start_time=onset_time - start_time, - end_time=onset_time - start_time) - - return note_sequence - - -# Allow encoding of a sequence that has no extracted examples -# by adding a quiet note after the desired length of time -def add_silent_note(note_sequence, num_bars): - tempo = note_sequence.tempos[0].qpm - length = 60 / tempo * 4 * num_bars - 
note_sequence.notes.add( - instrument=9, pitch=42, velocity=0, start_time=length - 0.02, - end_time=length - 0.01, is_drum=True) - - -def get_bar_length(note_sequence): - tempo = note_sequence.tempos[0].qpm - return 60 / tempo * 4 - - -def sequence_is_shorter_than_full(note_sequence): - return note_sequence.notes[-1].start_time < get_bar_length(note_sequence) - - -def get_rhythm_elements(y, sr): - onset_env = librosa.onset.onset_strength(y, sr=sr) - tempo = librosa.beat.tempo(onset_envelope=onset_env, max_tempo=180)[0] - onset_times = librosa.onset.onset_detect(y, sr, units='time') - onset_frames = librosa.onset.onset_detect(y, sr, units='frames') - onset_strengths = librosa.onset.onset_strength(y, sr)[onset_frames] - normalized_onset_strengths = onset_strengths / np.max(onset_strengths) - onset_velocities = np.int32(normalized_onset_strengths * 127) - - return tempo, onset_times, onset_frames, onset_velocities - - -def make_tap_sequence(tempo, onset_times, onset_frames, onset_velocities, - velocity_threshold, start_time, end_time): - note_sequence = music_pb2.NoteSequence() - note_sequence.tempos.add(qpm=tempo) - for onset_vel, onset_time in zip(onset_velocities, onset_times): - if onset_vel > velocity_threshold and onset_time >= start_time and onset_time < end_time: # filter quietest notes - note_sequence.notes.add( - instrument=9, pitch=42, is_drum=True, - velocity=onset_vel, # model will use fixed velocity here - start_time=onset_time - start_time, - end_time=onset_time - start_time + 0.01 - ) - return note_sequence - - -def audio_to_drum(f, velocity_threshold=30, temperature=1., force_sync=False, - start_windows_on_downbeat=False): - y, sr = librosa.load(f) - # pad the beginning to avoid errors with onsets right at the start - y = np.concatenate([np.zeros(1000), y]) - - clip_length = float(len(y)) / sr - - tap_sequences = [] - # Loop through the file, grabbing 2-bar sections at a time, estimating - # tempos along the way to try to handle tempo variations - - tempo, onset_times, onset_frames, onset_velocities = get_rhythm_elements(y, - sr) - - initial_start_time = onset_times[0] - - start_time = onset_times[0] - beat_length = 60 / tempo - two_bar_length = beat_length * 8 - end_time = start_time + two_bar_length - - start_times = [] - lengths = [] - tempos = [] - - start_times.append(start_time) - lengths.append(end_time - start_time) - tempos.append(tempo) - - tap_sequences.append(make_tap_sequence(tempo, onset_times, onset_frames, - onset_velocities, velocity_threshold, - start_time, end_time)) - - start_time += two_bar_length; - end_time += two_bar_length - - while start_time < clip_length: - start_sample = int(librosa.core.time_to_samples(start_time, sr=sr)) - end_sample = int( - librosa.core.time_to_samples(start_time + two_bar_length, sr=sr)) - current_section = y[start_sample:end_sample] - tempo = librosa.beat.tempo( - onset_envelope=librosa.onset.onset_strength(current_section, sr=sr), - max_tempo=180)[0] - - beat_length = 60 / tempo - two_bar_length = beat_length * 8 - - end_time = start_time + two_bar_length - - start_times.append(start_time) - lengths.append(end_time - start_time) - tempos.append(tempo) - - tap_sequences.append(make_tap_sequence(tempo, onset_times, onset_frames, - onset_velocities, velocity_threshold, - start_time, end_time)) - - start_time += two_bar_length; - end_time += two_bar_length - - # if there's a long gap before the first note, back it up close to 0 - def _shift_notes_to_beginning(s): - start_time = s.notes[0].start_time - if start_time > 0.1: - for 
n in s.notes: - n.start_time -= start_time - n.end_time -= start_time - return start_time - - def _shift_notes_later(s, start_time): - for n in s.notes: - n.start_time += start_time - n.end_time += start_time - - def _sync_notes_with_onsets(s, onset_times): - for n in s.notes: - n_length = n.end_time - n.start_time - closest_onset_index = np.argmin(np.abs(n.start_time - onset_times)) - n.start_time = onset_times[closest_onset_index] - n.end_time = n.start_time + n_length - - drum_seqs = [] - for s in tap_sequences: - try: - if sequence_is_shorter_than_full(s): - add_silent_note(s, 2) - - if start_windows_on_downbeat: - note_start_time = _shift_notes_to_beginning(s) - h = drumify(s, groovae_2bar_tap, temperature=temperature) - h = change_tempo(h, s.tempos[0].qpm) - - if start_windows_on_downbeat and note_start_time > 0.1: - _shift_notes_later(s, note_start_time) - - drum_seqs.append(h) - except: - continue - - combined_tap_sequence = start_notes_at_0( - combine_sequences_with_lengths(tap_sequences, lengths)) - combined_drum_sequence = start_notes_at_0( - combine_sequences_with_lengths(drum_seqs, lengths)) - - if force_sync: - _sync_notes_with_onsets(combined_tap_sequence, onset_times) - _sync_notes_with_onsets(combined_drum_sequence, onset_times) - - full_tap_audio = librosa.util.normalize( - midi_synth.fluidsynth(combined_tap_sequence, sample_rate=sr)) - full_drum_audio = librosa.util.normalize( - midi_synth.fluidsynth(combined_drum_sequence, sample_rate=sr)) - - tap_and_onsets = mix_tracks(full_tap_audio, - y[int(initial_start_time * sr):] / 2, stereo=True) - drums_and_original = mix_tracks(full_drum_audio, - y[int(initial_start_time * sr):] / 2, - stereo=True) - - return full_drum_audio, full_tap_audio, tap_and_onsets, drums_and_original, combined_drum_sequence - - -"""Here are a couple of examples using MIDI rhythms:""" - -# @title MIDI Taps --> Beats - -sequence_indices = [1111, 366] -for i in sequence_indices: - s = start_notes_at_0(dev_sequences[i]) - s = change_tempo(get_tapped_2bar(s, velocity=85, ride=True), - dev_sequences[i].tempos[0].qpm) - print("\nPlaying Tapped Beat: ") - play(start_notes_at_0(s)) - h = change_tempo(drumify(s, groovae_2bar_tap), s.tempos[0].qpm) - print("Playing Drummed Beat: ") - play(start_notes_at_0(h)) - -"""And a couple of examples using audio:""" - -# @title Audio Taps --> Beats - -paths = ['clap.wav', 'bbox.wav'] -temperature = 1.32 # @param {type:"slider", min:0.01, max:2.0, step:0.01} -velocity_threshold = 0.05 # @param {type:"slider", min:0, max:1, step:0.01} -stereo = True # @param {type:"boolean"} - -new_beats = [] -new_drum_audios = [] -combined_audios = [] - -for i in range(len(paths)): - f = paths[i] - print("\n\n\nPlaying %s: " % (f)) - y, sr = librosa.load(paths[i]) - IPython.display.display(IPython.display.Audio(y, rate=sr)) - - full_drum_audio, full_tap_audio, tap_and_onsets, drums_and_original, combined_drum_sequence = audio_to_drum( - f, velocity_threshold=velocity_threshold, temperature=temperature) - new_beats.append(combined_drum_sequence) - new_drum_audios.append(full_drum_audio) - combined_audios.append(drums_and_original) - print("Playing the rhythm detected in %s: " % (f)) - IPython.display.display(IPython.display.Audio(full_tap_audio, rate=sr)) - print("Playing drums generated from %s: " % (f)) - IPython.display.display(IPython.display.Audio(full_drum_audio, rate=sr)) - print("Playing %s together with drums" % (f)) - IPython.display.display(IPython.display.Audio(drums_and_original, rate=sr)) - -"""The model we are using here is 
only set up to handle 2 measure clips at a constant tempo, so this works best with exactly that - 2 measures of audio starting on a downbeat. But it can be fun to try longer clips or nonmusical audio and see what happens.""" - -# @title (Optional) Upload your own Audio files to Drumify - -temperature = 1.32 # @param {type:"slider", min:0.01, max:2.0, step:0.01} -velocity_threshold = 0.05 # @param {type:"slider", min:0, max:1, step:0.01} -stereo = True # @param {type:"boolean"} - -uploaded = files.upload() - -new_beats = [] -new_drum_audios = [] -combined_audios = [] - -for i in range(len(uploaded)): - f = uploaded.keys()[i] - print("\n\n\nPlaying %s: " % (f)) - y, sr = librosa.load(uploaded.keys()[i]) - IPython.display.display(IPython.display.Audio(y, rate=sr)) - - full_drum_audio, full_tap_audio, tap_and_onsets, drums_and_original, combined_drum_sequence = audio_to_drum( - f, velocity_threshold=velocity_threshold, temperature=temperature) - new_beats.append(combined_drum_sequence) - new_drum_audios.append(full_drum_audio) - combined_audios.append(drums_and_original) - print("Playing the rhythm detected in %s: " % (f)) - IPython.display.display(IPython.display.Audio(full_tap_audio, rate=sr)) - print("Playing drums generated from %s: " % (f)) - IPython.display.display(IPython.display.Audio(full_drum_audio, rate=sr)) - print("Playing %s together with drums" % (f)) - IPython.display.display(IPython.display.Audio(drums_and_original, rate=sr)) - -# @title (Optional) Download your MIDI and Audio files -print("Creating MIDI files for download...") -for i, beat in enumerate(new_beats): - download(beat, 'drumified_beat_%d.mid' % (i)) - -# print("Creating Audio files for download. This may take a minute...") -for i, aud in enumerate(new_drum_audios): - download_audio(aud, 'drumified_beat_%d.wav' % (i), sr) - -for i, aud in enumerate(combined_audios): - download_audio(aud, 'drumified_%d.wav' % (i), sr) - -"""# Transfer a Groove from one Beat to another - -One other fun use of the GrooVAE models is for "Groove Transfer". Lets load two random beats from our dataset and see how it feels like combine the "groove" from one beat with the drum pattern from the other. Then we'll take a look at what it would sound like to move smoothly, or interpolate, through the "space" of possible grooves. Another interesting possibility that allows for easy control is to learn to use a "tap" as a source groove. 
-""" - -# @title Load Checkpoint -config_2bar_transfer = configs.CONFIG_MAP['groovae_2bar_hits_control_tfds'] -groovae_2bar_transfer = TrainedModel(config_2bar_transfer, 1, - checkpoint_dir_or_path=GROOVAE_2BAR_HITS_CONTROL) -transfer_converter = config_2bar_transfer.data_converter - - -def transfer_groove(source_groove, target_beat_controls, model, - temperature=1.0): - groove_encoding, _, _ = model.encode([source_groove]) - decoded = model.decode(groove_encoding, length=32, temperature=temperature, - c_input=target_beat_controls) - return decoded[0] - - -def transfer_groove_encoding(groove_encoding, target_beat_controls, model, - temperature=1.0): - decoded = model.decode(groove_encoding, length=32, temperature=temperature, - c_input=target_beat_controls) - return decoded[0] - - -# @title -sequence_indices = np.random.randint(0, len(dev_sequences), 2) - -source_groove = preprocess_2bar( - change_tempo(dev_sequences[sequence_indices[0]], 120)) -target_beat = preprocess_2bar( - change_tempo(dev_sequences[sequence_indices[1]], 120)) -controls = transfer_converter.to_tensors(target_beat).controls[0] - -new_beat = transfer_groove(source_groove, controls, groovae_2bar_transfer) - -print("Source Groove: ") -play(start_notes_at_0(source_groove)) -print("Target Beat: ") -play(start_notes_at_0(target_beat)) -print("Transferred: ") -play(start_notes_at_0(new_beat)) - -print("Interpolating the Groove") - -num_steps = 5 - -_, mu, _ = groovae_2bar_transfer.encode([target_beat, source_groove]) -z = np.array( - [_slerp(mu[0], mu[1], t) for t in np.linspace(0, 1, num_steps)]).squeeze() - -individual_duration = 4.0 - -seqs = groovae_2bar_transfer.decode(z, length=32, temperature=1., - c_input=controls) - -interp_seq = mm.sequences_lib.concatenate_sequences( - seqs, [individual_duration] * len(seqs)) - -play(start_notes_at_0(interp_seq)) diff --git a/Chapter04/chapter_04_example_01.py b/Chapter04/chapter_04_example_01.py index 141ea22..8ab8dea 100644 --- a/Chapter04/chapter_04_example_01.py +++ b/Chapter04/chapter_04_example_01.py @@ -1,10 +1,10 @@ """ -TODO 01 example +This example shows how to sample, interpolate and humanize a drums sequence +using MusicVAE and various configurations. """ import os -import time -from typing import List, Union, Optional +from typing import List import magenta.music as mm import tensorflow as tf @@ -12,7 +12,8 @@ from magenta.music import DEFAULT_STEPS_PER_BAR from magenta.protobuf.music_pb2 import NoteSequence from six.moves import urllib -from visual_midi import Plotter + +from Common.utils import save_midi, save_plot def download_checkpoint(model_name: str, @@ -56,59 +57,6 @@ def get_model(name: str): checkpoint_dir_or_path=os.path.join("checkpoints", checkpoint)) -# TODO sift to chapter 03 -def save_midi(sequences: Union[NoteSequence, List[NoteSequence]], - output_dir: Optional[str] = None, - prefix: str = "sequence"): - """ - Writes the sequences as MIDI files to the "output" directory, with the - filename pattern "__" and "mid" as extension. 
- - :param sequences: a NoteSequence or list of NoteSequence to be saved - :param output_dir: an optional subdirectory in the output directory - :param prefix: an optional prefix for each file - """ - output_dir = os.path.join("output", output_dir) if output_dir else "output" - os.makedirs(output_dir, exist_ok=True) - if not isinstance(sequences, list): - sequences = [sequences] - for (index, sequence) in enumerate(sequences): - date_and_time = time.strftime("%Y-%m-%d_%H%M%S") - filename = f"{prefix}_{index:02}_{date_and_time}.mid" - path = os.path.join(output_dir, filename) - mm.midi_io.note_sequence_to_midi_file(sequence, path) - print(f"Generated midi file: {os.path.abspath(path)}") - - -# TODO sift to chapter 03 -def save_plot(sequences: Union[NoteSequence, List[NoteSequence]], - output_dir: Optional[str] = None, - prefix: str = "sequence", - plot_max_length_bar: int = 8): - """ - Writes the sequences as HTML plot files to the "output" directory, with the - filename pattern "__" and "html" as extension. - - :param sequences: a NoteSequence or list of NoteSequence to be saved - :param output_dir: an optional subdirectory in the output directory - :param prefix: an optional prefix for each file - :param plot_max_length_bar: an int for the number of bars to show in the plot - """ - output_dir = os.path.join("output", output_dir) if output_dir else "output" - os.makedirs(output_dir, exist_ok=True) - if not isinstance(sequences, list): - sequences = [sequences] - for (index, sequence) in enumerate(sequences): - date_and_time = time.strftime("%Y-%m-%d_%H%M%S") - filename = f"{prefix}_{index:02}_{date_and_time}.html" - path = os.path.join(output_dir, filename) - midi = mm.midi_io.note_sequence_to_pretty_midi(sequence) - plotter = Plotter(plot_max_length_bar=plot_max_length_bar, - show_velocity=True) - plotter.save(midi, path) - print(f"Generated plot file: {os.path.abspath(path)}") - - def sample(model_name: str, num_steps_per_sample: int) -> List[NoteSequence]: """ @@ -118,7 +66,7 @@ def sample(model_name: str, # Uses the model to sample 2 sequences, # with the number of steps and default temperature - sample_sequences = model.sample(2, num_steps_per_sample) + sample_sequences = model.sample(n=2, length=num_steps_per_sample) # Saves the midi and the plot in the sample folder save_midi(sample_sequences, "sample", model_name) @@ -154,10 +102,10 @@ def interpolate(model_name: str, # sequences are not properly formed (for example if the sequences # are not quantized, a sequence is empty or not of the proper length). 
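+  # num_steps is the number of sequences returned by the interpolation
+  # (the start and end sequences included); length is the number of
+  # steps per returned sequence.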
interpolate_sequences = model.interpolate( - sample_sequences[0], - sample_sequences[1], - num_output, - num_steps_per_sample) + start_sequence=sample_sequences[0], + end_sequence=sample_sequences[1], + num_steps=num_output, + length=num_steps_per_sample) # Saves the midi and the plot in the interpolate folder save_midi(interpolate_sequences, "interpolate", model_name) @@ -175,7 +123,8 @@ def interpolate(model_name: str, # Saves the midi and the plot in the merge folder, # with the plot having total_bars size save_midi(interpolate_sequence, "merge", model_name) - save_plot(interpolate_sequence, "merge", model_name, total_bars) + save_plot(interpolate_sequence, "merge", model_name, + plot_max_length_bar=total_bars) return interpolate_sequence @@ -207,19 +156,19 @@ def groove(model_name: str, # # The resulting array shape is (a, b), where a is the number of # split sequences (should correspond to num_output), and b is the encoding - # size (should correspond to num_steps_per_sample * model.batch_size). - # - # TODO check this + # size. # # This might throw a NoExtractedExamplesError exception if the # sequences are not properly formed (for example if the sequences # are not quantized, a sequence is empty or not of the proper length). - encoding, mu, sigma = model.encode(split_interpolate_sequences) + encoding, mu, sigma = model.encode( + note_sequences=split_interpolate_sequences) # Uses the model to decode the encoding (also called z or latent vector), # returning a list of humanized sequence with one element per encoded # sequences (each of length num_steps_per_sample). - groove_sequences = model.decode(encoding, num_steps_per_sample) + groove_sequences = model.decode( + z=encoding, length=num_steps_per_sample) # Concatenates the resulting sequences (of length num_output) into one # single sequence. @@ -229,10 +178,8 @@ def groove(model_name: str, # Saves the midi and the plot in the groove folder, # with the plot having total_bars size save_midi(groove_sequence, "groove", model_name) - save_plot(groove_sequence, "groove", model_name, total_bars) - - # TOOD compress velocities - pass + save_plot(groove_sequence, "groove", model_name, + plot_max_length_bar=total_bars, show_velocity=True) return groove_sequence diff --git a/Chapter04/chapter_04_example_02.py b/Chapter04/chapter_04_example_02.py index 8bf76cb..240e1fa 100644 --- a/Chapter04/chapter_04_example_02.py +++ b/Chapter04/chapter_04_example_02.py @@ -1,10 +1,10 @@ """ -TODO 01 example +This example shows how to sample and interpolate a melody sequence +using MusicVAE and various configurations. """ import os -import time -from typing import List, Union, Optional +from typing import List import magenta.music as mm import tensorflow as tf @@ -12,7 +12,8 @@ from magenta.music import DEFAULT_STEPS_PER_BAR from magenta.protobuf.music_pb2 import NoteSequence from six.moves import urllib -from visual_midi import Plotter + +from Common.utils import save_midi, save_plot def download_checkpoint(model_name: str, @@ -55,59 +56,6 @@ def get_model(name: str): checkpoint_dir_or_path=os.path.join("checkpoints", checkpoint)) -# TODO sift to chapter 03 -def save_midi(sequences: Union[NoteSequence, List[NoteSequence]], - output_dir: Optional[str] = None, - prefix: str = "sequence"): - """ - Writes the sequences as MIDI files to the "output" directory, with the - filename pattern "__" and "mid" as extension. 
- - :param sequences: a NoteSequence or list of NoteSequence to be saved - :param output_dir: an optional subdirectory in the output directory - :param prefix: an optional prefix for each file - """ - output_dir = os.path.join("output", output_dir) if output_dir else "output" - os.makedirs(output_dir, exist_ok=True) - if not isinstance(sequences, list): - sequences = [sequences] - for (index, sequence) in enumerate(sequences): - date_and_time = time.strftime("%Y-%m-%d_%H%M%S") - filename = f"{prefix}_{index:02}_{date_and_time}.mid" - path = os.path.join(output_dir, filename) - mm.midi_io.note_sequence_to_midi_file(sequence, path) - print(f"Generated midi file: {os.path.abspath(path)}") - - -# TODO sift to chapter 03 -def save_plot(sequences: Union[NoteSequence, List[NoteSequence]], - output_dir: Optional[str] = None, - prefix: str = "sequence", - plot_max_length_bar: int = 8): - """ - Writes the sequences as HTML plot files to the "output" directory, with the - filename pattern "__" and "html" as extension. - - :param sequences: a NoteSequence or list of NoteSequence to be saved - :param output_dir: an optional subdirectory in the output directory - :param prefix: an optional prefix for each file - :param plot_max_length_bar: an int for the number of bars to show in the plot - """ - output_dir = os.path.join("output", output_dir) if output_dir else "output" - os.makedirs(output_dir, exist_ok=True) - if not isinstance(sequences, list): - sequences = [sequences] - for (index, sequence) in enumerate(sequences): - date_and_time = time.strftime("%Y-%m-%d_%H%M%S") - filename = f"{prefix}_{index:02}_{date_and_time}.html" - path = os.path.join(output_dir, filename) - midi = mm.midi_io.note_sequence_to_pretty_midi(sequence) - plotter = Plotter(plot_max_length_bar=plot_max_length_bar, - show_velocity=True) - plotter.save(midi, path) - print(f"Generated plot file: {os.path.abspath(path)}") - - def sample(model_name: str, num_steps_per_sample: int) -> List[NoteSequence]: """ @@ -117,7 +65,7 @@ def sample(model_name: str, # Uses the model to sample 2 sequences, # with the number of steps and default temperature - sample_sequences = model.sample(2, num_steps_per_sample) + sample_sequences = model.sample(n=2, length=num_steps_per_sample) # Saves the midi and the plot in the sample folder save_midi(sample_sequences, "sample", model_name) @@ -153,10 +101,10 @@ def interpolate(model_name: str, # sequences are not properly formed (for example if the sequences # are not quantized, a sequence is empty or not of the proper length). 
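+  # num_steps is the number of sequences returned by the interpolation
+  # (the start and end sequences included); length is the number of
+  # steps per returned sequence.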
interpolate_sequences = model.interpolate( - sample_sequences[0], - sample_sequences[1], - num_output, - num_steps_per_sample) + start_sequence=sample_sequences[0], + end_sequence=sample_sequences[1], + num_steps=num_output, + length=num_steps_per_sample) # Saves the midi and the plot in the interpolate folder save_midi(interpolate_sequences, "interpolate", model_name) @@ -174,7 +122,8 @@ def interpolate(model_name: str, # Saves the midi and the plot in the merge folder, # with the plot having total_bars size save_midi(interpolate_sequence, "merge", model_name) - save_plot(interpolate_sequence, "merge", model_name, total_bars) + save_plot(interpolate_sequence, "merge", model_name, + plot_max_length_bar=total_bars) return interpolate_sequence diff --git a/Chapter04/chapter_04_example_03.py b/Chapter04/chapter_04_example_03.py index aded411..e359d2c 100644 --- a/Chapter04/chapter_04_example_03.py +++ b/Chapter04/chapter_04_example_03.py @@ -1,18 +1,19 @@ """ -TODO 01 example +This example shows how to sample a trio (drums, melody, bass) sequence +using MusicVAE and various configurations. """ import os -import time -from typing import List, Union, Optional +from typing import List -import magenta.music as mm import tensorflow as tf from magenta.models.music_vae import TrainedModel, configs from magenta.music import DEFAULT_STEPS_PER_BAR from magenta.protobuf.music_pb2 import NoteSequence from six.moves import urllib -from visual_midi import Plotter +from visual_midi import Coloring + +from Common.utils import save_midi, save_plot def download_checkpoint(model_name: str, @@ -55,59 +56,6 @@ def get_model(name: str): checkpoint_dir_or_path=os.path.join("checkpoints", checkpoint)) -# TODO sift to chapter 03 -def save_midi(sequences: Union[NoteSequence, List[NoteSequence]], - output_dir: Optional[str] = None, - prefix: str = "sequence"): - """ - Writes the sequences as MIDI files to the "output" directory, with the - filename pattern "__" and "mid" as extension. - - :param sequences: a NoteSequence or list of NoteSequence to be saved - :param output_dir: an optional subdirectory in the output directory - :param prefix: an optional prefix for each file - """ - output_dir = os.path.join("output", output_dir) if output_dir else "output" - os.makedirs(output_dir, exist_ok=True) - if not isinstance(sequences, list): - sequences = [sequences] - for (index, sequence) in enumerate(sequences): - date_and_time = time.strftime("%Y-%m-%d_%H%M%S") - filename = f"{prefix}_{index:02}_{date_and_time}.mid" - path = os.path.join(output_dir, filename) - mm.midi_io.note_sequence_to_midi_file(sequence, path) - print(f"Generated midi file: {os.path.abspath(path)}") - - -# TODO sift to chapter 03 -def save_plot(sequences: Union[NoteSequence, List[NoteSequence]], - output_dir: Optional[str] = None, - prefix: str = "sequence", - plot_max_length_bar: int = 8): - """ - Writes the sequences as HTML plot files to the "output" directory, with the - filename pattern "__" and "html" as extension. 
- - :param sequences: a NoteSequence or list of NoteSequence to be saved - :param output_dir: an optional subdirectory in the output directory - :param prefix: an optional prefix for each file - :param plot_max_length_bar: an int for the number of bars to show in the plot - """ - output_dir = os.path.join("output", output_dir) if output_dir else "output" - os.makedirs(output_dir, exist_ok=True) - if not isinstance(sequences, list): - sequences = [sequences] - for (index, sequence) in enumerate(sequences): - date_and_time = time.strftime("%Y-%m-%d_%H%M%S") - filename = f"{prefix}_{index:02}_{date_and_time}.html" - path = os.path.join(output_dir, filename) - midi = mm.midi_io.note_sequence_to_pretty_midi(sequence) - plotter = Plotter(plot_max_length_bar=plot_max_length_bar, - show_velocity=True) - plotter.save(midi, path) - print(f"Generated plot file: {os.path.abspath(path)}") - - def sample(model_name: str, num_steps_per_sample: int) -> List[NoteSequence]: """ @@ -117,11 +65,12 @@ def sample(model_name: str, # Uses the model to sample 2 sequences, # with the number of steps and default temperature - sample_sequences = model.sample(2, num_steps_per_sample) + sample_sequences = model.sample(n=2, length=num_steps_per_sample) # Saves the midi and the plot in the sample folder save_midi(sample_sequences, "sample", model_name) - save_plot(sample_sequences, "sample", model_name, 16) + save_plot(sample_sequences, "sample", model_name, + plot_max_length_bar=16, coloring=Coloring.INSTRUMENT) return sample_sequences diff --git a/Chapter04/drums.py b/Chapter04/drums.py deleted file mode 100644 index 30795ae..0000000 --- a/Chapter04/drums.py +++ /dev/null @@ -1,266 +0,0 @@ -"""TODO all examples - -- TODO talk about the size in the output -- TODO talk about the sample and interpolate code -""" - -import os -import time -from typing import List, Union, Optional - -import magenta.music as mm -import pretty_midi -import tensorflow as tf -from magenta.models.music_vae import TrainedModel, configs -from magenta.music import DEFAULT_STEPS_PER_BAR -from magenta.protobuf.music_pb2 import NoteSequence -from six.moves import urllib -from visual_midi import Plotter - - -def download_checkpoint(model_name: str, - checkpoint_name: str, - target_dir: str): - """Downloads a Magenta checkpoint to target directory. - - Target directory target_dir will be created if it does not already exist. - - TODO extract? - - Args: - model_name: magenta model name to download - checkpoint_name: magenta checkpoint name to download. - target_dir: local directory in which to write the checkpoint. - """ - tf.gfile.MakeDirs(target_dir) - checkpoint_target = os.path.join(target_dir, checkpoint_name) - if not os.path.exists(checkpoint_target): - response = urllib.request.urlopen( - "https://storage.googleapis.com/magentadata/models/" - "%s/checkpoints/%s" % (model_name, checkpoint_name)) - data = response.read() - local_file = open(checkpoint_target, 'wb') - local_file.write(data) - local_file.close() - - -def get_model(name): - """TODO""" - checkpoint = name + ".tar" - download_checkpoint("music_vae", checkpoint, "bundles") - return TrainedModel( - # Removes the .lohl in some training checkpoint which shares the same config - configs.CONFIG_MAP[name.split(".")[0] if "." 
in name else name], - batch_size=8, - checkpoint_dir_or_path=os.path.join("bundles", checkpoint)) - - -def merge(sequences: List[NoteSequence]) -> NoteSequence: - merged = pretty_midi.PrettyMIDI() - for sequence in sequences: - sequence_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence) - for instrument in sequence_midi.instruments: - if instrument.notes: - merged.instruments.append(instrument) - return mm.midi_io.midi_to_note_sequence(merged) - - -# TODO sift to chapter 03 -def save_midi(sequences: Union[NoteSequence, List[NoteSequence]], - output_dir: Optional[str] = None, - prefix: str = "sequence"): - """Writes the sequences as MIDI files to the "output" directory, with the - filename pattern "__" and "mid" as extension. - - :param sequences: a NoteSequence or list of NoteSequence to be saved - :param output_dir: an optional subdirectory in the output directory - :param prefix: an optional prefix for each file - """ - output_dir = os.path.join("output", output_dir) if output_dir else "output" - if not os.path.exists(output_dir): - os.mkdir(output_dir) - if not isinstance(sequences, list): - sequences = [sequences] - for (index, sequence) in enumerate(sequences): - date_and_time = time.strftime('%Y-%m-%d_%H%M%S') - filename = "%s_%02d_%s.mid" % (prefix, index, date_and_time) - path = os.path.join(output_dir, filename) - mm.midi_io.note_sequence_to_midi_file(sequence, path) - print("Generated midi file: " + str(os.path.abspath(path))) - - -# TODO sift to chapter 03 -def save_plot(sequences: Union[NoteSequence, List[NoteSequence]], - output_dir: Optional[str] = None, - prefix: str = "sequence", - total_bar: int = 2): - """Writes the sequences as HTML plot files to the "output" directory, with the - filename pattern "__" and "html" as extension. 
- - :param sequences: a NoteSequence or list of NoteSequence to be saved - :param output_dir: an optional subdirectory in the output directory - :param prefix: an optional prefix for each file - :param total_bar: an int for the number of bars to show in the plot - """ - output_dir = os.path.join("output", output_dir) if output_dir else "output" - if not os.path.exists(output_dir): - os.mkdir(output_dir) - if not isinstance(sequences, list): - sequences = [sequences] - for (index, sequence) in enumerate(sequences): - date_and_time = time.strftime('%Y-%m-%d_%H%M%S') - filename = "%s_%02d_%s.html" % (prefix, index, date_and_time) - path = os.path.join(output_dir, filename) - midi = mm.midi_io.note_sequence_to_pretty_midi(sequence) - if total_bar > 2: - # TODO - bar_fill_alphas = [0.5, 0.5] \ - + [0.20, 0.20, 0.00, 0.00] * int((total_bar - 4) / 4) \ - + [0.5, 0.5] - else: - # TODO - bar_fill_alphas = [0.25, 0.25, 0.05, 0.05] - # TODO arg - plotter = Plotter(plot_max_length_bar=total_bar, - bar_fill_alphas=bar_fill_alphas, - show_velocity=True) - plotter.save(midi, path) - print("Generated plot file: " + str(os.path.abspath(path))) - - -def app(unused_argv): - # TODO doesn't work - tf.logging.set_verbosity("INFO") - - num_output = 6 - num_bar_per_sample = 2 - num_steps_per_sample = num_bar_per_sample * DEFAULT_STEPS_PER_BAR - total_bars = num_output * num_bar_per_sample - - def sample() -> List[NoteSequence]: - """ - TODO maybe use a primer to set the ending sample - - use cat-drums_2bar_small.lokl to sample 2 drums sequences - """ - model = get_model("cat-drums_2bar_small.lokl") - - # TODO explain steps and temperature, 32 is 2 bars - sample_sequences = model.sample(2, num_steps_per_sample) - - # TODO output in folder - save_midi(sample_sequences, "sample", "music_vae") - save_plot(sample_sequences, "sample", "music_vae") - - return sample_sequences - - def interpolate(sample_sequences: List[NoteSequence]) -> NoteSequence: - """ - - use cat-drums_2bar_small.hikl to interpolate between the 2 drum seq - """ - if len(sample_sequences) != 2: - raise Exception("Wrong number of sequences, expected: 2, actual: " - + str(len(sample_sequences))) - if not sample_sequences[0].notes or not sample_sequences[1].notes: - raise Exception("Empty note sequences, sequence 1: " - + str(len(sample_sequences[0].notes)) - + ", sequence 2: " - + str(len(sample_sequences[1].notes))) - - model = get_model("cat-drums_2bar_small.hikl") - - # TODO magenta.models.music_vae.trained_model.NoExtractedExamplesError: - # No examples extracted from NoteSequence - # /home/alex/miniconda3/envs/magenta/lib/python3.5/site-packages/magenta/models/music_vae/trained_model.py:220 - # !!! 
Needs to be quant - # TODO explain num_outputs, length, temperature, num_steps is 2 bars - # TODO use empty sequence : check error and add to book - interpolate_sequences = model.interpolate( - sample_sequences[0], - sample_sequences[1], - num_output, - num_steps_per_sample) - - # TODO output in folder - save_midi(interpolate_sequences, "interpolate", "music_vae") - save_plot(interpolate_sequences, "interpolate", "music_vae") - - # TODO merge with mm libs - interpolate_sequence = mm.sequences_lib.concatenate_sequences( - interpolate_sequences, [4] * num_output) - - save_midi(interpolate_sequence, "merge", "music_vae") - save_plot(interpolate_sequence, "merge", "music_vae", total_bars) - - # TODO merge in two instruments - # groove_sequence = merge([sample_melody_sequence, groove_sequence]) - # - # write_midis("merge", "music_vae-" + "cat-mel_2bar_big" + "-merged", - # [groove_sequence]) - # write_plots("merge", "music_vae-" + "cat-mel_2bar_big" + "-merged", - # [groove_sequence]) - - return interpolate_sequence - - def groove(interpolate_sequence: NoteSequence) -> NoteSequence: - """ - - use groovae_2bar_humanize to add groove to the drums seq - - use groovae_2bar_add_closed_hh to add some high hats to the last seq - """ - # if interpolate_sequence.total_time != 40: - # raise Exception("Wrong sequence size, expected: 40, actual: " - # + str(interpolate_sequence.total_time)) - model = get_model("groovae_2bar_humanize") - - # TODO 4 = 120 bpm 2 seconds is 1 bar we need 2 bars - split_interpolate_sequences = mm.sequences_lib.split_note_sequence( - interpolate_sequence, 4) - - # TODO why ? - if len(split_interpolate_sequences) != num_output: - raise Exception("Wrong number of interpolate size, expected: 10, actual: " - + str(split_interpolate_sequences)) - - # groove_sequences = [] - # for split_interpolate_sequence in split_interpolate_sequences: - # # TODO magenta.models.music_vae.trained_model.NoExtractedExamplesError: - # # No examples extracted from NoteSequence - # # /home/alex/miniconda3/envs/magenta/lib/python3.5/site-packages/magenta/models/music_vae/trained_model.py:220 - # # !!! 
Needs to be 2 bars
-    #   # TODO encode decode, mu, sigma not necessary but clearer
-    #   encoding, mu, sigma = model.encode([split_interpolate_sequence])
-    #   # TODO num_steps is 2 bars
-    #   groove_sequence = model.decode(encoding, num_steps)[0]
-    #   groove_sequences.append(groove_sequence)
-
-    # TODO encode decode, mu, sigma not necessary but clearer
-    # : (<class 'magenta.models.music_vae.trained_model.NoExtractedExamplesError'>, NoExtractedExamplesError('No examples extracted from NoteSequence: ticks_per_quarter: 220\ntempos
-    encoding, mu, sigma = model.encode(split_interpolate_sequences)
-    # TODO num_steps is 2 bars
-    groove_sequences = model.decode(encoding, num_steps_per_sample)
-
-    # TODO merge with mm libs
-    groove_sequence = mm.sequences_lib.concatenate_sequences(
-        groove_sequences, [4] * num_output)
-
-    save_midi(groove_sequence, "groove", "music_vae")
-    save_plot(groove_sequence, "groove", "music_vae", total_bars)
-
-    # TODO add hi hats
-    pass
-
-    return groove_sequence
-
-  # TODO add to book : interpolate works on quantized stuff
-  # TODO add to book : groove works 2 bars at a time
-  generated_sample_sequences = sample()
-  generated_interpolate_sequence = interpolate(generated_sample_sequences)
-  generated_groove_sequence = groove(generated_interpolate_sequence)
-
-  print("Generated groove sequence total time: ",
-        str(generated_groove_sequence.total_time))
-
-  return 0
-
-
-if __name__ == "__main__":
-  tf.app.run(app)
diff --git a/Chapter04/primers/52_jazz_125_beat_4-4.mid b/Chapter04/primers/52_jazz_125_beat_4-4.mid
deleted file mode 100644
index dc924f43be7c8a30edc249e6ac6daa95bbc71ea6..0000000000000000000000000000000000000000
Binary files a/Chapter04/primers/52_jazz_125_beat_4-4.mid and /dev/null differ
diff --git a/Chapter04/primers/classic_2_bars_piano_01.mid b/Chapter04/primers/classic_2_bars_piano_01.mid
deleted file mode 100644
Binary files a/Chapter04/primers/classic_2_bars_piano_01.mid and /dev/null differ
diff --git a/Chapter04/primers/jazz_2_bars_clavichord_01.mid b/Chapter04/primers/jazz_2_bars_clavichord_01.mid
deleted file mode 100644
Binary files a/Chapter04/primers/jazz_2_bars_clavichord_01.mid and /dev/null differ
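The deleted drums.py above splits and re-concatenates sequences in 4-second chunks (`[4] * num_output`). The TODO comments hint at why: at the default 120 QPM, a quarter note lasts 0.5 seconds, so a 4/4 bar lasts 2 seconds and each 2-bar sample lasts 4 seconds. A minimal sketch of that arithmetic, with values taken from the deleted code (DEFAULT_STEPS_PER_BAR is 16 sixteenth-note steps in magenta.music):

```python
from magenta.music import DEFAULT_STEPS_PER_BAR

qpm = 120  # default tempo of the sampled sequences
seconds_per_bar = 4 * 60 / qpm  # 4 quarter notes per 4/4 bar -> 2.0 seconds
num_bar_per_sample = 2
num_steps_per_sample = num_bar_per_sample * DEFAULT_STEPS_PER_BAR  # -> 32
seconds_per_sample = num_bar_per_sample * seconds_per_bar  # -> 4.0

# The 4-second value passed to split_note_sequence and
# concatenate_sequences comes from this duration.
assert seconds_per_sample == 4.0
assert num_steps_per_sample == 32
```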
diff --git a/Chapter04/references/__init__.py b/Chapter04/references/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/Chapter04/append.py b/Chapter04/references/append.py
similarity index 100%
rename from Chapter04/append.py
rename to Chapter04/references/append.py
diff --git a/Chapter04/chapter_04_example_04.py b/Chapter04/references/drumify.py
similarity index 52%
rename from Chapter04/chapter_04_example_04.py
rename to Chapter04/references/drumify.py
index 593f461..ec0ecbb 100644
--- a/Chapter04/chapter_04_example_04.py
+++ b/Chapter04/references/drumify.py
@@ -3,16 +3,12 @@
"""

import os
-import time
-from typing import List, Union, Optional

-import magenta.music as mm
import tensorflow as tf
from magenta.models.music_vae import TrainedModel, configs, Config
from magenta.music import midi_file_to_note_sequence
from magenta.protobuf.music_pb2 import NoteSequence
from six.moves import urllib
-from visual_midi import Plotter


def download_checkpoint(model_name: str,
@@ -59,59 +55,6 @@ def get_model(name: str) -> TrainedModel:
      checkpoint_dir_or_path=os.path.join("checkpoints", checkpoint))


-# TODO sift to chapter 03
-def save_midi(sequences: Union[NoteSequence, List[NoteSequence]],
-              output_dir: Optional[str] = None,
-              prefix: str = "sequence"):
-  """
-  Writes the sequences as MIDI files to the "output" directory, with the
-  filename pattern "<prefix>_<index>_<date_time>" and "mid" as extension.
-
-  :param sequences: a NoteSequence or list of NoteSequence to be saved
-  :param output_dir: an optional subdirectory in the output directory
-  :param prefix: an optional prefix for each file
-  """
-  output_dir = os.path.join("output", output_dir) if output_dir else "output"
-  os.makedirs(output_dir, exist_ok=True)
-  if not isinstance(sequences, list):
-    sequences = [sequences]
-  for (index, sequence) in enumerate(sequences):
-    date_and_time = time.strftime("%Y-%m-%d_%H%M%S")
-    filename = f"{prefix}_{index:02}_{date_and_time}.mid"
-    path = os.path.join(output_dir, filename)
-    mm.midi_io.note_sequence_to_midi_file(sequence, path)
-    print(f"Generated midi file: {os.path.abspath(path)}")
-
-
-# TODO sift to chapter 03
-def save_plot(sequences: Union[NoteSequence, List[NoteSequence]],
-              output_dir: Optional[str] = None,
-              prefix: str = "sequence",
-              plot_max_length_bar: int = 8):
-  """
-  Writes the sequences as HTML plot files to the "output" directory, with the
-  filename pattern "<prefix>_<index>_<date_time>" and "html" as extension.
-
-  :param sequences: a NoteSequence or list of NoteSequence to be saved
-  :param output_dir: an optional subdirectory in the output directory
-  :param prefix: an optional prefix for each file
-  :param plot_max_length_bar: an int for the number of bars to show in the plot
-  """
-  output_dir = os.path.join("output", output_dir) if output_dir else "output"
-  os.makedirs(output_dir, exist_ok=True)
-  if not isinstance(sequences, list):
-    sequences = [sequences]
-  for (index, sequence) in enumerate(sequences):
-    date_and_time = time.strftime("%Y-%m-%d_%H%M%S")
-    filename = f"{prefix}_{index:02}_{date_and_time}.html"
-    path = os.path.join(output_dir, filename)
-    midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)
-    plotter = Plotter(plot_max_length_bar=plot_max_length_bar,
-                      show_velocity=True)
-    plotter.save(midi, path)
-    print(f"Generated plot file: {os.path.abspath(path)}")
-
-
# TODO quick method for turning a drumbeat into a tapped rhythm
def get_tapped_2bar(sequence):
  model_tap = get_config("groovae_2bar_tap_fixed_velocity")
diff --git a/Chapter04/groovae.py b/Chapter04/references/groovae.py
similarity index 100%
rename from Chapter04/groovae.py
rename to Chapter04/references/groovae.py
diff --git a/Chapter04/musicvae.py b/Chapter04/references/musicvae.py
similarity index 100%
rename from Chapter04/musicvae.py
rename to Chapter04/references/musicvae.py
diff --git a/Chapter04/tensorboard_example.py b/Chapter04/references/tensorboard_example.py
similarity index 100%
rename from Chapter04/tensorboard_example.py
rename to Chapter04/references/tensorboard_example.py
diff --git a/Common/README.md b/Common/README.md
new file mode 100644
index 0000000..97485bd
--- /dev/null
+++ b/Common/README.md
@@ -0,0 +1,4 @@
+# Common - Common code for all the chapters
+
+Shared utilities (see [utils.py](utils.py)) for saving generated NoteSequence objects as MIDI files and HTML plots, used by the examples in each chapter.
+
diff --git a/Common/utils.py b/Common/utils.py
new file mode 100644
index 0000000..e8a999c
--- /dev/null
+++ b/Common/utils.py
@@ -0,0 +1,68 @@
+"""
+Common utilities for the book's code.
+"""
+
+import os
+import time
+from typing import Union, List, Optional
+
+import magenta.music as mm
+from magenta.protobuf.music_pb2 import NoteSequence
+from visual_midi import Plotter, Coloring
+
+
+def save_midi(sequences: Union[NoteSequence, List[NoteSequence]],
+              output_dir: Optional[str] = None,
+              prefix: str = "sequence"):
+  """
+  Writes the sequences as MIDI files to the "output" directory, with the
+  filename pattern "<prefix>_<index>_<date_time>" and "mid" as extension.
+
+  :param sequences: a NoteSequence or list of NoteSequence to be saved
+  :param output_dir: an optional subdirectory in the output directory
+  :param prefix: an optional prefix for each file
+  """
+  output_dir = os.path.join("output", output_dir) if output_dir else "output"
+  os.makedirs(output_dir, exist_ok=True)
+  if not isinstance(sequences, list):
+    sequences = [sequences]
+  for (index, sequence) in enumerate(sequences):
+    date_and_time = time.strftime("%Y-%m-%d_%H%M%S")
+    filename = f"{prefix}_{index:02}_{date_and_time}.mid"
+    path = os.path.join(output_dir, filename)
+    mm.midi_io.note_sequence_to_midi_file(sequence, path)
+    print(f"Generated midi file: {os.path.abspath(path)}")
+
+
+def save_plot(sequences: Union[NoteSequence, List[NoteSequence]],
+              output_dir: Optional[str] = None,
+              prefix: str = "sequence",
+              plot_max_length_bar: int = 8,
+              coloring: Coloring = Coloring.PITCH,
+              show_velocity: bool = False):
+  """
+  Writes the sequences as HTML plot files to the "output" directory, with the
+  filename pattern "<prefix>_<index>_<date_time>" and "html" as extension.
+
+  :param sequences: a NoteSequence or list of NoteSequence to be saved
+  :param output_dir: an optional subdirectory in the output directory
+  :param prefix: an optional prefix for each file
+  :param plot_max_length_bar: the maximum number of bars
+      to show in the plot
+  :param coloring: the Coloring strategy used for the note colors
+  :param show_velocity: whether to show the velocity of each note
+  """
+  output_dir = os.path.join("output", output_dir) if output_dir else "output"
+  os.makedirs(output_dir, exist_ok=True)
+  if not isinstance(sequences, list):
+    sequences = [sequences]
+  for (index, sequence) in enumerate(sequences):
+    date_and_time = time.strftime("%Y-%m-%d_%H%M%S")
+    filename = f"{prefix}_{index:02}_{date_and_time}.html"
+    path = os.path.join(output_dir, filename)
+    midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)
+    plotter = Plotter(plot_max_length_bar=plot_max_length_bar,
+                      coloring=coloring,
+                      show_velocity=show_velocity)
+    plotter.save(midi, path)
+    print(f"Generated plot file: {os.path.abspath(path)}")
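
With these helpers extracted to Common, a chapter example boils down to a few lines. The following is a minimal usage sketch (not part of the patch), assuming the "magenta" conda environment, the repository root on the import path, and a "cat-drums_2bar_small.lokl.tar" checkpoint already downloaded to a local "bundles" folder:

```python
import tensorflow as tf
from magenta.models.music_vae import TrainedModel, configs
from visual_midi import Coloring

from Common.utils import save_midi, save_plot


def main(unused_argv):
  # Load the 2-bar drums model; the checkpoint path is an assumption,
  # the chapter examples download the .tar file to a local folder first.
  model = TrainedModel(
      configs.CONFIG_MAP["cat-drums_2bar_small"],
      batch_size=8,
      checkpoint_dir_or_path="bundles/cat-drums_2bar_small.lokl.tar")

  # Sample 2 drum sequences of 2 bars (32 steps) each.
  sequences = model.sample(n=2, length=32)

  # Write the MIDI files and HTML plots under "output/sample".
  save_midi(sequences, "sample", "cat-drums_2bar_small")
  save_plot(sequences, "sample", "cat-drums_2bar_small",
            plot_max_length_bar=2, coloring=Coloring.INSTRUMENT)
  return 0


if __name__ == "__main__":
  tf.app.run(main)
```

Running it writes timestamped .mid and .html files under "output/sample", following the filename pattern documented in the docstrings above.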