-
Notifications
You must be signed in to change notification settings - Fork 7
/
generate_music.py
71 lines (57 loc) · 2.58 KB
/
generate_music.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
from music21 import stream, note, chord, instrument
from create_generator_model import get_notes, LATENT_DIMENSION
# Instrument stamped onto every generated note/chord so the MIDI plays as violin.
instr = instrument.Violin()
def create_midi(prediction_output, filename):
    """Convert generated note/chord names to music21 objects and write a MIDI file.

    Args:
        prediction_output: sequence of note/chord name strings — a single note
            like 'C4', or a chord as dot-separated pitch integers like '0.4.7'.
            For backward compatibility, items may also be containers whose first
            element is such a string.
        filename: output path without the '.mid' extension.
    """
    offset = 0
    output_notes = []
    # create note and chord objects based on the values generated by the model
    for item in prediction_output:
        # Bug fix: generate_music() yields plain strings, and indexing a string
        # with [0] keeps only its first character ('C4' -> 'C', '0.4.7' -> '0'),
        # silently dropping octaves and collapsing chords. Only unwrap
        # non-string containers.
        pattern = item if isinstance(item, str) else item[0]
        # pattern is a chord (dot-separated integer pitch values, or one digit)
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            chord_notes = []  # renamed from `notes` to avoid shadowing the module-level corpus
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instr
                chord_notes.append(new_note)
            new_chord = chord.Chord(chord_notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a single note name
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instr
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        offset += 0.5
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='{}.mid'.format(filename))
def generate_music(generator_model, latent_dim, n_vocab, length=500, notes=None):
    """Generate a new sequence of note/chord name strings with the trained generator.

    Args:
        generator_model: trained Keras generator; its ``predict`` maps latent
            noise of shape (1, latent_dim) to values in roughly [-1, 1].
        latent_dim: size of the latent noise vector fed to the generator.
        n_vocab: number of unique pitches in the training corpus; used to
            rescale generator output back to integer note indices.
        length: maximum number of notes to return.
        notes: training-note corpus used to build the index -> name mapping.
            Defaults to the module-level ``notes`` (set in the ``__main__``
            block) for backward compatibility with existing callers.

    Returns:
        list[str]: up to ``length`` note/chord name strings.
    """
    if notes is None:
        # Backward-compatible fallback: the original code read a global that is
        # only defined inside the __main__ block, which made this function
        # unusable (NameError) from any other entry point.
        notes = globals()['notes']
    # Create random noise as input to the generator
    noise = np.random.normal(0, 1, (1, latent_dim))
    predictions = generator_model.predict(noise)
    # Scale back the predictions (~[-1, 1]) to the index range [0, n_vocab]
    pred_notes = [x * (n_vocab / 2) + (n_vocab / 2) for x in predictions[0]]
    # Map generated integer indices to note names
    pitchnames = sorted(set(notes))
    int_to_note = dict(enumerate(pitchnames))
    # Bug fix: clamp indices into [0, len-1]. A generator output at exactly +1
    # (or beyond) scales to n_vocab, which raised KeyError in the original.
    last = len(pitchnames) - 1
    pred_notes_mapped = [int_to_note[min(max(int(x), 0), last)] for x in pred_notes]
    return pred_notes_mapped[:length]
if __name__ == '__main__':
    # Load the trained generator model from disk
    # (presumably saved by create_generator_model.py — confirm filename there).
    generator_model = load_model("generator_model.h5")
    # Load the processed notes and get the number of unique pitches
    notes = get_notes()
    n_vocab = len(set(notes))
    # Generate new music sequence (500 notes by default)
    generated_music = generate_music(generator_model, LATENT_DIMENSION, n_vocab)
    # Create a MIDI file from the generated music
    create_midi(generated_music, 'generated_music')