Best Python code snippet using hypothesis
data_utils.py
Source:data_utils.py
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 28 17:32:32 2021

@author: user
"""
import guitarpro as gp
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import librosa
import copy


# binary tab -----------------------------------------------------------------
def tabFrame2binary(t):
    """Encode one 6-string tab frame as 30 bits (5 bits per string, MSB first).

    An inactive string (-1) wraps to the 5-bit pattern 11111 (31), which
    binary2tablature maps back to -1. Assumes fret values fit in 5 bits.
    """
    bits = np.zeros(5 * 6)
    for string in range(6):
        word = np.binary_repr(t[string].astype(int), width=5)
        bits[string * 5:string * 5 + 5] = np.array(list(word), dtype=np.float32)
    return bits


def tablature2binary(tablature):
    """Column-wise binary encoding of a 6xN tablature matrix -> 30xN."""
    encoded = np.zeros((5 * 6, tablature.shape[1]))
    for col in range(tablature.shape[1]):
        encoded[:, col] = tabFrame2binary(tablature[:, col])
    return encoded


def bool2int(x):
    """Interpret a bit array (most significant bit first) as an integer."""
    value = 0
    for shift, bit in enumerate(np.flip(x)):
        value += bit << shift
    return value


def binary2tablature(b):
    """Invert tablature2binary; the 5-bit value 31 marks an inactive string (-1)."""
    tab = np.zeros((6, b.shape[1]))
    for col in range(b.shape[1]):
        for string in range(6):
            tab[string, col] = bool2int(b[string * 5:string * 5 + 5, col].astype(int))
    tab[tab == 31] = -1
    return tab


# flat tab -------------------------------------------------------------------
def tabFrame2flatFretboard(t, frets_num=24):
    """One-hot fretboard (6 x frets_num+1, open string = fret 0) flattened to 1-D."""
    board = np.zeros((6, frets_num + 1))
    for string in range(len(t)):
        fret = t[string]
        # negative (inactive) or out-of-range frets are simply not marked
        if 0 <= fret <= frets_num:
            board[string, fret.astype(int)] = 1
    return board.flatten()


def tablature2flatFretboard(tablature, frets_num=24):
    """Stack flattened one-hot fretboards column-wise: (6*(frets_num+1)) x N."""
    flat = np.zeros((6 * (frets_num + 1), tablature.shape[1]))
    for col in range(tablature.shape[1]):
        flat[:, col] = tabFrame2flatFretboard(tablature[:, col], frets_num=frets_num)
    return flat


# full tab 3D ----------------------------------------------------------------
def tabFrame2Fretboard(t, frets_num=24):
    """One-hot fretboard (6 x frets_num+1, open string = fret 0), kept 2-D."""
    board = np.zeros((6, frets_num + 1))
    for string in range(len(t)):
        fret = t[string]
        if 0 <= fret <= frets_num:
            board[string, fret.astype(int)] = 1
    return board


# pianoroll column 2 tokens --------------------------------------------------
def pianorollFrame2tokens(c, compount=True):
    """Tokenize the nonzero pitches of one pianoroll column.

    compount=True emits one 'note_<pitch>' token per pitch; otherwise a
    'note' token followed by the pitch as a separate token.
    """
    tokens = []
    for pitch in np.where(c != 0)[0]:
        if compount:
            tokens.append('note_' + str(pitch))
        else:
            tokens.append('note')
            tokens.append(str(pitch))
    return tokens


# pianoroll 2 tokens ---------------------------------------------------------
def pianoroll2tokens(p, compount=True):
    """Token sequence for a pianoroll: <SOS>, 'new_frame' + notes per
    non-empty column, <EOS>."""
    tokens = ['<SOS>']
    for col in range(p.shape[1]):
        frame = pianorollFrame2tokens(p[:, col], compount)
        if frame:
            tokens.append('new_frame')
            tokens.extend(frame)
    tokens.append('<EOS>')
    return tokens


# tablature string 2 tokens --------------------------------------------------
string_names = ['E', 'A', 'D', 'G', 'B', 'E']


def tablatureFrame2tokens(c, compount=True):
    """Tokenize the active strings (fret != -1) of one tablature column."""
    tokens = []
    for string in np.where(c != -1)[0]:
        fret_str = str(c[string].astype(int))
        if compount:
            tokens.append('string_' + string_names[string])
            tokens.append('fret_' + fret_str)
        else:
            tokens.append('string')
            tokens.append(string_names[string])
            tokens.append('fret')
            tokens.append(fret_str)
    return tokens


# tablature 2 tokens ---------------------------------------------------------
def tablature2tokens(p, compount=True):
    """Token sequence for a tablature: <SOS>, 'new_frame' + string/fret tokens
    per non-empty column, <EOS>."""
    tokens = ['<SOS>']
    for col in range(p.shape[1]):
        frame = tablatureFrame2tokens(p[:, col], compount)
        if frame:
            tokens.append('new_frame')
            tokens.extend(frame)
    tokens.append('<EOS>')
    return tokens


# event 2 full tab -----------------------------------------------------------
def event2fulltab(e):
    """6x25 one-hot fretboard for an event dict with a 'pitches' list of
    {'string', 'fret'} entries; frets >= 25 are ignored."""
    tab = np.zeros((6, 25))
    for p in e['pitches']:
        if p['fret'] < 25:
            tab[p['string'] - 1, p['fret']] = 1
    return tab
# end event2fulltab


def plotEvent(e):
    """Render an event's fretboard with imshow and return the matrix."""
    tab = event2fulltab(e)
    plt.imshow(tab, cmap='gray_r')
    return tab
# end plotEvent


def patternOf2DTab(t):
    """Left-align a 2-D tab: rotate columns until the first column is non-empty."""
    pattern = copy.deepcopy(t)
    if np.sum(pattern) != 0:
        while np.sum(pattern[:, 0]) == 0:
            pattern = np.roll(pattern, [0, -1], axis=1)
    return pattern
# end patternOf2DTab


def plotTab(t):
    """Grayscale imshow of a tab matrix."""
    plt.imshow(t, cmap='gray_r')
# end plotTab


# %% plottings
def plot_full_tabs(t, titles=None):
    """Plot each (6 x frets) slice of a (N x 6 x frets) stack in a subplot grid.

    titles: a list with one entry per slice titles every subplot; any other
    non-None value is used as the title of the last subplot only; None
    titles each subplot with its index.
    """
    for idx in range(t.shape[0]):
        plt.subplot(4, (t.shape[0] - 1) // 4 + 1, idx + 1)
        plt.imshow(t[idx, :, :], cmap='gray_r')
        if titles is None:
            plt.title(str(idx))
        elif len(titles) == t.shape[0]:
            plt.title(titles[idx])
        elif idx == t.shape[0] - 1:
            plt.title(titles)
def plot_flat_tabs(t, titles=None):
    """Plot each column of a flat (150 x N) tablature matrix as a 6x25 fretboard.

    titles: a list with one entry per column titles every subplot; any other
    non-None value titles the last subplot only; None titles by index.
    """
    for i in range(t.shape[1]):
        plt.subplot(4, (t.shape[1] - 1) // 4 + 1, i + 1)
        # NOTE: the reshape assumes frets_num=24 (6*25 == 150 rows), matching
        # tablature2flatFretboard's default.
        plt.imshow(np.reshape(t[:, i], [6, 25]), cmap='gray_r')
        if titles is not None:
            if len(titles) == t.shape[1]:
                plt.title(titles[i])
            else:
                if i == t.shape[1] - 1:
                    plt.title(titles)
        else:
            plt.title(str(i))


def tablature2Fretboard(tablature, frets_num=24):
    """Stack per-frame one-hot fretboards into an (N, 6, frets_num+1) array."""
    f = np.zeros((tablature.shape[1], 6, frets_num + 1))
    for i in range(tablature.shape[1]):
        f[i, :, :] = tabFrame2Fretboard(tablature[:, i], frets_num=frets_num)
    return f


class Constants:
    """Shared audio / symbolic-timing constants."""

    def __init__(self, sample_rate=16000, analysis_samples=8000, ticks_per_quarter=960):
        self.sample_rate = sample_rate  # audio sample rate in Hz
        self.analysis_samples = analysis_samples  # analysis window length in samples
        self.ticks_per_quarter = ticks_per_quarter  # GP/MIDI ticks per quarter note
    # end init

    def secs2samples(self, secs):
        """Convert seconds to a floored integer sample count."""
        return np.floor(secs * self.sample_rate).astype(int)
    # end secs2samples
# end Constants


class GuitarSamples:
    """Pools of recorded guitar notes indexed as samples[str(string)][str(fret)]."""

    def __init__(self, name, constants=None):
        self.name = name
        self.samples = {}  # samples[str(string)][str(fret)] -> list of 1-D arrays
        if constants is None:
            self.constants = Constants()
        else:
            self.constants = constants
    # end init

    def append_sample(self, string, fret, audio_path, onset_path):
        """Load audio, trim everything before the annotated onset, store it.

        The onset file holds a single float: the onset time in seconds.
        """
        s, _ = librosa.load(audio_path, sr=self.constants.sample_rate)
        with open(onset_path, 'r') as f:
            onsetsec = float(f.read())
        onsetidx = np.floor(onsetsec * self.constants.sample_rate).astype(int)
        if str(string) in self.samples.keys():
            if str(fret) in self.samples[str(string)].keys():
                self.samples[str(string)][str(fret)].append(s[onsetidx:])
            else:
                self.samples[str(string)][str(fret)] = [s[onsetidx:]]
        else:
            self.samples[str(string)] = {}
            self.samples[str(string)][str(fret)] = [s[onsetidx:]]
    # end append_sample

    def augment_octaves(self):
        """Synthesize frets 13-24 by shifting frets 1-12 up one octave."""
        for sidx in range(1, 7, 1):
            for fidx in range(1, 13, 1):
                self.samples[str(sidx)][str(fidx + 12)] = []
        for sidx in range(1, 7, 1):
            for fidx in range(1, 13, 1):
                for s in self.samples[str(sidx)][str(fidx)]:
                    # BUG FIX: fret +12 on the same string is one octave, i.e.
                    # 12 semitones; n_steps was 4 (a major third), so the
                    # augmented samples were badly out of tune.
                    self.samples[str(sidx)][str(fidx + 12)].append(
                        librosa.effects.pitch_shift(
                            s, sr=self.constants.sample_rate, n_steps=12))
    # end augment_octaves

    def get_random_sample(self, string, fret, duration_samples=None, duration_secs=None):
        """Return a random recording of (string, fret) trimmed or zero-padded
        to the requested duration (samples take precedence over seconds).

        Trimmed samples get a linear fade-out over (at most) the last 100
        samples.
        """
        pool = self.samples[str(string)][str(fret)]
        idx = np.random.randint(len(pool))
        # BUG FIX: work on a copy -- the fade-out below previously wrote
        # through a slice *view* and permanently corrupted the cached sample.
        s = pool[idx].copy()
        # fix duration
        if duration_samples is None:
            d = self.constants.secs2samples(duration_secs)
        else:
            d = duration_samples
        pad_length = d - s.size
        if pad_length <= 0:
            # BUG FIX: s[:pad_length] returned an *empty* array when
            # pad_length == 0 (exact-length sample); slice to d directly.
            s = s[:d]
            # fade out
            fade_length = 100
            while fade_length > s.size:
                fade_length -= 1
            s[-fade_length:] = s[-fade_length:] * np.linspace(1, 0, fade_length)
        else:
            s = np.pad(s, [0, pad_length])
        return s
    # end get_sample
# end GuitarSamples
class GPAudioPieceEvents:
    """Parse a Guitar Pro file into per-track note-event lists carrying beat,
    second and sample timing. Only tracks in standard EADGBE tuning are kept;
    any improperly tuned track aborts the whole piece."""

    def __init__(self, file_path, constants=None):
        if constants is None:
            constants = Constants()
        song = gp.parse(file_path)
        self.name = file_path.split(os.sep)[-1]
        self.track_events = []
        self.tempo = song.tempo
        aborted = False
        self.max_pitch = -1
        self.min_pitch = 1000
        for track in song.tracks:
            # standard guitar tuning, high to low (E4 B3 G3 D3 A2 E2)
            expected = [64, 59, 55, 50, 45, 40]
            proper_guitar = True
            for i, s in enumerate(track.strings):
                if i >= len(expected) or s.value != expected[i]:
                    proper_guitar = False
                    aborted = True  # sticky: one bad track aborts the piece
                    break
            if not proper_guitar:
                continue
            note_events = []
            for measure in track.measures:
                for voice in measure.voices:
                    for beat in voice.beats:
                        # skip mix-table changes and non-normal beats
                        if beat.effect.mixTableChange or beat.status.name != 'normal':
                            continue
                        beat_duration = beat.duration.time / constants.ticks_per_quarter
                        beat_onset_piece = beat.start / constants.ticks_per_quarter
                        beat_onset_measure = beat.startInMeasure / constants.ticks_per_quarter
                        secs_duration = beat_duration * 60 / self.tempo
                        secs_onset_piece = beat_onset_piece * 60 / self.tempo
                        secs_onset_measure = beat_onset_measure * 60 / self.tempo
                        note_event = {
                            'beat_duration': beat_duration,
                            'beat_onset_piece': beat_onset_piece,
                            'beat_onset_measure': beat_onset_measure,
                            'secs_duration': secs_duration,
                            'secs_onset_piece': secs_onset_piece,
                            'secs_onset_measure': secs_onset_measure,
                            'samples_duration': np.floor(secs_duration * constants.sample_rate).astype(int),
                            'samples_onset_piece': np.floor(secs_onset_piece * constants.sample_rate).astype(int),
                            'samples_onset_measure': np.floor(secs_onset_measure * constants.sample_rate).astype(int),
                            'pitches': [],
                        }
                        # only normal notes appended
                        for n in beat.notes:
                            if n.type.value != 1:
                                continue
                            pitch_event = {
                                'string': n.string,
                                'fret': n.value,
                                'pitch': n.realValue,
                                'midi_velocity': n.velocity,
                                'velocity_ratio': n.velocity / 127,
                                'duration_percentage': n.durationPercent,
                                'secs_duration_percentage': n.durationPercent * secs_duration,
                            }
                            if pitch_event['pitch'] > self.max_pitch:
                                self.max_pitch = pitch_event['pitch']
                            if pitch_event['pitch'] < self.min_pitch:
                                self.min_pitch = pitch_event['pitch']
                            note_event['pitches'].append(pitch_event)
                            # NOTE(review): as in the original, the beat's event
                            # is appended once per note, so chords yield
                            # duplicate (identical) entries -- confirm before
                            # deduplicating.
                            note_events.append(note_event)
            if not aborted and len(note_events) > 0:
                self.track_events.append(note_events)
# end class GPAudioPieceEvents


class GPPieceEvents:
    """Parse a Guitar Pro file into per-track note-event lists with raw tick
    timing only. Same tuning restrictions as GPAudioPieceEvents."""

    def __init__(self, file_path):
        song = gp.parse(file_path)
        self.name = file_path.split(os.sep)[-1]
        self.track_events = []
        self.tempo = song.tempo
        aborted = False
        self.max_pitch = -1
        self.min_pitch = 1000
        for track in song.tracks:
            # standard guitar tuning, high to low
            expected = [64, 59, 55, 50, 45, 40]
            proper_guitar = True
            for i, s in enumerate(track.strings):
                if i >= len(expected) or s.value != expected[i]:
                    proper_guitar = False
                    aborted = True  # sticky: one bad track aborts the piece
                    break
            if not proper_guitar:
                continue
            note_events = []
            for measure in track.measures:
                for voice in measure.voices:
                    for beat in voice.beats:
                        if beat.effect.mixTableChange or beat.status.name != 'normal':
                            continue
                        note_event = {
                            'duration': beat.duration.time,
                            'onset_piece': beat.start,
                            'onset_measure': beat.startInMeasure,
                            'pitches': [],
                        }
                        # only normal notes appended
                        for n in beat.notes:
                            if n.type.value != 1:
                                continue
                            pitch_event = {
                                'string': n.string,
                                'fret': n.value,
                                'pitch': n.realValue,
                                'velocity': n.velocity,
                                'duration_percentage': n.durationPercent,
                            }
                            if pitch_event['pitch'] > self.max_pitch:
                                self.max_pitch = pitch_event['pitch']
                            if pitch_event['pitch'] < self.min_pitch:
                                self.min_pitch = pitch_event['pitch']
                            note_event['pitches'].append(pitch_event)
                            # NOTE(review): duplicate-append per note preserved
                            # from the original (see GPAudioPieceEvents).
                            note_events.append(note_event)
            if not aborted and len(note_events) > 0:
                self.track_events.append(note_events)
# end class GPPieceEvents
class TrackRepresentation():
    """Matrix views (pianoroll / onsets / tablature / string activation) of one
    track's note events, plus 'changes-only' versions with silent and repeated
    columns removed."""

    def __init__(self, track, piece_name='undefined', track_number=-1,
                 keep_full=False, keep_events=False, random_pr=None):
        self.piece_name = piece_name
        self.track_number = track_number
        self.keep_full = keep_full
        self.keep_events = keep_events
        if self.keep_events:
            self.events = track
        # quantize onsets and durations by their greatest common divisor
        onsets = np.array([ev['onset_piece'] for ev in track])
        onsets -= onsets[0]
        g = np.gcd.reduce(onsets)
        if g > 0:
            onsets = (onsets / g).astype('int')
        else:
            onsets = onsets.astype('int')

        durations = np.array([ev['duration'] for ev in track])
        if g > 0:
            durations = np.floor(durations / g).astype('int')
        else:
            durations = np.floor(durations).astype('int')
        durations[durations == 0] = 1  # every event spans at least one column

        total_cols = onsets[-1] + durations[-1]
        self.pianoroll = np.zeros((128, total_cols), dtype=np.float32)
        self.onsetsroll = np.zeros((128, total_cols), dtype=np.float32)

        for i, ev in enumerate(track):
            for p in ev['pitches']:
                # columns the note sustains (dividing by the duration
                # percentage, as in the original)
                sustain = np.max([np.floor(durations[i] / p['duration_percentage']), 1])
                vel = p['velocity']
                for d in range(sustain.astype('int')):
                    # optionally add a random harmony pitch for augmentation
                    random_pitch = -1
                    if random_pr is not None:
                        if np.random.rand() <= random_pr:
                            random_pitch = p['pitch'] + [-12, -5, -4, -3, 3, 4, 7, 12][np.random.randint(8)]
                    if d == 0:
                        # onsetsroll marks only the first column of each note
                        self.onsetsroll[p['pitch'], onsets[i] + d] = vel
                        if random_pitch >= 0:
                            self.onsetsroll[random_pitch, onsets[i] + d] = vel
                    self.pianoroll[p['pitch'], onsets[i] + d] = vel
                    if random_pitch >= 0:
                        self.pianoroll[random_pitch, onsets[i] + d] = vel

        # keep only active range of notes
        # self.pianoroll = self.pianoroll[40:95, :]
        # self.onsetsroll = self.onsetsroll[40:95, :]

        self.tablature = -1 * np.ones((6, total_cols), dtype=np.float32)
        self.string_activation = np.zeros((6, total_cols), dtype=np.float32)
        for i, ev in enumerate(track):
            for p in ev['pitches']:
                self.tablature[p['string'] - 1, onsets[i]] = p['fret']
                self.string_activation[p['string'] - 1, onsets[i]] = 1

        # remove all-zero columns, then keep only columns where content changes
        nz_idxs = np.sum(self.pianoroll, axis=0) != 0
        p0 = self.pianoroll[:, nz_idxs]
        dsum = np.sum(np.abs(np.diff(p0, axis=1)), axis=0)
        idx2keep = np.append(0, np.where(dsum != 0)[0] + 1)
        if self.keep_events:
            tmp_all_idxs = np.arange(total_cols).astype(int)
            tmp_nz_idxs = tmp_all_idxs[nz_idxs]
            self.event_onsets_kept = tmp_nz_idxs[np.array(idx2keep, dtype=int)] * g

        self.pianoroll_changes = p0[:, idx2keep]
        self.tablature_changes = self.tablature[:, nz_idxs][:, idx2keep]
        self.string_activation_changes = self.string_activation[:, nz_idxs][:, idx2keep]
        if not self.keep_full:
            del self.pianoroll
            del self.onsetsroll
            del self.tablature
            del self.string_activation
    # end constructor

    def plot_pianoroll_part(self, start_idx=0, end_idx=50):
        """Show a column range of the changes-only pianoroll."""
        plt.imshow(self.pianoroll_changes[:, start_idx:end_idx], cmap='gray_r', origin='lower')
    # end plot_pianoroll_part

    def plot_tab_part(self, start_idx=0, end_idx=50):
        """Draw a column range of the changes-only tablature as fret numbers
        over six horizontal string lines."""
        part = self.tablature_changes[:, start_idx:end_idx]
        x = np.arange(part.shape[1])
        x_length = len(x)
        y_height = x_length / 5.
        y_offset = y_height / 10.
        y_room = y_height - 2 * y_offset
        plt.clf()
        for string in range(6):
            # plot string
            string_height = y_offset + (6 - string) * y_room / 6
            plt.plot([x[0], x[-1]], [string_height, string_height], 'gray')
            for i, f in enumerate(part[string, :]):
                if f > -1:
                    plt.text(i, string_height, str(f.astype(int)))
        plt.axis('equal')
    # end plot_tab_part

    def tab2events(self):
        # NOTE(review): unfinished stub preserved as-is -- it only builds an
        # unused pitch list, and 'ev.pitches' would raise AttributeError since
        # events are dicts (accessed as ev['pitches'] elsewhere).
        if not self.keep_events:
            print('ERROR: events should have been kept')
            return
        # start reading midi and tab changes
        i = 0
        for ev in self.events:
            # get pitches of event
            p = [n['pitch'] for n in ev.pitches]
    # end tab2events
# end TrackRepresentation


class GuitarTabDataset():
    """Collects per-track change matrices and assembles shuffled
    train/validation/test splits."""

    def __init__(self, history=2, task='string_activation',
                 output_representation='binary_tab',):
        self.history = history
        self.task = task
        self.output_representation = output_representation
        # collections of per-track matrices, concatenated later by load_data
        self.pianoroll_changes = []
        self.tablature_changes = []
        self.string_activation_changes = []
        # final matrices
        self.x_train = None
        self.y_train = None
        self.x_valid = None
        self.y_valid = None
        self.x_test = None
        self.y_test = None
    # end constructor

    def add_matrices_old(self, r):
        """Append a TrackRepresentation's matrices: the pianoroll input gets
        'history' previous columns stacked below it."""
        padded = np.concatenate(
            (np.zeros((r.pianoroll_changes.shape[0], self.history)), r.pianoroll_changes), axis=1)
        x = padded[:, self.history:]
        for i in range(1, self.history + 1, 1):
            x = np.vstack((x, padded[:, self.history - i:-i]))
        self.pianoroll_changes.append(x)
        if self.output_representation == 'binary_tab':
            self.tablature_changes.append(tablature2binary(r.tablature_changes))
        elif self.output_representation == 'flat_tablature':
            self.tablature_changes.append(tablature2flatFretboard(r.tablature_changes))
        elif self.output_representation == 'full_tablature':
            self.tablature_changes.append(tablature2Fretboard(r.tablature_changes))
        else:
            print('unknown output_representation')
        self.string_activation_changes.append(r.string_activation_changes)
    # end add_matrices_old

    def add_matrices(self, r):
        """Append input/target matrices; the history columns come from the
        pianoroll for 'binary_tab' and from the flat tablature for
        'flat_tablature'."""
        if self.output_representation == 'binary_tab':
            # TODO: put binary tab history
            padded = np.concatenate(
                (np.zeros((r.pianoroll_changes.shape[0], self.history)), r.pianoroll_changes), axis=1)
            x = padded[:, self.history:]
        elif self.output_representation == 'flat_tablature':
            flat_tab = tablature2flatFretboard(r.tablature_changes)
            padded = np.concatenate(
                (np.zeros((flat_tab.shape[0], self.history)), flat_tab), axis=1)
            x = r.pianoroll_changes
        # NOTE(review): for 'full_tablature' x/padded are never assigned and the
        # append below raises UnboundLocalError -- preserved from the original.
        if self.output_representation == 'flat_tablature' or self.output_representation == 'binary_tab':
            for i in range(1, self.history + 1, 1):
                x = np.vstack((x, padded[:, self.history - i:-i]))
        self.pianoroll_changes.append(x.astype(bool))
        if self.output_representation == 'binary_tab':
            self.tablature_changes.append(tablature2binary(r.tablature_changes))
        elif self.output_representation == 'flat_tablature':
            self.tablature_changes.append(tablature2flatFretboard(r.tablature_changes).astype(bool))
        elif self.output_representation == 'full_tablature':
            self.tablature_changes.append(tablature2Fretboard(r.tablature_changes))
        else:
            print('unknown output_representation')
    # end add_matrices

    def load_data(self, train_ratio=0.8, validation=True, validation_ratio=0.2):
        """Shuffle tracks, split train/valid/test and concatenate along time.

        Returns [x_train, y_train, (x_valid, y_valid,) x_test, y_test].
        NOTE(review): add_matrices never fills string_activation_changes, so
        task=='string_activation' only works after add_matrices_old.
        """
        self.validation = validation
        self.pianoroll_changes, self.tablature_changes = shuffle(
            self.pianoroll_changes, self.tablature_changes)
        train_idx = np.floor(len(self.pianoroll_changes) * train_ratio).astype(int)
        valid_idx = 0
        if self.validation:
            valid_idx = np.floor(train_idx * validation_ratio).astype(int)
            x_valid = self.pianoroll_changes[train_idx - valid_idx:train_idx]
        x_train = self.pianoroll_changes[:train_idx - valid_idx]
        x_test = self.pianoroll_changes[train_idx:]

        self.x_train = np.concatenate(x_train, axis=1)
        self.x_test = np.concatenate(x_test, axis=1)
        if self.validation:
            self.x_valid = np.concatenate(x_valid, axis=1)

        if self.task == 'string_activation':
            y = self.string_activation_changes
        else:
            y = self.tablature_changes
        if self.validation:
            y_valid = y[train_idx - valid_idx:train_idx]
        y_train = y[:train_idx - valid_idx]
        y_test = y[train_idx:]
        self.y_train = np.concatenate(y_train, axis=1)
        self.y_test = np.concatenate(y_test, axis=1)
        if self.validation:
            self.y_valid = np.concatenate(y_valid, axis=1)
        if self.validation:
            return [self.x_train, self.y_train, self.x_valid, self.y_valid,
                    self.x_test, self.y_test]
        else:
            return [self.x_train, self.y_train, self.x_test, self.y_test]
    # end load_data

    def load_full_tabs(self, train_ratio=0.8, validation=True, validation_ratio=0.2):
        """Shuffle/split only the full-tablature matrices (x side; the y_*
        attributes keep whatever value they already had)."""
        self.validation = validation
        self.tablature_changes = shuffle(self.tablature_changes)
        train_idx = np.floor(len(self.tablature_changes) * train_ratio).astype(int)
        valid_idx = 0
        if self.validation:
            valid_idx = np.floor(train_idx * validation_ratio).astype(int)
            x_valid = self.tablature_changes[train_idx - valid_idx:train_idx]
        x_train = self.tablature_changes[:train_idx - valid_idx]
        x_test = self.tablature_changes[train_idx:]

        self.x_train = np.concatenate(x_train, axis=0)
        self.x_test = np.concatenate(x_test, axis=0)
        if self.validation:
            self.x_valid = np.concatenate(x_valid, axis=0)

        if self.validation:
            return [self.x_train, self.y_train, self.x_valid, self.y_valid,
                    self.x_test, self.y_test]
        else:
            return [self.x_train, self.y_train, self.x_test, self.y_test]
    # end load_full_tabs
wjd.py
Source:wjd.py
import os
import sys
import numpy as np
from sqlalchemy import Column, ForeignKey, Integer, Float, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import csv

import settings
from base import ImporterBase

Base = declarative_base()


class Song(Base):
    """A recording; owns zero or more solos."""
    __tablename__ = 'song'
    songid = Column(Integer, primary_key=True)
    title = Column(String, nullable=False)
    filename_track = Column(String, nullable=False)
    solos = relationship('Solo')


class Solo(Base):
    """One annotated solo of a song, with its melody and beat events."""
    __tablename__ = 'solo_info'
    melid = Column(Integer, primary_key=True)
    songid = Column(Integer, ForeignKey('song.songid'))
    melodies = relationship('Melody', backref='solo_info')
    beats = relationship('Beat', backref='solo_info')
    performer = Column(String)
    title = Column(String)
    instrument = Column(String)
    key = Column(String)
    signature = Column(String)


class Beat(Base):
    """A beat annotation (onset in seconds) belonging to a solo."""
    __tablename__ = 'beats'
    beatid = Column(Integer, primary_key=True)
    melid = Column(Integer, ForeignKey('solo_info.melid'))
    onset = Column(Float)


class Melody(Base):
    """A single note event of a solo's melody."""
    __tablename__ = 'melody'
    eventid = Column(Integer, primary_key=True)
    melid = Column(Integer, ForeignKey('solo_info.melid'))
    onset = Column(Float)
    pitch = Column(Float)
    duration = Column(Float)
    period = Column(Integer)
    division = Column(Integer)
    bar = Column(Integer)
    beat = Column(Integer)
    tatum = Column(Integer)
    subtatum = Column(Integer)
    num = Column(Integer)
    denom = Column(Integer)


def get_melids():
    """Return all distinct solo ids (as row tuples from the DISTINCT query)."""
    engine = create_engine('sqlite:///../data/wjazzd_new.db')
    db_session = sessionmaker(bind=engine)
    session = db_session()
    return [row for row in session.query(Solo.melid).distinct()]
def get_solo(melid):
    """Fetch one Solo row by primary key from the WJD sqlite database."""
    engine = create_engine('sqlite:///../data/wjazzd_new.db')
    db_session = sessionmaker(bind=engine)
    session = db_session()
    solo = session.query(Solo).get(melid)
    return solo


def get_solo_activity(melid, frame_times):
    """Binary activity curve over frame_times: 1.0 while any melody note sounds."""
    solo = get_solo(melid)
    solo_activity = np.zeros_like(frame_times)
    for note_event in solo.melodies:
        idx_start = np.argmin(np.abs(frame_times - note_event.onset))
        idx_end = np.argmin(np.abs(frame_times - (note_event.onset + note_event.duration)))
        solo_activity[idx_start:idx_end] = 1.0
    return solo_activity


def get_solo_beats(solo, subdivisions=0):
    """Onset times of a solo's beats, optionally with interpolated subdivisions.

    Parameter
    ---------
    solo : Solo
        ORM object with a .beats relationship (each beat has .onset).
    subdivisions : integer, optional
        0 returns the plain beat onsets. k > 0 appends, for every pair of
        consecutive beats, k+1 evenly spaced onsets including both endpoints
        (so shared beat onsets are duplicated, as in the original). Defaults
        to 0.

    Return
    ------
    beats : ndarray
    """
    # include first and second note
    subdivisions_mod = subdivisions + 1
    beats = np.zeros(1)
    for cur_beat, note_event in enumerate(solo.beats):
        if subdivisions == 0:
            # BUG FIX: the array was created with length 1 but indexed with
            # cur_beat, raising IndexError from the second beat on -- grow it
            # instead.
            if cur_beat == 0:
                beats[0] = note_event.onset
            else:
                beats = np.r_[beats, note_event.onset]
        else:
            if cur_beat == 0:
                beats[0] = note_event.onset
            else:
                last_onset = beats[-1]
                cur_onset = note_event.onset
                # fill with subdivisions
                subdivision_onsets = np.linspace(last_onset, cur_onset, subdivisions_mod)
                beats = np.r_[beats, subdivision_onsets]
    return beats


def get_transposition_offset(solo):
    """Semitone offset of the solo's key root above C (0 if unannotated).

    BUG FIX 1: both pitch-class tables had only 11 entries (the 'G#'/'Ab'
    step was missing), so every key from A upward mapped one semitone low.
    BUG FIX 2: 'if not transp_offset' conflated a found offset of 0 (key 'C')
    with "not found"; test against None explicitly.
    """
    # define musical pitch classes (12 semitones)
    pitch_classes_sharp = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
    pitch_classes_flat = ['C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab', 'A', 'Bb', 'B']
    # split the string, e.g. 'Ab-maj' -> 'Ab'
    cur_key = solo.key.split('-')[0]
    transp_offset = None
    # look the root up in both spellings (shared names resolve identically)
    try:
        transp_offset = pitch_classes_sharp.index(cur_key)
    except ValueError:
        pass
    try:
        transp_offset = pitch_classes_flat.index(cur_key)
    except ValueError:
        pass
    # this means there was no annotation in the database
    if transp_offset is None:
        transp_offset = 0
    return transp_offset
def visualize_piano_roll(piano_roll):
    """Quick grayscale imshow of a piano roll (matplotlib imported lazily)."""
    import matplotlib.pyplot as plt
    plt.imshow(piano_roll, cmap=plt.get_cmap('gray_r'))


class ImporterWJD(ImporterBase):
    """Base Class for the dataset import.

    Imports every WJD solo as a piano-roll matrix (pitch rows plus metric
    flags) into self.output.
    """

    def __init__(self, beats_per_measure, melody_range, harmony_range,
                 continuation_range, metric_range,
                 path='../data/rock_corpus_v2-1/rs200_melody_nlt'):
        self.output = []
        super(ImporterWJD, self).__init__(beats_per_measure, melody_range,
                                          harmony_range, continuation_range,
                                          metric_range)
        self.path = path
        self.output = []
        # 'pr' stands for piano roll
        self.pr_n_pitches = melody_range[1] - melody_range[0]
        self.pr_width = self.metric_range[1]
        self.pr_bar_division = beats_per_measure
        for cur_melid in get_melids():
            self.output.append(self.import_piano_roll(cur_melid))

    def get_solo_pitch_shape(self, solo, frame_times, n_pitch_classes, transposition_offset):
        """Build a (pr_width x len(frame_times)) roll: a one-hot pitch column
        over each note's span plus a metric-level flag at its start frame.

        n_pitch_classes truthy folds pitches into that many classes
        (relative to the solo's lowest transposed octave)."""
        for_mod = bool(n_pitch_classes)
        solo_piano_roll = np.zeros((self.pr_width, len(frame_times)))
        pitch_range_start = np.min([mel.pitch for mel in solo.melodies])
        lowest_octave = int((pitch_range_start - transposition_offset) / 12) * 12
        for note_event in solo.melodies:
            # position of the note inside the bar, in tatum units
            note_metric_index = (note_event.beat - 1) * 4 + note_event.tatum - 1
            idx_start = np.argmin(np.abs(frame_times - note_event.onset))
            idx_end = np.argmin(np.abs(frame_times - (note_event.onset + note_event.duration)))
            cur_metric_level = self.get_metric_level_from_num_divisions(note_metric_index, self.pr_bar_division)
            if for_mod:
                cur_pitch = (note_event.pitch - transposition_offset - lowest_octave) % n_pitch_classes
            else:
                cur_pitch = note_event.pitch - transposition_offset - lowest_octave
            frame_vector = np.zeros((self.pr_width, 1))
            frame_vector[cur_pitch] = 1.0
            solo_piano_roll[:, idx_start:idx_end] = frame_vector
            solo_piano_roll[self.metric_range[0] + cur_metric_level, idx_start] = 1.0
        return solo_piano_roll

    def import_piano_roll(self, cur_melid):
        """Build the piano roll for one solo id."""
        solo = get_solo(cur_melid)
        transp_offset = get_transposition_offset(solo)
        beats = get_solo_beats(solo, 4)
        # NOTE(review): self.num_pitches is never assigned in this class --
        # presumably inherited from ImporterBase; self.pr_n_pitches looks like
        # the intended attribute. Confirm before changing.
        return self.get_solo_pitch_shape(solo, beats, self.num_pitches, transp_offset)

    def add_beat_flags(self):
        pass


if __name__ == '__main__':
    importer = ImporterWJD(settings.BEATS_PER_MEASURE,
                           settings.MELODY_INDICES_RANGE,
                           settings.HARMONY_INDICES_RANGE,
                           settings.CONTINUATION_FLAG_RANGE,
                           settings.METRIC_FLAGS_RANGE)
message_processing.py
Source:message_processing.py
1import re2from regex import time_regex, note_regex3from metadata import get_encoding4from encoding import decode5def parse_time(note_event):6 return round(float(re.search(time_regex, note_event).group(1)), 3)7def parse_note(note_event):8 # TODO: are these registers an octave higher than I expect?9 pitches = re.search(note_regex, note_event).group(0)10 encoding = get_encoding()...
Learn to run automation tests from scratch with the LambdaTest Learning Hub. From setting up the prerequisites and running your first automated test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations by industry experts.
Get 100 minutes of automation test minutes FREE!!