From 6906adc39ee54b0f03eb60433847c5732b32fc88 Mon Sep 17 00:00:00 2001 From: Eren Golge Date: Fri, 1 Nov 2019 12:23:03 +0100 Subject: [PATCH] speaker encoder implementation --- speaker_encoder/README.md | 12 + speaker_encoder/__init__.py | 0 speaker_encoder/compute_embeddings.py | 64 ++++ speaker_encoder/config.json | 58 ++++ speaker_encoder/dataset.py | 128 +++++++ speaker_encoder/generic_utils.py | 41 +++ speaker_encoder/loss.py | 104 ++++++ speaker_encoder/model.py | 87 +++++ .../notebooks/PlotUmapLibriTTS.ipynb | 250 ++++++++++++++ speaker_encoder/tests.py | 80 +++++ speaker_encoder/train.py | 315 ++++++++++++++++++ speaker_encoder/umap.png | Bin 0 -> 23864 bytes speaker_encoder/visual.py | 40 +++ 13 files changed, 1179 insertions(+) create mode 100644 speaker_encoder/README.md create mode 100644 speaker_encoder/__init__.py create mode 100644 speaker_encoder/compute_embeddings.py create mode 100644 speaker_encoder/config.json create mode 100644 speaker_encoder/dataset.py create mode 100644 speaker_encoder/generic_utils.py create mode 100644 speaker_encoder/loss.py create mode 100644 speaker_encoder/model.py create mode 100644 speaker_encoder/notebooks/PlotUmapLibriTTS.ipynb create mode 100644 speaker_encoder/tests.py create mode 100644 speaker_encoder/train.py create mode 100644 speaker_encoder/umap.png create mode 100644 speaker_encoder/visual.py diff --git a/speaker_encoder/README.md b/speaker_encoder/README.md new file mode 100644 index 0000000..8f7b675 --- /dev/null +++ b/speaker_encoder/README.md @@ -0,0 +1,12 @@ +### Speaker embedding (Experimental) + +This is an implementation of https://arxiv.org/abs/1710.10467. This model can be used for voice and speaker embedding. So you can generate d-vectors for multi-speaker TTS or prune bad samples from your TTS dataset. Below is an example showing embedding results of various speakers. You can generate the same plot with the provided notebook. + +![](https://user-images.githubusercontent.com/1402048/64603079-7fa5c100-d3c8-11e9-88e7-88a00d0e37d1.png) + +To run the code, you need to follow the same flow as in TTS. + +- Define 'config.json' for your needs. Note that, audio parameters should match your TTS model. +- Example training call ```python speaker_encoder/train.py --config_path speaker_encoder/config.json --data_path ~/Data/Libri-TTS/train-clean-360``` +- Generate embedding vectors ```python speaker_encoder/compute_embeddings.py --use_cuda true /model/path/best_model.pth.tar model/config/path/config.json dataset/path/ output_path``` . This code parses all .wav files at the given dataset path and generates the same folder structure under the output path with the generated embedding files. 
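+Each generated `.npy` file holds a single d-vector as a NumPy array. A minimal sketch of reading one back (the file path below is only an illustration, not something this patch creates):
+```python
+import numpy as np
+
+# load one embedding written by compute_embeddings.py
+embedding = np.load("output_path/speaker_a/chapter_1/utt_0.npy")
+print(embedding.shape)  # (1, proj_dim) -> (1, 128) with the provided config.json
+```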
+- Watch training on Tensorboard as in TTS \ No newline at end of file diff --git a/speaker_encoder/__init__.py b/speaker_encoder/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/speaker_encoder/compute_embeddings.py b/speaker_encoder/compute_embeddings.py new file mode 100644 index 0000000..ff90acb --- /dev/null +++ b/speaker_encoder/compute_embeddings.py @@ -0,0 +1,64 @@ +import argparse +import glob +import os + +import numpy as np +from tqdm import tqdm + +import torch +from torch.utils.data import DataLoader +from TTS.datasets.preprocess import get_preprocessor_by_name +from TTS.speaker_encoder.dataset import MyDataset +from TTS.speaker_encoder.model import SpeakerEncoder +from TTS.speaker_encoder.visual import plot_embeddings +from TTS.utils.audio import AudioProcessor +from TTS.utils.generic_utils import load_config + +parser = argparse.ArgumentParser( + description='Compute embedding vectors for each wav file in a dataset. ') +parser.add_argument( + 'model_path', + type=str, + help='Path to model outputs (checkpoint, tensorboard etc.).') +parser.add_argument( + 'config_path', + type=str, + help='Path to config file for training.', +) +parser.add_argument( + 'data_path', + type=str, + help='Defines the data path. It overwrites config.json.') +parser.add_argument( + 'output_path', + type=str, + help='path for training outputs.') +parser.add_argument( + '--use_cuda', type=bool, help='flag to set cuda.', default=False +) +args = parser.parse_args() + + +c = load_config(args.config_path) +ap = AudioProcessor(**c['audio']) + +wav_files = glob.glob(args.data_path + '/**/*.wav', recursive=True) +output_files = [wav_file.replace(args.data_path, args.output_path).replace( + '.wav', '.npy') for wav_file in wav_files] + +for output_file in output_files: + os.makedirs(os.path.dirname(output_file), exist_ok=True) + +model = SpeakerEncoder(**c.model) +model.load_state_dict(torch.load(args.model_path)['model']) +model.eval() +if args.use_cuda: + model.cuda() + +for idx, wav_file in enumerate(tqdm(wav_files)): + mel_spec = ap.melspectrogram(ap.load_wav(wav_file)).T + mel_spec = torch.FloatTensor(mel_spec[None, :, :]) + if args.use_cuda: + mel_spec = mel_spec.cuda() + embedd = model.compute_embedding(mel_spec) + np.save(output_files[idx], embedd.detach().cpu().numpy()) diff --git a/speaker_encoder/config.json b/speaker_encoder/config.json new file mode 100644 index 0000000..79c42bc --- /dev/null +++ b/speaker_encoder/config.json @@ -0,0 +1,58 @@ +{ + "run_name": "libritts_360-half", + "run_description": "train speaker encoder for libritts 360", + "audio": { + // Audio processing parameters + "num_mels": 40, // size of the mel spec frame. + "num_freq": 1025, // number of stft frequency levels. Size of the linear spectogram frame. + "sample_rate": 16000, // DATASET-RELATED: wav sample-rate. If different than the original data, it is resampled. + "frame_length_ms": 50, // stft window length in ms. + "frame_shift_ms": 12.5, // stft window hop-lengh in ms. + "preemphasis": 0.98, // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no -pre-emphasis. + "min_level_db": -100, // normalization range + "ref_level_db": 20, // reference level db, theoretically 20db is the sound of air. 
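+        // For reference: at sample_rate=16000 the frame settings above work out to
+        // win_length = 0.050 s * 16000 = 800 samples and hop_length = 0.0125 s * 16000 = 200 samples,
+        // the same values the AudioProcessor prints in the PlotUmapLibriTTS notebook.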
+ // Normalization parameters + "signal_norm": true, // normalize the spec values in range [0, 1] + "symmetric_norm": true, // move normalization to range [-1, 1] + "max_norm": 4, // scale normalization to range [-max_norm, max_norm] or [0, max_norm] + "clip_norm": true, // clip normalized values into the range. + "mel_fmin": 0.0, // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!! + "mel_fmax": 8000.0, // maximum freq level for mel-spec. Tune for dataset!! + "do_trim_silence": false // enable trimming of slience of audio as you load it. LJspeech (false), TWEB (false), Nancy (true) + }, + "reinit_layers": [], + "grad_clip": 3.0, // upper limit for gradients for clipping. + "epochs": 1000, // total number of epochs to train. + "lr": 0.0001, // Initial learning rate. If Noam decay is active, maximum learning rate. + "lr_decay": false, // if true, Noam learning rate decaying is applied through training. + "warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr" + "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. + "steps_plot_stats": 10, // number of steps to plot embeddings. + "num_speakers_in_batch": 32, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'. + "wd": 0.000001, // Weight decay weight. + "checkpoint": true, // If true, it saves checkpoints per "save_step" + "save_step": 1000, // Number of training steps expected to save traning stats and checkpoints. + "print_step": 1, // Number of steps to log traning on console. + "output_path": "/media/erogol/data_ssd/Models/libri_tts/speaker_encoder/", // DATASET-RELATED: output path for all training outputs. + "model": { + "input_dim": 40, + "proj_dim": 128, + "lstm_dim": 384, + "num_lstm_layers": 3 + }, + "datasets": + [ + { + "name": "libri_tts", + "path": "/home/erogol/Data/Libri-TTS/train-clean-360/", + "meta_file_train": null, + "meta_file_val": null + }, + { + "name": "libri_tts", + "path": "/home/erogol/Data/Libri-TTS/train-clean-100/", + "meta_file_train": null, + "meta_file_val": null + } + ] +} \ No newline at end of file diff --git a/speaker_encoder/dataset.py b/speaker_encoder/dataset.py new file mode 100644 index 0000000..2dd50c7 --- /dev/null +++ b/speaker_encoder/dataset.py @@ -0,0 +1,128 @@ +import os +import numpy as np +import collections +import torch +import random +from torch.utils.data import Dataset + +from TTS.utils.text import text_to_sequence, phoneme_to_sequence, pad_with_eos_bos +from TTS.utils.data import prepare_data, prepare_tensor, prepare_stop_target + + +class MyDataset(Dataset): + def __init__(self, + ap, + meta_data, + voice_len=1.6, + num_speakers_in_batch=64, + num_utter_per_speaker=10, + skip_speakers=False, + verbose=False): + """ + Args: + ap (TTS.utils.AudioProcessor): audio processor object. + meta_data (list): list of dataset instances. + seq_len (int): voice segment length in seconds. + verbose (bool): print diagnostic information. 
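+            num_speakers_in_batch (int): expected number of speakers per batch (set as the
+                DataLoader batch size in train.py; N in the GE2E loss).
+            num_utter_per_speaker (int): number of utterances drawn per speaker (M in the GE2E loss).
+            skip_speakers (bool): if True, skip speakers that have fewer than
+                num_utter_per_speaker utterances instead of sampling them with replacement.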
+ """ + self.items = meta_data + self.sample_rate = ap.sample_rate + self.voice_len = voice_len + self.seq_len = int(voice_len * self.sample_rate) + self.num_utter_per_speaker = num_utter_per_speaker + self.skip_speakers = skip_speakers + self.ap = ap + self.verbose = verbose + self.__parse_items() + if self.verbose: + print("\n > DataLoader initialization") + print(f" | > Number of instances : {len(self.items)}") + print(f" | > Sequence length: {self.seq_len}") + print(f" | > Num speakers: {len(self.speakers)}") + + def load_wav(self, filename): + audio = self.ap.load_wav(filename) + return audio + + def load_data(self, idx): + text, wav_file, speaker_name = self.items[idx] + wav = np.asarray(self.load_wav(wav_file), dtype=np.float32) + mel = self.ap.melspectrogram(wav).astype('float32') + # sample seq_len + + assert text.size > 0, self.items[idx][1] + assert wav.size > 0, self.items[idx][1] + + sample = { + 'mel': mel, + 'item_idx': self.items[idx][1], + 'speaker_name': speaker_name + } + return sample + + def __parse_items(self): + """ + Find unique speaker ids and create a dict mapping utterances from speaker id + """ + speakers = list(set([item[-1] for item in self.items])) + self.speaker_to_utters = {} + self.speakers = [] + for speaker in speakers: + speaker_utters = [item[1] for item in self.items if item[2] == speaker] + if len(speaker_utters) < self.num_utter_per_speaker and self.skip_speakers: + print(f" [!] Skipped speaker {speaker}. Not enough utterances {self.num_utter_per_speaker} vs {len(speaker_utters)}.") + else: + self.speakers.append(speaker) + self.speaker_to_utters[speaker] = speaker_utters + + def __len__(self): + return int(1e+10) + + def __sample_speaker(self): + speaker = random.sample(self.speakers, 1)[0] + if self.num_utter_per_speaker > len(self.speaker_to_utters[speaker]): + utters = random.choices(self.speaker_to_utters[speaker], k=self.num_utter_per_speaker) + else: + utters = random.sample(self.speaker_to_utters[speaker], self.num_utter_per_speaker) + return speaker, utters + + def __sample_speaker_utterances(self, speaker): + """ + Sample all M utterances for the given speaker. 
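+        Utterances that are not longer than seq_len are discarded and re-drawn; if a speaker
+        runs out of usable utterances, a different speaker is sampled in its place.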
+ """ + feats = [] + labels = [] + for idx in range(self.num_utter_per_speaker): + # TODO:dummy but works + while True: + if len(self.speaker_to_utters[speaker]) > 0: + utter = random.sample(self.speaker_to_utters[speaker], 1)[0] + else: + self.speakers.remove(speaker) + speaker, _ = self.__sample_speaker() + continue + wav = self.load_wav(utter) + if wav.shape[0] - self.seq_len > 0: + break + else: + self.speaker_to_utters[speaker].remove(utter) + + offset = random.randint(0, wav.shape[0] - self.seq_len) + mel = self.ap.melspectrogram(wav[offset:offset+self.seq_len]) + feats.append(torch.FloatTensor(mel)) + labels.append(speaker) + return feats, labels + + def __getitem__(self, idx): + speaker, _ = self.__sample_speaker() + return speaker + + def collate_fn(self, batch): + labels = [] + feats = [] + for speaker in batch: + feats_, labels_ = self.__sample_speaker_utterances(speaker) + labels.append(labels_) + feats.extend(feats_) + feats = torch.stack(feats) + return feats.transpose(1, 2), labels \ No newline at end of file diff --git a/speaker_encoder/generic_utils.py b/speaker_encoder/generic_utils.py new file mode 100644 index 0000000..c568d12 --- /dev/null +++ b/speaker_encoder/generic_utils.py @@ -0,0 +1,41 @@ +import os +import datetime +import torch + + +def save_checkpoint(model, optimizer, model_loss, out_path, + current_step, epoch): + checkpoint_path = 'checkpoint_{}.pth.tar'.format(current_step) + checkpoint_path = os.path.join(out_path, checkpoint_path) + print(" | | > Checkpoint saving : {}".format(checkpoint_path)) + + new_state_dict = model.state_dict() + state = { + 'model': new_state_dict, + 'optimizer': optimizer.state_dict() if optimizer is not None else None, + 'step': current_step, + 'epoch': epoch, + 'GE2Eloss': model_loss, + 'date': datetime.date.today().strftime("%B %d, %Y"), + } + torch.save(state, checkpoint_path) + + +def save_best_model(model, optimizer, model_loss, best_loss, out_path, + current_step): + if model_loss < best_loss: + new_state_dict = model.state_dict() + state = { + 'model': new_state_dict, + 'optimizer': optimizer.state_dict(), + 'step': current_step, + 'GE2Eloss': model_loss, + 'date': datetime.date.today().strftime("%B %d, %Y"), + } + best_loss = model_loss + bestmodel_path = 'best_model.pth.tar' + bestmodel_path = os.path.join(out_path, bestmodel_path) + print("\n > BEST MODEL ({0:.5f}) : {1:}".format( + model_loss, bestmodel_path)) + torch.save(state, bestmodel_path) + return best_loss \ No newline at end of file diff --git a/speaker_encoder/loss.py b/speaker_encoder/loss.py new file mode 100644 index 0000000..9b5a29b --- /dev/null +++ b/speaker_encoder/loss.py @@ -0,0 +1,104 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# adapted from https://github.com/cvqluu/GE2E-Loss +class GE2ELoss(nn.Module): + + def __init__(self, init_w=10.0, init_b=-5.0, loss_method='softmax'): + ''' + Implementation of the Generalized End-to-End loss defined in https://arxiv.org/abs/1710.10467 [1] + Accepts an input of size (N, M, D) + where N is the number of speakers in the batch, + M is the number of utterances per speaker, + and D is the dimensionality of the embedding vector (e.g. 
d-vector) + Args: + - init_w (float): defines the initial value of w in Equation (5) of [1] + - init_b (float): definies the initial value of b in Equation (5) of [1] + ''' + super(GE2ELoss, self).__init__() + self.w = nn.Parameter(torch.tensor(init_w)) + self.b = nn.Parameter(torch.tensor(init_b)) + self.loss_method = loss_method + + assert self.loss_method in ['softmax', 'contrast'] + + if self.loss_method == 'softmax': + self.embed_loss = self.embed_loss_softmax + if self.loss_method == 'contrast': + self.embed_loss = self.embed_loss_contrast + + def calc_new_centroids(self, dvecs, centroids, spkr, utt): + ''' + Calculates the new centroids excluding the reference utterance + ''' + excl = torch.cat((dvecs[spkr, :utt], dvecs[spkr, utt+1:])) + excl = torch.mean(excl, 0) + new_centroids = [] + for i, centroid in enumerate(centroids): + if i == spkr: + new_centroids.append(excl) + else: + new_centroids.append(centroid) + return torch.stack(new_centroids) + + def calc_cosine_sim(self, dvecs, centroids): + ''' + Make the cosine similarity matrix with dims (N,M,N) + ''' + cos_sim_matrix = [] + for spkr_idx, speaker in enumerate(dvecs): + cs_row = [] + for utt_idx, utterance in enumerate(speaker): + new_centroids = self.calc_new_centroids( + dvecs, centroids, spkr_idx, utt_idx) + # vector based cosine similarity for speed + cs_row.append(torch.clamp(torch.mm(utterance.unsqueeze(1).transpose(0, 1), new_centroids.transpose( + 0, 1)) / (torch.norm(utterance) * torch.norm(new_centroids, dim=1)), 1e-6)) + cs_row = torch.cat(cs_row, dim=0) + cos_sim_matrix.append(cs_row) + return torch.stack(cos_sim_matrix) + + def embed_loss_softmax(self, dvecs, cos_sim_matrix): + ''' + Calculates the loss on each embedding $L(e_{ji})$ by taking softmax + ''' + N, M, _ = dvecs.shape + L = [] + for j in range(N): + L_row = [] + for i in range(M): + L_row.append(-F.log_softmax(cos_sim_matrix[j, i], 0)[j]) + L_row = torch.stack(L_row) + L.append(L_row) + return torch.stack(L) + + def embed_loss_contrast(self, dvecs, cos_sim_matrix): + ''' + Calculates the loss on each embedding $L(e_{ji})$ by contrast loss with closest centroid + ''' + N, M, _ = dvecs.shape + L = [] + for j in range(N): + L_row = [] + for i in range(M): + centroids_sigmoids = torch.sigmoid(cos_sim_matrix[j, i]) + excl_centroids_sigmoids = torch.cat( + (centroids_sigmoids[:j], centroids_sigmoids[j+1:])) + L_row.append( + 1. 
- torch.sigmoid(cos_sim_matrix[j, i, j]) + torch.max(excl_centroids_sigmoids)) + L_row = torch.stack(L_row) + L.append(L_row) + return torch.stack(L) + + def forward(self, dvecs): + ''' + Calculates the GE2E loss for an input of dimensions (num_speakers, num_utts_per_speaker, dvec_feats) + ''' + centroids = torch.mean(dvecs, 1) + cos_sim_matrix = self.calc_cosine_sim(dvecs, centroids) + torch.clamp(self.w, 1e-6) + cos_sim_matrix = self.w * cos_sim_matrix + self.b + L = self.embed_loss(dvecs, cos_sim_matrix) + return L.mean() diff --git a/speaker_encoder/model.py b/speaker_encoder/model.py new file mode 100644 index 0000000..000cc96 --- /dev/null +++ b/speaker_encoder/model.py @@ -0,0 +1,87 @@ +import torch +from torch import nn + + +class LSTMWithProjection(nn.Module): + def __init__(self, input_size, hidden_size, proj_size): + super().__init__() + self.input_size = input_size + self.hidden_size = hidden_size + self.proj_size = proj_size + self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True) + self.linear = nn.Linear(hidden_size, proj_size, bias=False) + + def forward(self, x): + self.lstm.flatten_parameters() + o, (h, c) = self.lstm(x) + return self.linear(o) + + +class SpeakerEncoder(nn.Module): + def __init__(self, input_dim, proj_dim=256, lstm_dim=768, num_lstm_layers=3): + super().__init__() + layers = [] + layers.append(LSTMWithProjection(input_dim, lstm_dim, proj_dim)) + for _ in range(num_lstm_layers-1): + layers.append(LSTMWithProjection(proj_dim, lstm_dim, proj_dim)) + self.layers = nn.Sequential(*layers) + self._init_layers() + + def _init_layers(self): + for name, param in self.layers.named_parameters(): + if 'bias' in name: + nn.init.constant_(param, 0.0) + elif 'weight' in name: + nn.init.xavier_normal_(param) + + def forward(self, x): + # TODO: implement state passing for lstms + d = self.layers(x) + d = torch.nn.functional.normalize(d[:, -1], p=2, dim=1) + return d + + def inference(self, x): + d = self.layers.forward(x) + d = torch.nn.functional.normalize(d[:, -1], p=2, dim=1) + return d + + def compute_embedding(self, x, num_frames=160, overlap=0.5): + """ + Generate embeddings for a batch of utterances + x: 1xTxD + """ + num_overlap = int(num_frames * overlap) + max_len = x.shape[1] + embed = None + cur_iter = 0 + for offset in range(0, max_len, num_frames - num_overlap): + cur_iter += 1 + end_offset = min(x.shape[1], offset + num_frames) + frames = x[:, offset:end_offset] + if embed is None: + embed = self.inference(frames) + else: + embed += self.inference(frames) + return embed / cur_iter + + def batch_compute_embedding(self, x, seq_lens, num_frames=160, overlap=0.5): + """ + Generate embeddings for a batch of utterances + x: BxTxD + """ + num_overlap = num_frames * overlap + max_len = x.shape[1] + embed = None + num_iters = seq_lens / (num_frames - num_overlap) + cur_iter = 0 + for offset in range(0, max_len, num_frames - num_overlap): + cur_iter += 1 + end_offset = min(x.shape[1], offset + num_frames) + frames = x[:, offset:end_offset] + if embed is None: + embed = self.inference(frames) + else: + embed[cur_iter <= num_iters, :] += self.inference(frames[cur_iter <= num_iters, :, :]) + return embed / num_iters + + diff --git a/speaker_encoder/notebooks/PlotUmapLibriTTS.ipynb b/speaker_encoder/notebooks/PlotUmapLibriTTS.ipynb new file mode 100644 index 0000000..4171659 --- /dev/null +++ b/speaker_encoder/notebooks/PlotUmapLibriTTS.ipynb @@ -0,0 +1,250 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + 
"name": "stdout", + "output_type": "stream", + "text": [ + "checkpoint_10000.pth.tar config.json\n", + "checkpoint_20000.pth.tar events.out.tfevents.1567518806.erogol-desktop\n", + "checkpoint_30000.pth.tar\n" + ] + } + ], + "source": [ + "!ls /media/erogol/data_ssd/Models/libri_tts/speaker_encoder/libritts_360-September-03-2019_03+53PM-dc69074/" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " > Setting up Audio Processor...\n", + " | > sample_rate:16000\n", + " | > num_mels:40\n", + " | > min_level_db:-100\n", + " | > frame_shift_ms:12.5\n", + " | > frame_length_ms:50\n", + " | > ref_level_db:20\n", + " | > num_freq:1025\n", + " | > power:None\n", + " | > preemphasis:0.98\n", + " | > griffin_lim_iters:None\n", + " | > signal_norm:True\n", + " | > symmetric_norm:True\n", + " | > mel_fmin:0\n", + " | > mel_fmax:8000.0\n", + " | > max_norm:4.0\n", + " | > clip_norm:True\n", + " | > do_trim_silence:False\n", + " | > n_fft:2048\n", + " | > hop_length:200\n", + " | > win_length:800\n" + ] + } + ], + "source": [ + "import torch\n", + "import os\n", + "import umap\n", + "import random\n", + "import glob\n", + "import numpy as np\n", + "\n", + "from TTS.speaker_encoder.model import SpeakerEncoder\n", + "from TTS.utils.audio import AudioProcessor\n", + "from TTS.utils.generic_utils import load_config\n", + "\n", + "MODEL_PATH = \"/media/erogol/data_ssd/Models/libri_tts/speaker_encoder/libritts_360-half-September-28-2019_10+46AM-8565c50/best_model.pth.tar\"\n", + "CONFIG_PATH = \"/media/erogol/data_ssd/Models/libri_tts/speaker_encoder/libritts_360-September-03-2019_03+53PM-dc69074/config.json\"\n", + "EMBED_PATH = \"/home/erogol/Data/Libri-TTS/train-clean-360-embed_128/\"\n", + "CONFIG = load_config(CONFIG_PATH)\n", + "ap = AudioProcessor(**CONFIG['audio'])" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "116500\n" + ] + } + ], + "source": [ + "embed_files = glob.glob(EMBED_PATH+\"/**/*.npy\", recursive=True)\n", + "print(len(embed_files))" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'/home/erogol/Data/Libri-TTS/train-clean-360-embed_128/1025/75365/1025_75365_000002_000002.npy'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "embed_files[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "904\n" + ] + } + ], + "source": [ + "speaker_paths = list(set([os.path.dirname(os.path.dirname(embed_file)) for embed_file in embed_files]))\n", + "speaker_to_utter = {}\n", + "for embed_file in embed_files:\n", + " speaker_path = os.path.dirname(os.path.dirname(embed_file))\n", + " try:\n", + " speaker_to_utter[speaker_path].append(embed_file)\n", + " except:\n", + " speaker_to_utter[speaker_path]=[embed_file]\n", + "print(len(speaker_paths))" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "embeds = []\n", + "labels = []\n", + "num_speakers = 20\n", + "num_utters = 10\n", + "speaker_idxs = np.random.choice(range(len(speaker_paths)), num_speakers, replace=False )\n", + "\n", + "for speaker_num, speaker_idx in enumerate(speaker_idxs):\n", + " speaker_path = 
speaker_paths[speaker_idx]\n", + " speakers_utter = speaker_to_utter[speaker_path]\n", + " utter_idxs = np.random.randint(0, len(speakers_utter) , num_utters)\n", + " for utter_idx in utter_idxs:\n", + " embed_path = speaker_to_utter[speaker_path][utter_idx]\n", + " embed = np.load(embed_path)\n", + " embeds.append(embed)\n", + " labels.append(speaker_num)\n", + "embeds = np.concatenate(embeds)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "from matplotlib import cm\n", + "viridis = cm.get_cmap('tab20', num_speakers)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/erogol/miniconda3/lib/python3.7/site-packages/sklearn/metrics/pairwise.py:258: RuntimeWarning: invalid value encountered in sqrt\n", + " return distances if squared else np.sqrt(distances, out=distances)\n", + "/home/erogol/miniconda3/lib/python3.7/site-packages/umap/spectral.py:229: UserWarning: Embedding a total of 5 separate connected components using meta-embedding (experimental)\n", + " n_components\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABHgAAALICAYAAAAE6EcMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzs3Xmcn3dd7/339/ebmez70qZp05bSvdBW0iKbLLIUEVA8IIvAAT14cw56uzz01htRjvfhPhyP261HD4Iiigh6EAQRQZYDPGSTtpTSIqVp0zZJ0zRpkzT7bN/7j5lC2kwyaTO/5Zp5Ph+PPDJzXd+5rk/yRyd9zbWUWmsAAAAAaK5WrwcAAAAA4NQIPAAAAAANJ/AAAAAANJzAAwAAANBwAg8AAABAwwk8AAAAAA0n8AAAnIJSys2llGc05bgAwOwk8AAAHVFKqaWUxz5s21tLKX81+fEzJtd86GFrLp/c/rmHbS+llNtLKd+a4lyfK6UcLqXsL6XsKqV8qJSyrgN/rGPUWi+ttX5u2oUnUEp5Tynlv8z0cQGAuUPgAQB6aWeSJ5dSVh217bVJvjPF2h9IsjbJY0opV02x/0211sVJLkiyPMnvnepwpZSBUz0GAEA3CDwAQC8NJ/n7JC9PklJKO8nLkrxvirWvTfKRJB+f/HhKtdb7k/xdksum2j95tc9/LaX8ayllbynlI6WUlZP7zpm8eugnSyl3Jfns5PYXTd4ytWfy6y8+6nh3lFKePflxq5TyK6WU20op95VS/vbBY0/uf2op5UuTx9lSSvn3pZQ3JHlVkl+evALpH6Y47rxSyu+XUu6e/PX7pZR5k/ueUUrZWkr5xVLKvaWU7aWU153E3z0AMIsIPABAr/1lktdMfvy8JDcnufvoBaWUhUn+XSbCz/uSvLyUMjTVwUopq5P8WJKvn+Ccr0ny+iRnJBlN8gcP2//0JBcneV4p5YIk70/yc0nWZCIw/cNxzv+zSX5k8uvPSLI7yR9NzrUhyT8l+cPJ41yR5IZa6zsn/0y/VWtdXGt94RTHfXOS75/8msuTXJ3k147af3qSZUnWJ/nJJH9USllxgj8/ADDLCDwAQE/VWr+UZGUp5cJMhJe/nGLZS5IcSfLPST6WZCDJCx625g9KKXuSfCPJ9iS/cILTvrfWelOt9UCStyR52eTVQw96a631QK31UJIfT/KPtdZP1VpHkvx2kgVJnjzFcX86yZtrrVtrrUeSvDXJv5u81etVST5da31/rXWk1npfrfWGE8x4tFcl+c1a67211p1J/nOSVx+1f2Ry/0it9eNJ9ie58CSPDQDMAgIPANApY0kGH7ZtMBMx4uHem+RNSZ6Z5MNT7H9tkr+ttY5OhpMP5djbtH621rq81rq+1vqqyRByPFuO+vjOyblWH2f/GZNrkiS11vHJ/eunOO7ZST48eQvWniT/lom/h9OSnJXkthPMdCIPmWHy4zOO+vy+WuvoUZ8fTLL4UZ4LAGggDw4EADrlriTnZCJyPOjcTP0A5fcm2ZTkL2utB0sp391RSjkzybOSXF1K+bHJzQuTzC+lrK617noUs5111McbMhGddh21vR61/+4kjztqnjK5btsUx92S5PW11i8+fEcpZUsmbq2aSj3O9qNnODsTt689OPPdx18OAMw1ruABADrlb5L8WinlzMmHDz87yQuTfPDhC2utmzPx3Jo3T3GcV2ciCl2YiWfQXJGJN2VtTfKKRznbT5RSLpl8ts9vJvlgrXXsOGv/NskLSik/WEoZTPKLmbhd7EtTrH1HkreVUs5OklLKmlLKiyf3vS/Js0spLyulDJRSVpVSrpjctyPJY04w7/sz8Xe5ZvIZQ7+e5K8ewZ8XAJjlBB4AoFN+MxMR5F8y8bDh30ryqlrrTVMtrrX+S611qqtSXpvkj2ut9xz9KxMx5bhv05rGe5O8J8k9SeZn4uHIU6q13pLkJzLxcORdmYhUL6y1Dk+x/P9L8tEk/1xK2ZfkK0meOHmcu5L8UCYC0f1JbsjEA5OT5M+SXDJ5a9ffT3Hc/5Lk2iQ3JvlmkusntwEAJElKrdNdEQwAMHuUUj6X5K9qrX86Q8e7K8lP1Fq/MBPHAwB4NFzBAwDwKJVS1mTiled39HgUAGCOE3gAAB6FUspVSW5N8oeTt18BAPSMW7QAAAAAGs4VPAAAAAANN9DrAY62evXqes455/R6DAAAAIC+cN111+2qta6Zbl1fBZ5zzjkn1157ba/HAAAAAOgLpZQ7T2adW7QAAAAAGk7gAQAAAGg4gQcAAACg4QQeAAAAgIYTeAAAAAAaTuABAAAAaDiBBwAAAKDh
[base64 PNG data for the UMAP projection plot omitted]\n",
+      "text/plain": [
+       "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "from matplotlib import pylab as plt\n", + "%matplotlib inline\n", + "\n", + "model = umap.UMAP()\n", + "projection = model.fit_transform(embeds)\n", + "colors = [viridis(i) for i in labels]\n", + "\n", + "fig, ax = plt.subplots(figsize=(16, 10))\n", + "im = ax.scatter(projection[:, 0], projection[:, 1], c=colors)\n", + "plt.gca().set_aspect(\"equal\", \"datalim\")\n", + "plt.title(\"UMAP projection\")\n", + "plt.tight_layout()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/speaker_encoder/tests.py b/speaker_encoder/tests.py new file mode 100644 index 0000000..bb5ac28 --- /dev/null +++ b/speaker_encoder/tests.py @@ -0,0 +1,80 @@ +import os +import unittest +import torch as T + +from TTS.speaker_encoder.model import SpeakerEncoder +from TTS.speaker_encoder.loss import GE2ELoss +from TTS.speaker_encoder.dataset import MyDataset +from TTS.utils.audio import AudioProcessor +from torch.utils.data import DataLoader +from TTS.datasets.preprocess import libri_tts +from TTS.utils.generic_utils import load_config + + +file_path = os.path.dirname(os.path.realpath(__file__)) + "/../tests/" +c = load_config(os.path.join(file_path, 'test_config.json')) + + +class SpeakerEncoderTests(unittest.TestCase): + def test_in_out(self): + dummy_input = T.rand(4, 20, 80) # B x T x D + dummy_hidden = [T.rand(2, 4, 128), T.rand(2, 4, 128)] + model = SpeakerEncoder(input_dim=80, proj_dim=256, lstm_dim=768, num_lstm_layers=3) + # computing d vectors + output = model.forward(dummy_input) + assert output.shape[0] == 4 + assert output.shape[1] == 256 + output = model.inference(dummy_input) + assert output.shape[0] == 4 + assert output.shape[1] == 256 + # compute d vectors by passing LSTM hidden + # output = model.forward(dummy_input, dummy_hidden) + # assert output.shape[0] == 4 + # assert output.shape[1] == 20 + # assert output.shape[2] == 256 + # check normalization + output_norm = T.nn.functional.normalize(output, dim=1, p=2) + assert_diff = (output_norm - output).sum().item() + assert output.type() == 'torch.FloatTensor' + assert abs(assert_diff) < 1e-4, f" [!] output_norm has wrong values - {assert_diff}" + # compute d for a given batch + dummy_input = T.rand(1, 240, 80) # B x T x D + output = model.compute_embedding(dummy_input, num_frames=160, overlap=0.5) + assert output.shape[0] == 1 + assert output.shape[1] == 256 + assert len(output.shape) == 2 + + + +class GE2ELossTests(unittest.TestCase): + def test_in_out(self): + # check random input + dummy_input = T.rand(4, 5, 64) # num_speaker x num_utterance x dim + loss = GE2ELoss(loss_method='softmax') + output = loss.forward(dummy_input) + assert output.item() >= 0. 
+ # check all zeros + dummy_input = T.ones(4, 5, 64) # num_speaker x num_utterance x dim + loss = GE2ELoss(loss_method='softmax') + output = loss.forward(dummy_input) + # check speaker loss with orthogonal d-vectors + dummy_input = T.empty(3, 64) + dummy_input = T.nn.init.orthogonal(dummy_input) + dummy_input = T.cat([dummy_input[0].repeat(5, 1, 1).transpose(0, 1), dummy_input[1].repeat(5, 1, 1).transpose(0, 1), dummy_input[2].repeat(5, 1, 1).transpose(0, 1)]) # num_speaker x num_utterance x dim + loss = GE2ELoss(loss_method='softmax') + output = loss.forward(dummy_input) + assert output.item() < 0.005 + + +# class LoaderTest(unittest.TestCase): +# def test_output(self): +# items = libri_tts("/home/erogol/Data/Libri-TTS/train-clean-360/") +# ap = AudioProcessor(**c['audio']) +# dataset = MyDataset(ap, items, 1.6, 64, 10) +# loader = DataLoader(dataset, batch_size=32, shuffle=False, num_workers=0, collate_fn=dataset.collate_fn) +# count = 0 +# for mel, spk in loader: +# print(mel.shape) +# if count == 4: +# break +# count += 1 \ No newline at end of file diff --git a/speaker_encoder/train.py b/speaker_encoder/train.py new file mode 100644 index 0000000..e154f61 --- /dev/null +++ b/speaker_encoder/train.py @@ -0,0 +1,315 @@ +import argparse +import os +import sys +import time +import traceback + +import torch +from torch import optim +from torch.utils.data import DataLoader +from TTS.datasets.preprocess import load_meta_data +from TTS.speaker_encoder.dataset import MyDataset +from TTS.speaker_encoder.generic_utils import save_best_model, save_checkpoint +from TTS.speaker_encoder.loss import GE2ELoss +from TTS.speaker_encoder.model import SpeakerEncoder +from TTS.speaker_encoder.visual import plot_embeddings +from TTS.utils.audio import AudioProcessor +from TTS.utils.generic_utils import (NoamLR, check_update, copy_config_file, + count_parameters, + create_experiment_folder, get_git_branch, + gradual_training_scheduler, load_config, + remove_experiment_folder, set_init_dict, + setup_model, split_dataset) +from TTS.utils.logger import Logger +from TTS.utils.radam import RAdam +from TTS.utils.visual import plot_alignment, plot_spectrogram + +torch.backends.cudnn.enabled = True +torch.backends.cudnn.benchmark = True +torch.manual_seed(54321) +use_cuda = torch.cuda.is_available() +num_gpus = torch.cuda.device_count() +print(" > Using CUDA: ", use_cuda) +print(" > Number of GPUs: ", num_gpus) + + +def setup_loader(ap, is_val=False, verbose=False): + global meta_data_train + global meta_data_eval + if "meta_data_train" not in globals(): + meta_data_train, meta_data_eval = load_meta_data(c.datasets) + if is_val: + loader = None + else: + dataset = MyDataset(ap, + meta_data_eval if is_val else meta_data_train, + voice_len=1.6, + num_utter_per_speaker=10, + skip_speakers=False, + verbose=verbose) + # sampler = DistributedSampler(dataset) if num_gpus > 1 else None + loader = DataLoader(dataset, + batch_size=c.num_speakers_in_batch, + shuffle=False, + num_workers=0, + collate_fn=dataset.collate_fn) + return loader + + +def train(model, criterion, optimizer, scheduler, ap, global_step): + data_loader = setup_loader(ap, is_val=False, verbose=True) + model.train() + epoch_time = 0 + best_loss = float('inf') + avg_loss = 0 + end_time = time.time() + for num_iter, data in enumerate(data_loader): + start_time = time.time() + + # setup input data + inputs = data[0] + labels = data[1] + loader_time = time.time() - end_time + global_step += 1 + + # setup lr + if c.lr_decay: + scheduler.step() + 
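+        # NoamLR is stepped once per iteration here; with lr_decay disabled the
+        # optimizer simply keeps the fixed lr from config.json.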
optimizer.zero_grad() + + # dispatch data to GPU + if use_cuda: + inputs = inputs.cuda(non_blocking=True) + # labels = labels.cuda(non_blocking=True) + + # forward pass model + outputs = model(inputs) + + # loss computation + loss = criterion( + outputs.view(c.num_speakers_in_batch, + outputs.shape[0] // c.num_speakers_in_batch, -1)) + loss.backward() + grad_norm, _ = check_update(model, c.grad_clip) + optimizer.step() + + step_time = time.time() - start_time + epoch_time += step_time + + avg_loss = 0.01 * loss.item( + ) + 0.99 * avg_loss if avg_loss != 0 else loss.item() + current_lr = optimizer.param_groups[0]['lr'] + + if global_step % c.steps_plot_stats == 0: + # Plot Training Epoch Stats + train_stats = { + "GE2Eloss": avg_loss, + "lr": current_lr, + "grad_norm": grad_norm, + "step_time": step_time + } + tb_logger.tb_train_epoch_stats(global_step, train_stats) + figures = { + # FIXME: not constant + "UMAP Plot": plot_embeddings(outputs.detach().cpu().numpy(), + 10), + } + tb_logger.tb_train_figures(global_step, figures) + + if global_step % c.print_step == 0: + print( + " | > Step:{} Loss:{:.5f} AvgLoss:{:.5f} GradNorm:{:.5f} " + "StepTime:{:.2f} LoaderTime:{:.2f} LR:{:.6f}".format( + global_step, loss.item(), avg_loss, grad_norm, step_time, + loader_time, current_lr), + flush=True) + + # save best model + best_loss = save_best_model(model, optimizer, avg_loss, best_loss, + OUT_PATH, global_step) + + end_time = time.time() + return avg_loss, global_step + + +# def evaluate(model, criterion, ap, global_step, epoch): +# data_loader = setup_loader(ap, is_val=True) +# model.eval() +# epoch_time = 0 +# avg_loss = 0 +# print("\n > Validation") +# with torch.no_grad(): +# if data_loader is not None: +# for num_iter, data in enumerate(data_loader): +# start_time = time.time() + +# # setup input data +# inputs = data[0] +# labels = data[1] + +# # dispatch data to GPU +# if use_cuda: +# inputs = inputs.cuda() +# # labels = labels.cuda() + +# # forward pass +# outputs = model.forward(inputs) + +# # loss computation +# loss = criterion(outputs.reshape( +# c.num_speakers_in_batch, outputs.shape[0] // c.num_speakers_in_batch, -1)) +# step_time = time.time() - start_time +# epoch_time += step_time + +# if num_iter % c.print_step == 0: +# print( +# " | > Loss: {:.5f} ".format(loss.item()), +# flush=True) + +# avg_loss += float(loss.item()) + +# eval_figures = { +# "prediction": plot_spectrogram(const_spec, ap), +# "ground_truth": plot_spectrogram(gt_spec, ap), +# "alignment": plot_alignment(align_img) +# } +# tb_logger.tb_eval_figures(global_step, eval_figures) + +# # Sample audio +# if c.model in ["Tacotron", "TacotronGST"]: +# eval_audio = ap.inv_spectrogram(const_spec.T) +# else: +# eval_audio = ap.inv_mel_spectrogram(const_spec.T) +# tb_logger.tb_eval_audios( +# global_step, {"ValAudio": eval_audio}, c.audio["sample_rate"]) + +# # compute average losses +# avg_loss /= (num_iter + 1) + +# # Plot Validation Stats +# epoch_stats = {"GE2Eloss": avg_loss} +# tb_logger.tb_eval_stats(global_step, epoch_stats) +# return avg_loss + + +# FIXME: move args definition/parsing inside of main? 
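+# Quick shape sketch for the reshape before the loss in train(): each batch holds
+# num_speakers_in_batch * num_utter_per_speaker utterances, so with the shipped
+# config (N=32 speakers, M=10 utterances, proj_dim=128) the model output is
+# (320, 128) and the view() call turns it into (32, 10, 128) before GE2ELoss sees it.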
+def main(args): # pylint: disable=redefined-outer-name + ap = AudioProcessor(**c.audio) + model = SpeakerEncoder(input_dim=40, + proj_dim=128, + lstm_dim=384, + num_lstm_layers=3) + optimizer = RAdam(model.parameters(), lr=c.lr) + criterion = GE2ELoss(loss_method='softmax') + + if args.restore_path: + checkpoint = torch.load(args.restore_path) + try: + # TODO: fix optimizer init, model.cuda() needs to be called before + # optimizer restore + # optimizer.load_state_dict(checkpoint['optimizer']) + if c.reinit_layers: + raise RuntimeError + model.load_state_dict(checkpoint['model']) + except: + print(" > Partial model initialization.") + model_dict = model.state_dict() + model_dict = set_init_dict(model_dict, checkpoint, c) + model.load_state_dict(model_dict) + del model_dict + for group in optimizer.param_groups: + group['lr'] = c.lr + print(" > Model restored from step %d" % checkpoint['step'], + flush=True) + args.restore_step = checkpoint['step'] + else: + args.restore_step = 0 + + if use_cuda: + model = model.cuda() + criterion.cuda() + + if c.lr_decay: + scheduler = NoamLR(optimizer, + warmup_steps=c.warmup_steps, + last_epoch=args.restore_step - 1) + else: + scheduler = None + + num_params = count_parameters(model) + print("\n > Model has {} parameters".format(num_params), flush=True) + + global_step = args.restore_step + train_loss, global_step = train(model, criterion, optimizer, scheduler, ap, + global_step) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '--restore_path', + type=str, + help='Path to model outputs (checkpoint, tensorboard etc.).', + default=0) + parser.add_argument( + '--config_path', + type=str, + help='Path to config file for training.', + ) + parser.add_argument('--debug', + type=bool, + default=True, + help='Do not verify commit integrity to run training.') + parser.add_argument( + '--data_path', + type=str, + default='', + help='Defines the data path. 
+    parser.add_argument('--output_path',
+                        type=str,
+                        help='path for training outputs.',
+                        default='')
+    parser.add_argument('--output_folder',
+                        type=str,
+                        default='',
+                        help='folder name for training outputs.')
+    args = parser.parse_args()
+
+    # setup output paths and read configs
+    c = load_config(args.config_path)
+    _ = os.path.dirname(os.path.realpath(__file__))
+    if args.data_path != '':
+        c.data_path = args.data_path
+
+    if args.output_path == '':
+        OUT_PATH = os.path.join(_, c.output_path)
+    else:
+        OUT_PATH = args.output_path
+
+    if args.output_folder == '':
+        OUT_PATH = create_experiment_folder(OUT_PATH, c.run_name, args.debug)
+    else:
+        OUT_PATH = os.path.join(OUT_PATH, args.output_folder)
+
+    new_fields = {}
+    if args.restore_path:
+        new_fields["restore_path"] = args.restore_path
+    new_fields["github_branch"] = get_git_branch()
+    copy_config_file(args.config_path, os.path.join(OUT_PATH, 'config.json'),
+                     new_fields)
+
+    LOG_DIR = OUT_PATH
+    tb_logger = Logger(LOG_DIR)
+
+    try:
+        main(args)
+    except KeyboardInterrupt:
+        remove_experiment_folder(OUT_PATH)
+        try:
+            sys.exit(0)
+        except SystemExit:
+            os._exit(0)  # pylint: disable=protected-access
+    except Exception:  # pylint: disable=broad-except
+        remove_experiment_folder(OUT_PATH)
+        traceback.print_exc()
+        sys.exit(1)
diff --git a/speaker_encoder/umap.png b/speaker_encoder/umap.png
new file mode 100644
index 0000000000000000000000000000000000000000..94cd32541b9937940ccdb1be35625e15b089ccf0
GIT binary patch
literal 23864