FLAML/notebook/automl_nlp.ipynb

{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "43f7-wG-Tjg_"
},
"source": [
"# FineTuning NLP Models with FLAML Library\n",
"\n",
"\n",
"## 1. Introduction\n",
"\n",
"FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models \n",
"with low computational cost. It is fast and economical. The simple and lightweight design makes it easy to use and extend, such as adding new learners. FLAML can \n",
"- serve as an economical AutoML engine,\n",
"- be used as a fast hyperparameter tuning tool, or \n",
"- be embedded in self-tuning software that requires low latency & resource in repetitive\n",
" tuning tasks.\n",
"\n",
"In this notebook, we demonstrate how to use the FLAML library to fine tune an NLP language model with hyperparameter search. We will use [flaml.tune](https://microsoft.github.io/FLAML/docs/Use-Cases/Tune-User-Defined-Function) with the built in GPU in colab for the tuning. However, if you have a machine with more than 1 GPU, you can also use FLAML's [parallel tuning](https://microsoft.github.io/FLAML/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning) with the ray tune option. \n",
"\n",
"FLAML requires `Python>=3.8`. To run this notebook example, please install flaml with the `[automl,hf,blendsearch]` option:\n",
"```bash\n",
"pip install flaml[automl,hf,blendsearch]; \n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Q8c3VMy6TjhC",
"outputId": "3584a81d-f26e-4eb9-9929-629cfff97ee9"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
"Collecting flaml[blendsearch,notebook,ray]\n",
" Downloading FLAML-1.2.0-py3-none-any.whl (250 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m250.4/250.4 kB\u001b[0m \u001b[31m4.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: scikit-learn>=0.24 in /usr/local/lib/python3.9/dist-packages (from flaml[blendsearch,notebook,ray]) (1.2.2)\n",
"Requirement already satisfied: xgboost>=0.90 in /usr/local/lib/python3.9/dist-packages (from flaml[blendsearch,notebook,ray]) (1.7.5)\n",
"Requirement already satisfied: NumPy>=1.17.0rc1 in /usr/local/lib/python3.9/dist-packages (from flaml[blendsearch,notebook,ray]) (1.22.4)\n",
"Requirement already satisfied: pandas>=1.1.4 in /usr/local/lib/python3.9/dist-packages (from flaml[blendsearch,notebook,ray]) (1.5.3)\n",
"Requirement already satisfied: lightgbm>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from flaml[blendsearch,notebook,ray]) (3.3.5)\n",
"Requirement already satisfied: scipy>=1.4.1 in /usr/local/lib/python3.9/dist-packages (from flaml[blendsearch,notebook,ray]) (1.10.1)\n",
"Collecting optuna==2.8.0\n",
" Downloading optuna-2.8.0-py3-none-any.whl (301 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m302.0/302.0 kB\u001b[0m \u001b[31m17.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting ray[tune]~=1.13\n",
" Downloading ray-1.13.0-cp39-cp39-manylinux2014_x86_64.whl (54.3 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m54.3/54.3 MB\u001b[0m \u001b[31m12.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting openml==0.10.2\n",
" Downloading openml-0.10.2.tar.gz (158 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m159.0/159.0 kB\u001b[0m \u001b[31m9.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
"Collecting jupyter\n",
" Downloading jupyter-1.0.0-py2.py3-none-any.whl (2.7 kB)\n",
"Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/dist-packages (from flaml[blendsearch,notebook,ray]) (3.7.1)\n",
"Collecting liac-arff>=2.4.0\n",
" Downloading liac-arff-2.5.0.tar.gz (13 kB)\n",
" Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
"Collecting xmltodict\n",
" Downloading xmltodict-0.13.0-py2.py3-none-any.whl (10.0 kB)\n",
"Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from openml==0.10.2->flaml[blendsearch,notebook,ray]) (2.27.1)\n",
"Requirement already satisfied: python-dateutil in /usr/local/lib/python3.9/dist-packages (from openml==0.10.2->flaml[blendsearch,notebook,ray]) (2.8.2)\n",
"Collecting alembic\n",
" Downloading alembic-1.10.3-py3-none-any.whl (212 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m212.3/212.3 kB\u001b[0m \u001b[31m5.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting cliff\n",
" Downloading cliff-4.2.0-py3-none-any.whl (81 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m81.0/81.0 kB\u001b[0m \u001b[31m2.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: sqlalchemy>=1.1.0 in /usr/local/lib/python3.9/dist-packages (from optuna==2.8.0->flaml[blendsearch,notebook,ray]) (2.0.9)\n",
"Collecting cmaes>=0.8.2\n",
" Downloading cmaes-0.9.1-py3-none-any.whl (21 kB)\n",
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from optuna==2.8.0->flaml[blendsearch,notebook,ray]) (23.0)\n",
"Collecting colorlog\n",
" Downloading colorlog-6.7.0-py2.py3-none-any.whl (11 kB)\n",
"Requirement already satisfied: tqdm in /usr/local/lib/python3.9/dist-packages (from optuna==2.8.0->flaml[blendsearch,notebook,ray]) (4.65.0)\n",
"Requirement already satisfied: wheel in /usr/local/lib/python3.9/dist-packages (from lightgbm>=2.3.1->flaml[blendsearch,notebook,ray]) (0.40.0)\n",
"Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.9/dist-packages (from pandas>=1.1.4->flaml[blendsearch,notebook,ray]) (2022.7.1)\n",
"Requirement already satisfied: jsonschema in /usr/local/lib/python3.9/dist-packages (from ray[tune]~=1.13->flaml[blendsearch,notebook,ray]) (4.3.3)\n",
"Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from ray[tune]~=1.13->flaml[blendsearch,notebook,ray]) (3.11.0)\n",
"Collecting click<=8.0.4,>=7.0\n",
" Downloading click-8.0.4-py3-none-any.whl (97 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m97.5/97.5 kB\u001b[0m \u001b[31m11.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting frozenlist\n",
" Downloading frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (158 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m158.8/158.8 kB\u001b[0m \u001b[31m20.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting grpcio<=1.43.0,>=1.28.1\n",
" Downloading grpcio-1.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m45.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting virtualenv\n",
" Downloading virtualenv-20.21.0-py3-none-any.whl (8.7 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m8.7/8.7 MB\u001b[0m \u001b[31m43.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: msgpack<2.0.0,>=1.0.0 in /usr/local/lib/python3.9/dist-packages (from ray[tune]~=1.13->flaml[blendsearch,notebook,ray]) (1.0.5)\n",
"Requirement already satisfied: protobuf<4.0.0,>=3.15.3 in /usr/local/lib/python3.9/dist-packages (from ray[tune]~=1.13->flaml[blendsearch,notebook,ray]) (3.20.3)\n",
"Requirement already satisfied: pyyaml in /usr/local/lib/python3.9/dist-packages (from ray[tune]~=1.13->flaml[blendsearch,notebook,ray]) (6.0)\n",
"Requirement already satisfied: attrs in /usr/local/lib/python3.9/dist-packages (from ray[tune]~=1.13->flaml[blendsearch,notebook,ray]) (22.2.0)\n",
"Collecting aiosignal\n",
" Downloading aiosignal-1.3.1-py3-none-any.whl (7.6 kB)\n",
"Requirement already satisfied: tabulate in /usr/local/lib/python3.9/dist-packages (from ray[tune]~=1.13->flaml[blendsearch,notebook,ray]) (0.8.10)\n",
"Collecting tensorboardX>=1.9\n",
" Downloading tensorboardX-2.6-py2.py3-none-any.whl (114 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m114.5/114.5 kB\u001b[0m \u001b[31m16.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.9/dist-packages (from scikit-learn>=0.24->flaml[blendsearch,notebook,ray]) (1.2.0)\n",
"Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.9/dist-packages (from scikit-learn>=0.24->flaml[blendsearch,notebook,ray]) (3.1.0)\n",
"Requirement already satisfied: jupyter-console in /usr/local/lib/python3.9/dist-packages (from jupyter->flaml[blendsearch,notebook,ray]) (6.1.0)\n",
"Collecting qtconsole\n",
" Downloading qtconsole-5.4.2-py3-none-any.whl (121 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m121.2/121.2 kB\u001b[0m \u001b[31m9.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: nbconvert in /usr/local/lib/python3.9/dist-packages (from jupyter->flaml[blendsearch,notebook,ray]) (6.5.4)\n",
"Requirement already satisfied: ipywidgets in /usr/local/lib/python3.9/dist-packages (from jupyter->flaml[blendsearch,notebook,ray]) (7.7.1)\n",
"Requirement already satisfied: ipykernel in /usr/local/lib/python3.9/dist-packages (from jupyter->flaml[blendsearch,notebook,ray]) (5.5.6)\n",
"Requirement already satisfied: notebook in /usr/local/lib/python3.9/dist-packages (from jupyter->flaml[blendsearch,notebook,ray]) (6.4.8)\n",
"Requirement already satisfied: importlib-resources>=3.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->flaml[blendsearch,notebook,ray]) (5.12.0)\n",
"Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->flaml[blendsearch,notebook,ray]) (8.4.0)\n",
"Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->flaml[blendsearch,notebook,ray]) (3.0.9)\n",
"Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/dist-packages (from matplotlib->flaml[blendsearch,notebook,ray]) (0.11.0)\n",
"Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->flaml[blendsearch,notebook,ray]) (1.4.4)\n",
"Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from matplotlib->flaml[blendsearch,notebook,ray]) (1.0.7)\n",
"Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.9/dist-packages (from matplotlib->flaml[blendsearch,notebook,ray]) (4.39.3)\n",
"Requirement already satisfied: six>=1.5.2 in /usr/local/lib/python3.9/dist-packages (from grpcio<=1.43.0,>=1.28.1->ray[tune]~=1.13->flaml[blendsearch,notebook,ray]) (1.16.0)\n",
"Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.9/dist-packages (from importlib-resources>=3.2.0->matplotlib->flaml[blendsearch,notebook,ray]) (3.15.0)\n",
"Requirement already satisfied: greenlet!=0.4.17 in /usr/local/lib/python3.9/dist-packages (from sqlalchemy>=1.1.0->optuna==2.8.0->flaml[blendsearch,notebook,ray]) (2.0.2)\n",
"Requirement already satisfied: typing-extensions>=4.2.0 in /usr/local/lib/python3.9/dist-packages (from sqlalchemy>=1.1.0->optuna==2.8.0->flaml[blendsearch,notebook,ray]) (4.5.0)\n",
"Collecting Mako\n",
" Downloading Mako-1.2.4-py3-none-any.whl (78 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m78.7/78.7 kB\u001b[0m \u001b[31m9.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: PrettyTable>=0.7.2 in /usr/local/lib/python3.9/dist-packages (from cliff->optuna==2.8.0->flaml[blendsearch,notebook,ray]) (0.7.2)\n",
"Collecting autopage>=0.4.0\n",
" Downloading autopage-0.5.1-py3-none-any.whl (29 kB)\n",
"Collecting cmd2>=1.0.0\n",
" Downloading cmd2-2.4.3-py3-none-any.whl (147 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m147.2/147.2 kB\u001b[0m \u001b[31m19.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting stevedore>=2.0.1\n",
" Downloading stevedore-5.0.0-py3-none-any.whl (49 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.6/49.6 kB\u001b[0m \u001b[31m6.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.9/dist-packages (from cliff->optuna==2.8.0->flaml[blendsearch,notebook,ray]) (6.2.0)\n",
"Requirement already satisfied: tornado>=4.2 in /usr/local/lib/python3.9/dist-packages (from ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (6.2)\n",
"Requirement already satisfied: ipython>=5.0.0 in /usr/local/lib/python3.9/dist-packages (from ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (7.34.0)\n",
"Requirement already satisfied: traitlets>=4.1.0 in /usr/local/lib/python3.9/dist-packages (from ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (5.7.1)\n",
"Requirement already satisfied: ipython-genutils in /usr/local/lib/python3.9/dist-packages (from ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (0.2.0)\n",
"Requirement already satisfied: jupyter-client in /usr/local/lib/python3.9/dist-packages (from ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (6.1.12)\n",
"Requirement already satisfied: jupyterlab-widgets>=1.0.0 in /usr/local/lib/python3.9/dist-packages (from ipywidgets->jupyter->flaml[blendsearch,notebook,ray]) (3.0.7)\n",
"Requirement already satisfied: widgetsnbextension~=3.6.0 in /usr/local/lib/python3.9/dist-packages (from ipywidgets->jupyter->flaml[blendsearch,notebook,ray]) (3.6.4)\n",
"Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /usr/local/lib/python3.9/dist-packages (from jsonschema->ray[tune]~=1.13->flaml[blendsearch,notebook,ray]) (0.19.3)\n",
"Requirement already satisfied: pygments in /usr/local/lib/python3.9/dist-packages (from jupyter-console->jupyter->flaml[blendsearch,notebook,ray]) (2.14.0)\n",
"Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.9/dist-packages (from jupyter-console->jupyter->flaml[blendsearch,notebook,ray]) (3.0.38)\n",
"Requirement already satisfied: lxml in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (4.9.2)\n",
"Requirement already satisfied: jinja2>=3.0 in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (3.1.2)\n",
"Requirement already satisfied: jupyter-core>=4.7 in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (5.3.0)\n",
"Requirement already satisfied: nbclient>=0.5.0 in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (0.7.3)\n",
"Requirement already satisfied: entrypoints>=0.2.2 in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (0.4)\n",
"Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (1.5.0)\n",
"Requirement already satisfied: bleach in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (6.0.0)\n",
"Requirement already satisfied: defusedxml in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (0.7.1)\n",
"Requirement already satisfied: tinycss2 in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (1.2.1)\n",
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (2.1.2)\n",
"Requirement already satisfied: jupyterlab-pygments in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (0.2.2)\n",
"Requirement already satisfied: mistune<2,>=0.8.1 in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (0.8.4)\n",
"Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (4.11.2)\n",
"Requirement already satisfied: nbformat>=5.1 in /usr/local/lib/python3.9/dist-packages (from nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (5.8.0)\n",
"Requirement already satisfied: nest-asyncio>=1.5 in /usr/local/lib/python3.9/dist-packages (from notebook->jupyter->flaml[blendsearch,notebook,ray]) (1.5.6)\n",
"Requirement already satisfied: Send2Trash>=1.8.0 in /usr/local/lib/python3.9/dist-packages (from notebook->jupyter->flaml[blendsearch,notebook,ray]) (1.8.0)\n",
"Requirement already satisfied: prometheus-client in /usr/local/lib/python3.9/dist-packages (from notebook->jupyter->flaml[blendsearch,notebook,ray]) (0.16.0)\n",
"Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.9/dist-packages (from notebook->jupyter->flaml[blendsearch,notebook,ray]) (23.2.1)\n",
"Requirement already satisfied: argon2-cffi in /usr/local/lib/python3.9/dist-packages (from notebook->jupyter->flaml[blendsearch,notebook,ray]) (21.3.0)\n",
"Requirement already satisfied: terminado>=0.8.3 in /usr/local/lib/python3.9/dist-packages (from notebook->jupyter->flaml[blendsearch,notebook,ray]) (0.17.1)\n",
"Collecting qtpy>=2.0.1\n",
" Downloading QtPy-2.3.1-py3-none-any.whl (84 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m84.9/84.9 kB\u001b[0m \u001b[31m11.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->openml==0.10.2->flaml[blendsearch,notebook,ray]) (3.4)\n",
"Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->openml==0.10.2->flaml[blendsearch,notebook,ray]) (2.0.12)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->openml==0.10.2->flaml[blendsearch,notebook,ray]) (2022.12.7)\n",
"Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->openml==0.10.2->flaml[blendsearch,notebook,ray]) (1.26.15)\n",
"Collecting distlib<1,>=0.3.6\n",
" Downloading distlib-0.3.6-py2.py3-none-any.whl (468 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m468.5/468.5 kB\u001b[0m \u001b[31m21.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: platformdirs<4,>=2.4 in /usr/local/lib/python3.9/dist-packages (from virtualenv->ray[tune]~=1.13->flaml[blendsearch,notebook,ray]) (3.2.0)\n",
"Requirement already satisfied: wcwidth>=0.1.7 in /usr/local/lib/python3.9/dist-packages (from cmd2>=1.0.0->cliff->optuna==2.8.0->flaml[blendsearch,notebook,ray]) (0.2.6)\n",
"Collecting pyperclip>=1.6\n",
" Downloading pyperclip-1.8.2.tar.gz (20 kB)\n",
" Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
"Requirement already satisfied: pickleshare in /usr/local/lib/python3.9/dist-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (0.7.5)\n",
"Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.9/dist-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (67.6.1)\n",
"Requirement already satisfied: backcall in /usr/local/lib/python3.9/dist-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (0.2.0)\n",
"Requirement already satisfied: matplotlib-inline in /usr/local/lib/python3.9/dist-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (0.1.6)\n",
"Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.9/dist-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (4.8.0)\n",
"Requirement already satisfied: decorator in /usr/local/lib/python3.9/dist-packages (from ipython>=5.0.0->ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (4.4.2)\n",
"Collecting jedi>=0.16\n",
" Downloading jedi-0.18.2-py2.py3-none-any.whl (1.6 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m82.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: fastjsonschema in /usr/local/lib/python3.9/dist-packages (from nbformat>=5.1->nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (2.16.3)\n",
"Collecting pbr!=2.1.0,>=2.0.0\n",
" Downloading pbr-5.11.1-py2.py3-none-any.whl (112 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m112.7/112.7 kB\u001b[0m \u001b[31m18.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: ptyprocess in /usr/local/lib/python3.9/dist-packages (from terminado>=0.8.3->notebook->jupyter->flaml[blendsearch,notebook,ray]) (0.7.0)\n",
"Requirement already satisfied: argon2-cffi-bindings in /usr/local/lib/python3.9/dist-packages (from argon2-cffi->notebook->jupyter->flaml[blendsearch,notebook,ray]) (21.2.0)\n",
"Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.9/dist-packages (from beautifulsoup4->nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (2.4)\n",
"Requirement already satisfied: webencodings in /usr/local/lib/python3.9/dist-packages (from bleach->nbconvert->jupyter->flaml[blendsearch,notebook,ray]) (0.5.1)\n",
"Requirement already satisfied: parso<0.9.0,>=0.8.0 in /usr/local/lib/python3.9/dist-packages (from jedi>=0.16->ipython>=5.0.0->ipykernel->jupyter->flaml[blendsearch,notebook,ray]) (0.8.3)\n",
"Requirement already satisfied: cffi>=1.0.1 in /usr/local/lib/python3.9/dist-packages (from argon2-cffi-bindings->argon2-cffi->notebook->jupyter->flaml[blendsearch,notebook,ray]) (1.15.1)\n",
"Requirement already satisfied: pycparser in /usr/local/lib/python3.9/dist-packages (from cffi>=1.0.1->argon2-cffi-bindings->argon2-cffi->notebook->jupyter->flaml[blendsearch,notebook,ray]) (2.21)\n",
"Building wheels for collected packages: openml, liac-arff, pyperclip\n",
" Building wheel for openml (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for openml: filename=openml-0.10.2-py3-none-any.whl size=190321 sha256=6384a6a98dcf21a054e2457f2a12e83e7f09122e873ed8dab894d7a4649b869b\n",
" Stored in directory: /root/.cache/pip/wheels/90/70/b9/37e0bd30dd46291f37d970e2032d557d7eb36b6ccabe47419c\n",
" Building wheel for liac-arff (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for liac-arff: filename=liac_arff-2.5.0-py3-none-any.whl size=11732 sha256=45f0543f0ec70558329ca4338de37f0feb6b093e730eed20921f38040916fbf3\n",
" Stored in directory: /root/.cache/pip/wheels/08/82/8b/5c514221984e88c059b94e36a71d4722e590acaae04deab22e\n",
" Building wheel for pyperclip (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for pyperclip: filename=pyperclip-1.8.2-py3-none-any.whl size=11135 sha256=b59846b5e39f6f668d74e06e57b7ceaded7c46beffc70dc391b71c02c6425afb\n",
" Stored in directory: /root/.cache/pip/wheels/0c/09/9e/49e21a6840ef7955b06d47394afef0058f0378c0914e48b8b8\n",
"Successfully built openml liac-arff pyperclip\n",
"Installing collected packages: pyperclip, distlib, xmltodict, virtualenv, tensorboardX, qtpy, pbr, Mako, liac-arff, jedi, grpcio, frozenlist, colorlog, cmd2, cmaes, click, autopage, stevedore, alembic, aiosignal, ray, openml, cliff, qtconsole, optuna, flaml, jupyter\n",
" Attempting uninstall: grpcio\n",
" Found existing installation: grpcio 1.53.0\n",
" Uninstalling grpcio-1.53.0:\n",
" Successfully uninstalled grpcio-1.53.0\n",
" Attempting uninstall: click\n",
" Found existing installation: click 8.1.3\n",
" Uninstalling click-8.1.3:\n",
" Successfully uninstalled click-8.1.3\n",
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"tensorboard 2.12.1 requires grpcio>=1.48.2, but you have grpcio 1.43.0 which is incompatible.\n",
"grpcio-status 1.48.2 requires grpcio>=1.48.2, but you have grpcio 1.43.0 which is incompatible.\n",
"google-cloud-bigquery 3.9.0 requires grpcio<2.0dev,>=1.47.0, but you have grpcio 1.43.0 which is incompatible.\u001b[0m\u001b[31m\n",
"\u001b[0mSuccessfully installed Mako-1.2.4 aiosignal-1.3.1 alembic-1.10.3 autopage-0.5.1 click-8.0.4 cliff-4.2.0 cmaes-0.9.1 cmd2-2.4.3 colorlog-6.7.0 distlib-0.3.6 flaml-1.2.0 frozenlist-1.3.3 grpcio-1.43.0 jedi-0.18.2 jupyter-1.0.0 liac-arff-2.5.0 openml-0.10.2 optuna-2.8.0 pbr-5.11.1 pyperclip-1.8.2 qtconsole-5.4.2 qtpy-2.3.1 ray-1.13.0 stevedore-5.0.0 tensorboardX-2.6 virtualenv-20.21.0 xmltodict-0.13.0\n"
]
},
{
"data": {
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "string"
},
"text/plain": [
"'1.2.0'"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%pip install flaml[automl,hf,blendsearch]\n",
"import flaml\n",
"flaml.__version__"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "lo1id59ntQX_",
"outputId": "692c860d-d498-48f5-d983-f2d850f64bbb"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
"Collecting transformers\n",
" Downloading transformers-4.27.4-py3-none-any.whl (6.8 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.8/6.8 MB\u001b[0m \u001b[31m67.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting huggingface-hub<1.0,>=0.11.0\n",
" Downloading huggingface_hub-0.13.4-py3-none-any.whl (200 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m200.1/200.1 kB\u001b[0m \u001b[31m11.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.9/dist-packages (from transformers) (1.22.4)\n",
"Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from transformers) (2.27.1)\n",
"Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.9/dist-packages (from transformers) (2022.10.31)\n",
"Collecting tokenizers!=0.11.3,<0.14,>=0.11.1\n",
" Downloading tokenizers-0.13.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (7.8 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.8/7.8 MB\u001b[0m \u001b[31m65.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.9/dist-packages (from transformers) (6.0)\n",
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from transformers) (23.0)\n",
"Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from transformers) (3.11.0)\n",
"Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.9/dist-packages (from transformers) (4.65.0)\n",
"Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.9/dist-packages (from huggingface-hub<1.0,>=0.11.0->transformers) (4.5.0)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->transformers) (3.4)\n",
"Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->transformers) (2.0.12)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->transformers) (2022.12.7)\n",
"Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->transformers) (1.26.15)\n",
"Installing collected packages: tokenizers, huggingface-hub, transformers\n",
"Successfully installed huggingface-hub-0.13.4 tokenizers-0.13.3 transformers-4.27.4\n",
"Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
"Collecting datasets\n",
" Downloading datasets-2.11.0-py3-none-any.whl (468 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m468.7/468.7 kB\u001b[0m \u001b[31m36.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: pandas in /usr/local/lib/python3.9/dist-packages (from datasets) (1.5.3)\n",
"Collecting responses<0.19\n",
" Downloading responses-0.18.0-py3-none-any.whl (38 kB)\n",
"Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.9/dist-packages (from datasets) (6.0)\n",
"Collecting dill<0.3.7,>=0.3.0\n",
" Downloading dill-0.3.6-py3-none-any.whl (110 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m110.5/110.5 kB\u001b[0m \u001b[31m11.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: pyarrow>=8.0.0 in /usr/local/lib/python3.9/dist-packages (from datasets) (9.0.0)\n",
"Collecting xxhash\n",
" Downloading xxhash-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (212 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m212.2/212.2 kB\u001b[0m \u001b[31m27.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting multiprocess\n",
" Downloading multiprocess-0.70.14-py39-none-any.whl (132 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m132.9/132.9 kB\u001b[0m \u001b[31m20.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: huggingface-hub<1.0.0,>=0.11.0 in /usr/local/lib/python3.9/dist-packages (from datasets) (0.13.4)\n",
"Requirement already satisfied: packaging in /usr/local/lib/python3.9/dist-packages (from datasets) (23.0)\n",
"Collecting aiohttp\n",
" Downloading aiohttp-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.0 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.0/1.0 MB\u001b[0m \u001b[31m58.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.9/dist-packages (from datasets) (1.22.4)\n",
"Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.9/dist-packages (from datasets) (4.65.0)\n",
"Requirement already satisfied: fsspec[http]>=2021.11.1 in /usr/local/lib/python3.9/dist-packages (from datasets) (2023.3.0)\n",
"Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.9/dist-packages (from datasets) (2.27.1)\n",
"Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (1.3.1)\n",
"Collecting async-timeout<5.0,>=4.0.0a3\n",
" Downloading async_timeout-4.0.2-py3-none-any.whl (5.8 kB)\n",
"Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (22.2.0)\n",
"Collecting multidict<7.0,>=4.5\n",
" Downloading multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (114 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m114.2/114.2 kB\u001b[0m \u001b[31m14.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: charset-normalizer<4.0,>=2.0 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (2.0.12)\n",
"Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (1.3.3)\n",
"Collecting yarl<2.0,>=1.0\n",
" Downloading yarl-1.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (264 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m264.6/264.6 kB\u001b[0m \u001b[31m30.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.9/dist-packages (from huggingface-hub<1.0.0,>=0.11.0->datasets) (4.5.0)\n",
"Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from huggingface-hub<1.0.0,>=0.11.0->datasets) (3.11.0)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests>=2.19.0->datasets) (3.4)\n",
"Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests>=2.19.0->datasets) (1.26.15)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests>=2.19.0->datasets) (2022.12.7)\n",
"Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.9/dist-packages (from pandas->datasets) (2022.7.1)\n",
"Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.9/dist-packages (from pandas->datasets) (2.8.2)\n",
"Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.8.1->pandas->datasets) (1.16.0)\n",
"Installing collected packages: xxhash, multidict, dill, async-timeout, yarl, responses, multiprocess, aiohttp, datasets\n",
"Successfully installed aiohttp-3.8.4 async-timeout-4.0.2 datasets-2.11.0 dill-0.3.6 multidict-6.0.4 multiprocess-0.70.14 responses-0.18.0 xxhash-3.2.0 yarl-1.8.2\n",
"Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
"Collecting rouge_score\n",
" Downloading rouge_score-0.1.2.tar.gz (17 kB)\n",
" Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
"Requirement already satisfied: absl-py in /usr/local/lib/python3.9/dist-packages (from rouge_score) (1.4.0)\n",
"Requirement already satisfied: nltk in /usr/local/lib/python3.9/dist-packages (from rouge_score) (3.8.1)\n",
"Requirement already satisfied: numpy in /usr/local/lib/python3.9/dist-packages (from rouge_score) (1.22.4)\n",
"Requirement already satisfied: six>=1.14.0 in /usr/local/lib/python3.9/dist-packages (from rouge_score) (1.16.0)\n",
"Requirement already satisfied: click in /usr/local/lib/python3.9/dist-packages (from nltk->rouge_score) (8.0.4)\n",
"Requirement already satisfied: tqdm in /usr/local/lib/python3.9/dist-packages (from nltk->rouge_score) (4.65.0)\n",
"Requirement already satisfied: joblib in /usr/local/lib/python3.9/dist-packages (from nltk->rouge_score) (1.2.0)\n",
"Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.9/dist-packages (from nltk->rouge_score) (2022.10.31)\n",
"Building wheels for collected packages: rouge_score\n",
" Building wheel for rouge_score (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for rouge_score: filename=rouge_score-0.1.2-py3-none-any.whl size=24954 sha256=4032d06ff03906dbf10b9d7bae49035b4d76498d3b86b286e1472939d2ee09b0\n",
" Stored in directory: /root/.cache/pip/wheels/9b/3d/39/09558097d3119ca0a4d462df68f22c6f3c1b345ac63a09b86e\n",
"Successfully built rouge_score\n",
"Installing collected packages: rouge_score\n",
"Successfully installed rouge_score-0.1.2\n"
]
}
],
"source": [
"%pip install transformers\n",
"%pip install datasets\n",
"%pip install rouge_score"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "j24pfyQktbln",
"outputId": "29aa3747-5597-4528-b82a-95567b9020b9"
},
"outputs": [
{
"data": {
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "string"
},
"text/plain": [
"'4.27.4'"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import transformers\n",
"transformers.__version__"
]
},
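{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before moving on, here is a minimal, self-contained sketch of how [flaml.tune](https://microsoft.github.io/FLAML/docs/Use-Cases/Tune-User-Defined-Function) tunes a user-defined function. The toy objective and search space below are purely illustrative; the rest of this notebook uses the higher-level `AutoML` API for the actual fine-tuning.\n",
"\n",
"```python\n",
"from flaml import tune\n",
"\n",
"def evaluate_config(config):\n",
"    # Toy objective (smaller is better); replace with your own training/evaluation loop.\n",
"    score = (config[\"x\"] - 3) ** 2 + config[\"y\"]\n",
"    return {\"score\": score}\n",
"\n",
"analysis = tune.run(\n",
"    evaluate_config,        # the function to tune\n",
"    config={                # the hyperparameter search space\n",
"        \"x\": tune.uniform(0, 10),\n",
"        \"y\": tune.randint(0, 10),\n",
"    },\n",
"    metric=\"score\",\n",
"    mode=\"min\",\n",
"    num_samples=20,         # maximum number of trials\n",
"    time_budget_s=60,       # optional wall-clock budget in seconds\n",
")\n",
"print(analysis.best_config)\n",
"```"
]
},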
{
"cell_type": "markdown",
"metadata": {
"id": "efPlAWTdTjhD"
},
"source": [
"Let's run some examples. To use CoLab's built in GPU, you need to select Runtime -> Change runtime type and select GPU. Then you can print the device information using:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "2kx9QbI7uaU8",
"outputId": "c9ad909f-a2fe-4d4f-aabd-552c2505f09e"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[<torch.cuda.device object at 0x7fdb76c70fa0>]\n"
]
}
],
"source": [
"import torch\n",
"print([torch.cuda.device(i) for i in range(torch.cuda.device_count())])"
]
},
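{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you prefer a more readable summary of the hardware, a check along the following lines (a sketch assuming PyTorch with CUDA support) prints the index and name of each visible GPU:\n",
"\n",
"```python\n",
"import torch\n",
"\n",
"if torch.cuda.is_available():\n",
"    for i in range(torch.cuda.device_count()):\n",
"        # print the index and name of each CUDA device\n",
"        print(i, torch.cuda.get_device_name(i))\n",
"else:\n",
"    print(\"No CUDA GPU detected\")\n",
"```"
]
},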
{
"cell_type": "markdown",
"metadata": {
"id": "-yEuLXoHua-f"
},
"source": [
"Note: throughout this notebook, you may see a few ModuleNotFoundErrors. As long as the cell successfully executes, you can ignore that error."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ZBr83DYlTjhD"
},
"source": [
"## 2. Sentiment Classification Example\n",
"### Load data and preprocess\n",
"\n",
"The Stanford Sentiment treebank (SST-2) dataset is a dataset for sentiment classification. First, let's load this dataset into pandas dataframes:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "hGP2eqTBTjhD",
"outputId": "2028b124-d720-49b6-ad8f-7cdf64d3f2bf"
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "9eb9517f746b49c69728f32c8a420816",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading builder script: 0%| | 0.00/28.8k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "b9c718fbcb5e4adb80d56be430177143",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading metadata: 0%| | 0.00/28.7k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "cdfd229808b142c5a069d9c6bca79f1e",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading readme: 0%| | 0.00/27.9k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Downloading and preparing dataset glue/sst2 to /root/.cache/huggingface/datasets/glue/sst2/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad...\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e85a6d9656ee4d38bd43e72fd9dc0b6f",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading data: 0%| | 0.00/7.44M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "116e256ce6a6402c825b0c421fcae089",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Generating train split: 0%| | 0/67349 [00:00<?, ? examples/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "657cc29c3f7345a0aae3b80ab9c698ef",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Generating validation split: 0%| | 0/872 [00:00<?, ? examples/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "62d3aece24b04df2b5b5e81f05459c1c",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Generating test split: 0%| | 0/1821 [00:00<?, ? examples/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Dataset glue downloaded and prepared to /root/.cache/huggingface/datasets/glue/sst2/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad. Subsequent calls will reuse this data.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:datasets.builder:Found cached dataset glue (/root/.cache/huggingface/datasets/glue/sst2/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)\n",
"WARNING:datasets.builder:Found cached dataset glue (/root/.cache/huggingface/datasets/glue/sst2/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)\n"
]
}
],
"source": [
"from datasets import load_dataset\n",
"\n",
"train_dataset = load_dataset(\"glue\", \"sst2\", split=\"train\").to_pandas()\n",
"dev_dataset = load_dataset(\"glue\", \"sst2\", split=\"validation\").to_pandas()\n",
"test_dataset = load_dataset(\"glue\", \"sst2\", split=\"test\").to_pandas()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Nb7SAWVLTjhE"
},
"source": [
"Take a look at the first 5 examples of this dataset:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "65mLkoJhTjhE",
"outputId": "cde84b57-f647-4b6d-c4f4-aafa7b2b53a3"
},
"outputs": [
{
"data": {
"text/html": [
"\n",
" <div id=\"df-85394a2e-60f7-4c0f-ac29-8c8464a759ae\">\n",
" <div class=\"colab-df-container\">\n",
" <div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>sentence</th>\n",
" <th>label</th>\n",
" <th>idx</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>hide new secretions from the parental units</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>contains no wit , only labored gags</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>that loves its characters and communicates som...</td>\n",
" <td>1</td>\n",
" <td>2</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>remains utterly satisfied to remain the same t...</td>\n",
" <td>0</td>\n",
" <td>3</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>on the worst revenge-of-the-nerds clichés the ...</td>\n",
" <td>0</td>\n",
" <td>4</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>\n",
" <button class=\"colab-df-convert\" onclick=\"convertToInteractive('df-85394a2e-60f7-4c0f-ac29-8c8464a759ae')\"\n",
" title=\"Convert this dataframe to an interactive table.\"\n",
" style=\"display:none;\">\n",
" \n",
" <svg xmlns=\"http://www.w3.org/2000/svg\" height=\"24px\"viewBox=\"0 0 24 24\"\n",
" width=\"24px\">\n",
" <path d=\"M0 0h24v24H0V0z\" fill=\"none\"/>\n",
" <path d=\"M18.56 5.44l.94 2.06.94-2.06 2.06-.94-2.06-.94-.94-2.06-.94 2.06-2.06.94zm-11 1L8.5 8.5l.94-2.06 2.06-.94-2.06-.94L8.5 2.5l-.94 2.06-2.06.94zm10 10l.94 2.06.94-2.06 2.06-.94-2.06-.94-.94-2.06-.94 2.06-2.06.94z\"/><path d=\"M17.41 7.96l-1.37-1.37c-.4-.4-.92-.59-1.43-.59-.52 0-1.04.2-1.43.59L10.3 9.45l-7.72 7.72c-.78.78-.78 2.05 0 2.83L4 21.41c.39.39.9.59 1.41.59.51 0 1.02-.2 1.41-.59l7.78-7.78 2.81-2.81c.8-.78.8-2.07 0-2.86zM5.41 20L4 18.59l7.72-7.72 1.47 1.35L5.41 20z\"/>\n",
" </svg>\n",
" </button>\n",
" \n",
" <style>\n",
" .colab-df-container {\n",
" display:flex;\n",
" flex-wrap:wrap;\n",
" gap: 12px;\n",
" }\n",
"\n",
" .colab-df-convert {\n",
" background-color: #E8F0FE;\n",
" border: none;\n",
" border-radius: 50%;\n",
" cursor: pointer;\n",
" display: none;\n",
" fill: #1967D2;\n",
" height: 32px;\n",
" padding: 0 0 0 0;\n",
" width: 32px;\n",
" }\n",
"\n",
" .colab-df-convert:hover {\n",
" background-color: #E2EBFA;\n",
" box-shadow: 0px 1px 2px rgba(60, 64, 67, 0.3), 0px 1px 3px 1px rgba(60, 64, 67, 0.15);\n",
" fill: #174EA6;\n",
" }\n",
"\n",
" [theme=dark] .colab-df-convert {\n",
" background-color: #3B4455;\n",
" fill: #D2E3FC;\n",
" }\n",
"\n",
" [theme=dark] .colab-df-convert:hover {\n",
" background-color: #434B5C;\n",
" box-shadow: 0px 1px 3px 1px rgba(0, 0, 0, 0.15);\n",
" filter: drop-shadow(0px 1px 2px rgba(0, 0, 0, 0.3));\n",
" fill: #FFFFFF;\n",
" }\n",
" </style>\n",
"\n",
" <script>\n",
" const buttonEl =\n",
" document.querySelector('#df-85394a2e-60f7-4c0f-ac29-8c8464a759ae button.colab-df-convert');\n",
" buttonEl.style.display =\n",
" google.colab.kernel.accessAllowed ? 'block' : 'none';\n",
"\n",
" async function convertToInteractive(key) {\n",
" const element = document.querySelector('#df-85394a2e-60f7-4c0f-ac29-8c8464a759ae');\n",
" const dataTable =\n",
" await google.colab.kernel.invokeFunction('convertToInteractive',\n",
" [key], {});\n",
" if (!dataTable) return;\n",
"\n",
" const docLinkHtml = 'Like what you see? Visit the ' +\n",
" '<a target=\"_blank\" href=https://colab.research.google.com/notebooks/data_table.ipynb>data table notebook</a>'\n",
" + ' to learn more about interactive tables.';\n",
" element.innerHTML = '';\n",
" dataTable['output_type'] = 'display_data';\n",
" await google.colab.output.renderOutput(dataTable, element);\n",
" const docLink = document.createElement('div');\n",
" docLink.innerHTML = docLinkHtml;\n",
" element.appendChild(docLink);\n",
" }\n",
" </script>\n",
" </div>\n",
" </div>\n",
" "
],
"text/plain": [
" sentence label idx\n",
"0 hide new secretions from the parental units 0 0\n",
"1 contains no wit , only labored gags 0 1\n",
"2 that loves its characters and communicates som... 1 2\n",
"3 remains utterly satisfied to remain the same t... 0 3\n",
"4 on the worst revenge-of-the-nerds clichés the ... 0 4"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_dataset.head(5)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ENcUQbOgTjhE"
},
"source": [
"Separate the data into X and y:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "GA0VH9URTjhF"
},
"outputs": [],
"source": [
"custom_sent_keys = [\"sentence\"] # specify the column names of the input sentences\n",
"label_key = \"label\" # specify the column name of the label\n",
"\n",
"X_train, y_train = train_dataset[custom_sent_keys], train_dataset[label_key]\n",
"X_val, y_val = dev_dataset[custom_sent_keys], dev_dataset[label_key]\n",
"X_test = test_dataset[custom_sent_keys]"
]
},
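{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional sanity check, you can confirm the shapes of the resulting splits (for SST-2, the train/validation/test splits have 67349, 872 and 1821 examples, as shown when the dataset was downloaded above):\n",
"\n",
"```python\n",
"print(X_train.shape, y_train.shape)  # expected: (67349, 1) (67349,)\n",
"print(X_val.shape, y_val.shape)      # expected: (872, 1) (872,)\n",
"print(X_test.shape)                  # expected: (1821, 1)\n",
"```"
]
},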
{
"cell_type": "markdown",
"metadata": {
"id": "NpRqB153TjhF"
},
"source": [
"### Run FLAML"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "2kXabqxZuzQl"
},
"source": [
"Now we can run AutoML with FLAML:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "asYbkzrXTjhF"
},
"outputs": [],
"source": [
"from flaml import AutoML\n",
"automl = AutoML()\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "2XZmrBRru_A0"
},
"source": [
"Let's run FLAML for 30 mins. Here we use Electra's [small model](https://huggingface.co/google/electra-small-discriminator) for the tuning. We set gpu_per_trial to 1, and n_concurrent_trials to 1 (the number of trials running at the same time). Make sure gpu_per_trial * n_concurrent_trials does not exceed the GPU number you have. While running you can observe the resource usage (including the GPU) on the right. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "QEvR2bZiTjhG"
},
"outputs": [],
"source": [
"MAX_ITER=20\n",
"automl_settings = {\n",
" \"max_iter\": MAX_ITER, # setting the time budget\n",
" \"task\": \"seq-classification\", # setting the task as seq-classification\n",
" \"fit_kwargs_by_estimator\": {\n",
" \"transformer\": {\n",
" \"output_dir\": \"data/output/\", # setting the output directory\n",
" \"model_path\": \"google/electra-small-discriminator\", # if model_path is not set, the default model is facebook/muppet-roberta-base: https://huggingface.co/facebook/muppet-roberta-base\n",
" }\n",
" },\n",
" \"gpu_per_trial\": 1, # using 1 GPU for each trial\n",
" \"log_file_name\": \"seqclass.log\", # set the file to save the log for HPO\n",
" \"log_type\": \"all\", # the log type for trials: \"all\" if logging all the trials, \"better\" if only keeping the better trials\n",
" \"use_ray\": False, # If parallel tuning, set \"use_ray\" to {\"local_dir\": \"data/output/\"}\n",
" \"n_concurrent_trials\": 1, # How many trials to run at the same time, n_concurrent_trials * gpu_per_trial must not exceed the total number of GPUs\n",
" \"keep_search_state\": True, # keeping the search state\n",
" # \"fp16\": False # whether to use fp16, this option is True by default. \n",
"}"
]
},
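{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, on a machine with more than one GPU you could switch to FLAML's Ray-based parallel tuning by adapting the settings above along these lines (a sketch only; it assumes Ray is installed and two GPUs are available, and the values are illustrative):\n",
"\n",
"```python\n",
"parallel_automl_settings = {\n",
"    \"max_iter\": MAX_ITER,\n",
"    \"task\": \"seq-classification\",\n",
"    \"fit_kwargs_by_estimator\": {\n",
"        \"transformer\": {\n",
"            \"output_dir\": \"data/output/\",\n",
"            \"model_path\": \"google/electra-small-discriminator\",\n",
"        }\n",
"    },\n",
"    \"gpu_per_trial\": 1,  # 1 GPU per trial\n",
"    \"n_concurrent_trials\": 2,  # gpu_per_trial * n_concurrent_trials must not exceed the number of GPUs\n",
"    \"use_ray\": {\"local_dir\": \"data/output/\"},  # enable Ray-based parallel tuning\n",
"    \"log_file_name\": \"seqclass.log\",\n",
"}\n",
"```"
]
},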
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "EXjF65hOTjhG",
"outputId": "b7c524a1-3da1-49ae-caf2-9aec208ffc69"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 02:51:07] {1768} INFO - task = seq-classification\n",
"[flaml.automl.logger: 04-12 02:51:07] {1775} INFO - Data split method: stratified\n",
"[flaml.automl.logger: 04-12 02:51:07] {1778} INFO - Evaluation method: holdout\n",
"[flaml.automl.logger: 04-12 02:51:07] {1891} INFO - Minimizing error metric: 1-accuracy\n",
"[flaml.automl.logger: 04-12 02:51:07] {2011} INFO - List of ML learners in AutoML Run: ['transformer']\n",
"[flaml.automl.logger: 04-12 02:51:07] {2341} INFO - iteration 0, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/flaml/automl/data.py:297: SettingWithCopyWarning: \n",
"A value is trying to be set on a copy of a slice from a DataFrame.\n",
"Try using .loc[row_indexer,col_indexer] = value instead\n",
"\n",
"See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
" X[str_columns] = X[str_columns].astype(\"string\")\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "9c7c478356f54c8d915d64dba5fa4f7e",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)okenizer_config.json: 0%| | 0.00/29.0 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "ce0ce8336e9d4cd285ef4d8e25d1a632",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)lve/main/config.json: 0%| | 0.00/665 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "fa566ddeb2a34d448247684f6f181ec5",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)solve/main/vocab.txt: 0%| | 0.00/232k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "0f8913c46e30455f882cc1ed771cf08f",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)/main/tokenizer.json: 0%| | 0.00/466k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "376203903043482f96bc749665168627",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading pytorch_model.bin: 0%| | 0.00/54.2M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.5731, 'learning_rate': 4.6751863684771026e-06, 'epoch': 1.6}\n",
"{'eval_loss': 0.43649888038635254, 'eval_automl_metric': 0.1754587155963303, 'eval_runtime': 13.2765, 'eval_samples_per_second': 65.68, 'eval_steps_per_second': 65.68, 'epoch': 2.0}\n",
"{'eval_loss': 0.4060048460960388, 'eval_automl_metric': 0.16284403669724767, 'eval_runtime': 14.9968, 'eval_samples_per_second': 58.146, 'eval_steps_per_second': 58.146, 'epoch': 3.0}\n",
"{'train_runtime': 97.6411, 'train_samples_per_second': 307.248, 'train_steps_per_second': 9.617, 'train_loss': 0.4901065034226488, 'epoch': 3.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 02:53:14] {2479} INFO - Estimated sufficient time budget=67349s. Estimated necessary time budget=67s.\n",
"[flaml.automl.logger: 04-12 02:53:14] {2526} INFO - at 127.6s,\testimator transformer's best error=0.1628,\tbest estimator transformer's best error=0.1628\n",
"[flaml.automl.logger: 04-12 02:53:14] {2341} INFO - iteration 1, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.4873640537261963, 'eval_automl_metric': 0.18463302752293576, 'eval_runtime': 13.2764, 'eval_samples_per_second': 65.681, 'eval_steps_per_second': 65.681, 'epoch': 2.0}\n",
"{'eval_loss': 0.4638785123825073, 'eval_automl_metric': 0.18119266055045868, 'eval_runtime': 13.1414, 'eval_samples_per_second': 66.355, 'eval_steps_per_second': 66.355, 'epoch': 3.0}\n",
"{'train_runtime': 71.0618, 'train_samples_per_second': 422.168, 'train_steps_per_second': 6.628, 'train_loss': 0.5612566192691746, 'epoch': 3.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 02:54:43] {2526} INFO - at 217.2s,\testimator transformer's best error=0.1628,\tbest estimator transformer's best error=0.1628\n",
"[flaml.automl.logger: 04-12 02:54:43] {2341} INFO - iteration 2, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.5903, 'learning_rate': 7.550901222797876e-06, 'epoch': 0.8}\n",
"{'loss': 0.3877, 'learning_rate': 4.805118959962285e-06, 'epoch': 1.6}\n",
"{'eval_loss': 0.36965879797935486, 'eval_automl_metric': 0.1513761467889908, 'eval_runtime': 13.4161, 'eval_samples_per_second': 64.996, 'eval_steps_per_second': 64.996, 'epoch': 2.0}\n",
"{'loss': 0.3432, 'learning_rate': 2.0593366971266936e-06, 'epoch': 2.4}\n",
"{'eval_loss': 0.371982604265213, 'eval_automl_metric': 0.1513761467889908, 'eval_runtime': 13.2983, 'eval_samples_per_second': 65.572, 'eval_steps_per_second': 65.572, 'epoch': 3.0}\n",
"{'train_runtime': 135.4608, 'train_samples_per_second': 221.466, 'train_steps_per_second': 13.842, 'train_loss': 0.41677737223307293, 'epoch': 3.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 02:57:20] {2526} INFO - at 373.7s,\testimator transformer's best error=0.1514,\tbest estimator transformer's best error=0.1514\n",
"[flaml.automl.logger: 04-12 02:57:20] {2341} INFO - iteration 3, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.5324, 'learning_rate': 8.879996750213199e-06, 'epoch': 0.8}\n",
"{'eval_loss': 0.383835107088089, 'eval_automl_metric': 0.15366972477064222, 'eval_runtime': 12.1911, 'eval_samples_per_second': 71.528, 'eval_steps_per_second': 71.528, 'epoch': 1.0}\n",
"{'loss': 0.3629, 'learning_rate': 2.959998916737733e-06, 'epoch': 1.6}\n",
"{'eval_loss': 0.3726535737514496, 'eval_automl_metric': 0.14678899082568808, 'eval_runtime': 12.3158, 'eval_samples_per_second': 70.803, 'eval_steps_per_second': 70.803, 'epoch': 2.0}\n",
"{'train_runtime': 97.7012, 'train_samples_per_second': 204.706, 'train_steps_per_second': 12.794, 'train_loss': 0.4241449279785156, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 02:59:19] {2526} INFO - at 493.1s,\testimator transformer's best error=0.1468,\tbest estimator transformer's best error=0.1468\n",
"[flaml.automl.logger: 04-12 02:59:19] {2341} INFO - iteration 4, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.505, 'learning_rate': 1.543094173639824e-05, 'epoch': 0.8}\n",
"{'eval_loss': 0.3837029039859772, 'eval_automl_metric': 0.16284403669724767, 'eval_runtime': 12.1657, 'eval_samples_per_second': 71.677, 'eval_steps_per_second': 71.677, 'epoch': 1.0}\n",
"{'loss': 0.334, 'learning_rate': 5.14364724546608e-06, 'epoch': 1.6}\n",
"{'eval_loss': 0.35917285084724426, 'eval_automl_metric': 0.14220183486238536, 'eval_runtime': 11.0611, 'eval_samples_per_second': 78.835, 'eval_steps_per_second': 78.835, 'epoch': 2.0}\n",
"{'train_runtime': 98.0782, 'train_samples_per_second': 203.919, 'train_steps_per_second': 12.745, 'train_loss': 0.3914005249023437, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:01:16] {2526} INFO - at 610.1s,\testimator transformer's best error=0.1422,\tbest estimator transformer's best error=0.1422\n",
"[flaml.automl.logger: 04-12 03:01:16] {2341} INFO - iteration 5, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.5324, 'learning_rate': 8.879996750213194e-06, 'epoch': 0.8}\n",
"{'eval_loss': 0.383835107088089, 'eval_automl_metric': 0.15366972477064222, 'eval_runtime': 12.2373, 'eval_samples_per_second': 71.257, 'eval_steps_per_second': 71.257, 'epoch': 1.0}\n",
"{'loss': 0.3629, 'learning_rate': 2.9599989167377317e-06, 'epoch': 1.6}\n",
"{'eval_loss': 0.3726535737514496, 'eval_automl_metric': 0.14678899082568808, 'eval_runtime': 15.0923, 'eval_samples_per_second': 57.778, 'eval_steps_per_second': 57.778, 'epoch': 2.0}\n",
"{'train_runtime': 96.9835, 'train_samples_per_second': 206.221, 'train_steps_per_second': 12.889, 'train_loss': 0.4241449279785156, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:03:13] {2526} INFO - at 726.6s,\testimator transformer's best error=0.1422,\tbest estimator transformer's best error=0.1422\n",
"[flaml.automl.logger: 04-12 03:03:13] {2341} INFO - iteration 6, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.4443, 'learning_rate': 3.3201834726658944e-05, 'epoch': 0.8}\n",
"{'eval_loss': 0.36218205094337463, 'eval_automl_metric': 0.14678899082568808, 'eval_runtime': 15.8225, 'eval_samples_per_second': 55.111, 'eval_steps_per_second': 55.111, 'epoch': 1.0}\n",
"{'loss': 0.2946, 'learning_rate': 1.106727824221965e-05, 'epoch': 1.6}\n",
"{'eval_loss': 0.3497363030910492, 'eval_automl_metric': 0.125, 'eval_runtime': 13.5632, 'eval_samples_per_second': 64.292, 'eval_steps_per_second': 64.292, 'epoch': 2.0}\n",
"{'train_runtime': 99.4348, 'train_samples_per_second': 201.137, 'train_steps_per_second': 12.571, 'train_loss': 0.34309757995605467, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:05:13] {2526} INFO - at 846.5s,\testimator transformer's best error=0.1250,\tbest estimator transformer's best error=0.1250\n",
"[flaml.automl.logger: 04-12 03:05:13] {2341} INFO - iteration 7, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.505, 'learning_rate': 1.543094173639823e-05, 'epoch': 0.8}\n",
"{'eval_loss': 0.3837029039859772, 'eval_automl_metric': 0.16284403669724767, 'eval_runtime': 13.2849, 'eval_samples_per_second': 65.639, 'eval_steps_per_second': 65.639, 'epoch': 1.0}\n",
"{'loss': 0.334, 'learning_rate': 5.143647245466077e-06, 'epoch': 1.6}\n",
"{'eval_loss': 0.35917285084724426, 'eval_automl_metric': 0.14220183486238536, 'eval_runtime': 13.3768, 'eval_samples_per_second': 65.187, 'eval_steps_per_second': 65.187, 'epoch': 2.0}\n",
"{'train_runtime': 100.6961, 'train_samples_per_second': 198.617, 'train_steps_per_second': 12.414, 'train_loss': 0.3914005249023437, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:07:15] {2526} INFO - at 969.1s,\testimator transformer's best error=0.1250,\tbest estimator transformer's best error=0.1250\n",
"[flaml.automl.logger: 04-12 03:07:15] {2341} INFO - iteration 8, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.4927, 'learning_rate': 4.960961475644055e-05, 'epoch': 0.4}\n",
"{'loss': 0.3649, 'learning_rate': 3.720721106733041e-05, 'epoch': 0.8}\n",
"{'eval_loss': 0.42004191875457764, 'eval_automl_metric': 0.14678899082568808, 'eval_runtime': 12.3779, 'eval_samples_per_second': 70.448, 'eval_steps_per_second': 70.448, 'epoch': 1.0}\n",
"{'loss': 0.3085, 'learning_rate': 2.4804807378220275e-05, 'epoch': 1.2}\n",
"{'loss': 0.2846, 'learning_rate': 1.2402403689110137e-05, 'epoch': 1.6}\n",
"{'loss': 0.2478, 'learning_rate': 0.0, 'epoch': 2.0}\n",
"{'eval_loss': 0.4621019959449768, 'eval_automl_metric': 0.13188073394495414, 'eval_runtime': 15.9671, 'eval_samples_per_second': 54.612, 'eval_steps_per_second': 54.612, 'epoch': 2.0}\n",
"{'train_runtime': 176.8274, 'train_samples_per_second': 113.105, 'train_steps_per_second': 14.138, 'train_loss': 0.3397238067626953, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:10:32] {2526} INFO - at 1166.1s,\testimator transformer's best error=0.1250,\tbest estimator transformer's best error=0.1250\n",
"[flaml.automl.logger: 04-12 03:10:32] {2341} INFO - iteration 9, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.38070234656333923, 'eval_automl_metric': 0.1571100917431193, 'eval_runtime': 13.2262, 'eval_samples_per_second': 65.93, 'eval_steps_per_second': 65.93, 'epoch': 1.0}\n",
"{'loss': 0.3891, 'learning_rate': 9.938984432909045e-06, 'epoch': 1.6}\n",
"{'eval_loss': 0.3584316074848175, 'eval_automl_metric': 0.1490825688073395, 'eval_runtime': 12.9432, 'eval_samples_per_second': 67.371, 'eval_steps_per_second': 67.371, 'epoch': 2.0}\n",
"{'train_runtime': 67.7566, 'train_samples_per_second': 295.174, 'train_steps_per_second': 9.239, 'train_loss': 0.36350049149875824, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:12:00] {2526} INFO - at 1253.6s,\testimator transformer's best error=0.1250,\tbest estimator transformer's best error=0.1250\n",
"[flaml.automl.logger: 04-12 03:12:00] {2341} INFO - iteration 10, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.4701, 'learning_rate': 2.245453717937598e-05, 'epoch': 0.8}\n",
"{'eval_loss': 0.333243191242218, 'eval_automl_metric': 0.1330275229357798, 'eval_runtime': 13.3288, 'eval_samples_per_second': 65.422, 'eval_steps_per_second': 65.422, 'epoch': 1.0}\n",
"{'loss': 0.3064, 'learning_rate': 7.484845726458661e-06, 'epoch': 1.6}\n",
"{'eval_loss': 0.3158172369003296, 'eval_automl_metric': 0.12958715596330272, 'eval_runtime': 13.0704, 'eval_samples_per_second': 66.716, 'eval_steps_per_second': 66.716, 'epoch': 2.0}\n",
"{'train_runtime': 100.5144, 'train_samples_per_second': 198.977, 'train_steps_per_second': 12.436, 'train_loss': 0.3659558166503906, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:14:03] {2526} INFO - at 1376.9s,\testimator transformer's best error=0.1250,\tbest estimator transformer's best error=0.1250\n",
"[flaml.automl.logger: 04-12 03:14:03] {2341} INFO - iteration 11, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.4463, 'learning_rate': 5.205038223181688e-05, 'epoch': 0.12}\n",
"{'loss': 0.318, 'learning_rate': 4.876437325253551e-05, 'epoch': 0.24}\n",
"{'loss': 0.2922, 'learning_rate': 4.547836427325414e-05, 'epoch': 0.36}\n",
"{'loss': 0.2593, 'learning_rate': 4.219235529397277e-05, 'epoch': 0.48}\n",
"{'loss': 0.2689, 'learning_rate': 3.8906346314691405e-05, 'epoch': 0.59}\n",
"{'loss': 0.2379, 'learning_rate': 3.5620337335410034e-05, 'epoch': 0.71}\n",
"{'loss': 0.2362, 'learning_rate': 3.233432835612866e-05, 'epoch': 0.83}\n",
"{'loss': 0.2216, 'learning_rate': 2.9048319376847296e-05, 'epoch': 0.95}\n",
"{'eval_loss': 0.30998706817626953, 'eval_automl_metric': 0.10550458715596334, 'eval_runtime': 13.4979, 'eval_samples_per_second': 64.603, 'eval_steps_per_second': 64.603, 'epoch': 1.0}\n",
"{'loss': 0.195, 'learning_rate': 2.5762310397565928e-05, 'epoch': 1.07}\n",
"{'loss': 0.1818, 'learning_rate': 2.247630141828456e-05, 'epoch': 1.19}\n",
"{'loss': 0.1702, 'learning_rate': 1.919029243900319e-05, 'epoch': 1.31}\n",
"{'loss': 0.1764, 'learning_rate': 1.5904283459721823e-05, 'epoch': 1.43}\n",
"{'loss': 0.1659, 'learning_rate': 1.2618274480440455e-05, 'epoch': 1.54}\n",
"{'loss': 0.1637, 'learning_rate': 9.332265501159088e-06, 'epoch': 1.66}\n",
"{'loss': 0.1572, 'learning_rate': 6.046256521877719e-06, 'epoch': 1.78}\n",
"{'loss': 0.1504, 'learning_rate': 2.7602475425963495e-06, 'epoch': 1.9}\n",
"{'eval_loss': 0.3402843773365021, 'eval_automl_metric': 0.09977064220183485, 'eval_runtime': 13.5874, 'eval_samples_per_second': 64.177, 'eval_steps_per_second': 64.177, 'epoch': 2.0}\n",
"{'train_runtime': 525.9446, 'train_samples_per_second': 256.107, 'train_steps_per_second': 16.009, 'train_loss': 0.22444996686559393, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:23:26] {2526} INFO - at 1939.6s,\testimator transformer's best error=0.0998,\tbest estimator transformer's best error=0.0998\n",
"[flaml.automl.logger: 04-12 03:23:26] {2341} INFO - iteration 12, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.4977, 'learning_rate': 2.4827832220147736e-05, 'epoch': 0.12}\n",
"{'loss': 0.3577, 'learning_rate': 2.3260418569885886e-05, 'epoch': 0.24}\n",
"{'loss': 0.298, 'learning_rate': 2.169300491962403e-05, 'epoch': 0.36}\n",
"{'loss': 0.2778, 'learning_rate': 2.0125591269362177e-05, 'epoch': 0.48}\n",
"{'loss': 0.2768, 'learning_rate': 1.8558177619100327e-05, 'epoch': 0.59}\n",
"{'loss': 0.2563, 'learning_rate': 1.6990763968838476e-05, 'epoch': 0.71}\n",
"{'loss': 0.2446, 'learning_rate': 1.5423350318576622e-05, 'epoch': 0.83}\n",
"{'loss': 0.2298, 'learning_rate': 1.3855936668314771e-05, 'epoch': 0.95}\n",
"{'eval_loss': 0.24045641720294952, 'eval_automl_metric': 0.08371559633027525, 'eval_runtime': 13.5012, 'eval_samples_per_second': 64.587, 'eval_steps_per_second': 64.587, 'epoch': 1.0}\n",
"{'loss': 0.2207, 'learning_rate': 1.2288523018052919e-05, 'epoch': 1.07}\n",
"{'loss': 0.1969, 'learning_rate': 1.0721109367791068e-05, 'epoch': 1.19}\n",
"{'loss': 0.1994, 'learning_rate': 9.153695717529216e-06, 'epoch': 1.31}\n",
"{'loss': 0.1888, 'learning_rate': 7.586282067267364e-06, 'epoch': 1.43}\n",
"{'loss': 0.1809, 'learning_rate': 6.018868417005512e-06, 'epoch': 1.54}\n",
"{'loss': 0.1851, 'learning_rate': 4.4514547667436594e-06, 'epoch': 1.66}\n",
"{'loss': 0.1847, 'learning_rate': 2.8840411164818075e-06, 'epoch': 1.78}\n",
"{'loss': 0.185, 'learning_rate': 1.3166274662199555e-06, 'epoch': 1.9}\n",
"{'eval_loss': 0.31772467494010925, 'eval_automl_metric': 0.0905963302752294, 'eval_runtime': 15.9416, 'eval_samples_per_second': 54.7, 'eval_steps_per_second': 54.7, 'epoch': 2.0}\n",
"{'train_runtime': 528.6445, 'train_samples_per_second': 254.799, 'train_steps_per_second': 15.928, 'train_loss': 0.24481408669659757, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:32:50] {2526} INFO - at 2503.4s,\testimator transformer's best error=0.0837,\tbest estimator transformer's best error=0.0837\n",
"[flaml.automl.logger: 04-12 03:32:50] {2341} INFO - iteration 13, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.4506, 'learning_rate': 1.9036634605968542e-05, 'epoch': 0.24}\n",
"{'loss': 0.3162, 'learning_rate': 1.6471050427266582e-05, 'epoch': 0.48}\n",
"{'loss': 0.283, 'learning_rate': 1.3905466248564623e-05, 'epoch': 0.71}\n",
"{'loss': 0.2499, 'learning_rate': 1.1339882069862663e-05, 'epoch': 0.95}\n",
"{'eval_loss': 0.2961079776287079, 'eval_automl_metric': 0.12041284403669728, 'eval_runtime': 11.8342, 'eval_samples_per_second': 73.685, 'eval_steps_per_second': 73.685, 'epoch': 1.0}\n",
"{'loss': 0.2234, 'learning_rate': 8.774297891160704e-06, 'epoch': 1.19}\n",
"{'loss': 0.2074, 'learning_rate': 6.208713712458743e-06, 'epoch': 1.43}\n",
"{'loss': 0.1995, 'learning_rate': 3.6431295337567835e-06, 'epoch': 1.66}\n",
"{'loss': 0.2037, 'learning_rate': 1.077545355054823e-06, 'epoch': 1.9}\n",
"{'eval_loss': 0.2587985694408417, 'eval_automl_metric': 0.09174311926605505, 'eval_runtime': 13.0236, 'eval_samples_per_second': 66.955, 'eval_steps_per_second': 66.955, 'epoch': 2.0}\n",
"{'train_runtime': 303.3583, 'train_samples_per_second': 444.023, 'train_steps_per_second': 13.878, 'train_loss': 0.2629204466903578, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:38:27] {2526} INFO - at 2840.5s,\testimator transformer's best error=0.0837,\tbest estimator transformer's best error=0.0837\n",
"[flaml.automl.logger: 04-12 03:38:27] {2341} INFO - iteration 14, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.5133, 'learning_rate': 3.1294026779419606e-05, 'epoch': 0.06}\n",
"{'loss': 0.3967, 'learning_rate': 3.0336319998313606e-05, 'epoch': 0.12}\n",
"{'loss': 0.3755, 'learning_rate': 2.9378613217207605e-05, 'epoch': 0.18}\n",
"{'loss': 0.3482, 'learning_rate': 2.842090643610161e-05, 'epoch': 0.24}\n",
"{'loss': 0.3325, 'learning_rate': 2.7463199654995608e-05, 'epoch': 0.3}\n",
"{'loss': 0.3393, 'learning_rate': 2.6505492873889615e-05, 'epoch': 0.36}\n",
"{'loss': 0.2998, 'learning_rate': 2.5547786092783614e-05, 'epoch': 0.42}\n",
"{'loss': 0.3135, 'learning_rate': 2.4590079311677617e-05, 'epoch': 0.48}\n",
"{'loss': 0.3285, 'learning_rate': 2.3632372530571617e-05, 'epoch': 0.53}\n",
"{'loss': 0.3231, 'learning_rate': 2.267466574946562e-05, 'epoch': 0.59}\n",
"{'loss': 0.2747, 'learning_rate': 2.171695896835962e-05, 'epoch': 0.65}\n",
"{'loss': 0.3021, 'learning_rate': 2.0759252187253623e-05, 'epoch': 0.71}\n",
"{'loss': 0.3086, 'learning_rate': 1.9801545406147622e-05, 'epoch': 0.77}\n",
"{'loss': 0.2598, 'learning_rate': 1.8843838625041625e-05, 'epoch': 0.83}\n",
"{'loss': 0.2682, 'learning_rate': 1.7886131843935625e-05, 'epoch': 0.89}\n",
"{'loss': 0.2836, 'learning_rate': 1.6928425062829628e-05, 'epoch': 0.95}\n",
"{'eval_loss': 0.3779904544353485, 'eval_automl_metric': 0.0986238532110092, 'eval_runtime': 12.4545, 'eval_samples_per_second': 70.015, 'eval_steps_per_second': 70.015, 'epoch': 1.0}\n",
"{'loss': 0.2727, 'learning_rate': 1.5970718281723628e-05, 'epoch': 1.01}\n",
"{'loss': 0.2187, 'learning_rate': 1.5013011500617631e-05, 'epoch': 1.07}\n",
"{'loss': 0.2318, 'learning_rate': 1.4055304719511632e-05, 'epoch': 1.13}\n",
"{'loss': 0.2257, 'learning_rate': 1.3097597938405634e-05, 'epoch': 1.19}\n",
"{'loss': 0.2046, 'learning_rate': 1.2139891157299637e-05, 'epoch': 1.25}\n",
"{'loss': 0.2128, 'learning_rate': 1.1182184376193638e-05, 'epoch': 1.31}\n",
"{'loss': 0.2342, 'learning_rate': 1.022447759508764e-05, 'epoch': 1.37}\n",
"{'loss': 0.2161, 'learning_rate': 9.26677081398164e-06, 'epoch': 1.43}\n",
"{'loss': 0.2224, 'learning_rate': 8.309064032875642e-06, 'epoch': 1.48}\n",
"{'loss': 0.2136, 'learning_rate': 7.351357251769644e-06, 'epoch': 1.54}\n",
"{'loss': 0.223, 'learning_rate': 6.393650470663646e-06, 'epoch': 1.6}\n",
"{'loss': 0.2009, 'learning_rate': 5.435943689557647e-06, 'epoch': 1.66}\n",
"{'loss': 0.1951, 'learning_rate': 4.478236908451648e-06, 'epoch': 1.72}\n",
"{'loss': 0.2069, 'learning_rate': 3.52053012734565e-06, 'epoch': 1.78}\n",
"{'loss': 0.188, 'learning_rate': 2.5628233462396515e-06, 'epoch': 1.84}\n",
"{'loss': 0.2092, 'learning_rate': 1.6051165651336533e-06, 'epoch': 1.9}\n",
"{'loss': 0.1991, 'learning_rate': 6.474097840276549e-07, 'epoch': 1.96}\n",
"{'eval_loss': 0.40998855233192444, 'eval_automl_metric': 0.08944954128440363, 'eval_runtime': 13.109, 'eval_samples_per_second': 66.519, 'eval_steps_per_second': 66.519, 'epoch': 2.0}\n",
"{'train_runtime': 1005.387, 'train_samples_per_second': 133.976, 'train_steps_per_second': 16.748, 'train_loss': 0.2697054841113722, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 03:55:51] {2526} INFO - at 3884.8s,\testimator transformer's best error=0.0837,\tbest estimator transformer's best error=0.0837\n",
"[flaml.automl.logger: 04-12 03:55:51] {2341} INFO - iteration 15, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.5456, 'learning_rate': 1.4204334325441244e-05, 'epoch': 0.12}\n",
"{'loss': 0.3467, 'learning_rate': 1.2290003553818434e-05, 'epoch': 0.24}\n",
"{'loss': 0.3331, 'learning_rate': 1.0375672782195626e-05, 'epoch': 0.36}\n",
"{'loss': 0.3057, 'learning_rate': 8.461342010572816e-06, 'epoch': 0.48}\n",
"{'loss': 0.291, 'learning_rate': 6.547011238950007e-06, 'epoch': 0.59}\n",
"{'loss': 0.2872, 'learning_rate': 4.632680467327198e-06, 'epoch': 0.71}\n",
"{'loss': 0.2688, 'learning_rate': 2.718349695704389e-06, 'epoch': 0.83}\n",
"{'loss': 0.2666, 'learning_rate': 8.040189240815798e-07, 'epoch': 0.95}\n",
"{'eval_loss': 0.2697773277759552, 'eval_automl_metric': 0.10206422018348627, 'eval_runtime': 13.1895, 'eval_samples_per_second': 66.113, 'eval_steps_per_second': 66.113, 'epoch': 1.0}\n",
"{'train_runtime': 256.8129, 'train_samples_per_second': 262.249, 'train_steps_per_second': 16.393, 'train_loss': 0.32819755354856367, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 04:00:45] {2526} INFO - at 4178.9s,\testimator transformer's best error=0.0837,\tbest estimator transformer's best error=0.0837\n",
"[flaml.automl.logger: 04-12 04:00:45] {2341} INFO - iteration 16, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.4628, 'learning_rate': 4.151258730652502e-05, 'epoch': 0.12}\n",
"{'loss': 0.3132, 'learning_rate': 3.980143366651987e-05, 'epoch': 0.24}\n",
"{'loss': 0.2997, 'learning_rate': 3.8090280026514715e-05, 'epoch': 0.36}\n",
"{'loss': 0.2644, 'learning_rate': 3.637912638650956e-05, 'epoch': 0.48}\n",
"{'loss': 0.2709, 'learning_rate': 3.4667972746504405e-05, 'epoch': 0.59}\n",
"{'loss': 0.2412, 'learning_rate': 3.2956819106499256e-05, 'epoch': 0.71}\n",
"{'loss': 0.2422, 'learning_rate': 3.12456654664941e-05, 'epoch': 0.83}\n",
"{'loss': 0.2258, 'learning_rate': 2.953451182648895e-05, 'epoch': 0.95}\n",
"{'loss': 0.2074, 'learning_rate': 2.7823358186483794e-05, 'epoch': 1.07}\n",
"{'loss': 0.1859, 'learning_rate': 2.6112204546478642e-05, 'epoch': 1.19}\n",
"{'loss': 0.1871, 'learning_rate': 2.4401050906473487e-05, 'epoch': 1.31}\n",
"{'loss': 0.1918, 'learning_rate': 2.268989726646833e-05, 'epoch': 1.43}\n",
"{'loss': 0.1836, 'learning_rate': 2.097874362646318e-05, 'epoch': 1.54}\n",
"{'loss': 0.1776, 'learning_rate': 1.9267589986458028e-05, 'epoch': 1.66}\n",
"{'loss': 0.1691, 'learning_rate': 1.7556436346452876e-05, 'epoch': 1.78}\n",
"{'loss': 0.1602, 'learning_rate': 1.584528270644772e-05, 'epoch': 1.9}\n",
"{'eval_loss': 0.31128454208374023, 'eval_automl_metric': 0.0905963302752294, 'eval_runtime': 13.3839, 'eval_samples_per_second': 65.153, 'eval_steps_per_second': 65.153, 'epoch': 2.0}\n",
"{'loss': 0.1634, 'learning_rate': 1.4134129066442567e-05, 'epoch': 2.02}\n",
"{'loss': 0.1312, 'learning_rate': 1.2422975426437414e-05, 'epoch': 2.14}\n",
"{'loss': 0.1499, 'learning_rate': 1.071182178643226e-05, 'epoch': 2.26}\n",
"{'loss': 0.1355, 'learning_rate': 9.000668146427107e-06, 'epoch': 2.38}\n",
"{'loss': 0.1231, 'learning_rate': 7.289514506421954e-06, 'epoch': 2.49}\n",
"{'loss': 0.1271, 'learning_rate': 5.5783608664167995e-06, 'epoch': 2.61}\n",
"{'loss': 0.1385, 'learning_rate': 3.867207226411647e-06, 'epoch': 2.73}\n",
"{'loss': 0.1383, 'learning_rate': 2.156053586406493e-06, 'epoch': 2.85}\n",
"{'loss': 0.1224, 'learning_rate': 4.4489994640133986e-07, 'epoch': 2.97}\n",
"{'eval_loss': 0.3745174705982208, 'eval_automl_metric': 0.08600917431192656, 'eval_runtime': 13.2675, 'eval_samples_per_second': 65.725, 'eval_steps_per_second': 65.725, 'epoch': 3.0}\n",
"{'train_runtime': 762.9855, 'train_samples_per_second': 264.811, 'train_steps_per_second': 16.553, 'train_loss': 0.19935976180969867, 'epoch': 3.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 04:14:05] {2526} INFO - at 4978.9s,\testimator transformer's best error=0.0837,\tbest estimator transformer's best error=0.0837\n",
"[flaml.automl.logger: 04-12 04:14:05] {2341} INFO - iteration 17, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.4976, 'learning_rate': 2.4804708485734894e-05, 'epoch': 0.12}\n",
"{'loss': 0.3505, 'learning_rate': 2.3782255539084654e-05, 'epoch': 0.24}\n",
"{'loss': 0.3027, 'learning_rate': 2.275980259243441e-05, 'epoch': 0.36}\n",
"{'loss': 0.2787, 'learning_rate': 2.1737349645784163e-05, 'epoch': 0.48}\n",
"{'loss': 0.2575, 'learning_rate': 2.0714896699133923e-05, 'epoch': 0.59}\n",
"{'loss': 0.2483, 'learning_rate': 1.969244375248368e-05, 'epoch': 0.71}\n",
"{'loss': 0.2386, 'learning_rate': 1.8669990805833436e-05, 'epoch': 0.83}\n",
"{'loss': 0.2348, 'learning_rate': 1.7647537859183196e-05, 'epoch': 0.95}\n",
"{'loss': 0.2039, 'learning_rate': 1.6625084912532952e-05, 'epoch': 1.07}\n",
"{'loss': 0.1986, 'learning_rate': 1.560263196588271e-05, 'epoch': 1.19}\n",
"{'loss': 0.1961, 'learning_rate': 1.4580179019232463e-05, 'epoch': 1.31}\n",
"{'loss': 0.1901, 'learning_rate': 1.3557726072582221e-05, 'epoch': 1.43}\n",
"{'loss': 0.1992, 'learning_rate': 1.253527312593198e-05, 'epoch': 1.54}\n",
"{'loss': 0.1824, 'learning_rate': 1.1512820179281736e-05, 'epoch': 1.66}\n",
"{'loss': 0.178, 'learning_rate': 1.0490367232631494e-05, 'epoch': 1.78}\n",
"{'loss': 0.174, 'learning_rate': 9.46791428598125e-06, 'epoch': 1.9}\n",
"{'eval_loss': 0.3750886917114258, 'eval_automl_metric': 0.10206422018348627, 'eval_runtime': 13.3143, 'eval_samples_per_second': 65.493, 'eval_steps_per_second': 65.493, 'epoch': 2.0}\n",
"{'loss': 0.1663, 'learning_rate': 8.445461339331006e-06, 'epoch': 2.02}\n",
"{'loss': 0.1491, 'learning_rate': 7.4230083926807645e-06, 'epoch': 2.14}\n",
"{'loss': 0.1528, 'learning_rate': 6.400555446030521e-06, 'epoch': 2.26}\n",
"{'loss': 0.1535, 'learning_rate': 5.378102499380278e-06, 'epoch': 2.38}\n",
"{'loss': 0.1493, 'learning_rate': 4.355649552730035e-06, 'epoch': 2.49}\n",
"{'loss': 0.1507, 'learning_rate': 3.333196606079792e-06, 'epoch': 2.61}\n",
"{'loss': 0.1568, 'learning_rate': 2.3107436594295493e-06, 'epoch': 2.73}\n",
"{'loss': 0.1524, 'learning_rate': 1.288290712779306e-06, 'epoch': 2.85}\n",
"{'loss': 0.1481, 'learning_rate': 2.658377661290632e-07, 'epoch': 2.97}\n",
"{'eval_loss': 0.37647315859794617, 'eval_automl_metric': 0.09633027522935778, 'eval_runtime': 12.4258, 'eval_samples_per_second': 70.177, 'eval_steps_per_second': 70.177, 'epoch': 3.0}\n",
"{'train_runtime': 762.6853, 'train_samples_per_second': 264.915, 'train_steps_per_second': 16.56, 'train_loss': 0.2117002085476571, 'epoch': 3.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 04:27:21] {2526} INFO - at 5775.1s,\testimator transformer's best error=0.0837,\tbest estimator transformer's best error=0.0837\n",
"[flaml.automl.logger: 04-12 04:27:21] {2341} INFO - iteration 18, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.485, 'learning_rate': 2.377204590632704e-05, 'epoch': 0.12}\n",
"{'loss': 0.3523, 'learning_rate': 2.0568266134584853e-05, 'epoch': 0.24}\n",
"{'loss': 0.3008, 'learning_rate': 1.7364486362842665e-05, 'epoch': 0.36}\n",
"{'loss': 0.281, 'learning_rate': 1.4160706591100473e-05, 'epoch': 0.48}\n",
"{'loss': 0.2656, 'learning_rate': 1.0956926819358287e-05, 'epoch': 0.59}\n",
"{'loss': 0.26, 'learning_rate': 7.753147047616097e-06, 'epoch': 0.71}\n",
"{'loss': 0.245, 'learning_rate': 4.5493672758739085e-06, 'epoch': 0.83}\n",
"{'loss': 0.248, 'learning_rate': 1.3455875041317192e-06, 'epoch': 0.95}\n",
"{'eval_loss': 0.26702097058296204, 'eval_automl_metric': 0.09518348623853212, 'eval_runtime': 12.5875, 'eval_samples_per_second': 69.275, 'eval_steps_per_second': 69.275, 'epoch': 1.0}\n",
"{'train_runtime': 258.5842, 'train_samples_per_second': 260.453, 'train_steps_per_second': 16.281, 'train_loss': 0.3011425933475449, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 04:32:15] {2526} INFO - at 6068.8s,\testimator transformer's best error=0.0837,\tbest estimator transformer's best error=0.0837\n",
"[flaml.automl.logger: 04-12 04:32:15] {2341} INFO - iteration 19, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.4688, 'learning_rate': 2.860547592162196e-05, 'epoch': 0.12}\n",
"{'loss': 0.3323, 'learning_rate': 2.679957466394381e-05, 'epoch': 0.24}\n",
"{'loss': 0.3065, 'learning_rate': 2.4993673406265652e-05, 'epoch': 0.36}\n",
"{'loss': 0.2827, 'learning_rate': 2.3187772148587497e-05, 'epoch': 0.48}\n",
"{'loss': 0.2619, 'learning_rate': 2.1381870890909346e-05, 'epoch': 0.59}\n",
"{'loss': 0.2546, 'learning_rate': 1.957596963323119e-05, 'epoch': 0.71}\n",
"{'loss': 0.2411, 'learning_rate': 1.7770068375553037e-05, 'epoch': 0.83}\n",
"{'loss': 0.2373, 'learning_rate': 1.5964167117874882e-05, 'epoch': 0.95}\n",
"{'eval_loss': 0.2775367200374603, 'eval_automl_metric': 0.09403669724770647, 'eval_runtime': 13.3919, 'eval_samples_per_second': 65.114, 'eval_steps_per_second': 65.114, 'epoch': 1.0}\n",
"{'loss': 0.2111, 'learning_rate': 1.415826586019673e-05, 'epoch': 1.07}\n",
"{'loss': 0.2022, 'learning_rate': 1.2352364602518575e-05, 'epoch': 1.19}\n",
"{'loss': 0.1888, 'learning_rate': 1.054646334484042e-05, 'epoch': 1.31}\n",
"{'loss': 0.1875, 'learning_rate': 8.740562087162267e-06, 'epoch': 1.43}\n",
"{'loss': 0.1873, 'learning_rate': 6.934660829484112e-06, 'epoch': 1.54}\n",
"{'loss': 0.1884, 'learning_rate': 5.128759571805958e-06, 'epoch': 1.66}\n",
"{'loss': 0.1936, 'learning_rate': 3.322858314127804e-06, 'epoch': 1.78}\n",
"{'loss': 0.1865, 'learning_rate': 1.5169570564496495e-06, 'epoch': 1.9}\n",
"{'eval_loss': 0.3135208785533905, 'eval_automl_metric': 0.08715596330275233, 'eval_runtime': 13.6854, 'eval_samples_per_second': 63.718, 'eval_steps_per_second': 63.718, 'epoch': 2.0}\n",
"{'train_runtime': 528.2587, 'train_samples_per_second': 254.985, 'train_steps_per_second': 15.939, 'train_loss': 0.24211964459996893, 'epoch': 2.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 04:41:42] {2526} INFO - at 6635.5s,\testimator transformer's best error=0.0837,\tbest estimator transformer's best error=0.0837\n",
"[flaml.automl.logger: 04-12 04:41:42] {2642} INFO - selected model: None\n",
"[flaml.automl.logger: 04-12 04:41:42] {2041} INFO - fit succeeded\n",
"[flaml.automl.logger: 04-12 04:41:42] {2042} INFO - Time taken to find the best model: 2503.373429775238\n"
]
}
],
"source": [
"'''The main flaml automl API'''\n",
"automl.fit(X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Ehn1SDb5xAH9"
},
"source": [
"The run takes 2.5 hours. We can print the best trial's loss, which is 1-the accuracy. The accuracy we got is 90.9% which is close to 91.2% reported by [the Electra model github](https://github.com/google-research/electra). "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "qbTAqBsnTjhG",
"outputId": "53c86a9e-21d1-4237-9ea8-10710c77407c"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The best loss by FLAML: 0.9162844036697247\n"
]
}
],
"source": [
"print(\"The best loss by FLAML: {}\".format(1-automl.best_loss))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "wcO2th5M6AIu"
},
"source": [
"If you have more GPUs on your server, you can use flaml.tune with the ray tune option, which will often give you a better score. For example, with 4x NVIDIA V100 GPU, the accuracy was 92.2% after searching for half an hour. For that experiment, you can open this notebook on your GPU server and set \"use_ray\" to {\"local_dir\": \"data/output/\"} and n_concurrent_trials to more than 1. "
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "QFP5JNdPTjhG"
},
"source": [
"### Best model and metric"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "mY07pTY_xlIV"
},
"source": [
"Next, we can print the best hyperparameter and the best score:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "sbnhP3WrTjhG",
"outputId": "19ed86dc-b08e-4c7b-99dd-fd476b33c257"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Best hyperparmeter config: {'learning_rate': 2.6395245870409587e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 33, 'global_max_steps': 4210}\n",
"Best accuracy on validation data: 0.9163\n",
"Training duration of best run: 563.8 s\n"
]
}
],
"source": [
"'''retrieve best config and best learner'''\n",
"print('Best hyperparmeter config:', automl.best_config)\n",
"print('Best accuracy on validation data: {0:.4g}'.format(1-automl.best_loss))\n",
"print('Training duration of best run: {0:.4g} s'.format(automl.best_config_train_time))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "MqIpmxl0dKWu"
},
"source": [
"Save and load the model:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "gfUNXfcNTBA2"
},
"outputs": [],
"source": [
"import pickle\n",
"automl.pickle(\"automl.pkl\")\n",
"\n",
"with open(\"automl.pkl\", \"rb\") as f:\n",
" automl = pickle.load(f)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6mdBURdexxJS"
},
"source": [
"Run the prediction:\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "kRl7pnEKTjhH",
"outputId": "31e08579-cb32-4b8e-a903-ab8026c1107e"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Predicted labels [1 0 1 1 0 1 0 0 1 0 1 0 0 0 0 1 1 1 0 0 0 0 0 1 1 0 0 1 0 0 1 0 1 0 0 0 1\n",
" 0 1 1 1 1 1 1 0 0 0 1 1 0 0 1 1 1 0 1 0 0 0 0 1 0 1 1 1 0 1 1 1 0 0 1 1 0\n",
" 0 1 0 1 1 0 1 0 0 0 1 1 0 1 1 1 1 1 1 0 1 1 0 0 0 0 1 0 1 1 0 0 1 0 0 1 0\n",
" 0 0 0 1 0 1 1 0 0 1 1 1 0 1 1 0 0 1 1 0 0 1 0 0 1 0 0 1 0 0 0 0 1 0 0 1 0\n",
" 0 1 1 1 1 0 1 0 1 0 0 1 0 0 0 0 1 0 0 0 1 1 1 0 0 0 1 1 0 0 0 1 0 0 0 0 0\n",
" 1 1 1 0 0 0 1 1 1 0 1 1 0 1 0 1 1 0 0 1 0 1 1 0 0 1 0 0 0 0 1 1 0 1 1 0 0\n",
" 1 1 1 1 0 1 1 0 1 1 0 0 1 1 1 0 0 1 1 0 0 1 1 1 1 0 0 1 1 0 1 0 0 0 0 0 0\n",
" 1 0 1 0 1 0 0 0 0 0 0 1 0 0 0 1 1 1 1 0 1 1 0 0 1 0 0 1 1 1 1 1 0 1 1 1 1\n",
" 0 1 0 1 1 0 1 1 1 0 1 1 1 1 1 1 0 1 1 0 1 1 0 0 1 0 0 1 0 1 1 1 0 0 0 1 1\n",
" 1 1 0 1 0 0 1 0 1 0 0 1 1 0 0 0 0 0 1 1 1 1 0 0 0 1 1 1 0 1 0 0 0 1 1 0 1\n",
" 0 1 1 0 0 0 0 0 0 0 0 1 0 1 0 0 1 1 0 1 0 0 0 1 1 0 1 1 1 0 1 1 1 0 1 0 1\n",
" 0 0 0 1 1 0 0 1 1 1 1 1 0 0 1 0 0 1 1 1 1 1 0 1 0 0 1 0 1 0 1 1 1 1 0 0 1\n",
" 0 1 0 1 1 1 1 1 0 1 0 1 0 1 1 1 1 0 0 0 0 0 1 1 1 0 1 0 1 1 0 1 0 0 0 1 1\n",
" 1 1 1 1 1 0 0 1 0 0 0 1 0 1 0 1 0 1 1 0 0 0 0 1 1 1 1 1 1 1 1 0 0 1 0 0 0\n",
" 1 1 1 0 0 1 1 1 0 1 1 0 1 0 1 1 1 1 0 1 0 0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 0\n",
" 0 0 1 0 0 0 0 0 1 0 1 0 1 0 1 1 0 0 1 0 0 0 1 0 0 1 0 0 0 0 0 0 0 1 1 0 0\n",
" 0 1 1 0 0 0 0 0 1 0 1 1 1 1 0 1 1 1 0 1 0 1 1 0 1 0 0 0 0 1 1 1 1 1 1 0 1\n",
" 0 1 0 0 0 1 0 1 0 1 0 1 1 0 0 1 0 1 0 1 0 0 0 0 0 0 1 0 0 1 0 1 0 0 1 1 0\n",
" 1 0 0 1 0 1 0 0 1 1 0 1 1 1 1 0 0 1 1 0 1 0 0 1 0 1 0 0 0 1 1 1 1 0 0 0 0\n",
" 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 1 0 1 1 0 1 0 1 1 0 1 1 0 0 0 0 0 0 1 1 1 1\n",
" 1 0 1 0 1 0 1 1 0 0 0 0 0 1 1 1 0 0 1 0 0 0 1 1 0 0 1 1 1 1 0 1 1 1 0 0 0\n",
" 1 1 0 1 0 0 1 1 0 0 0 1 0 0 1 1 1 1 0 1 0 0 1 0 0 0 0 0 0 1 1 1 1 1 0 1 1\n",
" 0 1 1 1 0 0 1 1 0 1 0 1 1 0 0 0 1 0 0 0 1 1 0 1 0 0 0 1 0 0 0 0 1 1 1 0 0\n",
" 0 0 1 0 1 1 1 1 1 1 1 1 0 0 0 1 0 1 0 0 1]\n"
]
}
],
"source": [
"'''compute predictions of testing dataset''' \n",
"y_pred = automl.predict(X_val, **{\"per_device_eval_batch_size\": 1})\n",
"print('Predicted labels', y_pred)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "QThcVssKTjhH"
},
"source": [
"### Log history"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "OEFqWAuLyYIQ"
},
"source": [
"You can also save and plot the history:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "58wpj4vPTjhH",
"outputId": "1e1eb66b-97bc-4875-84d1-37cf5dc5b667"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 9.999999999999999e-06, 'num_train_epochs': 3, 'per_device_train_batch_size': 32, 'seed': 20, 'global_max_steps': 939, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 9.999999999999999e-06, 'num_train_epochs': 3, 'per_device_train_batch_size': 32, 'seed': 20, 'global_max_steps': 939, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 9.711865003865157e-06, 'num_train_epochs': 3, 'per_device_train_batch_size': 64, 'seed': 14, 'global_max_steps': 471, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 9.999999999999999e-06, 'num_train_epochs': 3, 'per_device_train_batch_size': 32, 'seed': 20, 'global_max_steps': 939, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 1.0296683485633468e-05, 'num_train_epochs': 3, 'per_device_train_batch_size': 16, 'seed': 26, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 1.0296683485633468e-05, 'num_train_epochs': 3, 'per_device_train_batch_size': 16, 'seed': 26, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 1.4799994583688665e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 25, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 1.4799994583688665e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 25, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 2.57182362273304e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 31, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 2.57182362273304e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 31, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 1.4799994583688658e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 25, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 2.57182362273304e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 31, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 5.5336391211098245e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 29, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 5.5336391211098245e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 29, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 2.5718236227330384e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 31, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 5.5336391211098245e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 29, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 6.201201844555069e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 8, 'seed': 29, 'global_max_steps': 2500, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 5.5336391211098245e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 29, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 4.9379398849214773e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 32, 'seed': 29, 'global_max_steps': 626, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 5.5336391211098245e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 29, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 10000, 'Current Hyper-parameters': {'learning_rate': 3.74242286322933e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 23, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 5.5336391211098245e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 29, 'global_max_steps': 1250, 'FLAML_sample_size': 10000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 67349, 'Current Hyper-parameters': {'learning_rate': 5.5336391211098245e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 29, 'global_max_steps': 8420, 'FLAML_sample_size': 67349}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 5.5336391211098245e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 29, 'global_max_steps': 8420, 'FLAML_sample_size': 67349}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 67349, 'Current Hyper-parameters': {'learning_rate': 2.6395245870409587e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 33, 'global_max_steps': 4210, 'FLAML_sample_size': 67349}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 2.6395245870409587e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 33, 'global_max_steps': 4210, 'FLAML_sample_size': 67349}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 67349, 'Current Hyper-parameters': {'learning_rate': 2.1602218784670503e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 32, 'seed': 37, 'global_max_steps': 4210, 'FLAML_sample_size': 67349}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 2.6395245870409587e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 16, 'seed': 33, 'global_max_steps': 4210, 'FLAML_sample_size': 67349}}\n"
]
}
],
"source": [
"from flaml.automl.data import get_output_from_log\n",
"time_history, best_valid_loss_history, valid_loss_history, config_history, metric_history = \\\n",
" get_output_from_log(filename=automl_settings['log_file_name'], time_budget=3000)\n",
"for config in config_history:\n",
" print(config)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "dtWSrLsdTjhH",
"outputId": "db4e1844-dd7f-4923-b8b0-b66473248347"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"14\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAkAAAAHHCAYAAABXx+fLAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABP2UlEQVR4nO3deVxU1f8/8NfMKMwoMC6sGqukhrgrhAtqkqBFav7KXEKotEzSwhZNFNES/XyKqEwtU6zINFNLKzFFsEwSBTdCyTWN2NwAUURmzu8Pv8zHEVAGZxjgvp6PxzwecO65d973BvHy3HPPyIQQAkREREQSIjd3AURERET1jQGIiIiIJIcBiIiIiCSHAYiIiIgkhwGIiIiIJIcBiIiIiCSHAYiIiIgkhwGIiIiIJIcBiIiIiCSHAYiIGiU3NzeEhoaauwwiaqQYgIgkbM2aNZDJZDhw4IC5S2l0ysrK8MEHH8DX1xdqtRpKpRIdO3ZEeHg4/vrrL3OXR0T30MzcBRAR1UV2djbkcvP8G+7ChQsICgpCeno6Hn/8cYwfPx5WVlbIzs7GunXr8Nlnn6G8vNwstRFR7TAAEZHZVVRUQKvVwsLCotb7WFpamrCiuwsNDcXBgwfx3XffYcyYMXrbFi5ciDlz5hjlfepyXYiodngLjIjuKScnB8899xwcHBxgaWmJLl26YPXq1Xp9ysvLMW/ePPTu3RtqtRotW7bEwIEDkZycrNfv7NmzkMlkeO+99xAXF4cOHTrA0tISWVlZmD9/PmQyGU6ePInQ0FC0atUKarUaYWFhuHbtmt5x7pwDVHk77/fff0dERATs7OzQsmVLjB49GoWFhXr7arVazJ8/H+3atUOLFi0wZMgQZGVl1Wpe0b59+/DTTz/h+eefrxJ+gFvB7L333tN9P3jwYAwePLhKv9DQULi5ud3zuhw8eBDNmjVDdHR0lWNkZ2dDJpNh6dKlurYrV67g1VdfhbOzMywtLeHp6YklS5ZAq9Xe9byIpIYjQER0V/n5+Xj44Ychk8kQHh4OOzs7bNu2Dc8//zyKi4vx6quvAgCKi4vx+eefY9y4cZg8eTJKSkqwatUqBAYGIi0tDT169NA7bnx8PMrKyjBlyhRYWlqiTZs2um1PP/003N3dERMTg4yMDHz++eewt7fHkiVL7lnvK6+8gtatWyMqKgpnz55FXFwcwsPDsX79el2f2bNn4z//+Q+Cg4MRGBiIw4cPIzAwEGVlZfc8/pYtWwAAzz77bC2unuHuvC5OTk4YNGgQvv32W0RFRen1Xb9+PRQKBZ566ikAwLVr1zBo0CDk5OTgxRdfhIuLC/bu3YvZs2cjNzcXcXFxJqmZqFESRCRZ8fHxAoDYv39/jX2ef/554eTkJC5cuKDX/swzzwi1Wi2uXbsmhBCioqJC3LhxQ6/P5cuXhYODg3juued0bWfOnBEAhI2NjSgoKNDrHxUVJQDo9RdCiNGjR4u2bdvqtbm6uopJkyZVOZeAgACh1Wp17a+99ppQKBTiypUrQggh8vLyRLNmzcSoUaP0jjd//nwBQO+Y1Rk9erQAIC5fvnzXfpUGDRokBg0aVKV90qRJwtXVVff93a7Lp59+KgCIo0eP6rV7eXmJRx55RPf9woULRcuWLcVff/2l12/WrFlCoVCIc+fO1apmIingLTAiqpEQAhs3bkRwcDCEELhw4YLuFRgYiKKiImRkZAAAFAqFbq6KVqvFpUuXUFFRgT59+uj63G7MmDGws7Or9n1feuklve8HDhyIixcvori4+J41T5kyBTKZTG9fjUaDv//+GwCQlJSEiooKvPzyy3r7vfLKK/c8NgBdDdbW1rXqb6jqrsuTTz6JZs2a6Y1iZWZmIisrC2PHjtW1bdiwAQMHDkTr1q31/lsFBARAo9Hg119/NUnNRI0Rb4ERUY0KCwtx5coVfPbZZ/jss8+q7VNQUKD7+osvvsD777+P48eP4+bNm7p2d3f3KvtV11bJxcVF7/vWrVsDAC5fvgwbG5u71ny3fQHogpCnp6devzZt2uj63k3l+5eUlKBVq1b37G+o6q6Lra0thg4dim+//RYLFy4EcOv2V7NmzfDkk0/q+p04cQJHjhypMVje/t+KSOoYgIioRpUTZydOnIhJkyZV26dbt24AgISEBISGhmLUqFF44403YG9vD4VCgZiYGJw6darKfiqVqsb3VSgU1bYLIe5Z8/3sWxudO3cGABw9ehQDBw68Z3+ZTFbte2s0mmr713RdnnnmGYSFheHQoUPo0aMHvv32WwwdOhS2tra6PlqtFo8++ijefPPNao/RsWPHe9ZLJBUMQERUIzs7O1hbW0Oj0SAgIOCufb/77jt4eHhg06ZNereg7py4a26urq4AgJMnT+qNtly8eFE3SnQ3wcHBiImJQUJCQq0CUOvWrXH69Okq7ZUjUbU1atQovPjii7rbYH/99Rdmz56t16dDhw64evXqPf9bEREfgyeiu1AoFBgzZgw2btyIzMzMKttvf7y8cuTl9tGOffv2ITU11fSFGmDo0KFo1qwZli9frtd++6Pkd+Pn54egoCB8/vnn+P7776tsLy8vx+uvv677vkOHDjh+/LjetTp8+DB+//13g+pu1aoVAgMD8e2332LdunWwsLDAqFGj9Po8/fTTSE1Nxfbt26vsf+XKFVRUVBj0nkRNGUeAiAirV69GYmJilfYZM2Zg8eLFSE5Ohq+vLyZPngwvLy9cunQJGRkZ2LlzJy5dugQAePzxx7Fp0yaMHj0ajz32GM6cOYMVK1bAy8sLV69ere9TqpGDgwNmzJiB999/H0888QSCgoJw+PBhbNu2Dba2tnqjVzX58ssvMWzYMDz55JMIDg7G0KFD0bJlS5w4cQLr1q1Dbm6ubi2g5557DrGxsQgMDMTzzz+PgoICrFixAl26dKnVpO7bjR07FhMnTsSyZcsQGBhYZQ7SG2+8gS1btuDxxx9HaGgoevfujdLSUhw9ehTfffcdzp49q3fLjEjKGICIqMpoSKXQ0FA88MADSEtLw4IFC7Bp0yYsW7YMbdu2RZcuXfTW5QkNDUVeXh4+/fRTbN++HV5eXkhISMCGDRuQkpJST2dSO0uWLEGLFi2wcuVK7Ny5E35+fvjll18wYMAAKJXKe+5vZ2eHvXv3YtmyZVi/fj3mzJmD8vJyuLq64oknnsCMGTN0fR966CF8+eWXmDdvHiIiIuDl5YWvvvoKa9euNfi6PPHEE1CpVCgpKdF7+qtSixYtsHv3bixatAgbNmzAl19+CRsbG3Ts2BHR0dFQq9UGvR9RUyYTxpoZSETUiF25cgWtW7fGO++8Y7SPsiCihotzgIhIcq5fv16lrXKV5Oo+toKImh7eAiMiyVm/fj3WrFmDESNGwMrKCnv27ME333yDYcOGoX///uYuj4jqAQMQEUlOt27d0KxZM/znP/9BcXGxbmL0O++8Y+7SiKiecA4QERERSQ7nABEREZHkMAARERGR5HAOUDW0Wi3+/fdfWFtb12pRNCIiIjI/IQRKSkrQrl07yOX3G
OMRDcDSpUuFq6ursLS0FD4+PmLfvn019i0vLxfR0dHCw8NDWFpaim7duolt27bp9Vm0aJHo06ePsLKyEnZ2dmLkyJHi+PHjta7n/PnzAgBffPHFF1988dUIX+fPn7/n33qzjwCtX78eERERWLFiBXx9fREXF4fAwEBkZ2fD3t6+Sv/IyEgkJCRg5cqV6Ny5M7Zv347Ro0dj79696NmzJwBg9+7dmDZtGvr27YuKigq8/fbbGDZsGLKystCyZct71mRtbQ0AOH/+PGxsbIx7wkRERGQSxcXFcHZ21v0dvxuzPwXm6+uLvn376j6IUKvVwtnZGa+88gpmzZpVpX+7du0wZ84cTJs2Tdc2ZswYqFQqJCQkVPsehYWFsLe3x+7du+Hv73/PmoqLi6FWq1FUVMQARERE1EgY8vfbrJOgy8vLkZ6ejoCAAF2bXC5HQEBAjZ8gfePGjSqf1aNSqbBnz54a36eoqAgA0KZNmxqPWVxcrPciIiKipsusAejChQvQaDRwcHDQa3dwcEBeXl61+wQGBiI2NhYnTpyAVqvFjh07sGnTJuTm5lbbX6vV4tVXX0X//v3h7e1dbZ+YmBio1Wrdy9nZ+f5OjIiIiBq0RvcY/IcffogHH3wQnTt3hoWFBcLDwxEWFlbjbO9p06YhMzMT69atq/GYs2fPRlFRke51/vx5U5VPREREDYBZA5CtrS0UCgXy8/P12vPz8+Ho6FjtPnZ2dvj+++9RWlqKv//+G8ePH4eVlRU8PDyq9A0PD8ePP/6I5ORkPPDAAzXWYWlpCRsbG70XERERNV1mDUAWFhbo3bs3kpKSdG1arRZJSUnw8/O7675KpRLt27dHRUUFNm7ciJEjR+q2CSEQHh6OzZs3Y9euXXB3dzfZORAREVHjY/bH4CMiIjBp0iT06dMHPj4+iIuLQ2lpKcLCwgAAISEhaN++PWJiYgAA+/btQ05ODnr06IGcnBzMnz8fWq0Wb775pu6Y06ZNw9q1a/HDDz/A2tpaN59IrVZDpVLV/0kSERFRg2L2ADR27FgUFhZi3rx5yMvLQ48ePZCYmKibGH3u3Dm9+T1lZWWIjIzE6dOnYWVlhREjRuCrr75Cq1atdH2WL18OABg8eLDee8XHxyM0NNTUp0REREQNnNnXAWqIuA4QERFR49No1gEiIiIiMgez3wIjIiKi/9FoBdLOXEJBSRnsrZXwcW8DhZwfzG1sDEBEREQNRGJmLqK3ZiG3qEzX5qRWIirYC0HeTmasrOnhLTAiIqIGIDEzF1MTMvTCDwDkFZVhakIGEjOr/8QDqhuOABEREZmZRisQvTUL1T2VVNkWteVP9Pe0bVK3w1TNFZDJzHM+DEBERERmlnbmUpWRnzvlF99A1/m/1FNF9SNrQSBaWJgnivAWGBERkZkVlNw9/JDxcQSIiIjIzOytlbXqFx/aF74ebUxcTf1RNVeY7b0ZgIiIiMzMx70NnNRK5BWVVTsPSAbAUa2Ef0e7JjUHyJx4C4yIiMjMFHIZooK9qt1WGXeigr0YfoyIAYiIiKgBCPJ2wvKJveBgY6nX7qhWYvnEXlwHyMh4C4yIiKiBCPJ2Qn9PW93TXvGhfXnby0Q4AkRERNSA3B52fD34MRimwgBEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESSwwBEREREktPM3AUQERGRdGi0AmlnLqGgpAz21kr4uLeBQi6r9zoYgIiIiKheJGbmInprFnKLynRtTmolooK9EOTtVK+18BYYERERmVxiZi6mJmTohR8AyCsqw9SEDCRm5tZrPQxAREREZFIarUD01iyIarZVtkVvzYJGW10P02AAIiIiIpNKO3OpysjP7QSA3KIypJ25VG81MQARERGRSRWU1Bx+6tLPGBiAiIiIyKTsrZVG7WcMDEBERERkUj7ubeCkVqKmh91luPU0mI97m3qriQGIiIiITEohlyEq2AsAqoSgyu+jgr3qdT0gBiAiIiIyuSBvJyyf2AuOav3bXI5qJZZP7FXv6wBxIUQiIiKqF0HeTnjUy5ErQRMREZG0KOQy+HVoa+4yeAuMiIiIpIcBiIiIiCSHAYiIiIgkhwGIiIiIJIcBiIiIiCSHAYiIiIgkhwGIiIiIJIcBiIiIiCSHAYiIiIgkhwGIiIiIJIcBiIiIiCSHAYiIiIgkhwGIiIiIJIcBiIiIiCSHAYiIiIgkp5m5CyAiooZJoxVIO3MJBSVlsLdWwse9DRRymbnLIjIKBiAiIqoiMTMX0VuzkFtUpmtzUisRFeyFIG8nM1ZGZBwN4hbYJ598Ajc3NyiVSvj6+iItLa3Gvjdv3sSCBQvQoUMHKJVKdO/eHYmJifd1TCIi+p/EzFxMTcjQCz8AkFdUhqkJGUjMzDVTZUTGY/YRoPXr1yMiIgIrVqyAr68v4uLiEBgYiOzsbNjb21fpHxkZiYSEBKxcuRKdO3fG9u3bMXr0aOzduxc9e/as0zGJiOgWjVYgemsWRDXbKtuitvyJ/p62vB1mItfKNeYuQRJkQojqfs7rja+vL/r27YulS5cCALRaLZydnfHKK69g1qxZVfq3a9cOc+bMwbRp03RtY8aMgUqlQkJCQp2Oeafi4mKo1WoUFRXBxsbGGKdJRNQopJ66iHEr/zB3GfR/shYEooWF2ccqGg1D/n6b9RZYeXk50tPTERAQoGuTy+UICAhAampqtfvcuHEDSqVSr02lUmHPnj33dczi4mK9FxGRFBWUlN27E9WLPq6toWquMHcZTZZZY+WFCxeg0Wjg4OCg1+7g4IDjx49Xu09gYCBiY2Ph7++PDh06ICkpCZs2bYJGo6nzMWNiYhAdHW2EMyIiatzsrZX37gQgPrQvfD3amLgaaVM1V0Am421GU2l042offvghJk+ejM6dO0Mmk6FDhw4ICwvD6tWr63zM2bNnIyIiQvd9cXExnJ2djVEuEVGj4uPeBk5qJfKKyqqdByQD4KhWwr+jHecAUaNm1ltgtra2UCgUyM/P12vPz8+Ho6NjtfvY2dnh+++/R2lpKf7++28cP34cVlZW8PDwqPMxLS0tYWNjo/ciIpIihVyGqGCvardVxp2oYC+GH2r0zBqALCws0Lt3byQlJenatFotkpKS4Ofnd9d9lUol2rdvj4qKCmzcuBEjR46872MSEREQ5O2E5RN7wcHGUq/dUa3E8om9uA4QNQlmvwUWERGBSZMmoU+fPvDx8UFcXBxKS0sRFhYGAAgJCUH79u0RExMDANi3bx9ycnLQo0cP5OTkYP78+dBqtXjzzTdrfUwiIrq7IG8n9Pe0Rdf5vwC4NeeHt72oKTF7ABo7diwKCwsxb9485OXloUePHkhMTNRNYj537hzk8v8NVJWVlSEyMhKnT5+GlZUVRowYga+++gqtWrWq9TGJiOjebg87vh78GAxqWsy+DlBDxHWAiIiAa+UV8Jq3HQDXo6HGodGs
A0RERERkDgxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkGB6DTp0+bog4iIiKiemNwAPL09MSQIUOQkJCAsrIyU9REREREZFIGB6CMjAx069YNERERcHR0xIsvvoi0tDRT1EZERERkEgYHoB49euDDDz/Ev//+i9WrVyM3NxcDBgyAt7c3YmNjUVhYaIo6iYiIiIymzpOgmzVrhieffBIbNmzAkiVLcPLkSbz++utwdnZGSEgIcnNzjVknERERkdHUOQAdOHAAL7/8MpycnBAbG4vXX38dp06dwo4dO/Dvv/9i5MiRxqyTiIiIyGiaGbpDbGws4uPjkZ2djREjRuDLL7/EiBEjIJffylLu7u5Ys2YN3NzcjF0rERERkVEYHICWL1+O5557DqGhoXBycqq2j729PVatWnXfxRERERGZgsEB6MSJE/fsY2FhgUmTJtWpICIiIiJTM3gOUHx8PDZs2FClfcOGDfjiiy+MUhQRERGRKRkcgGJiYmBra1ul3d7eHosWLTJKUURERESmZHAAOnfuHNzd3au0u7q64ty5c0YpioiIiMiUDA5A9vb2OHLkSJX2w4cPo23btkYpioiIiMiUDA5A48aNw/Tp05GcnAyNRgONRoNdu3ZhxowZeOaZZ0xRIxEREZFRGfwU2MKFC3H27FkMHToUzZrd2l2r1SIkJIRzgIiIiKhRMDgAWVhYYP369Vi4cCEOHz4MlUqFrl27wtXV1RT1ERERERmdwQGoUseOHdGxY0dj1kJERERUL+oUgP755x9s2bIF586dQ3l5ud622NhYoxRGREREZCoGB6CkpCQ88cQT8PDwwPHjx+Ht7Y2zZ89CCIFevXqZokYiIiIiozL4KbDZs2fj9ddfx9GjR6FUKrFx40acP38egwYNwlNPPWWKGomIiIiMyuAAdOzYMYSEhAAAmjVrhuvXr8PKygoLFizAkiVLDC7gk08+gZubG5RKJXx9fZGWlnbX/nFxcejUqRNUKhWcnZ3x2muvoaysTLddo9Fg7ty5cHd3h0qlQocOHbBw4UIIIQyujYiIiJomg2+BtWzZUjfvx8nJCadOnUKXLl0AABcuXDDoWOvXr0dERARWrFgBX19fxMXFITAwENnZ2bC3t6/Sf+3atZg1axZWr16Nfv364a+//kJoaChkMplu7tGSJUuwfPlyfPHFF+jSpQsOHDiAsLAwqNVqTJ8+3dDTJSIioibI4AD08MMPY8+ePXjooYcwYsQIzJw5E0ePHsWmTZvw8MMPG3Ss2NhYTJ48GWFhYQCAFStW4KeffsLq1asxa9asKv337t2L/v37Y/z48QAANzc3jBs3Dvv27dPrM3LkSDz22GO6Pt988809R5aIiIhIOgy+BRYbGwtfX18AQHR0NIYOHYr169fDzc0Nq1atqvVxysvLkZ6ejoCAgP8VI5cjICAAqamp1e7Tr18/pKen68LM6dOn8fPPP2PEiBF6fZKSkvDXX38BuPURHXv27MHw4cNrrOXGjRsoLi7WexEREVHTZdAIkEajwT///INu3boBuHU7bMWKFXV64wsXLkCj0cDBwUGv3cHBAcePH692n/Hjx+PChQsYMGAAhBCoqKjASy+9hLffflvXZ9asWSguLkbnzp2hUCig0Wjw7rvvYsKECTXWEhMTg+jo6DqdBxERETU+Bo0AKRQKDBs2DJcvXzZVPXeVkpKCRYsWYdmyZcjIyMCmTZvw008/YeHChbo+3377Lb7++musXbsWGRkZ+OKLL/Dee+/hiy++qPG4s2fPRlFRke51/vz5+jgdIiIiMhOD5wB5e3vj9OnTcHd3v683trW1hUKhQH5+vl57fn4+HB0dq91n7ty5ePbZZ/HCCy8AALp27YrS0lJMmTIFc+bMgVwuxxtvvIFZs2bpPpi1a9eu+PvvvxETE4NJkyZVe1xLS0tYWlre1/kQERFR42HwHKB33nkHr7/+On788Ufk5ubWee6MhYUFevfujaSkJF2bVqtFUlIS/Pz8qt3n2rVrkMv1S1YoFACge8y9pj5arbbWtREREVHTZvAIUOWE4yeeeAIymUzXLoSATCaDRqOp9bEiIiIwadIk9OnTBz4+PoiLi0NpaanuqbCQkBC0b98eMTExAIDg4GDExsaiZ8+e8PX1xcmTJzF37lwEBwfrglBwcDDeffdduLi4oEuXLjh48CBiY2Px3HPPGXqqRERE1EQZHICSk5ON9uZjx45FYWEh5s2bh7y8PPTo0QOJiYm6idHnzp3TG82JjIyETCZDZGQkcnJyYGdnpws8lT7++GPMnTsXL7/8MgoKCtCuXTu8+OKLmDdvntHqJiIiosZNJrhEchXFxcVQq9UoKiqCjY2NucshIjKLa+UV8Jq3HQCQtSAQLSzq9PnZRPXGkL/fBv80//rrr3fd7u/vb+ghiYiIiOqVwQFo8ODBVdpunwtkyBwgIiIiInMw+Cmwy5cv670KCgqQmJiIvn374pdffjFFjURERERGZfAIkFqtrtL26KOPwsLCAhEREUhPTzdKYURERESmYvAIUE0cHByQnZ1trMMRERERmYzBI0BHjhzR+14IgdzcXCxevBg9evQwVl1EjYpGK5B25hIKSspgb62Ej3sbKOSye+9IRERmYXAA6tGjB2QyGe58ev7hhx/G6tWrjVYYUWORmJmL6K1ZyC0q07U5qZWICvZCkLeTGSsjIqKaGByAzpw5o/e9XC6HnZ0dlEql0YoiaiwSM3MxNSEDdy6mlVdUhqkJGVg+sRdDEBFRA2RwAHJ1dTVFHUSNjkYrEL01q0r4AaBri9ryJ/p72vJ2GDVK18q5rAk1XQYHoOnTp8PT0xPTp0/Xa1+6dClOnjyJuLg4Y9VG1KClnbmkd9urOvnFN9B1PpeHICJqaAx+Cmzjxo3o379/lfZ+/frhu+++M0pRRI1BQcndww9RU9HHtTVUzRXmLoPIqAweAbp48WK1awHZ2NjgwoULRimKqDGwt67dvLf40L7w9Whj4mqITEfVXKG34j9RU2BwAPL09ERiYiLCw8P12rdt2wYPDw+jFUbU0Pm4t4GTWom8orJq5wHJADiqlfDvaMc5QEREDYzBASgiIgLh4eEoLCzEI488AgBISkrC+++/z/k/JCkKuQxRwV6YmpBRZVtl3IkK9mL4ISJqgGTizgV9amH58uV499138e+//wIA3NzcMH/+fISEhBi9QHMoLi6GWq1GUVERbGxszF0ONXCJmbmI2vIn8otv6Nq
4DhARUf0z5O93nQJQpcLCQqhUKlhZWdX1EA0SAxAZqqTspu5pr/jQvrztRURkBob8/a7TQogVFRV48MEHYWdnp2s/ceIEmjdvDjc3N4MLJmrsbg87vh78GAwioobO4MfgQ0NDsXfv3irt+/btQ2hoqDFqIiIiIjIpgwPQwYMHq10H6OGHH8ahQ4eMURMRERGRSRkcgGQyGUpKSqq0FxUVQaPhsulERETU8BkcgPz9/RETE6MXdjQaDWJiYjBgwACjFkdERERkCgZPgl6yZAn8/f3RqVMnDBw4EADw22+/obi4GLt27TJ6gUREGq1A2plLKCgpg721Ej7unGhORPfH4ADk5eWFI0eOYOnSpTh8+DBUKhVCQkIQHh6ONm243D8RGVdiZi6it2bpffAs11kiovtlcAACgHbt2mHRokV6bVeuXMHSpUurfEQGEVFdJWbmYmpCRpWPGskrKsPUhAwsn9iLIYiI6sTgOUB3SkpKwvjx4+Hk5ISoqChj1EREBI1WIHprVrWfs1bZFr01CxptnddyJSIJq1MAOn/+PBYsWAB3d3cMGzYMALB582bk5eUZtTgikq60M5f0bnvdSQDILSpD2plL9VcUETUZtQ5AN2/exIYNGxAYGIhOnTrh0KFD+O9//wu5XI7IyEgEBQWhefPmpqyViCSkoKTm8FOXfkREt6v1HKD27dujc+fOmDhxItatW4fWrVsDAMaNG2ey4ohIuuytlUbtR0R0u1qPAFVUVEAmk0Emk0GhUJiyJiIi+Li3gZNaiZoedpfh1tNgPu58+pSIDFfrAPTvv/9iypQp+Oabb+Do6IgxY8Zg8+bNkMm4FgdRU6TRCqSeuogfDuUg9dTFep9srJDLEBXsBQBVQlDl91HBXlwPiIjqpNYBSKlUYsKECdi1axeOHj2Khx56CNOnT0dFRQXeffdd7Nixgx+FQdREJGbmYsCSXRi38g/MWHcI41b+gQFLdiExM7de6wjydsLyib3gqNa/zeWoVvIReCK6LzIhRJ3/WafVarF9+3asWrUKW7duhbW1NS5cuGDM+syiuLgYarUaRUVFsLGxMXc51AhcK6+A17ztAICsBYFoYVGnJbYahJrW3qkcZzFH8OBK0ERUG4b8/b6v/0vL5XIMHz4cw4cPR2FhIb766qv7ORwRmdm91t6R4dbaO496OdZrAFHIZfDr0Lbe3o+Imr77Xgixkp2dHSIiIox1OCIyA669Q0RSYbQARESNH9feISKpYAAiIh2uvUNEUsEAREQ6XHuHiKSCAYiIdLj2DhFJhcFPgWk0GqxZswZJSUkoKCiAVqvV275r1y6jFUdE9a9y7Z3orVl6E6Id1UpEBXtx7R0iahIMDkAzZszAmjVr8Nhjj8Hb25srQRM1QUHeTnjUy5Fr7xBRk2VwAFq3bh2+/fZbjBgxwhT1EFEDwbV3iKgpM3gOkIWFBTw9PU1RCxEREVG9MDgAzZw5Ex9++CHu4xM0iIiIiMzK4Ftge/bsQXJyMrZt24YuXbqgefPmets3bdpktOKIiIiITMHgANSqVSuMHj3aFLUQERER1QuDA1B8fLwp6iAiIiKqN3X+NPjCwkJkZ2cDADp16gQ7OzujFUVERERkSgZPgi4tLcVzzz0HJycn+Pv7w9/fH+3atcPzzz+Pa9eumaJGIiIiIqMyOABFRERg9+7d2Lp1K65cuYIrV67ghx9+wO7duzFz5kxT1EhERERkVAbfAtu4cSO+++47DB48WNc2YsQIqFQqPP3001i+fLkx6yMiIiIyOoNHgK5duwYHB4cq7fb29rwFRkRERI2CwQHIz88PUVFRKCv734ckXr9+HdHR0fDz8zNqcURERESmYPAtsA8//BCBgYF44IEH0L17dwDA4cOHoVQqsX37dqMXSERERGRsBgcgb29vnDhxAl9//TWOHz8OABg3bhwmTJgAlUpl9AKJiIiIjK1O6wC1aNECkydPNnYtRERERPWiVgFoy5YtGD58OJo3b44tW7bcte8TTzxhlMKIiIiITKVWAWjUqFHIy8uDvb09Ro0aVWM/mUwGjUZjrNqIiIiITKJWAUir1Vb7NREREVFjZPBj8F9++SVu3LhRpb28vBxffvmlwQV88skncHNzg1KphK+vL9LS0u7aPy4uDp06dYJKpYKzszNee+01vUfyASAnJwcTJ05E27ZtoVKp0LVrVxw4cMDg2oiIiKhpMjgAhYWFoaioqEp7SUkJwsLCDDrW+vXrERERgaioKGRkZKB79+4IDAxEQUFBtf3Xrl2LWbNmISoqCseOHcOqVauwfv16vP3227o+ly9fRv/+/dG8eXNs27YNWVlZeP/999G6dWvDTpSIiIiaLIOfAhNCQCaTVWn/559/oFarDTpWbGwsJk+erAtOK1aswE8//YTVq1dj1qxZVfrv3bsX/fv3x/jx4wEAbm5uGDduHPbt26frs2TJEjg7OyM+Pl7X5u7ublBdRERE1LTVegSoZ8+e6NWrF2QyGYYOHYpevXrpXt27d8fAgQMREBBQ6zcuLy9Henq63j5yuRwBAQFITU2tdp9+/fohPT1dd5vs9OnT+PnnnzFixAhdny1btqBPnz546qmnYG9vj549e2LlypV3reXGjRsoLi7WexEREVHTVesRoMqnvw4dOoTAwEBYWVnptllYWMDNzQ1jxoyp9RtfuHABGo2myueKOTg46BZYvNP48eNx4cIFDBgwAEIIVFRU4KWXXtK7BXb69GksX74cERERePvtt7F//35Mnz4dFhYWmDRpUrXHjYmJQXR0dK1rJyIiosat1gEoKioKwK3bTmPHjoVSqTRZUTVJSUnBokWLsGzZMvj6+uLkyZOYMWMGFi5ciLlz5wK49ZRanz59sGjRIgC3Rq4yMzOxYsWKGgPQ7NmzERERofu+uLgYzs7Opj8hIiIiMguD5wDVFCIMZWtrC4VCgfz8fL32/Px8ODo6VrvP3Llz8eyzz+KFF14AAHTt2hWlpaWYMmUK5syZA7lcDicnJ3h5eent99BDD2Hjxo011mJpaQlLS8v7PCMiIiJqLAx+Ckyj0eC9996Dj48PHB0d0aZNG71XbVlYWKB3795ISkrStWm1WiQlJdX4qfLXrl2DXK5fskKhAHBrcjYA9O/fH9nZ2Xp9/vrrL7i6uta6NmqYNFqB1FMX8cOhHKSeugiNVpi7JJOS2vkSEdUng0eAoqOj8fnnn2PmzJmIjIzEnDlzcPbsWXz//feYN2+eQceKiIjApEmT0KdPH/j4+CAuLg6lpaW6p8JCQkLQvn17xMTEAACCg4MRGxuLnj176m6BzZ07F8HBwbog9Nprr6Ffv35YtGgRnn76aaSlpeGzzz7DZ599ZuipUgOSmJmL6K1ZyC3635pPTmolooK9EOTtZMbKTENq50tEVN8MDkBff/01Vq5cicceewzz58/HuHHj0KFDB3Tr1g1//PEHpk+fXutjjR07FoWFhZg3bx7y8vLQo0cPJCYm6iZGnzt3Tm/EJzIyEjKZDJGRkcjJyYGdnR2Cg4Px7rvv6vr07dsXmzdvxuzZs7FgwQ
K4u7sjLi4OEyZMMPRUqYFIzMzF1IQM3Dn+kVdUhqkJGVg+sVeTCgVSO18iInOQicp7R7XUsmVLHDt2DC4uLnBycsJPP/2EXr164fTp0+jZs2e1iyQ2NsXFxVCr1SgqKoKNjY25y5E0jVZgwJJdeiMhd3KwscTOiEFQyKuuT1VfrpVr0OednQCArAWBaGFh8L8tANz7fGUAHNVK7HnrEbOeLxFRQ2TI32+D/y/9wAMPIDc3Fy4uLujQoQN++eUX9OrVC/v37+dEYjK6tDOX7hp+ACC/+Aa6zv+lnioyrXudrwCQW1SGtDOX4Nehbf0VRkTUxBg8CXr06NG6icuvvPIK5s6diwcffBAhISF47rnnjF4gSVtByd3DT0PTx7U1VM0Vdd6/tufb2K4LEVFDY/AI0OLFi3Vfjx07Fi4uLkhNTcWDDz6I4OBgoxZHZG9du/Wm4kP7wtej9k8hmoqquaLaj4qprdqeb237ERFR9eo2UeE2fn5+NT62TnS/fNzbwEmtRF5RWZVJwcD/5sT4d7RrEnNianu+Pu7mD3tERI1ZrQLQli1ban3AJ554os7FEN1JIZchKtgLUxMyqmyrjDtRwV5NIvwA+ucrA/RCUFM8XyIic6nVU2B3Lj4ok8lw526Vw/4ajcaI5ZkHnwJreBIzcxG15U/kF9/QtTXldXG4DhARkeGM/hSYVqvVfb1z50689dZbWLRoke7WV2pqKiIjI3Wfv0VkbEHeTujvaat72is+tG+Tue1VnSBvJzzq5Yi0M5dQUFIGe+tbt72a6vkSEdU3g+cAvfrqq1ixYgUGDBigawsMDESLFi0wZcoUHDt2zKgFElW6/Y+/r0fTDwMKuYyPuhMRmYjBj8GfOnUKrVq1qtKuVqtx9uxZI5REREREZFoGB6C+ffsiIiJC71Pc8/Pz8cYbb8DHx8eoxRERERGZgsEBaPXq1bqVoD09PeHp6QkXFxfk5ORg1apVpqiRiIiIyKgMngPk6emJI0eOYMeOHTh+/DgA4KGHHkJAQMB9LQBHTY9GKxrsJN6GXBsREZlenRZClMlkGDZsGIYNG2bseqiJaMiPcTfk2oiIqH7UKgB99NFHmDJlCpRKJT766KO79p0+fbpRCqPGKzEzF1MTMqqsZJxXVIapCRlYPrGX2YJGQ66NiIjqT60WQnR3d8eBAwfQtm1buLu713wwmQynT582aoHmwIUQ606jFRiwZNddP9HcwcYSOyMGGXzL6Vq5Bn3e2QkAyFoQiBYWhg1g3qu2yo+Z2PPWI7wdRkTUCBl9IcQzZ85U+zXRndLOXLpr+AGA/OIbugUN69O9ahMAcovKkHbmEtffISJq4gx+CozobgpK7h5+jKGPa2uomisM3q+2tdXHORARkXnVagQoIiKi1geMjY2tczHU+NlbK2vVLz60L3w96vaJ5qrmijo9cVjb2mrbj4iIGq9aBaCDBw/W6mB8DJ583NvASa1EXlFZlYnGwP/m2Zjjc7xqW5uPe92CGRERNR61CkDJycmmroOaCIVchqhgL0xNyKiyrTLuRAV7mWWS8e21yQC9EGTu2poarrNERA1drZ4Ckxo+BXb/EjNzEbXlT+QX39C1NZS1drgOkGnx+hKRuRjy97tOAejAgQP49ttvce7cOZSXl+tt27Rpk6GHa3AYgIyjpOym7mmv+NC+ZrntVROOUJhGTessVV5ZrrNERKZkyN9vg58CW7duHfr164djx45h8+bNuHnzJv7880/s2rULarW6zkVT03N7oPD1aFgBQyGXwa9DW4zs0R5+Hdo2qNoaK41WIHprVrXzqyrbordmQaPloDMRmZ/BAWjRokX44IMPsHXrVlhYWODDDz/E8ePH8fTTT8PFxcUUNRJRI2DIOktEROZmcAA6deoUHnvsMQCAhYUFSktLIZPJ8Nprr+Gzzz4zeoFE1DhwnSUiakwMDkCtW7dGSUkJAKB9+/bIzMwEAFy5cgXXrl0zbnVE1GhwnSUiakwMDkD+/v7YsWMHAOCpp57CjBkzMHnyZIwbNw5Dhw41eoFE1DhUrrNU02wqGW49DcZ1loioIaj1p0lmZmbC29sbS5cuRVnZrSHsOXPmoHnz5ti7dy/GjBmDyMhIkxVK9YNPR1FdcZ0lImpMah2AunXrhr59++KFF17AM888AwCQy+WYNWuWyYqj+sX1W+h+BXk7YfnEXlV+jhz5c0REDUyt1wH67bffEB8fj++++w5arRZjxozBCy+8gIEDB5q6xnonxXWATLF+y7XyCnjN2w4AyFoQiBYWtc7b1MhxJJGIzMEk6wANHDgQq1evRm5uLj7++GOcPXsWgwYNQseOHbFkyRLk5eXdd+FkHvdav0UAiNryJ0rKbuJaeYUBL009nwk1FFxniYgauvv6KIyTJ08iPj4eX331FfLy8hAUFIQtW7YYsz6zkNoIUOqpixi38g+TvgdHgIiIyNRMuhL07Tw9PfH2228jMjIS1tbW+Omnn+7ncGQmpl6XpY9ra6iaK0z6HkRERIao8z/Jf/31V6xevRobN26EXC7H008/jeeff96YtVE9qe26LPGhfeHrYfgjzKrmCshkvAVCREQNh0EB6N9//8WaNWuwZs0anDx5Ev369cNHH32Ep59+Gi1btjRVjWRileu35BWVVTsPSIZbT/E0pA8zJSIiuh+1DkDDhw/Hzp07YWtri5CQEDz33HPo1KmTKWujenL7+i134votRETUFNU6ADVv3hzfffcdHn/8cSgUnM/R1FSu3xK15U/kF9/QtXP9FiIiaoru6ymwpkpqT4HdrqTsJrrO/wXArTk/vO1FRESNRb09BUZNz+1hx9eDi9cREVHTxABEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESSwwBEREREktMgAtAnn3wCNzc3KJVK+Pr6Ii0t7a794+Li0KlTJ6hUKjg7O+O1115DWVlZtX0XL14MmUyGV1991QSVExERUWNk9gC0fv16REREICoqChkZGejevTsCAwNRUFBQbf+1a9di1qxZiIqKwrFjx7Bq1SqsX78eb7/9dpW++/fvx6effopu3bqZ+jSIiIioETF7AIqNjcXkyZMRFhYGLy8vrFixAi1atMDq1aur7b937170798f48ePh5ubG4YNG4Zx48ZVGTW6evUqJkyYgJUrV6J169b1cSpERETUSJg1AJWXlyM9PR0BAQG6NrlcjoCAAKSmpla7T79+/ZCenq4LPKdPn8bPP/+MESNG6PWbNm0aHnvsMb1j1+TGjRsoLi7WexEREVHT1cycb37hwgVoNBo4ODjotTs4OOD48ePV7jN+/HhcuHABAwYMgBACFRUVeOmll/Ruga1btw4ZGRnYv39/reqIiYlBdHR03U+EiIiIGhWz3wIzVEpKChYtWoRly5YhIyMDmzZtwk8//YSFCxcCAM6fP
48ZM2bg66+/hlKprNUxZ8+ejaKiIt3r/PnzpjwFIiIiMjOzjgDZ2tpCoVAgPz9frz0/Px+Ojo7V7jN37lw8++yzeOGFFwAAXbt2RWlpKaZMmYI5c+YgPT0dBQUF6NWrl24fjUaDX3/9FUuXLsWNGzegUCj0jmlpaQlLS0sjnx0RERE1VGYdAbKwsEDv3r2RlJSka9NqtUhKSoKfn1+1+1y7dg1yuX7ZlYFGCIGhQ4fi6NGjOHTokO7Vp08fTJgwAYcOHaoSfoiIiEh6zDoCBAARERGYNGkS+vTpAx8fH8TFxaG0tBRhYWEAgJCQELRv3x4xMTEAgODgYMTGxqJnz57w9fXFyZMnMXfuXAQHB0OhUMDa2hre3t5679GyZUu0bdu2SjsRERFJk9kD0NixY1FYWIh58+YhLy8PPXr0QGJiom5i9Llz5/RGfCIjIyGTyRAZGYmcnBzY2dkhODgY7777rrlOodY0WoG0M5dQUFIGe2slfNzbQCGXmbssIiIiyZEJIYS5i2hoiouLoVarUVRUBBsbG6McMzEzF9Fbs5Bb9L8Vq53USkQFeyHI28ko72EM18or4DVvOwAga0EgWliYPSMTERHViiF/vxvdU2CNUWJmLqYmZOiFHwDIKyrD1IQMJGbmmqkyIiIiaeI/701MoxWI3pqF6obZKtuitvyJ/p62DeJ22LVyjblLICIiMjkGIBNLO3OpysjPnfKLb6Dr/F/qqSIiIiLiLTATKyi5e/hpqPq4toaqOZcMICKipokjQCZmb1271ajjQ/vC16ONiaupPVVzBWQy89+SIyIiMgUGIBPzcW8DJ7USeUVl1c4DkgFwVCvh39GuQcwBIiIikgLeAjMxhVyGqGAvALfCzu0qv48K9mL4ISIiqkcMQPUgyNsJyyf2gqNa/3aYo1qJ5RN7Nah1gIiIiKSAt8DqSZC3Ex71cuRK0ERERA0AA1A9Ushl8OvQ1txlEBERSR5vgREREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQwABEREZHkMAARERGR5DAAERERkeQ0iAD0ySefwM3NDUqlEr6+vkhLS7tr/7i4OHTq1AkqlQrOzs547bXXUFZWptseExODvn37wtraGvb29hg1ahSys7NNfRpERETUSJg9AK1fvx4RERGIiopCRkYGunfvjsDAQBQUFFTbf+3atZg1axaioqJw7NgxrFq1CuvXr8fbb7+t67N7925MmzYNf/zxB3bs2IGbN29i2LBhKC0tra/TIiIiogZMJoQQ5izA19cXffv2xdKlSwEAWq0Wzs7OeOWVVzBr1qwq/cPDw3Hs2DEkJSXp2mbOnIl9+/Zhz5491b5HYWEh7O3tsXv3bvj7+9+zpuLiYqjVahQVFcHGxqaOZ0ZERET1yZC/32YdASovL0d6ejoCAgJ0bXK5HAEBAUhNTa12n379+iE9PV13m+z06dP4+eefMWLEiBrfp6ioCADQpk2barffuHEDxcXFeq+GRqMVSD11ET8cykHqqYvQaM2aW4mIiBq1ZuZ88wsXLkCj0cDBwUGv3cHBAcePH692n/Hjx+PChQsYMGAAhBCoqKjASy+9pHcL7HZarRavvvoq+vfvD29v72r7xMTEIDo6+v5OxoQSM3MRvTULuUX/m+fkpFYiKtgLQd5OZqyMiIiocTL7HCBDpaSkYNGiRVi2bBkyMjKwadMm/PTTT1i4cGG1/adNm4bMzEysW7euxmPOnj0bRUVFutf58+dNVb7BEjNzMTUhQy/8AEBeURmmJmQgMTPXTJURERE1XmYdAbK1tYVCoUB+fr5ee35+PhwdHavdZ+7cuXj22WfxwgsvAAC6du2K0tJSTJkyBXPmzIFc/r9MFx4ejh9//BG//vorHnjggRrrsLS0hKWlpRHOyLg0WoHorVmo7maXACADEL01C496OUIhl9VzdURERI2XWUeALCws0Lt3b70JzVqtFklJSfDz86t2n2vXrumFHABQKBQAgMr53EIIhIeHY/Pmzdi1axfc3d1NdAamlXbmUpWRn9sJALlFZUg7c6n+iiIiImoCzDoCBAARERGYNGkS+vTpAx8fH8TFxaG0tBRhYWEAgJCQELRv3x4xMTEAgODgYMTGxqJnz57w9fXFyZMnMXfuXAQHB+uC0LRp07B27Vr88MMPsLa2Rl5eHgBArVZDpVKZ50TroKCk5vBTl35ERER0i9kD0NixY1FYWIh58+YhLy8PPXr0QGJiom5i9Llz5/RGfCIjIyGTyRAZGYmcnBzY2dkhODgY7777rq7P8uXLAQCDBw/We6/4+HiEhoaa/JyMxd5aadR+REREdIvZ1wFqiBrKOkAarcCAJbuQV1RW7TwgGQBHtRJ73nqEc4CIiEjyGs06QHR3CrkMUcFeAG6FndtVfh8V7MXwQ0REZCAGoAYuyNsJyyf2gqNa/zaXo1qJ5RN7cR0gIiKiOjD7HCC6tyBvJzzq5Yi0M5dQUFIGe2slfNzbcOSHiIiojhiAGgmFXAa/Dm3NXQYREVGTwFtgREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOQxAREREJDkMQERERCQ5DEBEREQkOVwJuhpC3Prs9eLiYjNXQkRERLVV+Xe78u/43TAAVaOkpAQA4OzsbOZKiIiIyFAlJSVQq9V37SMTtYlJEqPVapGdnQ0vLy+cP38eNjY25i6p0SsuLoazszOvpxHwWhoPr6Xx8FoaD69l3QkhUFJSgnbt2kEuv/ssH44AVUMul6N9+/YAABsbG/4AGhGvp/HwWhoPr6Xx8FoaD69l3dxr5KcSJ0ETERGR5DAAERERkeQwANXA0tISUVFRsLS0NHcpTQKvp/HwWhoPr6Xx8FoaD69l/eAkaCIiIpIcjgARERGR5DAAERERkeQwABEREZHkMAARERGR5DAA1eCTTz6Bm5sblEolfH19kZaWZu6SGpz58+dDJpPpvTp37qzbXlZWhmnTpqFt27awsrLCmDFjkJ+fr3eMc+fO4bHHHkOLFi1gb2+PN954AxUVFfV9KvXu119/RXBwMNq1aweZTIbvv/9eb7sQAvPmzYOTkxNUKhUCAgJw4sQJvT6XLl3ChAkTYGNjg1atWuH555/H1atX9focOXIEAwcOhFKphLOzM/7zn/+Y+tTq3b2u
ZWhoaJWf06CgIL0+vJZATEwM+vbtC2tra9jb22PUqFHIzs7W62Os3+mUlBT06tULlpaW8PT0xJo1a0x9evWuNtdz8ODBVX42X3rpJb0+vJ4mJKiKdevWCQsLC7F69Wrx559/ismTJ4tWrVqJ/Px8c5fWoERFRYkuXbqI3Nxc3auwsFC3/aWXXhLOzs4iKSlJHDhwQDz88MOiX79+uu0VFRXC29tbBAQEiIMHD4qff/5Z2NraitmzZ5vjdOrVzz//LObMmSM2bdokAIjNmzfrbV+8eLFQq9Xi+++/F4cPHxZPPPGEcHd3F9evX9f1CQoKEt27dxd//PGH+O2334Snp6cYN26cbntRUZFwcHAQEyZMEJmZmeKbb74RKpVKfPrpp/V1mvXiXtdy0qRJIigoSO/n9NKlS3p9eC2FCAwMFPHx8SIzM1McOnRIjBgxQri4uIirV6/q+hjjd/r06dOiRYsWIiIiQmRlZYmPP/5YKBQKkZiYWK/na2q1uZ6DBg0SkydP1vvZLCoq0m3n9TQtBqBq+Pj4iGnTpum+12g0ol27diImJsaMVTU8UVFRonv37tVuu3LlimjevLnYsGGDru3YsWMCgEhNTRVC3PrDJZfLRV5enq7P8uXLhY2Njbhx44ZJa29I7vyjrdVqhaOjo/jvf/+ra7ty5YqwtLQU33zzjRBCiKysLAFA7N+/X9dn27ZtQiaTiZycHCGEEMuWLROtW7fWu5ZvvfWW6NSpk4nPyHxqCkAjR46scR9ey+oVFBQIAGL37t1CCOP9Tr/55puiS5cueu81duxYERgYaOpTMqs7r6cQtwLQjBkzatyH19O0eAvsDuXl5UhPT0dAQICuTS6XIyAgAKmpqWasrGE6ceIE2rVrBw8PD0yYMAHnzp0DAKSnp+PmzZt617Fz585wcXHRXcfU1FR07doVDg4Ouj6BgYEoLi7Gn3/+Wb8n0oCcOXMGeXl5etdOrVbD19dX79q1atUKffr00fUJCAiAXC7Hvn37dH38/f1hYWGh6xMYGIjs7Gxcvny5ns6mYUhJSYG9vT06deqEqVOn4uLFi7ptvJbVKyoqAgC0adMGgPF+p1NTU/WOUdmnqf//9c7rWenrr7+Gra0tvL29MXv2bFy7dk23jdfTtPhhqHe4cOECNBqN3g8cADg4OOD48eNmqqph8vX1xZo1a9CpUyfk5uYiOjoaAwcORGZmJvLy8mBhYYFWrVrp7ePg4IC8vDwAQF5eXrXXuXKbVFWee3XX5vZrZ29vr7e9WbNmaNOmjV4fd3f3Kseo3Na6dWuT1N/QBAUF4cknn4S7uztOnTqFt99+G8OHD0dqaioUCgWvZTW0Wi1effVV9O/fH97e3gBgtN/pmvoUFxfj+vXrUKlUpjgls6ruegLA+PHj4erqinbt2uHIkSN46623kJ2djU2bNgHg9TQ1BiCqs+HDh+u+7tatG3x9feHq6opvv/2Wv3TUYDzzzDO6r7t27Ypu3bqhQ4cOSElJwdChQ81YWcM1bdo0ZGZmYs+ePeYupUmo6XpOmTJF93XXrl3h5OSEoUOH4tSpU+jQoUN9lyk5vAV2B1tbWygUiipPNuTn58PR0dFMVTUOrVq1QseOHXHy5Ek4OjqivLwcV65c0etz+3V0dHSs9jpXbpOqynO/28+go6MjCgoK9LZXVFTg0qVLvL734OHhAVtbW5w8eRIAr+WdwsPD8eOPPyI5ORkPPPCArt1Yv9M19bGxsWmS/3Cq6XpWx9fXFwD0fjZ5PU2HAegOFhYW6N27N5KSknRtWq0WSUlJ8PPzM2NlDd/Vq1dx6tQpODk5oXfv3mjevLnedczOzsa5c+d019HPzw9Hjx7V++OzY8cO2NjYwMvLq97rbyjc3d3h6Oiod+2Ki4uxb98+vWt35coVpKen6/rs2rULWq1W9z9RPz8//Prrr7h586auz44dO9CpU6cmd8vGEP/88w8uXrwIJycnALyWlYQQCA8Px+bNm7Fr164qt/yM9Tvt5+end4zKPk3t/6/3up7VOXToEADo/WzyepqQuWdhN0Tr1q0TlpaWYs2aNSIrK0tMmTJFtGrVSm8mPgkxc+ZMkZKSIs6cOSN+//13ERAQIGxtbUVBQYEQ4tYjsy4uLmLXrl3iwIEDws/PT/j5+en2r3zEc9iwYeLQoUMiMTFR2NnZSeIx+JKSEnHw4EFx8OBBAUDExsaKgwcPir///lsIcesx+FatWokffvhBHDlyRIwcObLax+B79uwp9u3bJ/bs2SMefPBBvUe3r1y5IhwcHMSzzz4rMjMzxbp160SLFi2a1KPbQtz9WpaUlIjXX39dpKamijNnzoidO3eKXr16iQcffFCUlZXpjsFrKcTUqVOFWq0WKSkpeo9lX7t2TdfHGL/TlY9tv/HGG+LYsWPik08+aZKPbd/rep48eVIsWLBAHDhwQJw5c0b88MMPwsPDQ/j7++uOwetpWgxANfj444+Fi4uLsLCwED4+PuKPP/4wd0kNztixY4WTk5OwsLAQ7du3F2PHjhUnT57Ubb9+/bp4+eWXRevWrUWLFi3E6NGjRW5urt4xzp49K4YPHy5UKpWwtbUVM2fOFDdv3qzvU6l3ycnJAkCV16RJk4QQtx6Fnzt3rnBwcBCWlpZi6NChIjs7W+8YFy9eFOPGjRNWVlbCxsZGhIWFiZKSEr0+hw8fFgMGDBCWlpaiffv2YvHixfV1ivXmbtfy2rVrYtiwYcLOzk40b95cuLq6ismTJ1f5xwyvpaj2GgIQ8fHxuj7G+p1OTk4WPXr0EBYWFsLDw0PvPZqKe13Pc+fOCX9/f9GmTRthaWkpPD09xRtvvKG3DpAQvJ6mJBNCiPobbyIiIiIyP84BIiIiIslhACIiIiLJYQAiIiIiyWEAIiIiIslhACIiIiLJYQAiIiIiyWEAIiIiIslhACIiIiLJYQAiovuWkpICmUym+6DMNWvWoFWrVvd9XGMdx1THA4DBgwfj1VdfNeoxDeHv74+1a9fWqu/DDz+MjRs3mrgiosaBAYhIQlasWAFra2tUVFTo2q5evYrmzZtj8ODBen0rQ82pU6dMVk9ycjJGjBiBtm3bokWLFvDy8sLMmTORk5NjsvesrbNnz0Imk931tWbNGmzatAkLFy40S41btmxBfn4+nnnmmVr1j4yMxKxZs6DVak1cGVHDxwBEJCFDhgzB1atXceDAAV3bb7/9BkdHR+zbtw9lZWW69uTkZLi4uKBDhw4mqeXTTz9FQEAAHB0dsXHjRmRlZWHFihUoKirC+++/b5L3NISzszNyc3N1r5kzZ6JLly56bWPHjkWbNm1gbW1tlho/+ugjhIWFQS6v3f/Khw8fjpKSEmzbts3ElRE1fAxARBLSqVMnODk5ISUlRdeWkpKCkSNHwt3dHX/88Yde+5AhQwAAX331Ffr06QNra2s4Ojpi/PjxKCgoqHMd//zzD6ZPn47p06dj9erVGDx4MNzc3ODv74/PP/8c8+bNq3Hf5cuXo0OHDrCwsECnTp3w1Vdf6W2/cuUKXnzxRTg4OECpVMLb2xs//vh
jtccqLCxEnz59MHr0aNy4cUNvm0KhgKOjo+5lZWWFZs2a6bWpVKoqt8Dc3NzwzjvvICQkBFZWVnB1dcWWLVtQWFiIkSNHwsrKCt26ddMLoQCwZ88eDBw4ECqVCs7Ozpg+fTpKS0trvA6FhYXYtWsXgoODdW1CCMyfPx8uLi6wtLREu3btMH36dL1zGjFiBNatW1fjcYmkggGISGKGDBmC5ORk3ffJyckYPHgwBg0apGu/fv069u3bpwtAN2/exMKFC3H48GF8//33OHv2LEJDQ+tcw4YNG1BeXo4333yz2u01zdPZvHkzZsyYgZkzZyIzMxMvvvgiwsLCdHVrtVoMHz4cv//+OxISEpCVlYXFixdDoVBUOdb58+cxcOBAeHt747vvvoOlpWWdz+dOH3zwAfr374+DBw/isccew7PPPouQkBBMnDgRGRkZ6NChA0JCQlD5WdSnTp1CUFAQxowZgyNHjmD9+vXYs2cPwsPDa3yPPXv2oEWLFnjooYd0bRs3bsQHH3yATz/9FCdOnMD333+Prl276u3n4+OD3377zWjnStRomffD6Imovq1cuVK0bNlS3Lx5UxQXF4tmzZqJgoICsXbtWuHv7y+EECIpKUkAEH///Xe1x9i/f78AIEpKSoQQQiQnJwsA4vLly0IIIeLj44Vara6xhqlTpwobG5t71nrncfr16ycmT56s1+epp54SI0aMEEIIsX37diGXy0V2dvZdj3f8+HHh7Owspk+fLrRa7T3rEEKIqKgo0b179yrtgwYNEjNmzNB97+rqKiZOnKj7Pjc3VwAQc+fO1bWlpqYKACI3N1cIIcTzzz8vpkyZonfc3377TcjlcnH9+vVq6/nggw+Eh4eHXtv7778vOnbsKMrLy2s8jx9++EHI5XKh0Whq7EMkBRwBIpKYwYMHo7S0FPv378dvv/2Gjh07ws7ODoMGDdLNA0pJSYGHhwdcXFwAAOnp6QgODoaLiwusra0xaNAgAMC5c+fqVIMQAjKZzOD9jh07hv79++u19e/fH8eOHQMAHDp0CA888AA6duxY4zGuX7+OgQMH4sknn8SHH35YpzrupVu3brqvHRwcAEBvJKayrfI24uHDh7FmzRpYWVnpXoGBgdBqtThz5kyN56FUKvXannrqKVy/fh0eHh6YPHkyNm/erDfhHQBUKhW0Wm2VW35EUsMARCQxnp6eeOCBB5CcnIzk5GRdmGnXrh2cnZ2xd+9eJCcn45FHHgEAlJaWIjAwEDY2Nvj666+xf/9+bN68GQBQXl5epxo6duyIoqIi5ObmGuek/o9KpbpnH0tLSwQEBODHH3802dNmzZs3131dGbCqa6t8Guvq1at48cUXcejQId3r8OHDOHHiRI2T0G1tbXH58mW9NmdnZ2RnZ2PZsmVQqVR4+eWX4e/vj5s3b+r6XLp0CS1btqzVtSJqyhiAiCRoyJAhSElJQUpKit7j7/7+/ti2bRvS0tJ083+OHz+OixcvYvHixRg4cCA6d+58XxOgAeD//b//BwsLC/znP/+pdnvlekJ3euihh/D777/rtf3+++/w8vICcGvk5Z9//sFff/1V43vL5XJ89dVX6N27N4YMGYJ///23bidhRL169UJWVhY8PT2rvCwsLKrdp2fPnsjLy6sSglQqFYKDg/HRRx8hJSUFqampOHr0qG57ZmYmevbsadLzIWoMmpm7ACKqf0OGDMG0adNw8+ZN3QgQAAwaNAjh4eEoLy/XBSAXFxdYWFjg448/xksvvYTMzMz7XvfG2dkZH3zwAcLDw1FcXIyQkBC4ubnhn3/+wZdffgkrK6tqH4V/44038PTTT6Nnz54ICAjA1q1bsWnTJuzcuVNXv7+/P8aMGYPY2Fh4enri+PHjkMlkCAoK0h1HoVDg66+/xrhx4/DII48gJSUFjo6O93VO9+Ott97Cww8/jPDwcLzwwgto2bIlsrKysGPHDixdurTafXr27AlbW1v8/vvvePzxxwHcWuhRo9HA19cXLVq0QEJCAlQqFVxdXXX7/fbbbxg2bFi9nBdRQ8YRICIJGjJkCK5fvw5PT0/dfBTgVoAoKSnRPS4PAHZ2dlizZg02bNgALy8vLF68GO+999591/Dyyy/jl19+QU5ODkaPHo3OnTvjhRdegI2NDV5//fVq9xk1ahQ+/PBDvPfee+jSpQs+/fRTxMfH641ibdy4EX379sW4cePg5eWFN998ExqNpsqxmjVrhm+++QZdunTBI488ct+jWvejW7du2L17N/766y8MHDgQPXv2xLx589CuXbsa91EoFAgLC8PXX3+ta2vVqhVWrlyJ/v37o1u3bti5cye2bt2Ktm3bAgBycnKwd+9ehIWFmfyciBo6mRD/9xwmERE1Knl5eejSpQsyMjL0Rnlq8tZbb+Hy5cv47LPP6qE6ooaNI0BERI2Uo6MjVq1aVeun8ezt7c32sR1EDQ1HgIiIiEhyOAJEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESSwwBEREREksMARERERJLDAERERESS8/8B3shQ91hUDpYAAAAASUVORK5CYII=",
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"plt.title('Learning Curve')\n",
"plt.xlabel('Wall Clock Time (s)')\n",
"plt.ylabel('Validation Accuracy')\n",
"print(len(valid_loss_history))\n",
"plt.scatter(time_history, 1 - np.array(valid_loss_history))\n",
"plt.step(time_history, 1 - np.array(best_valid_loss_history), where='post')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "xudzM73mTjhI"
},
"source": [
"## 3. Model selection"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "A3gC3u_E4cO1"
},
"source": [
"Given a dataset, which language model should you use for the fine tuning? It appears this is a simple question: just choose the best model according to the benchmarks such as [GLUE](https://gluebenchmark.com/leaderboard). However, we will see that under the resource constraints, the model selection is non trivial. \n",
"\n",
"In this example, we will tune the [spooky-author-identification](https://www.kaggle.com/competitions/spooky-author-identification/data?select=train.zip) dataset from kaggle. You can download the dataset from the [here](https://drive.google.com/file/d/1Jk-_Vg_SxOUDfFVzF7S85oBasY8fFvOY/view?usp=sharing) and upload it to Colab. The following command also downloads the file. We run FLAML for 30 mins using bert."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Bty5Qz3x_OzJ",
"outputId": "8a135114-7367-40a3-a383-ebb891e1f019"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Downloading...\n",
"From: https://drive.google.com/uc?id=1Jk-_Vg_SxOUDfFVzF7S85oBasY8fFvOY\n",
"To: /content/spooky-author-identification.csv\n",
"\r\n",
" 0% 0.00/3.30M [00:00<?, ?B/s]\r\n",
"100% 3.30M/3.30M [00:00<00:00, 79.7MB/s]\n"
]
}
],
"source": [
"!gdown 1Jk-_Vg_SxOUDfFVzF7S85oBasY8fFvOY"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "HjvdojhfTjhI",
"outputId": "954629d5-46a0-4341-d0b4-5e4355ad2bdf"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:19:25] {1768} INFO - task = seq-classification\n",
"[flaml.automl.logger: 04-12 17:19:25] {1775} INFO - Data split method: stratified\n",
"[flaml.automl.logger: 04-12 17:19:25] {1778} INFO - Evaluation method: holdout\n",
"[flaml.automl.logger: 04-12 17:19:25] {1891} INFO - Minimizing error metric: 1-accuracy\n",
"[flaml.automl.logger: 04-12 17:19:25] {2011} INFO - List of ML learners in AutoML Run: ['transformer']\n",
"[flaml.automl.logger: 04-12 17:19:25] {2341} INFO - iteration 0, current learner transformer\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "54fca810bfe14b46a0a1ae57821f3391",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)okenizer_config.json: 0%| | 0.00/28.0 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1547b188bb9f42d59984856a250bbd96",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)lve/main/config.json: 0%| | 0.00/570 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "45b9766f23084827814f41ed85d0ea95",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)solve/main/vocab.txt: 0%| | 0.00/232k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "b37ea6febb1e4ef886fa613182b42331",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)/main/tokenizer.json: 0%| | 0.00/466k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "7923deb016734e3d8167ba2642e697c4",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading pytorch_model.bin: 0%| | 0.00/440M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.8885396122932434, 'eval_automl_metric': 0.3881511746680286, 'eval_runtime': 63.5955, 'eval_samples_per_second': 76.971, 'eval_steps_per_second': 76.971, 'epoch': 0.3}\n",
"{'train_runtime': 138.5184, 'train_samples_per_second': 31.802, 'train_steps_per_second': 0.996, 'train_loss': 0.9821738643922667, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:23:29] {2479} INFO - Estimated sufficient time budget=2440736s. Estimated necessary time budget=2441s.\n",
"[flaml.automl.logger: 04-12 17:23:29] {2526} INFO - at 244.1s,\testimator transformer's best error=0.3882,\tbest estimator transformer's best error=0.3882\n",
"[flaml.automl.logger: 04-12 17:23:29] {2341} INFO - iteration 1, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.9504787921905518, 'eval_automl_metric': 0.454341164453524, 'eval_runtime': 63.833, 'eval_samples_per_second': 76.685, 'eval_steps_per_second': 76.685, 'epoch': 0.3}\n",
"{'train_runtime': 143.6886, 'train_samples_per_second': 30.658, 'train_steps_per_second': 0.48, 'train_loss': 1.0153502312259397, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:27:17] {2526} INFO - at 472.5s,\testimator transformer's best error=0.3882,\tbest estimator transformer's best error=0.3882\n",
"[flaml.automl.logger: 04-12 17:27:17] {2341} INFO - iteration 2, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.7725200057029724, 'eval_automl_metric': 0.31624106230847804, 'eval_runtime': 64.172, 'eval_samples_per_second': 76.279, 'eval_steps_per_second': 76.279, 'epoch': 0.3}\n",
"{'train_runtime': 136.3992, 'train_samples_per_second': 32.296, 'train_steps_per_second': 2.023, 'train_loss': 0.9211070848547894, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:30:57] {2526} INFO - at 691.8s,\testimator transformer's best error=0.3162,\tbest estimator transformer's best error=0.3162\n",
"[flaml.automl.logger: 04-12 17:30:57] {2341} INFO - iteration 3, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.7185199856758118, 'eval_automl_metric': 0.2858018386108274, 'eval_runtime': 63.1708, 'eval_samples_per_second': 77.488, 'eval_steps_per_second': 77.488, 'epoch': 0.3}\n",
"{'train_runtime': 136.3072, 'train_samples_per_second': 32.318, 'train_steps_per_second': 2.025, 'train_loss': 0.8806653230086617, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:34:34] {2526} INFO - at 908.9s,\testimator transformer's best error=0.2858,\tbest estimator transformer's best error=0.2858\n",
"[flaml.automl.logger: 04-12 17:34:34] {2341} INFO - iteration 4, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.7593, 'learning_rate': 4.688468079515019e-06, 'epoch': 0.54}\n",
"{'eval_loss': 0.48282745480537415, 'eval_automl_metric': 0.18263534218590394, 'eval_runtime': 62.0311, 'eval_samples_per_second': 78.912, 'eval_steps_per_second': 78.912, 'epoch': 1.0}\n",
"{'train_runtime': 299.9815, 'train_samples_per_second': 48.95, 'train_steps_per_second': 3.06, 'train_loss': 0.6506855950116591, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:40:55] {2526} INFO - at 1289.8s,\testimator transformer's best error=0.1826,\tbest estimator transformer's best error=0.1826\n",
"[flaml.automl.logger: 04-12 17:40:55] {2341} INFO - iteration 5, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.6676, 'learning_rate': 8.147241469004167e-06, 'epoch': 0.54}\n",
"{'eval_loss': 0.3991524279117584, 'eval_automl_metric': 0.1542390194075587, 'eval_runtime': 61.4178, 'eval_samples_per_second': 79.7, 'eval_steps_per_second': 79.7, 'epoch': 1.0}\n",
"{'train_runtime': 299.7831, 'train_samples_per_second': 48.982, 'train_steps_per_second': 3.062, 'train_loss': 0.5576270442123247, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:47:16] {2526} INFO - at 1671.6s,\testimator transformer's best error=0.1542,\tbest estimator transformer's best error=0.1542\n",
"[flaml.automl.logger: 04-12 17:47:16] {2642} INFO - selected model: None\n",
"[flaml.automl.logger: 04-12 17:47:16] {2041} INFO - fit succeeded\n",
"[flaml.automl.logger: 04-12 17:47:16] {2042} INFO - Time taken to find the best model: 1671.5927600860596\n",
"[flaml.automl.logger: 04-12 17:47:16] {2054} WARNING - Time taken to find the best model is 93% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
]
}
],
"source": [
"from flaml import AutoML\n",
"import pandas as pd\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"df = pd.read_csv('/content/spooky-author-identification.csv')\n",
"X, y = df.drop('author', axis=1), df['author']\n",
"\n",
"X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=123)\n",
"automl_model = AutoML()\n",
"\n",
"automl_settings = {\n",
" \"time_budget\": 1800, \n",
" \"task\": \"seq-classification\", \n",
" \"fit_kwargs_by_estimator\": {\n",
" \"transformer\": {\n",
" \"output_dir\": \"data/output/\", \n",
" \"model_path\": \"bert-base-uncased\", \n",
" }\n",
" },\n",
" \"metric\": \"accuracy\",\n",
" \"gpu_per_trial\": 1, \n",
" \"log_file_name\": \"spooky_bert.log\", \n",
" \"log_type\": \"all\", \n",
" \"use_ray\": False, # set whether to use Ray\n",
" \"n_concurrent_trials\": 1,\n",
" \"keep_search_state\": True, # keeping the search state\n",
"}\n",
"\n",
"from flaml import tune\n",
"custom_hp = {\n",
" \"transformer\": {\n",
" \"num_train_epochs\": {\n",
" \"domain\": tune.choice([0.3, 1, 2, 3, 4, 5]),\n",
" \"init_value\": 0.3, \n",
" \"low_cost_init_value\": 0.3,\n",
" },\n",
" }\n",
"}\n",
"\n",
"automl_model.fit(X_train=X_train, y_train=y_train,X_val=X_val, y_val=y_val, custom_hp=custom_hp, **automl_settings)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "9jZiKSU75jjl"
},
"source": [
"The job ran for 23m and searched for 4 trials. This time is shorter than our budget 30m because FLAML early stops the last trial which will run for too long. If you want to run for longer time, set a larger time budget. "
]
},
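  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For example, to let the search continue past 30 minutes you could raise `time_budget` (in seconds) and call `fit` again. A minimal sketch, reusing the `automl_settings` and `custom_hp` defined above:\n",
    "```python\n",
    "automl_settings[\"time_budget\"] = 3600  # one hour instead of 30 minutes\n",
    "automl_model.fit(X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, custom_hp=custom_hp, **automl_settings)\n",
    "```"
   ]
  },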
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xpA-rzYzTjhI",
"outputId": "00e69a54-b44e-41f0-ffda-14e3a9fe45c5"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"the best loss for spooky author identification: 0.1542390194075587\n"
]
}
],
"source": [
"print(\"the best loss for spooky author identification: {}\".format(automl_model.best_loss))"
]
},
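  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Besides the best loss, the fitted `AutoML` object records the best estimator and its hyperparameter configuration. A minimal sketch for inspecting them:\n",
    "```python\n",
    "print(automl_model.best_estimator)  # name of the best learner, here \"transformer\"\n",
    "print(automl_model.best_config)     # the hyperparameters of the best trial\n",
    "```"
   ]
  },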
{
"cell_type": "markdown",
"metadata": {
"id": "TzDjaBTA6ZaD"
},
"source": [
"Next, we set the model to roberta and run again. RoBERTa outperforms BERT by 15% on the [SuperGLUE](https://super.gluebenchmark.com/) benchmark, as well as [GLUE](https://gluebenchmark.com/), [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/), [RACE](https://www.cs.cmu.edu/~glai1/data/race/), etc. Does this mean we should always use RoBERTa and never use BERT? To answer this question, we run the same experiment again with RoBERTa:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "6MTZCJz1TjhJ",
"outputId": "003254b1-149f-4158-d11c-135bfa4dae09"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:48:38] {1768} INFO - task = seq-classification\n",
"[flaml.automl.logger: 04-12 17:48:38] {1775} INFO - Data split method: stratified\n",
"[flaml.automl.logger: 04-12 17:48:38] {1778} INFO - Evaluation method: holdout\n",
"[flaml.automl.logger: 04-12 17:48:38] {1891} INFO - Minimizing error metric: 1-accuracy\n",
"[flaml.automl.logger: 04-12 17:48:38] {2011} INFO - List of ML learners in AutoML Run: ['transformer']\n",
"[flaml.automl.logger: 04-12 17:48:38] {2341} INFO - iteration 0, current learner transformer\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "9f296e6acfc840aa9c44e432e4f123cf",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)lve/main/config.json: 0%| | 0.00/481 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "d9d95117a756415d9decc16facacdab5",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)olve/main/vocab.json: 0%| | 0.00/899k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e71a4827c72c48778510a63e517470f8",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)olve/main/merges.txt: 0%| | 0.00/456k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "8faac462dd7e4798854b167be7f05201",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)/main/tokenizer.json: 0%| | 0.00/1.36M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "0aac55f49a9443aa8890d4874e8e5a86",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading pytorch_model.bin: 0%| | 0.00/501M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.8735764622688293, 'eval_automl_metric': 0.34811031664964254, 'eval_runtime': 62.4127, 'eval_samples_per_second': 78.43, 'eval_steps_per_second': 78.43, 'epoch': 0.3}\n",
"{'train_runtime': 141.5981, 'train_samples_per_second': 31.111, 'train_steps_per_second': 0.975, 'train_loss': 1.0305425671563633, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:52:30] {2479} INFO - Estimated sufficient time budget=2324423s. Estimated necessary time budget=2324s.\n",
"[flaml.automl.logger: 04-12 17:52:30] {2526} INFO - at 232.6s,\testimator transformer's best error=0.3481,\tbest estimator transformer's best error=0.3481\n",
"[flaml.automl.logger: 04-12 17:52:30] {2341} INFO - iteration 1, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 1.0598695278167725, 'eval_automl_metric': 0.6020429009193053, 'eval_runtime': 61.0626, 'eval_samples_per_second': 80.164, 'eval_steps_per_second': 80.164, 'epoch': 0.3}\n",
"{'train_runtime': 138.5775, 'train_samples_per_second': 31.789, 'train_steps_per_second': 0.498, 'train_loss': 1.0830751501995584, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:56:12] {2526} INFO - at 454.0s,\testimator transformer's best error=0.3481,\tbest estimator transformer's best error=0.3481\n",
"[flaml.automl.logger: 04-12 17:56:12] {2341} INFO - iteration 2, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.6515682339668274, 'eval_automl_metric': 0.2645556690500511, 'eval_runtime': 60.5612, 'eval_samples_per_second': 80.827, 'eval_steps_per_second': 80.827, 'epoch': 0.3}\n",
"{'train_runtime': 136.2654, 'train_samples_per_second': 32.328, 'train_steps_per_second': 2.025, 'train_loss': 0.8835090968919836, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 17:59:50] {2526} INFO - at 672.5s,\testimator transformer's best error=0.2646,\tbest estimator transformer's best error=0.2646\n",
"[flaml.automl.logger: 04-12 17:59:50] {2341} INFO - iteration 3, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.5933533906936646, 'eval_automl_metric': 0.24147088866189992, 'eval_runtime': 62.0616, 'eval_samples_per_second': 78.873, 'eval_steps_per_second': 78.873, 'epoch': 0.3}\n",
"{'train_runtime': 138.0465, 'train_samples_per_second': 31.911, 'train_steps_per_second': 1.999, 'train_loss': 0.8010869786359262, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:03:31] {2526} INFO - at 893.5s,\testimator transformer's best error=0.2415,\tbest estimator transformer's best error=0.2415\n",
"[flaml.automl.logger: 04-12 18:03:31] {2341} INFO - iteration 4, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.737, 'learning_rate': 4.688468079515019e-06, 'epoch': 0.54}\n",
"{'eval_loss': 0.4920736253261566, 'eval_automl_metric': 0.192849846782431, 'eval_runtime': 61.6342, 'eval_samples_per_second': 79.42, 'eval_steps_per_second': 79.42, 'epoch': 1.0}\n",
"{'train_runtime': 304.3584, 'train_samples_per_second': 48.246, 'train_steps_per_second': 3.016, 'train_loss': 0.6382612340590533, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:09:59] {2526} INFO - at 1280.8s,\testimator transformer's best error=0.1928,\tbest estimator transformer's best error=0.1928\n",
"[flaml.automl.logger: 04-12 18:09:59] {2341} INFO - iteration 5, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.6672, 'learning_rate': 8.147241469004167e-06, 'epoch': 0.54}\n",
"{'eval_loss': 0.44737380743026733, 'eval_automl_metric': 0.17568947906026555, 'eval_runtime': 64.4479, 'eval_samples_per_second': 75.953, 'eval_steps_per_second': 75.953, 'epoch': 1.0}\n",
"{'train_runtime': 307.2349, 'train_samples_per_second': 47.794, 'train_steps_per_second': 2.988, 'train_loss': 0.58052682980473, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:16:30] {2526} INFO - at 1672.1s,\testimator transformer's best error=0.1757,\tbest estimator transformer's best error=0.1757\n",
"[flaml.automl.logger: 04-12 18:16:30] {2642} INFO - selected model: None\n",
"[flaml.automl.logger: 04-12 18:16:30] {2041} INFO - fit succeeded\n",
"[flaml.automl.logger: 04-12 18:16:30] {2042} INFO - Time taken to find the best model: 1672.051875114441\n",
"[flaml.automl.logger: 04-12 18:16:30] {2054} WARNING - Time taken to find the best model is 93% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
]
}
],
"source": [
"automl_settings[\"fit_kwargs_by_estimator\"][\"transformer\"][\"model_path\"] = \"roberta-base\"\n",
"automl_settings[\"log_file_name\"] = \"spooky_roberta.log\"\n",
"automl_model.fit(X_train=X_train, y_train=y_train,X_val=X_val, y_val=y_val, custom_hp=custom_hp, **automl_settings)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "eJygKvYzkHwQ",
"outputId": "792398d5-74d8-446d-c0a2-1210ea25b0e0"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:18:23] {1768} INFO - task = seq-classification\n",
"[flaml.automl.logger: 04-12 18:18:23] {1775} INFO - Data split method: stratified\n",
"[flaml.automl.logger: 04-12 18:18:23] {1778} INFO - Evaluation method: holdout\n",
"[flaml.automl.logger: 04-12 18:18:23] {1891} INFO - Minimizing error metric: 1-accuracy\n",
"[flaml.automl.logger: 04-12 18:18:23] {2011} INFO - List of ML learners in AutoML Run: ['transformer_ms']\n",
"[flaml.automl.logger: 04-12 18:18:23] {2341} INFO - iteration 0, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.8885396122932434, 'eval_automl_metric': 0.3881511746680286, 'eval_runtime': 60.8992, 'eval_samples_per_second': 80.379, 'eval_steps_per_second': 80.379, 'epoch': 0.3}\n",
"{'train_runtime': 135.217, 'train_samples_per_second': 32.579, 'train_steps_per_second': 1.021, 'train_loss': 0.9821738643922667, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:22:02] {2479} INFO - Estimated sufficient time budget=2181390s. Estimated necessary time budget=2181s.\n",
"[flaml.automl.logger: 04-12 18:22:02] {2526} INFO - at 218.2s,\testimator transformer_ms's best error=0.3882,\tbest estimator transformer_ms's best error=0.3882\n",
"[flaml.automl.logger: 04-12 18:22:02] {2341} INFO - iteration 1, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.9509267807006836, 'eval_automl_metric': 0.4549540347293156, 'eval_runtime': 61.6199, 'eval_samples_per_second': 79.439, 'eval_steps_per_second': 79.439, 'epoch': 0.3}\n",
"{'train_runtime': 141.4275, 'train_samples_per_second': 31.148, 'train_steps_per_second': 0.488, 'train_loss': 1.0141158173049705, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:25:43] {2526} INFO - at 439.0s,\testimator transformer_ms's best error=0.3882,\tbest estimator transformer_ms's best error=0.3882\n",
"[flaml.automl.logger: 04-12 18:25:43] {2341} INFO - iteration 2, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.6531080007553101, 'eval_automl_metric': 0.2659856996935649, 'eval_runtime': 64.0156, 'eval_samples_per_second': 76.466, 'eval_steps_per_second': 76.466, 'epoch': 0.3}\n",
"{'train_runtime': 139.7816, 'train_samples_per_second': 31.515, 'train_steps_per_second': 1.975, 'train_loss': 0.8856382508208787, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:29:27] {2526} INFO - at 663.8s,\testimator transformer_ms's best error=0.2660,\tbest estimator transformer_ms's best error=0.2660\n",
"[flaml.automl.logger: 04-12 18:29:27] {2341} INFO - iteration 3, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.9636347889900208, 'eval_automl_metric': 0.4727272727272728, 'eval_runtime': 62.0469, 'eval_samples_per_second': 78.892, 'eval_steps_per_second': 78.892, 'epoch': 0.3}\n",
"{'train_runtime': 135.5242, 'train_samples_per_second': 32.505, 'train_steps_per_second': 2.037, 'train_loss': 1.0159390214560688, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:33:03] {2526} INFO - at 879.9s,\testimator transformer_ms's best error=0.2660,\tbest estimator transformer_ms's best error=0.2660\n",
"[flaml.automl.logger: 04-12 18:33:03] {2341} INFO - iteration 4, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.5451087951660156, 'eval_automl_metric': 0.21634320735444335, 'eval_runtime': 63.5443, 'eval_samples_per_second': 77.033, 'eval_steps_per_second': 77.033, 'epoch': 0.3}\n",
"{'train_runtime': 138.1395, 'train_samples_per_second': 31.889, 'train_steps_per_second': 1.998, 'train_loss': 0.735467551411062, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:36:49] {2526} INFO - at 1105.6s,\testimator transformer_ms's best error=0.2163,\tbest estimator transformer_ms's best error=0.2163\n",
"[flaml.automl.logger: 04-12 18:36:49] {2341} INFO - iteration 5, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.4679938852787018, 'eval_automl_metric': 0.18181818181818177, 'eval_runtime': 63.3825, 'eval_samples_per_second': 77.23, 'eval_steps_per_second': 77.23, 'epoch': 1.0}\n",
"{'train_runtime': 301.9871, 'train_samples_per_second': 48.625, 'train_steps_per_second': 1.52, 'train_loss': 0.6205861874914896, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:43:17] {2526} INFO - at 1493.3s,\testimator transformer_ms's best error=0.1818,\tbest estimator transformer_ms's best error=0.1818\n",
"[flaml.automl.logger: 04-12 18:43:17] {2341} INFO - iteration 6, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.5505358576774597, 'eval_automl_metric': 0.2140960163432074, 'eval_runtime': 62.2359, 'eval_samples_per_second': 78.652, 'eval_steps_per_second': 78.652, 'epoch': 0.3}\n",
"{'train_runtime': 136.4186, 'train_samples_per_second': 32.292, 'train_steps_per_second': 2.023, 'train_loss': 0.7632542485776155, 'epoch': 0.3}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:46:54] {2526} INFO - at 1710.6s,\testimator transformer_ms's best error=0.1818,\tbest estimator transformer_ms's best error=0.1818\n",
"[flaml.automl.logger: 04-12 18:46:54] {2341} INFO - iteration 7, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.4987771511077881, 'eval_automl_metric': 0.20081716036772213, 'eval_runtime': 63.2947, 'eval_samples_per_second': 77.337, 'eval_steps_per_second': 77.337, 'epoch': 1.0}\n",
"{'train_runtime': 302.7583, 'train_samples_per_second': 48.501, 'train_steps_per_second': 1.516, 'train_loss': 0.6465008638003813, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:53:22] {2526} INFO - at 2098.8s,\testimator transformer_ms's best error=0.1818,\tbest estimator transformer_ms's best error=0.1818\n",
"[flaml.automl.logger: 04-12 18:53:22] {2341} INFO - iteration 8, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.4121459722518921, 'eval_automl_metric': 0.15955056179775284, 'eval_runtime': 63.1345, 'eval_samples_per_second': 77.533, 'eval_steps_per_second': 77.533, 'epoch': 1.0}\n",
"{'train_runtime': 302.9621, 'train_samples_per_second': 48.468, 'train_steps_per_second': 1.515, 'train_loss': 0.5716960965158633, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 18:59:46] {2526} INFO - at 2482.5s,\testimator transformer_ms's best error=0.1596,\tbest estimator transformer_ms's best error=0.1596\n",
"[flaml.automl.logger: 04-12 18:59:46] {2341} INFO - iteration 9, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.47075968980789185, 'eval_automl_metric': 0.18733401430030638, 'eval_runtime': 64.0365, 'eval_samples_per_second': 76.441, 'eval_steps_per_second': 76.441, 'epoch': 1.0}\n",
"{'train_runtime': 302.2398, 'train_samples_per_second': 48.584, 'train_steps_per_second': 1.519, 'train_loss': 0.6250811142599401, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 19:06:14] {2526} INFO - at 2870.2s,\testimator transformer_ms's best error=0.1596,\tbest estimator transformer_ms's best error=0.1596\n",
"[flaml.automl.logger: 04-12 19:06:14] {2341} INFO - iteration 10, current learner transformer_ms\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 0.38394054770469666, 'eval_automl_metric': 0.1491317671092952, 'eval_runtime': 62.4366, 'eval_samples_per_second': 78.4, 'eval_steps_per_second': 78.4, 'epoch': 1.0}\n",
"{'train_runtime': 300.1761, 'train_samples_per_second': 48.918, 'train_steps_per_second': 1.529, 'train_loss': 0.5415585918883612, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 19:12:35] {2526} INFO - at 3252.0s,\testimator transformer_ms's best error=0.1491,\tbest estimator transformer_ms's best error=0.1491\n",
"[flaml.automl.logger: 04-12 19:12:35] {2642} INFO - selected model: None\n",
"[flaml.automl.logger: 04-12 19:12:35] {2041} INFO - fit succeeded\n",
"[flaml.automl.logger: 04-12 19:12:35] {2042} INFO - Time taken to find the best model: 3251.999900817871\n",
"[flaml.automl.logger: 04-12 19:12:35] {2054} WARNING - Time taken to find the best model is 90% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
]
}
],
"source": [
"automl_settings[\"time_budget\"] = 3600\n",
"automl_settings[\"estimator_list\"] = [\"transformer_ms\"]\n",
"automl_settings[\"log_file_name\"] = \"spooky_ms.log\" \n",
"automl_settings[\"fit_kwargs_by_estimator\"] = { \n",
" \"transformer_ms\": {\n",
" \"output_dir\": \"data/output/\" \n",
" }\n",
"} \n",
"\n",
"from flaml import tune\n",
"\n",
"custom_hp = {\n",
" \"transformer_ms\": {\n",
" \"model_path\": {\n",
" \"domain\": tune.choice([\"bert-base-uncased\", \"roberta-base\"]),\n",
" \"init_value\": \"bert-base-uncased\"\n",
" },\n",
" \"num_train_epochs\": {\n",
" \"domain\": tune.choice([0.3, 1, 2, 3, 4, 5]),\n",
" \"init_value\": 0.3, \n",
" \"low_cost_init_value\": 0.3,\n",
" },\n",
" }\n",
"}\n",
"\n",
"automl_model.fit(X_train=X_train, y_train=y_train,X_val=X_val, y_val=y_val, custom_hp=custom_hp, **automl_settings)"
]
},
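  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Because `model_path` is now part of the search space, the winning checkpoint can be read back from the best configuration. A minimal sketch, assuming the `fit` call above has finished:\n",
    "```python\n",
    "print(automl_model.best_loss)                      # best 1-accuracy found with model selection\n",
    "print(automl_model.best_config.get(\"model_path\"))  # which checkpoint won, e.g. bert-base-uncased\n",
    "```"
   ]
  },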
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "58KIeU-xyj13",
"outputId": "50801d44-8fb7-4e9b-f566-40cf764cea0b"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"record_id\": 0, \"iter_per_learner\": 1, \"logged_metric\": {\"pred_time\": 0.01281316652970124, \"intermediate_results\": [{\"eval_loss\": 0.8885396122932434, \"eval_automl_metric\": 0.3881511746680286, \"eval_runtime\": 60.8992, \"eval_samples_per_second\": 80.379, \"eval_steps_per_second\": 80.379, \"epoch\": 0.3, \"train_runtime\": 135.217, \"train_samples_per_second\": 32.579, \"train_steps_per_second\": 1.021, \"train_loss\": 0.9821738643922667}]}, \"trial_time\": 218.13677191734314, \"wall_clock_time\": 218.22574067115784, \"validation_loss\": 0.3881511746680286, \"config\": {\"learning_rate\": 9.999999999999999e-06, \"num_train_epochs\": 0.3, \"per_device_train_batch_size\": 32, \"seed\": 20, \"global_max_steps\": 138, \"model_path\": \"bert-base-uncased\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"record_id\": 1, \"iter_per_learner\": 2, \"logged_metric\": {\"pred_time\": 0.013203539862939602, \"intermediate_results\": [{\"eval_loss\": 0.9509267807006836, \"eval_automl_metric\": 0.4549540347293156, \"eval_runtime\": 61.6199, \"eval_samples_per_second\": 79.439, \"eval_steps_per_second\": 79.439, \"epoch\": 0.3, \"train_runtime\": 141.4275, \"train_samples_per_second\": 31.148, \"train_steps_per_second\": 0.488, \"train_loss\": 1.0141158173049705}]}, \"trial_time\": 220.74192595481873, \"wall_clock_time\": 438.9754583835602, \"validation_loss\": 0.4549540347293156, \"config\": {\"learning_rate\": 9.706892218498696e-06, \"num_train_epochs\": 0.3, \"per_device_train_batch_size\": 64, \"seed\": 14, \"global_max_steps\": 69, \"model_path\": \"bert-base-uncased\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"record_id\": 2, \"iter_per_learner\": 3, \"logged_metric\": {\"pred_time\": 0.013624806691482435, \"intermediate_results\": [{\"eval_loss\": 0.6531080007553101, \"eval_automl_metric\": 0.2659856996935649, \"eval_runtime\": 64.0156, \"eval_samples_per_second\": 76.466, \"eval_steps_per_second\": 76.466, \"epoch\": 0.3, \"train_runtime\": 139.7816, \"train_samples_per_second\": 31.515, \"train_steps_per_second\": 1.975, \"train_loss\": 0.8856382508208787}]}, \"trial_time\": 224.63303184509277, \"wall_clock_time\": 663.8141267299652, \"validation_loss\": 0.2659856996935649, \"config\": {\"learning_rate\": 1.0301958417692867e-05, \"num_train_epochs\": 0.3, \"per_device_train_batch_size\": 16, \"seed\": 26, \"global_max_steps\": 276, \"model_path\": \"roberta-base\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"record_id\": 3, \"iter_per_learner\": 4, \"logged_metric\": {\"pred_time\": 0.01327014427262989, \"intermediate_results\": [{\"eval_loss\": 0.9636347889900208, \"eval_automl_metric\": 0.4727272727272728, \"eval_runtime\": 62.0469, \"eval_samples_per_second\": 78.892, \"eval_steps_per_second\": 78.892, \"epoch\": 0.3, \"train_runtime\": 135.5242, \"train_samples_per_second\": 32.505, \"train_steps_per_second\": 2.037, \"train_loss\": 1.0159390214560688}]}, \"trial_time\": 215.93403434753418, \"wall_clock_time\": 879.8893353939056, \"validation_loss\": 0.4727272727272728, \"config\": {\"learning_rate\": 4.403698954265022e-06, \"num_train_epochs\": 0.3, \"per_device_train_batch_size\": 16, \"seed\": 31, \"global_max_steps\": 276, \"model_path\": \"bert-base-uncased\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"record_id\": 4, \"iter_per_learner\": 5, \"logged_metric\": {\"pred_time\": 0.013769044720237662, \"intermediate_results\": [{\"eval_loss\": 0.5451087951660156, \"eval_automl_metric\": 0.21634320735444335, \"eval_runtime\": 63.5443, \"eval_samples_per_second\": 77.033, \"eval_steps_per_second\": 77.033, \"epoch\": 0.3, \"train_runtime\": 138.1395, \"train_samples_per_second\": 31.889, \"train_steps_per_second\": 1.998, \"train_loss\": 0.735467551411062}]}, \"trial_time\": 225.4983992576599, \"wall_clock_time\": 1105.595395565033, \"validation_loss\": 0.21634320735444335, \"config\": {\"learning_rate\": 2.4100273052744602e-05, \"num_train_epochs\": 0.3, \"per_device_train_batch_size\": 16, \"seed\": 21, \"global_max_steps\": 276, \"model_path\": \"roberta-base\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"record_id\": 5, \"iter_per_learner\": 6, \"logged_metric\": {\"pred_time\": 0.013779218009348665, \"intermediate_results\": [{\"eval_loss\": 0.4679938852787018, \"eval_automl_metric\": 0.18181818181818177, \"eval_runtime\": 63.3825, \"eval_samples_per_second\": 77.23, \"eval_steps_per_second\": 77.23, \"epoch\": 1.0, \"train_runtime\": 301.9871, \"train_samples_per_second\": 48.625, \"train_steps_per_second\": 1.52, \"train_loss\": 0.6205861874914896}]}, \"trial_time\": 387.57434844970703, \"wall_clock_time\": 1493.3338241577148, \"validation_loss\": 0.18181818181818177, \"config\": {\"learning_rate\": 1.85469436732702e-05, \"num_train_epochs\": 1, \"per_device_train_batch_size\": 32, \"seed\": 19, \"global_max_steps\": 459, \"model_path\": \"roberta-base\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"record_id\": 6, \"iter_per_learner\": 7, \"logged_metric\": {\"pred_time\": 0.013360443982211026, \"intermediate_results\": [{\"eval_loss\": 0.5505358576774597, \"eval_automl_metric\": 0.2140960163432074, \"eval_runtime\": 62.2359, \"eval_samples_per_second\": 78.652, \"eval_steps_per_second\": 78.652, \"epoch\": 0.3, \"train_runtime\": 136.4186, \"train_samples_per_second\": 32.292, \"train_steps_per_second\": 2.023, \"train_loss\": 0.7632542485776155}]}, \"trial_time\": 217.12110829353333, \"wall_clock_time\": 1710.5791184902191, \"validation_loss\": 0.2140960163432074, \"config\": {\"learning_rate\": 2.4100273052744602e-05, \"num_train_epochs\": 0.3, \"per_device_train_batch_size\": 16, \"seed\": 21, \"global_max_steps\": 276, \"model_path\": \"bert-base-uncased\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"record_id\": 7, \"iter_per_learner\": 8, \"logged_metric\": {\"pred_time\": 0.013658072293352667, \"intermediate_results\": [{\"eval_loss\": 0.4987771511077881, \"eval_automl_metric\": 0.20081716036772213, \"eval_runtime\": 63.2947, \"eval_samples_per_second\": 77.337, \"eval_steps_per_second\": 77.337, \"epoch\": 1.0, \"train_runtime\": 302.7583, \"train_samples_per_second\": 48.501, \"train_steps_per_second\": 1.516, \"train_loss\": 0.6465008638003813}]}, \"trial_time\": 387.9533214569092, \"wall_clock_time\": 2098.8422219753265, \"validation_loss\": 0.20081716036772213, \"config\": {\"learning_rate\": 1.3298483157591481e-05, \"num_train_epochs\": 1, \"per_device_train_batch_size\": 32, \"seed\": 11, \"global_max_steps\": 459, \"model_path\": \"roberta-base\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"record_id\": 8, \"iter_per_learner\": 9, \"logged_metric\": {\"pred_time\": 0.013322489303027768, \"intermediate_results\": [{\"eval_loss\": 0.4121459722518921, \"eval_automl_metric\": 0.15955056179775284, \"eval_runtime\": 63.1345, \"eval_samples_per_second\": 77.533, \"eval_steps_per_second\": 77.533, \"epoch\": 1.0, \"train_runtime\": 302.9621, \"train_samples_per_second\": 48.468, \"train_steps_per_second\": 1.515, \"train_loss\": 0.5716960965158633}]}, \"trial_time\": 383.42229533195496, \"wall_clock_time\": 2482.548007249832, \"validation_loss\": 0.15955056179775284, \"config\": {\"learning_rate\": 2.586679364428794e-05, \"num_train_epochs\": 1, \"per_device_train_batch_size\": 32, \"seed\": 27, \"global_max_steps\": 459, \"model_path\": \"bert-base-uncased\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"record_id\": 9, \"iter_per_learner\": 10, \"logged_metric\": {\"pred_time\": 0.013703715837768929, \"intermediate_results\": [{\"eval_loss\": 0.47075968980789185, \"eval_automl_metric\": 0.18733401430030638, \"eval_runtime\": 64.0365, \"eval_samples_per_second\": 76.441, \"eval_steps_per_second\": 76.441, \"epoch\": 1.0, \"train_runtime\": 302.2398, \"train_samples_per_second\": 48.584, \"train_steps_per_second\": 1.519, \"train_loss\": 0.6250811142599401}]}, \"trial_time\": 387.52638721466064, \"wall_clock_time\": 2870.2234270572662, \"validation_loss\": 0.18733401430030638, \"config\": {\"learning_rate\": 1.7750603229357797e-05, \"num_train_epochs\": 1, \"per_device_train_batch_size\": 32, \"seed\": 21, \"global_max_steps\": 459, \"model_path\": \"roberta-base\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"record_id\": 10, \"iter_per_learner\": 11, \"logged_metric\": {\"pred_time\": 0.013274957502947642, \"intermediate_results\": [{\"eval_loss\": 0.38394054770469666, \"eval_automl_metric\": 0.1491317671092952, \"eval_runtime\": 62.4366, \"eval_samples_per_second\": 78.4, \"eval_steps_per_second\": 78.4, \"epoch\": 1.0, \"train_runtime\": 300.1761, \"train_samples_per_second\": 48.918, \"train_steps_per_second\": 1.529, \"train_loss\": 0.5415585918883612}]}, \"trial_time\": 381.45426845550537, \"wall_clock_time\": 3251.999900817871, \"validation_loss\": 0.1491317671092952, \"config\": {\"learning_rate\": 3.7693987341768903e-05, \"num_train_epochs\": 1, \"per_device_train_batch_size\": 32, \"seed\": 33, \"global_max_steps\": 459, \"model_path\": \"bert-base-uncased\"}, \"learner\": \"transformer_ms\", \"sample_size\": 14684}\n",
"\n",
"{\"curr_best_record_id\": 10}\n",
"\n"
]
}
],
"source": [
"with open(\"spooky_ms.log\", \"r\") as fin:\n",
" for line in fin:\n",
" print(line)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ujti_Dih5_-3"
},
"source": [
"We plot the performance of BERT, RoBERTa, and model selection w.r.t. the wall clock time. We find two things: \n",
"\n",
"(1) although RoBERTa frequently outperforms BERT on benchmark datasets, its performance on the spooky-author-identification dataset is worse than BERT using the same time budget. Therefore, model selection is a non trivial problem;\n",
"\n",
"(2) by using FLAML's automated model selection, we are able to achieve a better performance than using just one model. Therefore, automated model selection is helpful;"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "rxV-dTUhxaQO",
"outputId": "4bc9e6b5-6fe7-4da8-9e64-8de2b47fd080"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"6\n",
"6\n",
"11\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAiwAAAGiCAYAAADEJZ3cAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABKXklEQVR4nO3dfVxUZcI//s/MMDPMJA8qMDwsgqZi5FOoEJquFQrVanbvd9fMIs2lVya9rMkUSyVsE9t+kVtrunerZnf3htVa2qqoUNiWKCW16qqUStIDDz7cgjjAPF2/P4jRkQFmcIY5MJ93L14x17nmnOtcHJwP5zrnOjIhhAARERGRhMm93QAiIiKizjCwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5HUpsKxduxaxsbHw9/dHUlISSktL261rMpmwcuVK3HjjjfD398eoUaNQUFBwXeskIiIi3+JyYNmyZQv0ej2ys7NRVlaGUaNGITU1FbW1tQ7rL1u2DH/961/x+uuv49ixY3jsscdw33334euvv+7yOomIiMi3yFx9+GFSUhLGjRuHv/zlLwAAq9WK6OhoPPHEE8jKympTPzIyEs899xwWLFhgK/vtb38LjUaDd955p0vrJCIiIt/i50plo9GIQ4cOYenSpbYyuVyOlJQUlJSUOHxPc3Mz/P397co0Gg0+//zz61pnc3Oz7bXVasWFCxfQv39/yGQyV3aJiIiIvEQIgUuXLiEyMhJyeceDPi4FlnPnzsFisUCn09mV63Q6nDhxwuF7UlNTkZeXh0mTJuHGG29EUVERtm7dCovF0uV15ubmIicnx5WmExERkUT98MMP+NWvftVhHZcCS1f8+c9/RkZGBoYNGwaZTIYbb7wRc+fOxcaNG7u8zqVLl0Kv19te19XVYcCAAaioqEBAQIA7mt3jmEwmfPrpp7j99tuhVCq93RzJYj85j33lHPaTc9hPzvG1frp06RIGDhzo1Ge3S4ElJCQECoUCNTU1duU1NTUIDw93+J7Q0FB89NFHaGpqwvnz5xEZGYmsrCwMGjSoy+tUq9VQq9Vtyvv164fAwEBXdqnXMJlM0Gq16N+/v08c5F3FfnIe+8o57CfnsJ+c42v91LqPzlzO4dJdQiqVCmPGjEFRUZGtzGq1oqioCMnJyR2+19/fH1FRUTCbzfjHP/6Be++997rXSURERL7B5SEhvV6Phx9+GGPHjkViYiLWrFmDy5cvY+7cuQCA9PR0REVFITc3FwBw8OBB/PTTTxg9ejR++uknPP/887BarVi8eLHT6yQiIiLf5nJgmTlzJs6ePYsVK1aguroao0ePRkFBge2i2crKSrsrfZuamrBs2TKcPn0affr0wd13343/+Z//QXBwsNPrJCIiIt/WpYtuMzMzkZmZ6XBZcXGx3etf//rXOHbs2HWt0x2EEDCbzba7k3obk8kEPz8/NDU1SX4fFQoF/Pz8eAs6ERE5zeN3CUmB0WhEVVUVDAaDt5viMUIIhIeH44cffugRQUCr1SIiIgIqlcrbTSEioh6g1wcWq9WKiooKKBQKREZGQqVS9YgPdFdZrVY0NDSgT58+nU6+401CCBiNRpw9exYVFRUYMmSIpNtLRETS0OsDi9FotE31r9Vqvd0cj7FarTAajfD395d8ANBoNFAqlThz5oytzURERB2R9iebG0n9Q9zX8OdBRESu4KcGERERSR4DCxEREUkeA4uETZ48GU8++aS3m0FEROR1DCw+aM6cOZgxY4a3m0FEROS0Xn+XkDtZrAKlFRdQe6kJYQH+SBzYDwp5z7lF2mKx9MpbuomIqPfjGRYnFRytwm0vfYJZbx7AwvxvMOvNA7jtpU9QcLTKo9s1m83IzMxEUFAQQkJCsHz5cgghAADNzc1YtGgRoqKiEBAQgJSUFLuZht966y0EBwdj+/btiI+Ph1qtxiOPPILNmzdj27ZtkMlkkMlkbWYnJiIikhqeYXFCwdEqzH+nDOKa8uq6Jsx/pwzrHkxA2vAIj2x78+bNmDdvHkpLS/HVV1/h0UcfxYABA5CRkYHMzEwcO3YM+fn5CA8PR35+Pu6++24cOXIEQ4YMAQAYDAa89NJL+Nvf/ob+/fsjIiICjY2NqK+vx6ZNmwAA/fr180jbiYiI3IWBpRMWq0DOx8fahBUAEABkAHI+PoYp8eEeGR6Kjo7Gq6++CplMhri4OBw5cgSvvvoqUlNTsWnTJlRWViIyMhJWqxVPPPEE9u3bh02bNmHVqlUAWp4x9MYbb2DUqFG2dWo0GjQ3NyM8PNzt7SUiIvIEBpZOlFZcQFVdU7vLBYCquiaUVlxA8o393b79W2+91e66k+TkZLzyyis4cuQILBYLhg4dale/ubkZ/ftfaYdKpcLIkSPd3i4iIqLuxMDSidpL7YeVrtRzl4aGBigUChw6dAgKhcLuWUKBgYG2ehqNhhfaEhFRj8fA0omwAOeec+NsPVcdPHjQ7vWBAwcwZMgQ3HLLLbBYLKitrcXEiRNhtVpRX1+PwMDATqe9V6lUsFgsHmkvERGRJ/AuoU4kDuyHiCB/tHeOQgYgIqjlFmdPqKyshF6vR3l5Od599128/vrrWLhwIYYOHYrZs2cjPT0dW7duRUVFBQ4dOoTVq1djx44dHa4zNjYWhw8fRnl5Oc6dOweTyeSRthMREbkLA0snFHIZsqfFA0Cb0NL6OntavMfmY0lPT0djYyMSExOxYMECLFy4EI8++igAYNOmTUhPT8fTTz+Nm266CQ8++CC+/PJLDBgwoMN1ZmRkIC4uDmPHjkVoaCi++OILj7SdiIjIXTgk5IS04RFY92ACcj4+ZncBbniQP7KnxXvsluar50dZt25dm+VKpRI5OTnIyclxOCQ0Z84czJkzp837QkNDsWfPHo+0mYiIyBMYWJyUNjwCU+LDe/RMt0RERD0VA4sLFHKZR25dJiIioo7xGhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAaWXuaOO+7Ak08+6e1mEBERuRUnjnOF1QKc2Q801AB9dEDMeECu8Harus2cOXNw8eJFfPTRR95uChER+RgGFmcd2w4ULAHqf75SFhgJpL0ExE/vliYYjUaoVKpu2dbVLBYLZDI+goCIiLyHQ0LOOLYdeC/dPqwAQH1VS/mx7R7Z7OTJk5GZmYknn3wSISEhSE1Nxb59+5CYmAi1Wo2IiAhkZWXBbDbbvc9sNiMzMxNBQUEICQnB8uXLIYSwLW9ubsaiRYsQFRWFG264AUlJSXYPWnzrrbcQHByM7du3Iz4+Hmq1Go888gg2b96Mbdu2QSaTQSaT2d6zZMkSDB06FFqtFoMGDcLy5cthMpk80idEROSbeIalM1ZLy5kVCAcLBQAZUJAFDLvHI8NDmzdvxvz58/HFF1+guroad999N+bMmYO3334bJ06cQEZGBvz9/bFixQq798ybNw+lpaX46quv8Oijj
2LAgAHIyMgAAGRmZuLYsWPIz89HZGQkPvzwQ6SlpeHIkSMYMmQIAMBgMOCll17C3/72N/Tv3x8RERFobGxEfX09Nm3aBADo168fACAgIABvvfUWIiMjceTIEWRkZCAgIACLFy92e38QEZFvYmDpzJn9bc+s2BFA/U8t9QZOdPvmhwwZgj/96U8AgLfffhvR0dH4y1/+AplMhmHDhuHnn3/GkiVLsGzZMtt7oqOj8eqrr0ImkyEuLg5HjhzBq6++ioyMDFRWVmLTpk2orKxEZGQkAGDRokUoKCjApk2bsGrVKgCAyWTCG2+8gVGjRtnWq9Fo0NzcjPDwcLs2Xr3t2NhYLFq0CPn5+QwsRETkNgwsnWmocW89F40ZM8b2/fHjx5GcnGx3PcmECRPQ0NCAH3/8EcHBwQCAW2+91a5OcnIyXnnlFVgsFhw5cgQWiwVDhw61205zczP697/yJGqVSoWRI0c61cYtW7bgtddew6lTp9DQ0ACz2YzAwMCu7C4REZFDDCyd6aNzbz0X3XDDDW5dX0NDAxQKBQ4dOgSFwn4Iq0+fPrbvNRqNUxfalpSUYPbs2cjJyUFqaiqCgoKQn5+PV155xa3tJiIi38bA0pmY8S13A9VXwfF1LLKW5THjPd6Um266Cf/4xz8ghLCFiS+++AIBAQH41a9+hYaGBgDAwYMH7d534MABDBkyBAqFArfccgssFgtqa2sxcaJrQ1gqlQoWi8WubP/+/YiJicFzzz1nKztz5kxXdo+IiKhdvEuoM3JFy63LAIBrzzj88jptdbfMx/L444/jhx9+wBNPPIETJ05g27ZtyM7Ohl6vh1x+5UdZWVkJvV6P8vJyvPvuu3j99dexcOFCAMDQoUMxe/ZspKenY+vWraioqEBpaSlyc3OxY8eODrcfGxuLw4cPo7y8HOfOnYPJZMKQIUNQWVmJ/Px8nDp1Cq+99ho+/PBDj/YDERH5HgYWZ8RPB37/NhAYYV8eGNlS3k3zsERFRWHnzp0oLS3FqFGj8Nhjj2HevHl2F70CQHp6OhobG5GYmIgFCxZg4cKFePTRR23LN23ahPT0dDz99NOIi4vDjBkz8OWXX2LAgAEdbj8jIwNxcXEYO3YsQkND8cUXX2D69Ol46qmnkJmZidGjR2P//v1Yvny5R/afiIh8l0xcPUFHD1VfX4+goCDU1dW1udizqakJFRUVGDhwIPz9/a9vQxKe6dZqtaK+vh6BgYF2Z1ukyq0/FxeYTCbs3LkTd999N5RKZbdttydiXzmH/eQc9pNzfK2fOvr8vhavYXGFXOGRW5eJiIioY9L/U5yIiIh8HgMLERERSR4DCxEREUkeAwsRERFJHgMLERERSR4DCxEREUkeAwsRERFJHgMLERERSR4njiMiIqJ2WawWlNWW4azhLEK1oUgIS4DCC7O8M7C4QCo/NCIiou5QeKYQq0tXo8ZQYyvTaXXISsxCSkxKt7aFgcVJUvqhEREReVrhmULoi/UQsH/kYK2hFvpiPfIm53Xr51+XrmFZu3YtYmNj4e/vj6SkJJSWlnZYf82aNYiLi4NGo0F0dDSeeuopNDU12ZY///zzkMlkdl/Dhg3rStM8ovWHdnVYAa780ArPFHpku5MnT8YTTzyBJ598En379oVOp8Obb76Jy5cvY+7cuQgICMDgwYOxa9cuAMDFixfx4IMPIjQ0FBqNBkOGDMGmTZs80jYiIuq9LFYLVpeubhNWANjKXip9CRarpdva5HJg2bJlC/R6PbKzs1FWVoZRo0YhNTUVtbW1Duv//e9/R1ZWFrKzs3H8+HFs2LABW7ZswbPPPmtX7+abb0ZVVZXt6/PPP+/aHrmZt39omzdvRkhICEpLS/HEE09g/vz5+N3vfofx48ejrKwMU6dOxUMPPQSDwYAXX3wRx48fx65du3D8+HGsW7cOISEhHmkXERH1XmW1ZW3+SL+agEC1oRpltWXd1iaXh4Ty8vKQkZGBuXPnAgDWr1+PHTt2YOPGjcjKympTf//+/ZgwYQIeeOABAEBsbCxmzZqFgwcP2jfEzw/h4eFOtaG5uRnNzc221/X19QBaHsttMpns6ppMJgghYLVaYbVand/RX3xV/ZVTP7Svqr/CuPBxLq+/M6NGjbKFuyVLlmD16tXo378/5s2bBwBYtmwZ1q1bh3//+9/48ccfMXr0aCQkJAAABgwYAABd2m9Ps1qtEELAZDJBoei+64Baj49rjxNqi33lHPaTc9hPzpFKP9VeqoUaagAtn3NGGAEAKqggg8yunql/19vqyn66FFiMRiMOHTqEpUuX2srkcjlSUlJQUlLi8D3jx4/HO++8g9LSUiQmJuL06dPYuXMnHnroIbt63333HSIjI+Hv74/k5GTk5ubaPnCvlZubi5ycnDble/bsgVartd/BX4JQQ0MDjEajK7sLAPjhwg9O14vTxrm8/o6YzWYMGzbMFsgAoG/fvhgyZIitTKPRAADOnDmDRx55BA8//DC++uor3H777bjnnnuQlJTk1ja5i9FoRGNjIz777DOYzeZu3/7evXu7fZs9FfvKOewn57CfnCOFfloevBwAYBRGrKxbCQDICsqCSqay1TEfM2PnsZ1d3obBYHC6rkuB5dy5c7BYLNDpdHblOp0OJ06ccPieBx54AOfOncNtt90GIQTMZjMee+wxuyGhpKQkvPXWW4iLi0NVVRVycnIwceJEHD16FAEBAW3WuXTpUuj1etvr+vp6REdHY+rUqQgMDLSr29TUhB9++AF9+vSBv7+/K7sLAIjuF+10vWu3fb38/Pxwww032K1XoVAgICCgzbb8/f1xxx134PTp09i1axcKCwsxY8YMPP7443j55Zfd2i53aGpqgkajwaRJk7r0c+kqk8mEvXv3YsqUKVAqld223Z7I031lsQocOvN/ONfQjJA+aoyJ6QuFXNb5GyWGx5Rz2E/OkUo/WawW3Lf9PpwznIMVV87Sr65bDdkv/4VqQ7F1+tbrulv26j/IO+Pxu4SKi4uxatUqvPHGG0hKSsLJkyexcOFCvPDCC1i+vCW93XXXXbb6I0eORFJSEmJiYvDee+/Zhj6uplaroVar25Qrlco2P2CLxQKZTAa5XA653PVrjMeGj4VOq0OtodbhdSwyyKDT6jA2fGyX1t+Z1rZ3VtYqLCwMc+fOxdy5c/HXv/4VzzzzDF555RW3t+t6yeVyyGQyhz+z7uCt7fZEnuirgqNVyPn4GKrqrlx8HxHkj+xp8UgbHuHWbXUXHlPOYT85x9v9pIQSTyc+3eYuISOMtiEhfaIe/urr+4PTlX106RM2JCQECoUCNTX213TU1NS0e/3J8uXL8dBDD+EPf/gDRowYgfvuuw+rVq1Cbm5uu9dWBAcHY+jQoTh58qQrzfMIhVyBrMSWa3OuHre7+vWSxCWSmI9l1apV2LZtG06ePIn//Oc/+Oc//4mbbrrJ280islNwtArz3ymzCysAUF3XhPnvlKHgaJWXWkZEV0uJSUHe5DyEacLsynVaXbff0gy4GFhUKhXGjBmDoqIiW5nVakVRURGSk5Mdvsdg
MLQ5G9B6kaUQbc9YAEBDQwNOnTqFiAhp/KVl+6FppfFDa49KpcJzzz2HkSNHYtKkSVAoFMjPz/d2s4hsLFaBnI+POThXCVtZzsfHYLE6/reBiLpXSkwKtt27zfZ63Z3rUPDbAq987rk8JKTX6/Hwww9j7NixSExMxJo1a2zzggBAeno6oqKikJubCwCYNm0a8vLycMstt9iGhJYvX45p06bZgsuiRYswbdo0xMTE4Oeff0Z2djYUCgVmzZrlxl29PikxKbg9+vZunem2uLi4Tdn333/fpqz1Lqg77rgDK1eu9MjQFJE7lFZcaHNm5WoCQFVdE0orLiD5xv7d1zAiatfVnykJOu/N8O5yYJk5cybOnj2LFStWoLq6GqNHj0ZBQYHtQtzKykq7nVu2bBlkMhmWLVuGn376CaGhoZg2bRpefPFFW50ff/wRs2bNwvnz5xEaGorbbrsNBw4cQGhoqBt20X0UcoVHbl0m8hW1l9oPK12pR0S+o0sX3WZmZiIzM9PhsmvPCvj5+SE7OxvZ2dntro/DFkS+ISzAuQv0nK1H0nb189f6q3jGjK4PnyVERN0mcWA/RAT5o7quyeF1LDIA4UH+SBzYr7ubRm527fPX1FBjefByFP9QjCmDpni3cdQj8WIHIuo2CrkM2dPiHS5rvQcve1p8j5yPha5o7/lrAPDsv5712PPXqHfzmTMs7d2RRN7Bn4fvShsegXUPJiB7+39QU3/lERvhPXweFmrR3vPXBASMwggBgdyDubg1/FbeIOCA2WyGURjRaG6ECdJ4jEGjudHbTQDgA4GldVIag8Fgm8aevK91OmZOIOWb0oZHYMLgEIx4fg8AYNOccZg0NJRnVnqB9h6aZ8SV6d1rG2uRnO94KgxqsfK9ld5uguT0+sCiUCgQHBxse5q0VquFTNb7/lG0Wq0wGo1oamqS9F8tQggYDAbU1tYiODi4Wx98SNJydThJGtSPYaWXOGs46+0mkIfcEnYLNH7e+8O/1wcWALZZeFtDS28khEBjYyM0Gk2PCGTBwcFOP52biHqOUK3j6ShUUCErKAur61bDCCPW3bkOCbqEbm6d9JnNZuzevRupqanw85PWR7TGz7ufL9LqDQ+RyWSIiIhAWFiY1x/Z7SkmkwmfffYZJk2aJPlhFqVSyTMrRL1UQlhCu89fU8lUkEOOcG04kiOTJfFIE6kxwQSVTAWNn0by/5Z3N58ILK0UCkWv/aBUKBQwm83w9/fnQU5EXtP6/DV9sb7dOlJ5/hr1LNK92IGIiHqk9h6aBwCrJq6SzPPXqGdhYCEiIre79qF5r05+FQAwOXqyl1pEPR0DCxERecTVdyyODhvtvYZQr8DAQkRERJLHwEJERESSx8BCREREksfAQkRERJLHwEJERESSx8BCREREksfAQkRERJLHwEJERESSx8BCRN5htVz5/vv99q+JiK7BwEJE3e/YdmBt4pXX//v/gDXDW8qJiBxgYCGi7nVsO/BeOlBfZV9eX9VSztBCRA4wsBBR97FagIIlAAQAAQ2aoEGT7TUAoCCLw0NE1IaftxtARD7kzH6g/mfby+P+jwAADEL1S4kA6n9qqTdwohcaSERSxTMsRNR9GmrcW4+IfAYDCxF1nz4699YjIp/BISEismOxWlBWW4baS7W210oo3bPymPFAYGTbC25tZC3LY8a7Z3tE1GvwDAuRBFmsAiWnzmPbNz+h5NR5WKyiW7ZbeKYQqf9IxSO7H0H2/mwAwH3b70PhmUL3bECuANJe+uWF7JqFv7xOW91Sj4joKjzDQiQxBUerkPPxMVTVNdnKIoL8kT0tHmnDIzy23cIzhdAX6yFgH47OGc5BX6xH3uQ8pMSkXP+G4qcDv38b2Pkc0FB5pTwwsiWsxE+//m0QUa/DwEIkIQVHqzD/nTJcez6luq4J898pw7oHEzwSWixWC1aXrrYLKwICRmGEFVYICOQezMWt4bdCLnfDidkhKWh8ZAzw+s0AgMbfbgSG3g7I5YDJcP3r70ZmsxlGYUSjuREmmLzdHElpNDd6uwnUizCwEEmExSqQ8/GxNmEFsM1Qguzt/8GEwSFQyK8dTrk+h2rKUN1wAYCyZWsyC4wyI1bWrbTVqW2sRXJ+slu3i9jolv+XPQuUuXfV3W3leys7r0REXcbAQiQRpRUX7IaBHKmpb8aI5/d4qAUvtPxPZkTAsBUe2gb5olvCboG/wt/bzaAejoGFSCJqL3UcVryh4dtlEFZV5xW7QIMmHPKfDwAQi76DTHWDR7bjaWazGbt370Zqair8/PhPqiMaPw3MZrO3m0E9HH+7iCQiLMC5v0A3zRmHpEH93Lpti9WCez+6F2cbz0LAait/IUGOV+v/BCOMCNOE4aMZH0Hhrjt4jJeh/f9+GexSalu+eiATTFDJVND4aaBUuun2b+oxWqcBOGs4i1BtKBLCEtz3O0J2GFiIJCJxYD9EBPmjuq7J4XUsMgDhQf6YNDTU7dewAH54NnkR9MV64KqtqxWAXG6GDCYsTX4aAf5qt26TqCcrPFOI1aWrUWO4MjOzTqtDVmKWe+6oIzuch4VIIhRyGbKnxTtc1hpPsqfFeyCstEiJSUHe5DyEacLsykO1oe67pZmol2idBuDqsAIAtYZa6Iv17pu7iGwYWIgkJG14BNY9mABdoP2ZjPAgf4/d0ny1lJgUbLt3m13Z1ulbGVaIruJoGoBWrWUvlb4EC5867lY8J0skMWnDIzBhcIjtbqBNc8Z5aBjIsWvnWeF4PJG9stqyNmdWriYgUG2oRlltGcaFj+vGlvVuPMNCJEFXh5OkQf26LawQUefOGs66tR45h4GFiIjIBaHaULfWI+cwsBBJjdUCfL/f/jURSUZCWAJ0Wh1kbR7g2UIGGcK14UgIS+jmlvVuDCxEUnJsO7BmOPC//+9K2drElnIikgSFXIGsxCwAaBNaWl8vSVzC67/cjIGFSCqObQfeSwfqf7Yvr69uKWdoIZIM2zQAWvtpAHRaHacB8BDeJUQkBVYLULAEVyZtE9Cgdap+a0v5rsXAoMmAp/9qu+oJuwpLM2C8DAgPzOBq7FlPZSa6VkpMCm6Pvp0z3XYTBhYiKTiz3+7MihomHPd/xL7OpSpgdbTn2yKT2Z6inHY0E8ojCzy/TaIeSiFX8NblbsIhISIpaLCf00EhczQ5fy8VfWuPfY4QEXUfnmEhkoI+OofFY5rW4V/qhdDKjC0FD3wAxI73bFvMjcD7twMACob/BdPSfuPZh/optS1ndYiIOsDAQiQFMeOBwEigvgpXP3zQADVaniQka1k++A7PX8NyVXiwKNSA6gaATyEmIi/jkBCRFMgVQNpL7Sz8JUCkrfZ8WCEikigGFiKpiJ8O/P5tICDcvjwwvKU8frp32kVEJAFdCixr165FbGws/P39kZSUhNLS0g7rr1mzBnFxcdBoNIiOjsZTTz2FpqYmuzqurpOoV4qfDiz40r5sQSnDSi9ksVrwZfWX2Hl6J76s/pJP9iXqhMvXsGzZsgV6vR7r169HUlIS1qxZg9TUVJSXlyMsLKxN/b///e/
IysrCxo0bMX78eHz77beYM2cOZDIZ8vLyurROol7t2mEfDgP1OoVnCrG6dLXdE391Wh2yErM44RhRO1w+w5KXl4eMjAzMnTsX8fHxWL9+PbRaLTZu3Oiw/v79+zFhwgQ88MADiI2NxdSpUzFr1iy7MyiurpOIqKcqPFMIfbHeLqwAQK2hFvpiPQrPFHqpZUTS5tIZFqPRiEOHDmHp0qW2MrlcjpSUFJSUlDh8z/jx4/HOO++gtLQUiYmJOH36NHbu3ImHHnqoy+tsbm5Gc3Oz7XV9fT0AwGQywWQyubJLvUbrfvvq/jurR/STyQSl3UsTTN04L4vZbL6mORLuKwlw5ZiyWC14pfQVqKByuFwGGfJK83Bb+G29brbUHvG7JwG+1k+u7KdLgeXcuXOwWCzQ6eznjNDpdDhx4oTD9zzwwAM4d+4cbrvtNgghYDab8dhjj+HZZ5/t8jpzc3ORk5PTpnzPnj3Qan17Aqq9e/d6uwk9gpT7SWFpxm+uer179x6ou/GzyyiMdq+l3FdS4mw/PaZ6DO3kFZvdBbvd0CJp4vHkHF/pJ4PB+Ud0eHweluLiYqxatQpvvPEGkpKScPLkSSxcuBAvvPACli9f3qV1Ll26FHq93va6vr4e0dHRmDp1KgIDA93V9B7FZDJh7969mDJlimcn+erhekQ/GS8Dh6+8TE2dCq2q+6ZMajQ3YuV7K22vJd1XEuDKMbXn+z3I3p/d6TpzxudgauxUdzVREnrE754E+Fo/tY6QOMOlfwVDQkKgUChQU2M/9lpTU4Pw8HCH71m+fDkeeugh/OEPfwAAjBgxApcvX8ajjz6K5557rkvrVKvVUKvVbcqVSqVP/IA7wj5wjqT76ZoHDba0tfsCiwn2p2gl3VcS4kw/hQWEoRnNHdZprddb+5zHk3N8pZ9c2UeXLrpVqVQYM2YMioqKbGVWqxVFRUVITk52+B6DwQC53H4zCkXL+W0hRJfWSUTUEyWEJUCn1UEGx48ikEGGcG04EsISurllRNLn8l1Cer0eb775JjZv3ozjx49j/vz5uHz5MubOnQsASE9Pt7uAdtq0aVi3bh3y8/NRUVGBvXv3Yvny5Zg2bZotuHS2TiKi3kAhVyArMQsA2oSW1tdLEpf0ugtuidzB5fPMM2fOxNmzZ7FixQpUV1dj9OjRKCgosF00W1lZaXdGZdmyZZDJZFi2bBl++uknhIaGYtq0aXjxxRedXicRUW+REpOCvMl5DudhWZK4hPOwELWjSwPjmZmZyMzMdLisuLjYfgN+fsjOzkZ2dscXmnW0TiKi3iQlJgW3R9+OstoynDWcRag2FAlhCTyzQtQBPq2ZiMgLFHIFxoWP83YziHoMPvyQiIiIJI+BhYiIiCSPgYWIiIgkj4GFiIiIJI8X3RKRpFisFt49Q0RtMLAQkWQUnil0OD9JVmIW5ych8nEcEiIiSSg8Uwh9sd4urABAraEW+mI9Cs8UeqllRCQFDCxETrJYLfiy+kvsPL0TX1Z/CYvV4u0m9RoWqwWrS1dDQLRZ1lr2UulL7HMiH8YhISIncKjCs8pqy9qcWbmagEC1oRpltWWcbI3IR/EMC1EnOFTheWcNZ91aj4h6H55hIeqAM0MVuQdzcWv4rXYP/bwu5kZA9suTfGVGNJobAVn33SXTaG7stm21CtWGurUeEfU+DCxEHehsqAIAahtrkZyf7N4Nx0YDAPywCpPfX+XedUtQQlgCdFodag21DsOhDDLotDokhCV4oXVEJAUcEiLqgC8PQYwOHQ0llN2yLYVcgazELAAt4eRqra+XJC7hfCxEPoxnWIg64OwQxLo71yFB56a//k0G4OXBAIAxTevw+fLfQKPs/g9qP+GHXbt2ddv2UmJSkDc5z+HFzUsSl/DiZiIfx8BC1AFnhyqSI5Pd99e/EC1fACBU0PhpoFV2/6+qyWTq9m2mxKTg9ujbOdMtEbXBwELUgdahCn2xvs0yDlV4hkKu4K3LRNQGr2Eh6kTrUEWYJsyuXKfVIW9yHocqiIi6Ac+wEDkhJSYFt4bfarsbaN2d69w7DERERB3iGRYiJ109z0qCjtdVEBF1JwYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjw/bzeAyGlWC3BmP9BQA/TRATHjAbnC260iIqJuwMBCPcOx7UDBEqD+5ytlgZFA2ktA/HTvtYuIiLoFh4RI+o5tB95Ltw8rAFBf1VJ+bLt32kVERN2GZ1hI2qyWljMrEA4W/lK2azEwaLJzw0MmExSWZsB4GRBK19pibrxqPQZAOGqTGxgNnlkvEVEPxsBC0nZmf9szK9e6VAWsjnZqdUoAvwGAw11oi0wGxP6ynZcHey6wEBFRGxwSImlrqPF2C7zmS+tQNELt7WYQEUkCz7CQtPXROVfvgQ+A2PGdVjOZTNi9ew9SU6dCqezCkND7t7d8/8xJwE/j2vtdYDCa8bs//guAzGPbICLqSRhYSNpixrfcDVRfBcfXschalg++w7lrWGQmWBRqQHUD4GpgkV0VHpTali+PMYNhhYjoCg4JkbTJFS23Ljv0ywd62mrOx0JE1MsxsJD0xU8Hfv82EBBuXx4Y2VLOeViIiHo9DglRzxA/veXW5da7gR74wPlhICIi6vF4hoV6jqvDSSyn5Sci8iVdCixr165FbGws/P39kZSUhNLS0nbrTp48GTKZrM3XPffcY6szZ86cNsvT0tK60jQiIiLqhVweEtqyZQv0ej3Wr1+PpKQkrFmzBqmpqSgvL0dYWFib+lu3boXRaLS9Pn/+PEaNGoXf/e53dvXS0tKwadMm22u1mvNPEBERUQuXA0teXh4yMjIwd+5cAMD69euxY8cObNy4EVlZWW3q9+vXz+51fn4+tFptm8CiVqsRHn7NRZXtaG5uRnNzs+11fX09gJY5Nkwmk0v701u07nev3n+TCUrbtyZA5vq+Xk8/mc1mu+9N8Fxfm0zmq743wSTr/ll1feKYcgP2k3PYT87xtX5yZT9dCixGoxGHDh3C0qVLbWVyuRwpKSkoKSlxah0bNmzA/fffjxtuuMGuvLi4GGFhYejbty/uuOMO/PGPf0T//v0driM3Nxc5OTltyvfs2QOt1pNzY0jf3r17vd0Ej1FYmlum1Qewe/eelvlUuqgr/WQUV84U7t69GyqZqsvb70yzBWj99dy9ew/UXrxcpzcfU+7EfnIO+8k5vtJPBoPzz06TCeH8A1F+/vlnREVFYf/+/UhOTraVL168GPv27cPBgwc7fH9paSmSkpJw8OBBJCYm2spbz7oMHDgQp06dwrPPPos+ffqgpKQECkXbf6kdnWGJjo7GuXPnEBgY6Ozu9Comkwl79+7FlClTXJ/BtacwXoby5RgAgOmZMy2Tv7noevqp0dyICe9NAAB88fsvoPHwTLejXv
gEAPDv5XdAq+r+G/p84phyA/aTc9hPzvG1fqqvr0dISAjq6uo6/fzu1n8FN2zYgBEjRtiFFQC4//77bd+PGDECI0eOxI033oji4mLceeedbdajVqsdXuOiVCp94gfckV7dB1c9XVmpVLo+U+1VutJPVw8B+fn5ebSfleLKLLctbfXeDAS9+phyI/aTc9hPzvGVfnJlH126SygkJAQKhQI1NfYPpKupqen0+pPLly8jPz8f8+bN63Q7gwYNQkhICE6ePOlK84iIiKiXcimwqFQqjBkzBkVFRbYyq9WKoqIiuyEiR95//300NzfjwQcf7HQ7P/74I86fP4+IiAhXmkdERES9lMvzsOj1erz55pvYvHkzjh8/jvnz5+Py5cu2u4bS09PtLspttWHDBsyYMaPNhbQNDQ145plncODAAXz//fcoKirCvffei8GDByM1NbWLu0VERES9icsD4zNnzsTZs2exYsUKVFdXY/To0SgoKIBOpwMAVFZWQi63z0Hl5eX4/PPPsWfPnjbrUygUOHz4MDZv3oyLFy8iMjISU6dOxQsvvMC5WIiIiAhAFy+6zczMRGZmpsNlxcXFbcri4uLQ3s1IGo0Gu3fv7koziIiIyEfwWUJEREQkeQwsREREJHkMLERERCR5DCxEREQkeQwsREREJHnem++beh6rBTizH2ioAfrogJjxgNyLT+UjIiKfwcBCzjm2HShYAtT/fKUsMBJIewmIn+69dhERkU/gkBB1yvKfj/Dl9gzstFzEl/5qWFoX1FcB76W3hBkiIiIP4hkW6lDh93uw+uAy1ESEtRQIgRiTCfoLF3FHY1NL2a7FwKDJnh8eMho8u34iIpIsBhZqV+GZQuj3PQ3Reh5OCLxdVYNbmo32FS9VAauju719RETkOxhYyCGL1YLVpashAEAmAwD4C4E4owmGX157za/Gtfzf5PoZF7PZDKMwotHcCBNMLr230dzo8vaIiMg9GFjIobLaMtQYauzKmuRyJMVK4UxKNfDurde1hpXvrXRTW4iIqDvwolty6KzhrLebIFm3hN0CjZ/G280gIvIpPMNCDoVqQx2WF5/5ERoh8OsBUWiSy7GuuhYJd68Fht3dzS3sGrPZjN27dyM1NRV+fl07/DV+Gsi8PSxGRORjGFjIoYSwBOi0OtQaaiFarmQBAGiEgFYIyIVAuEUg+Td/heLmGd5rqItMMEElU0Hjp4FSqfR2c4iIyEkcEiKHFHIFshKz2q8gk2HJHXk9KqwQEVHPxcBC7UqJSUHe5DyEacLaLMud+BJSYqd6oVVEROSLGFioQykxKdh277Y25XcMuN0LrSEiIl/FwEKdkst5mBARkXfxk4iIiIgkj4GFiIiIJI+BhYiIiCSPgYWIiIgkj4GFiIiIJI+BhYiIiCSPgYWIiIgkj4GFiIiIJI+BhYiIiCSPgYWIiIgkj4GFiIiIJI+BhYiIiCSPgYWIiIgkz8/bDSDqCotVoLTiAmovNSEswB+JA/tBIZd5u1lEROQhDCzU4xQcrULOx8dQVddkK4sI8kf2tHikDY/wYsuIiMhTOCREPUrB0SrMf6fMLqwAQHVdE+a/U4aCo1VeahkREXkSz7BQj2GxCuR8fAzCwbLWsuzt/8GEwSHtDg+ZTGY0WwCD0QylkO4QksFo8XYTiIgkhYGFeozSigttzqxcq6a+GSOe39PJmvywuPQT9zWMiIg8jkNC1GPUXuo4rPRGY2P6QqNUeLsZRERexzMs1GOEBfg7VW/TnHFIGtTP4TKTyYTdu/cgNXUqlEqlO5vnERqlAjKZdIeuiIi6CwML9RiJA/shIsgf1XVNDq9jkQEID/LHpKGh7V/DIhNQKwCtyg9KJQ9/IqKegkNC1GMo5DJkT4t3uKw1nmRPi+d8LEREvRADC/UoacMjsO7BBOgC1Xbl4UH+WPdgAudhISLqpXhOnHqctOERmDA4xHY30KY54zocBiIiop6PZ1ioR7o6nCQN4rT8RES9HQMLERERSR4DCxEREUkeAwsRERFJHgMLERERSR4DCxEREUkeAwsRERFJXpcCy9q1axEbGwt/f38kJSWhtLS03bqTJ0+GTCZr83XPPffY6gghsGLFCkRERECj0SAlJQXfffddV5pGREREvZDLgWXLli3Q6/XIzs5GWVkZRo0ahdTUVNTW1jqsv3XrVlRVVdm+jh49CoVCgd/97ne2On/605/w2muvYf369Th48CBuuOEGpKamoqnJ957OS0RERG25HFjy8vKQkZGBuXPnIj4+HuvXr4dWq8XGjRsd1u/Xrx/Cw8NtX3v37oVWq7UFFiEE1qxZg2XLluHee+/FyJEj8fbbb+Pnn3/GRx99dF07R0RERL2DS1PzG41GHDp0CEuXLrWVyeVypKSkoKSkxKl1bNiwAffffz9uuOEGAEBFRQWqq6uRkpJiqxMUFISkpCSUlJTg/vvvb7OO5uZmNDc3217X19cDAEwmE0wmkyu71Gu07rcn9t9sNjvensx7fW0yma/63gSTzNHzmx29z3P91Nuwr5zDfnIO+8k5vtZPruynS4Hl3LlzsFgs0Ol0duU6nQ4nTpzo9P2lpaU4evQoNmzYYCurrq62rePadbYuu1Zubi5ycnLalO/ZswdarbbTdvRme/fudfs6jcLYpmz37j2wKNQOanePZgvQevju3r0HaoVr7/dEP/VW7CvnsJ+cw35yjq/0k8FgcLputz78cMOGDRgxYgQSExOvaz1Lly6FXq+3va6vr0d0dDSmTp2KwMDA621mj2QymbB3715MmTIFSqXSretuNDdi5Xsr7cpSU6cCqhvcuh1XGIxmLC79xNYWrcq5Q9mT/dTbsK+cw35yDvvJOb7WT60jJM5wKbCEhIRAoVCgpqbGrrympgbh4eEdvvfy5cvIz8/HypX2H3yt76upqUFERITdOkePHu1wXWq1Gmp127/ulUqlT/yAO+KJPjCh7Sk7pVIJeLGvleLKww5b9tm17M1jxXnsK+ewn5zDfnKOr/STK/vo0kW3KpUKY8aMQVFRka3MarWiqKgIycnJHb73/fffR3NzMx588EG78oEDByI8PNxunfX19Th48GCn6yQiIiLf4PKQkF6vx8MPP4yxY8ciMTERa9asweXLlzF37lwAQHp6OqKiopCbm2v3vg0bNmDGjBno37+/XblMJsOTTz6JP/7xjxgyZAgGDhyI5cuXIzIyEjNmzOj6nhEREVGv4XJgmTlzJs6ePYsVK1aguroao0ePRkFBge2i2crKSsjl9iduysvL8fnnn2PPnj0O17l48WJcvnwZjz76KC5evIjbbrsNBQUF8Pf378IuERERUW/TpYtuMzMzkZmZ6XBZcXFxm7K4uDgI0f5tpzKZDCtXrmxzfQsRERERwGcJERERUQ/AwEJERESSx8BCREREksfAQkRERJLHwEJERESSx8BCREREksfAQkRERJLHwEJERESS161Pa6be4+DpC6huuoiwAH8kDuwHhVzW+ZuIiIi6iIGFumTOW6VoRMujEyKC/JE9LR5pwyM6eRcREVHXcEiIrlt1XRPmv
1OGgqNV3m4KERH1UjzDQtet9SlR2dv/gwmDQ7pleMhgtHh8G0REJB0MLOQ2NfXNGPG84ydyExERXQ8OCVGPNjamLzRKhbebQUREHsYzLORWm+aMQ9Kgft22PY1SAZmMdygREfV2DCzkFjIA4UH+mDQ0lLc4ExGR23FIiK5bazzJnhbPsEJERB7BMyx03cI5DwsREXkYAwtdl01zxnEYiIiIPI5DQnRdkgZxWn4iIvI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8BhYiIiKSPAYWIiIikjwGFiIiIpI8P283oEewWoAz+4GGGqCPDogZD8gV3m4VERGRz2Bg6cyx7UDBEqD+5ytlgZFA2ktA/HTvtYuIiMiHcEioI8e2A++l24cVAKivaik/tt077SIiIvIxPMPSHqul5cwKhIOFv5TtWgwMmiyN4SGTCQpLM2C8DAile9dtbnTv+oiIiFzUpcCydu1avPzyy6iursaoUaPw+uuvIzExsd36Fy9exHPPPYetW7fiwoULiImJwZo1a3D33XcDAJ5//nnk5OTYvScuLg4nTpzoSvPc48z+tmdWrnWpClgd3T3t6YQSwG8A4LAHVi6TAbHS2E8iIvJNLgeWLVu2QK/XY/369UhKSsKaNWuQmpqK8vJyhIWFtalvNBoxZcoUhIWF4YMPPkBUVBTOnDmD4OBgu3o333wzCgsLrzTMz8snfxpqvLt9CbP8KgmNJ9XebgYREfkQl1NBXl4eMjIyMHfuXADA+vXrsWPHDmzcuBFZWVlt6m/cuBEXLlzA/v37oVS2DFXExsa2bYifH8LDw11tjuf00TlX74EPgNjxnm2LE0wmE3bv3oPU1Km2fnYbcyPw/u0t3z9zEs2KICB7j3u3QURE1AGXAovRaMShQ4ewdOlSW5lcLkdKSgpKSkocvmf79u1ITk7GggULsG3bNoSGhuKBBx7AkiVLoFBcufbju+++Q2RkJPz9/ZGcnIzc3FwMGDDA4Tqbm5vR3Nxse11fXw+g5UPbZDK5skvtixwHBA0ELlXD8XUsMiAgAoiZCMi8fw2LSSaDRaGGSaYCZO4NLGaZ5arvVRBm85XtmkwwyRz1jzS1Hh9uO056MfaVc9hPzmE/OcfX+smV/XQpsJw7dw4WiwU6nf3ZB51O1+71JqdPn8Ynn3yC2bNnY+fOnTh58iQef/xxmEwmZGdnAwCSkpLw1ltvIS4uDlVVVcjJycHEiRNx9OhRBAQEtFlnbm5um2teAGDPnj3QarWu7FLHBrXdRhsFu923PTfYu3ev29dpFEbb97t374awqtB66OzevQdq7+c1l3min3or9pVz2E/OYT85x1f6yWAwOF1XJoRw+s/jn3/+GVFRUdi/fz+Sk5Nt5YsXL8a+fftw8ODBNu8ZOnQompqaUFFRYTujkpeXh5dffhlVVVUOt3Px4kXExMQgLy8P8+bNa7Pc0RmW6OhonDt3DoGBgc7ujnNO7AQKs1susG0VEAmkPA8Mu9u927oOJpMJe/fuxZQpU9w+JNRobsSE9yYAAL74/RcQViVGvfAJAODfy++AVtVzbjbzZD/1Nuwr57CfnMN+co6v9VN9fT1CQkJQV1fX6ee3S580ISEhUCgUqKmxvyC1pqam3etPIiIioFQq7YZ/brrpJlRXV8NoNEKlUrV5T3BwMIYOHYqTJ086XKdarYZa3faiT6VS6f4f8Ih7gZt/02NmuvVEH5hw5ZSdn5+f3W3TLdvrOYGllUeOlV6KfeUc9pNz2E/O8ZV+cmUfXZo4TqVSYcyYMSgqKrKVWa1WFBUV2Z1xudqECRNw8uRJWK1WW9m3336LiIgIh2EFABoaGnDq1ClERES40jzPkSuAgROBEf+v5f8SDStERES9lcsz3er1erz55pvYvHkzjh8/jvnz5+Py5cu2u4bS09PtLsqdP38+Lly4gIULF+Lbb7/Fjh07sGrVKixYsMBWZ9GiRdi3bx++//577N+/H/fddx8UCgVmzZrlhl0kIiKins7lc/kzZ87E2bNnsWLFClRXV2P06NEoKCiwXYhbWVkJufxKDoqOjsbu3bvx1FNPYeTIkYiKisLChQuxZMkSW50ff/wRs2bNwvnz5xEaGorbbrsNBw4cQGhoqBt2kYiIiHq6Ll18kJmZiczMTIfLiouL25QlJyfjwIED7a4vPz+/K80gIiIiH8GHHxIREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5DGwEBERkeQxsBAREZHkMbAQERGR5Pl5uwFSZrFaUFZbhrOGswjVhmJUyC04dKYOtZeaEBbgj8SB/aCQy7zdTCIiol6PgaUdhWcKsbp0NWoMNbYymSUYhqrfwHxpOAAgIsgf2dPikTY8wlvNJCIi8gkcEnKg8Ewh9MV6u7ACAFb5RfhHvQO/gKMAgOq6Jsx/pwwFR6u80UwiIiKfwTMs17BYLVhduhoCwuFyYfWDKnQXzA2DIX7Jeys+LkNC7ESvDg+ZTCZcMhtx3nAJSqXSretuNDde+d5kgbBa3Lp+IiKizjCwXKOstqzNmZVWMhkgU5gBxXkEDHveVt4I4M5/dE/7OvPSRys9uv4xLxQCQuXRbRAREV2LQ0LXOGs46+0mSJbZEAOIK2dvxsb0hUap8GKLiIjIV/AMyzVCtaHtLhMCgGjpMsMPD8HaONC2bP2DCRg3sJ+nm9cuk8mEwsIipKTc6fYhoVb+Cn/IZFeGvTRKhd1rIiIiT2FguUZCWAJ0Wh1qDbVtrmORyQABM4Q5CFbDEAByyACEB/ljyk0DvH4NS4CfCv21AR4LLERERN7CIaFrKOQKZCVmAQBksA8g4pf80lwzDa1hBQCyp8VzPhYiIiIPYmBxICUmBXmT8xCmDbMrl1uC0fTTg7Z5WMKD/LHuwQTOw0JERORhHBJqR0pMCm6Pvp0z3RIREUkAA0sHFHIFxoWPsytLvrG/l1pDRETkuzgkRERERJLHwEJERESSx8BCREREksfAQkRERJLXpcCydu1axMbGwt/fH0lJSSgtLe2w/sWLF7FgwQJERERArVZj6NCh2Llz53Wtk4iIiHyHy4Fly5Yt0Ov1yM7ORllZGUaNGoXU1FTU1tY6rG80
GjFlyhR8//33+OCDD1BeXo4333wTUVFRXV4nERER+RaXb2vOy8tDRkYG5s6dCwBYv349duzYgY0bNyIrK6tN/Y0bN+LChQvYv3+/bcr42NjY61pnc3Mzmpubba/r6uoAABcuXIDJZHJ1l3oFk8kEg8GA8+fPc2r+DrCfnMe+cg77yTnsJ+f4Wj9dunQJACCE6KRmSyWnNTc3C4VCIT788EO78vT0dDF9+nSH77nrrrvE7NmzRUZGhggLCxM333yzePHFF4XZbO7yOrOzswUAfvGLX/ziF7/41Qu+fvjhh04ziEtnWM6dOweLxQKdTmdXrtPpcOLECYfvOX36ND755BPMnj0bO3fuxMmTJ/H444/DZDIhOzu7S+tcunQp9Hq97bXVasWFCxfQv39/n316cH19PaKjo/HDDz8gMDDQ282RLPaT89hXzmE/OYf95Bxf6ychBC5duoTIyMhO63p8plur1YqwsDD893//NxQKBcaMGYOffvoJL7/8MrKzs7u0TrVaDbVabVcWHBzshtb2fIGBgT5xkF8v9pPz2FfOYT85h/3kHF/qp6CgIKfquRRYQkJCoFAoUFNTY1deU1OD8PBwh++JiIiAUqmEQqGwld10002orq6G0Wjs0jqJiIjIt7h0l5BKpcKYMWNQVFRkK7NarSgqKkJycrLD90yYMAEnT56E1Wq1lX377beIiIiASqXq0jqJiIjIt7h8W7Ner8ebb76JzZs34/jx45g/fz4uX75su8MnPT0dS5cutdWfP38+Lly4gIULF+Lbb7/Fjh07sGrVKixYsMDpdVLn1Go1srOz2wyVkT32k/PYV85hPzmH/eQc9lP7ZEI4cy+Rvb/85S94+eWXUV1djdGjR+O1115DUlISAGDy5MmIjY3FW2+9ZatfUlKCp556Ct988w2ioqIwb948LFmyxG6YqKN1EhERkW/rUmAhIiIi6k58lhARERFJHgMLERERSR4DCxEREUkeAwsRERFJHgOLhD3//POQyWR2X8OGDbMtb2pqwoIFC9C/f3/06dMHv/3tb9tMwFdZWYl77rkHWq0WYWFheOaZZ2A2m7t7V9zqs88+w7Rp0xAZGQmZTIaPPvrIbrkQAitWrEBERAQ0Gg1SUlLw3Xff2dW5cOECZs+ejcDAQAQHB2PevHloaGiwq3P48GFMnDgR/v7+iI6Oxp/+9CdP75rbddZXc+bMaXOMpaWl2dXp7X2Vm5uLcePGISAgAGFhYZgxYwbKy8vt6rjrd624uBgJCQlQq9UYPHiw3d2UPYEzfTV58uQ2x9Rjjz1mV6e399W6deswcuRI22y1ycnJ2LVrl205j6cu6vRpQ+Q12dnZ4uabbxZVVVW2r7Nnz9qWP/bYYyI6OloUFRWJr776Stx6661i/PjxtuVms1kMHz5cpKSkiK+//lrs3LlThISEiKVLl3pjd9xm586d4rnnnhNbt24VANo8OHP16tUiKChIfPTRR+Lf//63mD59uhg4cKBobGy01UlLSxOjRo0SBw4cEP/617/E4MGDxaxZs2zL6+rqhE6nE7NnzxZHjx4V7777rtBoNOKvf/1rd+2mW3TWVw8//LBIS0uzO8YuXLhgV6e391VqaqrYtGmTOHr0qPjmm2/E3XffLQYMGCAaGhpsddzxu3b69Gmh1WqFXq8Xx44dE6+//rpQKBSioKCgW/f3ejjTV7/+9a9FRkaG3TFVV1dnW+4LfbV9+3axY8cO8e2334ry8nLx7LPPCqVSKY4ePSqE4PHUVQwsEpadnS1GjRrlcNnFixeFUqkU77//vq3s+PHjAoAoKSkRQrR8WMnlclFdXW2rs27dOhEYGCiam5s92vbucu2HsNVqFeHh4eLll1+2lV28eFGo1Wrx7rvvCiGEOHbsmAAgvvzyS1udXbt2CZlMJn766SchhBBvvPGG6Nu3r10/LVmyRMTFxXl4jzynvcBy7733tvseX+yr2tpaAUDs27dPCOG+37XFixeLm2++2W5bM2fOFKmpqZ7eJY+5tq+EaAksCxcubPc9vtpXffv2FX/72994PF0HDglJ3HfffYfIyEgMGjQIs2fPRmVlJQDg0KFDMJlMSElJsdUdNmwYBgwYgJKSEgAtE/aNGDHC7knYqampqK+vx3/+85/u3ZFuUlFRgerqart+CQoKQlJSkl2/BAcHY+zYsbY6KSkpkMvlOHjwoK3OpEmToFKpbHVSU1NRXl6O//u//+umvekexcXFCAsLQ1xcHObPn4/z58/blvliX9XV1QEA+vXrB8B9v2slJSV262it07qOnujavmr1v//7vwgJCcHw4cOxdOlSGAwG2zJf6yuLxYL8/HxcvnwZycnJPJ6ug8ef1kxdl5SUhLfeegtxcXGoqqpCTk4OJk6ciKNHj6K6uhoqlarNU6p1Oh2qq6sBANXV1XYHfOvy1mW9Uet+Odrvq/slLCzMbrmfnx/69etnV2fgwIFt1tG6rG/fvh5pf3dLS0vDf/3Xf2HgwIE4deoUnn32Wdx1110oKSmBQqHwub6yWq148sknMWHCBAwfPhwA3Pa71l6d+vp6NDY2QqPReGKXPMZRXwHAAw88gJiYGERGRuLw4cNYsmQJysvLsXXrVgC+01dHjhxBcnIympqa0KdPH3z44YeIj4/HN998w+OpixhYJOyuu+6yfT9y5EgkJSUhJiYG7733Xq88GKn73X///bbvR4wYgZEjR+LGG29EcXEx7rzzTi+2zDsWLFiAo0eP4vPPP/d2UySvvb569NFHbd+PGDECERERuPPOO3Hq1CnceOON3d1Mr4mLi8M333yDuro6fPDBB3j44Yexb98+bzerR+OQUA8SHByMoUOH4uTJkwgPD4fRaMTFixft6tTU1CA8PBwAEB4e3ubK89bXrXV6m9b9crTfV/dLbW2t3XKz2YwLFy74dN8BwKBBgxASEoKTJ08C8K2+yszMxD//+U98+umn+NWvfmUrd9fvWnt1AgMDe9wfIO31lSOtz4S7+pjyhb5SqVQYPHgwxowZg9zcXIwaNQp//vOfeTxdBwaWHqShoQGnTp1CREQExowZA6VSiaKiItvy8vJyVFZWIjk5GQCQnJyMI0eO2H3g7N27F4GBgYiPj+/29neHgQMHIjw83K5f6uvrcfDgQbt+uXjxIg4dOmSr88knn8Bqtdr+cU1OTsZnn30Gk8lkq7N3717ExcX1qCEOV/344484f/48IiIiAPhGXwkhkJmZiQ8//BCffPJJm+Etd/2uJScn262jtU7rOnqCzvrKkW+++QYA7I4pX+ira1mtVjQ3N/N4uh7evuqX2vf000+L4uJiUVFRIb744guRkpIiQkJCRG1trRCi5da4AQMGiE8++UR89dVXIjk5WSQnJ9ve33pr3NSpU8U333wjCgoKRGhoaI+/rfnSpUvi66+/Fl9//bUAIPLy8sTXX38tzpw5I4Roua05ODhYbNu2TRw+fFjce++9Dm9rvuWWW8TBgwfF559/LoYMGWJ3q+7FixeFTqcTDz30kDh69KjIz88XWq22x9yq26qjvrp06ZJYtGiRKCk
pERUVFaKwsFAkJCSIIUOGiKamJts6entfzZ8/XwQFBYni4mK7W3ENBoOtjjt+11pvQ33mmWfE8ePHxdq1a3vcbaid9dXJkyfFypUrxVdffSUqKirEtm3bxKBBg8SkSZNs6/CFvsrKyhL79u0TFRUV4vDhwyIrK0vIZDKxZ88eIQSPp65iYJGwmTNnioiICKFSqURUVJSYOXOmOHnypG15Y2OjePzxx0Xfvn2FVqsV9913n6iqqrJbx/fffy/uuusuodFoREhIiHj66aeFyWTq7l1xq08//VQAaPP18MMPCyFabm1evny50Ol0Qq1WizvvvFOUl5fbreP8+fNi1qxZok+fPiIwMFDMnTtXXLp0ya7Ov//9b3HbbbcJtVotoqKixOrVq7trF92mo74yGAxi6tSpIjQ0VCiVShETEyMyMjLsbqUUovf3laP+ASA2bdpkq+Ou37VPP/1UjB49WqhUKjFo0CC7bfQEnfVVZWWlmDRpkujXr59Qq9Vi8ODB4plnnrGbh0WI3t9XjzzyiIiJiREqlUqEhoaKO++80xZWhODx1FUyIYTovvM5RERERK7jNSxEREQkeQwsREREJHkMLERERCR5DCxEREQkeQwsREREJHkMLERERCR5DCxEREQkeQwsREREJHkMLERERCR5DCxEREQkeQwsREREJHn/Py7E5CYYIGodAAAAAElFTkSuQmCC",
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from flaml.automl.data import get_output_from_log\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"axs = []\n",
"for each_file_name in ['bert', 'roberta', 'ms']:\n",
" time_history, best_valid_loss_history, valid_loss_history, config_history, metric_history = \\\n",
" get_output_from_log(filename='spooky_' + each_file_name + '.log', time_budget=4000)\n",
" print(len(valid_loss_history))\n",
" axs.append(plt.scatter(time_history, 1 - np.array(valid_loss_history)))\n",
" plt.step(time_history, 1 - np.array(best_valid_loss_history), where='post')\n",
"\n",
"plt.legend(handles=axs, labels=['bert', 'roberta', 'ms'])\n",
"plt.ylim([0.6, 0.9])\n",
"plt.grid()\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "lT7IwNCoTjhJ"
},
"source": [
"## 4. Other Tasks"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Fzkr77iATjhJ"
},
"source": [
"Besides sequence classification, FLAML currently also supports four other tasks (more tasks are to be supported, which can be found on [FLAML's documentation website](https://microsoft.github.io/FLAML/docs/Examples/AutoML-NLP)):\n",
"\n",
"- sequence regression: predicting a float number from the input sequence, e.g., predicting the rating of a hotel review based on the text content;\n",
"- token classification: predicting the label of each token in a sequence, e.g., named entity recognition;\n",
"- multiple choice: predicting the best second half of a sentence that comes next to the first part of a sentence based on common sensen reasoning. An example is seen below;\n",
"- (abstractive) summarization: generating the textual summarization of an input paragraph;\n",
"\n",
"Here we look into two tasks: multiple choice classification and text summarization. These tasks require significant computational resources, therefore instead of Colab, we run them using 4 NVIDIA V100 GPUs and Ray Tune on our server."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Y4VgUR5TTjhJ"
},
"source": [
"### 4.1 Multiple Choice Example"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "OO8GqaH3TjhJ"
},
"source": [
"Multiple choice is a task of predicting the best second half of a sentence that follows the first half based on common sense reasoning. An example of multiple-choice classification problem is:\n",
"\n",
"On stage, a woman takes a seat at the piano. She\n",
"a) sits on a bench as her sister plays with the doll.\n",
"b) smiles with someone as the music plays.\n",
"c) is in the crowd, watching the dancers.\n",
"d) *nervously sets her fingers on the keys*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "hQ5fX0N3TjhJ",
"outputId": "e17bd3ce-9d38-42cf-f3ea-30a0095a34b5"
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "178b92c7a57342ee89b3712e27b80caf",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading builder script: 0%| | 0.00/7.97k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "bc33019034e545a4ac0e2185aaee2ed5",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading metadata: 0%| | 0.00/7.10k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "f9830cd830784138a9b645fc12f32d96",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading readme: 0%| | 0.00/8.88k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:datasets.builder:No config specified, defaulting to: swag/regular\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Downloading and preparing dataset swag/regular to /root/.cache/huggingface/datasets/swag/regular/0.0.0/9640de08cdba6a1469ed3834fcab4b8ad8e38caf5d1ba5e7436d8b1fd067ad4c...\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "9e3222f707f7410d82bcc97c5a99bff8",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading data files: 0%| | 0/3 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "5ce0aad2cdf14056a8f39bd3adb188fc",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading data: 0%| | 0.00/6.71M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "75cf1bbba93a46dc850d445e8251a7a6",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading data: 0%| | 0.00/2.24M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "db30a5e75c244a87a0a9bcd54e1a067e",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading data: 0%| | 0.00/2.21M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "7d3dca0965ba40d58d4ba69542f9e263",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Extracting data files: 0%| | 0/3 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "2510d30013314da4aba64c80554f4e2c",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Generating train split: 0%| | 0/73546 [00:00<?, ? examples/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "98960faf7e4f4f1eb682fd243193085e",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Generating validation split: 0%| | 0/20006 [00:00<?, ? examples/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "736e659ba14d44fe90a7a120b3d017be",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Generating test split: 0%| | 0/20005 [00:00<?, ? examples/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Dataset swag downloaded and prepared to /root/.cache/huggingface/datasets/swag/regular/0.0.0/9640de08cdba6a1469ed3834fcab4b8ad8e38caf5d1ba5e7436d8b1fd067ad4c. Subsequent calls will reuse this data.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:datasets.builder:No config specified, defaulting to: swag/regular\n",
"WARNING:datasets.builder:Found cached dataset swag (/root/.cache/huggingface/datasets/swag/regular/0.0.0/9640de08cdba6a1469ed3834fcab4b8ad8e38caf5d1ba5e7436d8b1fd067ad4c)\n",
"WARNING:datasets.builder:No config specified, defaulting to: swag/regular\n",
"WARNING:datasets.builder:Found cached dataset swag (/root/.cache/huggingface/datasets/swag/regular/0.0.0/9640de08cdba6a1469ed3834fcab4b8ad8e38caf5d1ba5e7436d8b1fd067ad4c)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"2000\n",
"1000\n"
]
}
],
"source": [
"from datasets import load_dataset\n",
"\n",
"train_dataset = load_dataset(\"swag\", split=\"train\").to_pandas()[:2000]\n",
"dev_dataset = load_dataset(\"swag\", split=\"validation\").to_pandas()[:1000]\n",
"test_dataset = load_dataset(\"swag\", split=\"test\").to_pandas()\n",
"\n",
"custom_sent_keys = [\n",
" \"sent1\",\n",
" \"sent2\",\n",
" \"ending0\",\n",
" \"ending1\",\n",
" \"ending2\",\n",
" \"ending3\",\n",
" \"gold-source\",\n",
" \"video-id\",\n",
" \"startphrase\",\n",
" \"fold-ind\",\n",
" ] # specify the column names of the input sentences\n",
"label_key = \"label\" # specify the column name of the label\n",
"\n",
"X_train, y_train = train_dataset[custom_sent_keys], train_dataset[label_key]\n",
"X_val, y_val = dev_dataset[custom_sent_keys], dev_dataset[label_key]\n",
"X_test = test_dataset[custom_sent_keys]\n",
"\n",
"print(len(X_train))\n",
"print(len(X_val))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "19m2ZpRGTjhJ",
"outputId": "11b26237-dbda-4abd-f371-eb785dfd8bc3"
},
"outputs": [
{
"data": {
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "string"
},
"text/plain": [
"'Members of the procession walk down the street holding small horn brass instruments.'"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_dataset.iloc[0][\"sent1\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "uvNeyzFsTjhJ",
"outputId": "842317a3-fe07-47f4-ccfa-80b3015ff0e0"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 15:20:37] {1768} INFO - task = multichoice-classification\n",
"[flaml.automl.logger: 04-12 15:20:37] {1775} INFO - Data split method: stratified\n",
"[flaml.automl.logger: 04-12 15:20:37] {1778} INFO - Evaluation method: holdout\n",
"[flaml.automl.logger: 04-12 15:20:37] {1891} INFO - Minimizing error metric: 1-accuracy\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/flaml/automl/data.py:297: SettingWithCopyWarning: \n",
"A value is trying to be set on a copy of a slice from a DataFrame.\n",
"Try using .loc[row_indexer,col_indexer] = value instead\n",
"\n",
"See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
" X[str_columns] = X[str_columns].astype(\"string\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 15:20:37] {2011} INFO - List of ML learners in AutoML Run: ['transformer']\n",
"[flaml.automl.logger: 04-12 15:20:37] {2341} INFO - iteration 0, current learner transformer\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "b9b531a3023c47c2a95d89cc66cce75b",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)okenizer_config.json: 0%| | 0.00/28.0 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "ad661687b4974715a282f1b971e33d9f",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)lve/main/config.json: 0%| | 0.00/570 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "0246c0387fd447a28f0c78ee9c9b5105",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)solve/main/vocab.txt: 0%| | 0.00/232k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e3ef0fcdae434cee8482bdcc44705d8e",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)/main/tokenizer.json: 0%| | 0.00/466k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "2d2ef6376ac140ed997ccb03cedfb067",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading pytorch_model.bin: 0%| | 0.00/440M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 1.0387, 'learning_rate': 6.666666666666666e-06, 'epoch': 1.0}\n",
"{'loss': 0.5781, 'learning_rate': 3.333333333333333e-06, 'epoch': 2.0}\n",
"{'eval_loss': 0.8650197982788086, 'eval_automl_metric': 0.29300000000000004, 'eval_runtime': 14.2894, 'eval_samples_per_second': 69.982, 'eval_steps_per_second': 69.982, 'epoch': 2.0}\n",
"{'loss': 0.363, 'learning_rate': 0.0, 'epoch': 3.0}\n",
"{'eval_loss': 0.9072939157485962, 'eval_automl_metric': 0.29100000000000004, 'eval_runtime': 13.6284, 'eval_samples_per_second': 73.376, 'eval_steps_per_second': 73.376, 'epoch': 3.0}\n",
"{'train_runtime': 270.546, 'train_samples_per_second': 22.177, 'train_steps_per_second': 5.544, 'train_loss': 0.659941151936849, 'epoch': 3.0}\n",
"[flaml.automl.logger: 04-12 15:25:50] {2479} INFO - Estimated sufficient time budget=3129882s. Estimated necessary time budget=3130s.\n",
"[flaml.automl.logger: 04-12 15:25:50] {2526} INFO - at 313.1s,\testimator transformer's best error=0.2910,\tbest estimator transformer's best error=0.2910\n",
"[flaml.automl.logger: 04-12 15:25:50] {2341} INFO - iteration 1, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 1.0538, 'learning_rate': 6.474576669243438e-06, 'epoch': 1.0}\n",
"{'loss': 0.6091, 'learning_rate': 3.237288334621719e-06, 'epoch': 2.0}\n",
"{'eval_loss': 0.859893798828125, 'eval_automl_metric': 0.30200000000000005, 'eval_runtime': 13.9904, 'eval_samples_per_second': 71.477, 'eval_steps_per_second': 71.477, 'epoch': 2.0}\n",
"{'loss': 0.3889, 'learning_rate': 0.0, 'epoch': 3.0}\n",
"{'eval_loss': 0.8932241201400757, 'eval_automl_metric': 0.30800000000000005, 'eval_runtime': 13.5564, 'eval_samples_per_second': 73.766, 'eval_steps_per_second': 73.766, 'epoch': 3.0}\n",
"{'train_runtime': 268.5443, 'train_samples_per_second': 22.343, 'train_steps_per_second': 5.586, 'train_loss': 0.6839515177408854, 'epoch': 3.0}\n",
"[flaml.automl.logger: 04-12 15:30:44] {2526} INFO - at 607.4s,\testimator transformer's best error=0.2910,\tbest estimator transformer's best error=0.2910\n",
"[flaml.automl.logger: 04-12 15:30:44] {2341} INFO - iteration 2, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 1.0315, 'learning_rate': 6.864455657088979e-06, 'epoch': 1.0}\n",
"{'loss': 0.5715, 'learning_rate': 3.4322278285444894e-06, 'epoch': 2.0}\n",
"{'eval_loss': 0.786681056022644, 'eval_automl_metric': 0.28500000000000003, 'eval_runtime': 14.1835, 'eval_samples_per_second': 70.505, 'eval_steps_per_second': 70.505, 'epoch': 2.0}\n",
"{'loss': 0.3374, 'learning_rate': 0.0, 'epoch': 3.0}\n",
"{'eval_loss': 0.8425467610359192, 'eval_automl_metric': 0.28700000000000003, 'eval_runtime': 13.4773, 'eval_samples_per_second': 74.199, 'eval_steps_per_second': 74.199, 'epoch': 3.0}\n",
"{'train_runtime': 268.049, 'train_samples_per_second': 22.384, 'train_steps_per_second': 5.596, 'train_loss': 0.6467840881347656, 'epoch': 3.0}\n",
"[flaml.automl.logger: 04-12 15:35:38] {2526} INFO - at 901.2s,\testimator transformer's best error=0.2850,\tbest estimator transformer's best error=0.2850\n",
"[flaml.automl.logger: 04-12 15:35:38] {2341} INFO - iteration 3, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 0.9884, 'learning_rate': 7.3999972918443325e-06, 'epoch': 1.0}\n",
"{'eval_loss': 0.7787197828292847, 'eval_automl_metric': 0.29300000000000004, 'eval_runtime': 13.7995, 'eval_samples_per_second': 72.466, 'eval_steps_per_second': 72.466, 'epoch': 1.0}\n",
"{'loss': 0.4754, 'learning_rate': 0.0, 'epoch': 2.0}\n",
"{'eval_loss': 0.8423631191253662, 'eval_automl_metric': 0.28700000000000003, 'eval_runtime': 13.2491, 'eval_samples_per_second': 75.477, 'eval_steps_per_second': 75.477, 'epoch': 2.0}\n",
"{'train_runtime': 190.5498, 'train_samples_per_second': 20.992, 'train_steps_per_second': 5.248, 'train_loss': 0.7318977355957031, 'epoch': 2.0}\n",
"[flaml.automl.logger: 04-12 15:39:15] {2526} INFO - at 1117.8s,\testimator transformer's best error=0.2850,\tbest estimator transformer's best error=0.2850\n",
"[flaml.automl.logger: 04-12 15:39:15] {2341} INFO - iteration 4, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 1.0922, 'learning_rate': 5.372722783974802e-06, 'epoch': 1.0}\n",
"{'loss': 0.66, 'learning_rate': 3.5818151893165346e-06, 'epoch': 2.0}\n",
"{'loss': 0.4378, 'learning_rate': 1.7909075946582673e-06, 'epoch': 3.0}\n",
"{'eval_loss': 0.8736429214477539, 'eval_automl_metric': 0.29300000000000004, 'eval_runtime': 14.0322, 'eval_samples_per_second': 71.265, 'eval_steps_per_second': 71.265, 'epoch': 3.0}\n",
"{'loss': 0.3225, 'learning_rate': 0.0, 'epoch': 4.0}\n",
"{'eval_loss': 0.9011046290397644, 'eval_automl_metric': 0.29200000000000004, 'eval_runtime': 13.052, 'eval_samples_per_second': 76.616, 'eval_steps_per_second': 76.616, 'epoch': 4.0}\n",
"{'train_runtime': 344.1498, 'train_samples_per_second': 23.246, 'train_steps_per_second': 5.811, 'train_loss': 0.6281196594238281, 'epoch': 4.0}\n",
"[flaml.automl.logger: 04-12 15:45:25] {2526} INFO - at 1487.7s,\testimator transformer's best error=0.2850,\tbest estimator transformer's best error=0.2850\n",
"[flaml.automl.logger: 04-12 15:45:25] {2341} INFO - iteration 5, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'loss': 1.1162, 'learning_rate': 1.192849707902046e-05, 'epoch': 1.0}\n",
"{'loss': 0.5407, 'learning_rate': 5.96424853951023e-06, 'epoch': 2.0}\n",
"{'eval_loss': 1.1717097759246826, 'eval_automl_metric': 0.375, 'eval_runtime': 13.9408, 'eval_samples_per_second': 71.732, 'eval_steps_per_second': 71.732, 'epoch': 2.0}\n",
"{'loss': 0.2207, 'learning_rate': 0.0, 'epoch': 3.0}\n",
"{'eval_loss': 1.4500138759613037, 'eval_automl_metric': 0.366, 'eval_runtime': 13.1329, 'eval_samples_per_second': 76.145, 'eval_steps_per_second': 76.145, 'epoch': 3.0}\n",
"{'train_runtime': 267.6649, 'train_samples_per_second': 22.416, 'train_steps_per_second': 5.604, 'train_loss': 0.6258482004801432, 'epoch': 3.0}\n",
"[flaml.automl.logger: 04-12 15:50:19] {2526} INFO - at 1781.7s,\testimator transformer's best error=0.2850,\tbest estimator transformer's best error=0.2850\n",
"[flaml.automl.logger: 04-12 15:50:19] {2642} INFO - selected model: None\n",
"[flaml.automl.logger: 04-12 15:50:19] {2041} INFO - fit succeeded\n",
"[flaml.automl.logger: 04-12 15:50:19] {2042} INFO - Time taken to find the best model: 901.1799373626709\n"
]
}
],
"source": [
"''' import AutoML class from flaml package '''\n",
"from flaml import AutoML\n",
"automl = AutoML()\n",
"\n",
"automl_settings = {\n",
" \"time_budget\": 1800, # setting the time budget\n",
" \"task\": \"multichoice-classification\", # setting the task as multiplechoice-classification\n",
" \"fit_kwargs_by_estimator\": { # if model_path is not set, the default model is facebook/muppet-roberta-base: https://huggingface.co/facebook/muppet-roberta-base\n",
" \"transformer\": {\n",
" \"output_dir\": \"data/output/\", # setting the output directory\n",
" \"model_path\": \"bert-base-uncased\", # the batch size for validation (inference)\n",
" }\n",
" },\n",
" \"gpu_per_trial\": 1, # set to 0 if no GPU is available\n",
" \"log_file_name\": \"seqclass.log\", # set the file to save the log for HPO\n",
" \"log_type\": \"all\", # the log type for trials: \"all\" if logging all the trials, \"better\" if only keeping the better trials\n",
" \"use_ray\": False, # set whether to use Ray\n",
" \"n_concurrent_trials\": 1,\n",
" \"fp16\": False\n",
"}\n",
"\n",
"from flaml import tune\n",
"custom_hp = {\n",
" \"transformer\": {\n",
" \"per_device_train_batch_size\": {\n",
" \"domain\": tune.choice([1, 2, 4]),\n",
" \"init_value\": 4,\n",
" \"low_cost_init_value\": 4,\n",
" },\n",
" }\n",
"}\n",
"\n",
"'''The main flaml automl API'''\n",
"automl.fit(X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, custom_hp=custom_hp, **automl_settings)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "kh7ZJsIKTjhJ",
"outputId": "6de4231b-ff23-4ffa-b2ce-6b550b346129"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'Current Learner': 'transformer', 'Current Sample': 2000, 'Current Hyper-parameters': {'learning_rate': 9.999999999999999e-06, 'num_train_epochs': 3, 'per_device_train_batch_size': 4, 'seed': 20, 'global_max_steps': 1500}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 9.999999999999999e-06, 'num_train_epochs': 3, 'per_device_train_batch_size': 4, 'seed': 20, 'global_max_steps': 1500}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 2000, 'Current Hyper-parameters': {'learning_rate': 9.711865003865157e-06, 'num_train_epochs': 3, 'per_device_train_batch_size': 4, 'seed': 14, 'global_max_steps': 1000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 9.999999999999999e-06, 'num_train_epochs': 3, 'per_device_train_batch_size': 4, 'seed': 20, 'global_max_steps': 1500}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 2000, 'Current Hyper-parameters': {'learning_rate': 1.0296683485633468e-05, 'num_train_epochs': 3, 'per_device_train_batch_size': 4, 'seed': 26, 'global_max_steps': 1000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 1.0296683485633468e-05, 'num_train_epochs': 3, 'per_device_train_batch_size': 4, 'seed': 26, 'global_max_steps': 1000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 2000, 'Current Hyper-parameters': {'learning_rate': 1.4799994583688665e-05, 'num_train_epochs': 2, 'per_device_train_batch_size': 4, 'seed': 25, 'global_max_steps': 1000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 1.0296683485633468e-05, 'num_train_epochs': 3, 'per_device_train_batch_size': 4, 'seed': 26, 'global_max_steps': 1000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 2000, 'Current Hyper-parameters': {'learning_rate': 7.163630378633069e-06, 'num_train_epochs': 4, 'per_device_train_batch_size': 4, 'seed': 27, 'global_max_steps': 2000}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 1.0296683485633468e-05, 'num_train_epochs': 3, 'per_device_train_batch_size': 4, 'seed': 26, 'global_max_steps': 1000}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 2000, 'Current Hyper-parameters': {'learning_rate': 1.789274561853069e-05, 'num_train_epochs': 3, 'per_device_train_batch_size': 4, 'seed': 32, 'global_max_steps': 1500}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 1.0296683485633468e-05, 'num_train_epochs': 3, 'per_device_train_batch_size': 4, 'seed': 26, 'global_max_steps': 1000}}\n",
"6\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAkAAAAHHCAYAAABXx+fLAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABToklEQVR4nO3dfVzN9/8/8MfpVKdclIt0OiVysZCEheTaRMxszNfVB7ka+5CJzOi7L81cxIzZxoS52Mbm2pYhF4kxTchVHyohGV2wdMGonPP6/eHX+7OjokOnU70f99vt3G7O6/16v8/z1ez08Hq/3u+3QgghQERERCQjZqYugIiIiKisMQARERGR7DAAERERkewwABEREZHsMAARERGR7DAAERERkewwABEREZHsMAARERGR7DAAERERkewwABFRheTi4oLRo0ebugwiqqAYgIhkbOPGjVAoFDhz5oypS6lwHj9+jC+++AJeXl6wtbWFlZUVXF1dMXnyZCQkJJi6PCJ6AXNTF0BE9DLi4+NhZmaaf8Pdu3cPvXv3xtmzZ/HWW2/hX//6F6pVq4b4+Hhs2bIFa9asQV5enklqI6KSYQAiIpN78uQJdDodLC0tS7yPSqUyYkXPN3r0aJw7dw47duzAwIED9bbNmzcPH3/8cal8zsv8XIioZHgKjIhe6Pbt2xg7dizUajVUKhWaN2+O9evX6/XJy8vDnDlz4OnpCVtbW1StWhWdO3dGZGSkXr+kpCQoFAp8/vnnWL58ORo1agSVSoXLly/jk08+gUKhQGJiIkaPHo0aNWrA1tYWY8aMwd9//613nGfXABWczvv9998RGBiIOnXqoGrVqhgwYADu3r2rt69Op8Mnn3wCR0dHVKlSBd27d8fly5dLtK7o1KlT2Lt3L8aNG1co/ABPg9nnn38uve/WrRu6detWqN/o0aPh4uLywp/LuXPnYG5ujrlz5xY6Rnx8PBQKBVasWCG1ZWZmYurUqXB2doZKpULjxo2xePFi6HS6546LSG44A0REz5WWlob27dtDoVBg8uTJqFOnDvbv349x48YhOzsbU6dOBQBkZ2fj22+/xbBhwzB+/Hjk5ORg3bp18PX1RXR0NFq1aqV33A0bNuDx48eYMGECVCoVatWqJW0bPHgwGjRogJCQEMTExODbb7+Fvb09Fi9e/MJ6P/jgA9SsWRPBwcFISkrC8uXLMXnyZGzdulXqExQUhM8++wz9+vWDr68vLly4AF9fXzx+/PiFxw8LCwMAjBw5sgQ/PcM9+3PRaDTo2rUrtm3bhuDgYL2+W7duhVKpxKBBgwAAf//9N7p27Yrbt2/j/fffR7169XDy5EkEBQUhJSUFy5cvN0rNRBWSICLZ2rBhgwAgTp8+XWyfcePGCY1GI+7du6fXPnToUGFrayv+/vtvIYQQT548Ebm5uXp97t+/L9RqtRg7dqzUduPGDQFA2NjYiPT0dL3+wcHBAoBefyGEGDBggKhdu7ZeW/369cWoUaMKjcXHx0fodDqpfdq0aUKpVIrMzEwhhBCpqanC3Nxc9O/fX+94n3zyiQCgd8yiDBgwQAAQ9+/ff26/Al27dhVdu3Yt1D5q1ChRv3596f3zfi6rV68WAMSlS5f02t3c3MQbb7whvZ83b56oWrWqSEhI0Os3a9YsoVQqRXJycolqJpIDngIjomIJIbBz507069cPQgjcu3dPevn6+iIrKwsxMTEAAKVSKa1V0el0yMjIwJMnT9CmTRupzz8NHDgQderUKfJz//3vf+u979y5M/766y9kZ2e/sOYJEyZAoVDo7avVanHz5k0AQEREBJ48eYJJkybp7ffBBx+88NgApBqqV69eov6GKurn8u6778Lc3FxvFis2NhaXL1/GkCFDpLbt27ejc+fOqFmzpt5/Kx8fH2i1Wvz2229GqZmoIuIpMCIq1t27d5GZmYk1a9ZgzZo1RfZJT0+X/vzdd99h6dKliIuLQ35+vtTeoEGDQvsV1VagXr16eu9r1qwJALh//z5sbGyeW/Pz9gUgBaHGjRvr9atVq5bU93kKPj8nJwc1atR4YX9DFfVzsbOzQ48ePbBt2zbMmzcPwNPTX+bm5nj33XelflevXsXFixeLDZb//G9FJHcMQERUrIKFsyNGjMCoUaOK7OPh4QEA2LRpE0aPHo3+/ftjxowZsLe3h1KpREhICK5du1ZoP2tr62I/V6lUFtkuhHhhza+yb0k0bdoUAHDp0iV07tz5hf0VCkWRn63VaovsX9zPZejQoRgzZgzOnz+PVq1aYdu2bejRowfs7OykPjqdDj179sRHH31U5DFcXV1fWC+RXDAAEVGx6tSpg+rVq0Or1cLHx+e5fXfs2IGGDRti165deqegnl24a2r169cHACQmJurNtvz111/SLNHz9OvXDyEhIdi0aVOJAlDNmjVx/fr1Qu0FM1El1b9/f7z//vvSabCEhAQEBQXp9WnUqBEePHjwwv9WRMTL4InoOZRKJQYOHIidO3ciNja20PZ/Xl5eMPPyz9mOU6dOISoqyviFGqBHjx4wNzfHqlWr9Nr/eSn583h7e6N379749ttv8fPPPxfanpeXhw8//FB636hRI8TFxen9rC5cuIDff//doLpr1KgBX19fbNu2DVu2bIGlpSX69++v12fw4MGIiorCgQMHCu2fmZmJJ0+eGPSZRJUZZ4CICOvXr0d4eHih9oCAACxatAiRkZHw8vLC+PHj4ebmhoyMDMTExODw4cPIyMgAALz11lvYtWsXBgwYgL59++LGjRsIDQ2Fm5sbHjx4UNZDKpZarUZAQACWLl2Kt99+G71798aFCxewf/9+2NnZ6c1eFef7779Hr1698O6776Jfv37o0aMHqlatiqtXr2LLli1ISUmR7gU0duxYLFu2DL6+vhg3bhzS09MRGhqK5s2bl2hR9z8NGTIEI0aMwDfffANfX99Ca5BmzJiBsLAwvPXWWxg9ejQ8PT3x8OFDXLp0CTt27EBSUpLeKTMiOWMAIqJCsyEFRo8ejbp16yI6Ohqffvopdu3ahW+++Qa1a9dG8+bN9e7LM3r0aKSmpmL16tU4cOAA3NzcsGnTJmzfvh1Hjx4to5GUzOLFi1GlShWsXbsWhw8fhre3Nw4ePIhOnTrBysrqhfvXqVMHJ0+exDfffIOtW7fi448/Rl5eHurXr4+3334bAQEBUt9mzZrh+++/x5w5cxAYGAg3Nzf88MMP+PHHHw3+ubz99tuwtrZGTk6O3tVfBapUqYJjx45h4cKF2L59O77//nvY2NjA1dUVc+fOha2trUGfR1SZKURprQwkIqrAMjMzUbNmTcyfP7/UHmVBROUX1wARkew8evSoUFvBXZKLemwFEVU+PAVGRLKzdetWbNy4EW+++SaqVauGEydO4KeffkKvXr3QsWNHU5dHRGWAAYiIZMfDwwPm5ub47LPPkJ2dLS2Mnj9/vqlLI6IywjVAREREJDtcA0RERESywwBEREREssM1QEXQ6XS4c+cOqlevXqKbohEREZHpCSGQk5MDR0dHmJk9f
46HAagId+7cgbOzs6nLICIiopdw69Yt1K1b97l9GICKUL16dQBPf4A2NjYmroaIiIhKIjs7G87OztLv8edhACpCwWkvGxsbBiAiIqIKpiTLV7gImoiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZId3giail6LVCUTfyEB6zmPYV7dCuwa1oDTjw4OJqGJgACIig4XHpmDunstIyXostWlsrRDczw293TUmrIyIqGR4CoyIDBIem4KJm2L0wg8ApGY9xsRNMQiPTTFRZUREJccZICIqMa1OYO6eyxBFbCtoCw77Dzo2tuPpMCJ6IWsLZYkeXGoMDEBEVGLRNzIKzfw8Ky07Fy0+OVhGFRFRRXb5U19UsTRNFOEpMCIqsfSc54cfIqKKgjNARFRi9tWtStRvw+i28GpYy8jVEFFFZ22hNNlnMwARUYm1a1ALGlsrpGY9LnIdkAKAg60VurjW4RogIirXeAqMiEpMaaZAcD+3IrcVxJ3gfm6yCz9anUDUtb/wy/nbiLr2F7S6ouIhEZUnnAEiIoP0dtdg1YjXERz2H6Rl50rtDjK9DxDviURUMSmEEPynyjOys7Nha2uLrKws2NjYmLoconIp53G+dLXXhtFtZXnaq+CeSM9+iRb8FFaNeJ0hiKgMGfL7m6fAiOil/DPseDWU32MwSnJPpLl7LvN0GFE5xQBERPQSXnRPJAEgJesxom9klF1RRFRiDEBERC+hpPdE4r2TiMonBiAiopdQ0nsilbQfEZUtBiAiopdQcE+k4lY+KfD0arB2DXhDSKLyiAGIiOgl/POeSM+GIDnfE4moomAAIiJ6SQX3RHKw1T/N5WBrxUvgico53giRiOgV9HbXoKebA6JvZCA95zHsqz897cWZH6LyjQGIiOgVKc0U8G5U29RlEJEBeAqMiIiIZKdcBKCVK1fCxcUFVlZW8PLyQnR0dLF9u3XrBoVCUejVt29fqc+uXbvQq1cv1K5dGwqFAufPny+DURAREVFFYfIAtHXrVgQGBiI4OBgxMTFo2bIlfH19kZ6eXmT/Xbt2ISUlRXrFxsZCqVRi0KBBUp+HDx+iU6dOWLx4cVkNg4iIiCoQk68BWrZsGcaPH48xY8YAAEJDQ7F3716sX78es2bNKtS/Vi39e2ps2bIFVapU0QtAI0eOBAAkJSUZr3AiIiKqsEw6A5SXl4ezZ8/Cx8dHajMzM4OPjw+ioqJKdIx169Zh6NChqFq1qrHKJCIiokrGpDNA9+7dg1arhVqt1mtXq9WIi4t74f7R0dGIjY3FunXrXqmO3Nxc5ObmSu+zs7Nf6XjF0eoEL5UlIiIqB0x+CuxVrFu3Di1atEC7du1e6TghISGYO3duKVVVtPDYFMzdc1nv6dEaWysE93PjzdKIiIjKmElPgdnZ2UGpVCItLU2vPS0tDQ4ODs/d9+HDh9iyZQvGjRv3ynUEBQUhKytLet26deuVj/lP4bEpmLgpRi/8AEBq1mNM3BSD8NiUUv08IiIiej6TzgBZWlrC09MTERER6N+/PwBAp9MhIiICkydPfu6+27dvR25uLkaMGPHKdahUKqhUqlc+TlG0OoG5ey5DFLGtoC047D/o2NiOp8OoQvk7T2vqEoiIXprJT4EFBgZi1KhRaNOmDdq1a4fly5fj4cOH0lVhfn5+cHJyQkhIiN5+69atQ//+/VG7duG7r2ZkZCA5ORl37twBAMTHxwMAHBwcXjizVNqib2QUmvl5Vlp2Llp8crCMKiIiomdxjab8mDwADRkyBHfv3sWcOXOQmpqKVq1aITw8XFoYnZycDDMz/TN18fHxOHHiBA4eLDo0hIWFSQEKAIYOHQoACA4OxieffGKcgRQjPef54YeoomtTvyasLZSmLoPopXGNpjwphBBFnZ2RtezsbNja2iIrKws2NjavdKyoa39h2No/Xthvw+i28GpY64X9iMobawslFAr+S5kqpoI1ms/+Iiz4G71qxOsMQRWIIb+/TT4DVNm1a1ALGlsrpGY9LnIdkAKAg60VurjW4XQrEVEZetEaTQWAuXsuo6ebA7+fKyGTPwqjslOaKRDczw3Af/9FUaDgfXA/N/7PRURUxl60RlMASMl6jOgbGWVXFJUZBqAy0Ntdg1UjXoeDrZVeu4OtFadXiYhMpKRrNLmWs3LiKbAy0ttdg55uDrzKgIionLCvbvXiTgb0o4qFAagMKc0U8G5U+LJ9IiIqeyVdo9muAS9QqYx4CoyIiGSJazTljQGIiIhki2s05YunwIiISNa4RlOeGICIiEj2uEZTfngKjIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSnXASglStXwsXFBVZWVvDy8kJ0dHSxfbt16waFQlHo1bdvX6mPEAJz5syBRqOBtbU1fHx8cPXq1bIYChEREVUAJg9AW7duRWBgIIKDgxETE4OWLVvC19cX6enpRfbftWsXUlJSpFdsbCyUSiUGDRok9fnss8/w1VdfITQ0FKdOnULVqlXh6+uLx48fl9WwiIiIqBxTCCGEKQvw8vJC27ZtsWLFCgCATqeDs7MzPvjgA8yaNeuF+y9fvhxz5sxBSkoKqlatCiEEHB0dMX36dHz44YcAgKysLKjVamzcuBFDhw594TGzs7Nha2uLrKws2NjYvNoAiYiIqEwY8vvbpDNAeXl5OHv2LHx8fKQ2MzMz+Pj4ICoqqkTHWLduHYYOHYqqVasCAG7cuIHU1FS9Y9ra2sLLy6vYY+bm5iI7O1vvRURERJWXSQPQvXv3oNVqoVar9drVajVSU1NfuH90dDRiY2Px3nvvSW0F+xlyzJCQENja2kovZ2dnQ4dCREREFYjJ1wC9inXr1qFFixZo167dKx0nKCgIWVlZ0uvWrVulVCERERGVRyYNQHZ2dlAqlUhLS9NrT0tLg4ODw3P3ffjwIbZs2YJx48bptRfsZ8gxVSoVbGxs9F5ERERUeZk0AFlaWsLT0xMRERFSm06nQ0REBLy9vZ+77/bt25Gbm4sRI0botTdo0AAODg56x8zOzsapU6deeEyqHLQ6gahrf+GX87cRde0vaHUmXedPRETlkLmpCwgMDMSoUaPQpk0btGvXDsuXL8fDhw8xZswYAICfnx+cnJwQEhKit9+6devQv39/1K5dW69doVBg6tSpmD9/Pl577TU0aNAAs2fPhqOjI/r3719WwyITCY9Nwdw9l5GS9d9bHmhsrRDczw293TUmrIyIiMoTkwegIUOG4O7du5gzZw5SU1PRqlUrhIeHS4uYk5OTYWamP1EVHx+PEydO4ODBg0Ue86OPPsLDhw8xYcIEZGZmolOnTggPD4eVlZXRx0OmEx6bgomb
YvDsfE9q1mNM3BSDVSNeZwgiIiIA5eA+QOUR7wNU8Wh1Ap0WH9Gb+fknBQAHWyucmPkGlGaKsi2OiIjKRIW5DxBRaYm+kVFs+AEAASAl6zGib2SUXVFERFRuMQBRpZCeU7LHnJS0HxERVW4MQFQp2Fcv2fqukvYjIqLKjQGIKoV2DWpBY2uF4lb3KPD0arB2DWqVZVlERFROMQBRpaA0UyC4nxsAFApBBe+D+7lxATQREQFgAKJKpLe7BqtGvA4HW/3TXA62VrwEnoiI9Jj8PkBEpam3uwY93RwQfSMD6TmPYV/96WkvzvwQEdE/MQBRpaM0U8C7Ue0XdyQiItniKTAiIiKSHQYgIiIikh0GICIiIpIdBiAiIiKSHQYgIiIikh0GICIiIpIdBiAiIiKSHQYgIiIikh0GICIiIpIdBiAiIiKSHQYgIiIikh0GICIiIpIdBiAiIiKSHQYgIiIikh0GICIiIpIdBiAiIiKSHQYgIiIikh0GICIiIpIdBiAiIiKSHQYgIiIikh0GICIiIpIdBiAiIiKSHZMHoJUrV8LFxQVWVlbw8vJCdHT0c/tnZmbC398fGo0GKpUKrq6u2Ldvn7Q9JycHU6dORf369WFtbY0OHTrg9OnTxh4GERERVSAmDUBbt25FYGAggoODERMTg5YtW8LX1xfp6elF9s/Ly0PPnj2RlJSEHTt2ID4+HmvXroWTk5PU57333sOhQ4fwww8/4NKlS+jVqxd8fHxw+/btshoWERERlXMKIYQwZIfr16+jYcOGpfLhXl5eaNu2LVasWAEA0Ol0cHZ2xgcffIBZs2YV6h8aGoolS5YgLi4OFhYWhbY/evQI1atXxy+//IK+fftK7Z6enujTpw/mz59forqys7Nha2uLrKws2NjYvOToiIiIqCwZ8vvb4Bmgxo0bo3v37ti0aRMeP3780kXm5eXh7Nmz8PHx+W8xZmbw8fFBVFRUkfuEhYXB29sb/v7+UKvVcHd3x8KFC6HVagEAT548gVarhZWVld5+1tbWOHHixEvXSkRERJWLwQEoJiYGHh4eCAwMhIODA95///0Xrtspyr1796DVaqFWq/Xa1Wo1UlNTi9zn+vXr2LFjB7RaLfbt24fZs2dj6dKl0sxO9erV4e3tjXnz5uHOnTvQarXYtGkToqKikJKSUmwtubm5yM7O1nsRERFR5WVwAGrVqhW+/PJL3LlzB+vXr0dKSgo6deoEd3d3LFu2DHfv3jVGnQCeniKzt7fHmjVr4OnpiSFDhuDjjz9GaGio1OeHH36AEAJOTk5QqVT46quvMGzYMJiZFT/UkJAQ2NraSi9nZ2ejjYGIiIhM76UXQZubm+Pdd9/F9u3bsXjxYiQmJuLDDz+Es7Mz/Pz8njvjAgB2dnZQKpVIS0vTa09LS4ODg0OR+2g0Gri6ukKpVEptzZo1Q2pqKvLy8gAAjRo1wrFjx/DgwQPcunUL0dHRyM/Pf+66paCgIGRlZUmvW7dulfTHQERERBXQSwegM2fOYNKkSdBoNFi2bBk+/PBDXLt2DYcOHcKdO3fwzjvvPHd/S0tLeHp6IiIiQmrT6XSIiIiAt7d3kft07NgRiYmJ0Ol0UltCQgI0Gg0sLS31+latWhUajQb379/HgQMHnluPSqWCjY2N3ouIiIgqMWGgpUuXCnd3d2FhYSHeeecdsWfPHqHVavX63Lp1SyiVyhcea8uWLUKlUomNGzeKy5cviwkTJogaNWqI1NRUIYQQI0eOFLNmzZL6Jycni+rVq4vJkyeL+Ph48euvvwp7e3sxf/58qU94eLjYv3+/uH79ujh48KBo2bKl8PLyEnl5eSUeY1ZWlgAgsrKySrwPERERmZYhv7/NDQ1Mq1atwtixYzF69GhoNJoi+9jb22PdunUvPNaQIUNw9+5dzJkzB6mpqWjVqhXCw8OlhdHJycl6a3ecnZ1x4MABTJs2DR4eHnByckJAQABmzpwp9cnKykJQUBD+/PNP1KpVCwMHDsSCBQuKvGyeiIiI5Mng+wDJAe8DREREVPEY9T5AGzZswPbt2wu1b9++Hd99952hhyMiIiIqcwYHoJCQENjZ2RVqt7e3x8KFC0ulKCIiIiJjMjgAJScno0GDBoXa69evj+Tk5FIpioiIiMiYDA5A9vb2uHjxYqH2CxcuoHbt2qVSFBEREZExGRyAhg0bhilTpiAyMhJarRZarRZHjhxBQEAAhg4daowaiYiIiEqVwZfBz5s3D0lJSejRowfMzZ/urtPp4OfnxzVAREREVCG89GXwCQkJuHDhAqytrdGiRQvUr1+/tGszGV4GT0REVPEY8vvb4BmgAq6urnB1dX3Z3YmIiIhM5qUC0J9//omwsDAkJydLDyEtsGzZslIpjIiIiMhYDA5AERERePvtt9GwYUPExcXB3d0dSUlJEELg9ddfN0aNRERERKXK4KvAgoKC8OGHH+LSpUuwsrLCzp07cevWLXTt2hWDBg0yRo1EREREpcrgAHTlyhX4+fkBAMzNzfHo0SNUq1YNn376KRYvXlzqBRIRERGVNoMDUNWqVaV1PxqNBteuXZO23bt3r/QqIyIiIjISg9cAtW/fHidOnECzZs3w5ptvYvr06bh06RJ27dqF9u3bG6NGIiIiolJlcABatmwZHjx4AACYO3cuHjx4gK1bt+K1117jFWBERERUIRgUgLRaLf788094eHgAeHo6LDQ01CiFERERERmLQWuAlEolevXqhfv37xurHiIiIiKjM3gRtLu7O65fv26MWoiIiIjKhMEBaP78+fjwww/x66+/IiUlBdnZ2XovIiIiovLO4Iehmpn9NzMpFArpz0IIKBQKaLXa0qvORPgwVCIioorHqA9DjYyMfOnCiIiIiMoDgwNQ165djVEHERERUZkxOAD99ttvz93epUuXly6GiIiIqCwYHIC6detWqO2fa4EqwxogIiIiqtwMvgrs/v37eq/09HSEh4ejbdu2OHjwoDFqJCIiIipVBs8A2draFmrr2bMnLC0tERgYiLNnz5ZKYURERETGYvAMUHHUajXi4+NL63BERERERmPwDNDFixf13gshkJKSgkWLFqFVq1alVRcRERGR0RgcgFq1agWFQoFn75/Yvn17rF+/vtQKIyIiIjIWgwPQjRs39N6bmZmhTp06sLKyKrWiiIiIiIzJ4ABUv359Y9RBREREVGYMXgQ9ZcoUfPXVV4XaV6xYgalTpxpcwMqVK+Hi4gIrKyt4eXkhOjr6uf0zMzPh7+8PjUYDlUoFV1dX7Nu3T9qu1Woxe/ZsNGjQANbW1mjUqBHmzZtX6JQdERERyZfBAWjnzp3o2LFjofYOHTpgx44dBh1r69atCAwMRHBwMGJiYtCyZUv4+voiPT29yP55eXno2bMnkpKSsGPHDsTHx2Pt2rVwcnKS+ixevBirVq3CihUrcOXKFSxevBifffYZvv76a8MGSkRERJWWwafA/vrrryLvBWRjY4N79+4ZdKxly5Zh/PjxGDNmDAAgNDQUe/fuxfr16zFr1qxC/devX4+MjAycPHkSFhYWAAAXFxe
9PidPnsQ777yDvn37Stt/+umnF84sERERkXwYPAPUuHFjhIeHF2rfv38/GjZsWOLj5OXl4ezZs/Dx8flvMWZm8PHxQVRUVJH7hIWFwdvbG/7+/lCr1XB3d8fChQv1Hr/RoUMHREREICEhAQBw4cIFnDhxAn369Cm2ltzcXGRnZ+u9iIiIqPIyeAYoMDAQkydPxt27d/HGG28AACIiIrB06VIsX768xMe5d+8etFot1Gq1XrtarUZcXFyR+1y/fh1HjhzB8OHDsW/fPiQmJmLSpEnIz89HcHAwAGDWrFnIzs5G06ZNoVQqodVqsWDBAgwfPrzYWkJCQjB37twS105EREQVm8EBaOzYscjNzcWCBQswb948AE9PM61atQp+fn6lXuA/6XQ62NvbY82aNVAqlfD09MTt27exZMkSKQBt27YNmzdvxo8//ojmzZvj/PnzmDp1KhwdHTFq1KgijxsUFITAwEDpfXZ2NpydnY06FiIiIjIdgwMQAEycOBETJ07E3bt3YW1tjWrVqhl8DDs7OyiVSqSlpem1p6WlwcHBoch9NBoNLCwsoFQqpbZmzZohNTUVeXl5sLS0xIwZMzBr1iwMHToUANCiRQvcvHkTISEhxQYglUoFlUpl8BiIiIioYjJ4DdCNGzdw9epVAECdOnWk8HP16lUkJSWV+DiWlpbw9PRERESE1KbT6RAREQFvb+8i9+nYsSMSExOh0+mktoSEBGg0GlhaWgIA/v77b5iZ6Q9LqVTq7UNERETyZnAAGj16NE6ePFmo/dSpUxg9erRBxwoMDMTatWvx3Xff4cqVK5g4cSIePnwoXRXm5+eHoKAgqf/EiRORkZGBgIAAJCQkYO/evVi4cCH8/f2lPv369cOCBQuwd+9eJCUlYffu3Vi2bBkGDBhg6FCJiIiokjL4FNi5c+eKvA9Q+/btMXnyZIOONWTIENy9exdz5sxBamoqWrVqhfDwcGlhdHJyst5sjrOzMw4cOIBp06bBw8MDTk5OCAgIwMyZM6U+X3/9NWbPno1JkyYhPT0djo6OeP/99zFnzhxDh0pERESVlEIYeItkW1tbHD16FK1bt9ZrP3v2LLp164acnJxSLdAUsrOzYWtri6ysLNjY2Ji6HCIiIioBQ35/G3wKrEuXLggJCdG7945Wq0VISAg6depkeLVEREREZczgU2CLFy9Gly5d0KRJE3Tu3BkAcPz4cWRnZ+PIkSOlXiARERFRaTN4BsjNzQ0XL17E4MGDkZ6ejpycHPj5+SEuLg7u7u7GqJGIiIioVBm8Bqg4mZmZ2LRpk8ELocsjrgEiIiKqeIy6BuhZERER+Ne//gWNRiPdjZmIiIioPHupAHTr1i18+umnaNCgAXr16gUA2L17N1JTU0u1OCIiIiJjKHEAys/Px/bt2+Hr64smTZrg/PnzWLJkCczMzPB///d/6N27NywsLIxZKxEREVGpKPFVYE5OTmjatClGjBiBLVu2oGbNmgCAYcOGGa04IiIiImMo8QzQkydPoFAooFAo9B5GSkRERFTRlDgA3blzBxMmTMBPP/0EBwcHDBw4ELt374ZCoTBmfURERESlrsQByMrKCsOHD8eRI0dw6dIlNGvWDFOmTMGTJ0+wYMECHDp0SO/u0ERERETl1UtdBdaoUSPMnz8fN2/exN69e5Gbm4u33npLeogpERERUXlm8KMw/snMzAx9+vRBnz59cPfuXfzwww+lVRcRERGR0ZTanaArE94JmoiIqOIp0ztBExEREVU0DEBEREQkOwxAREREJDsMQERERCQ7Bl8FptVqsXHjRkRERCA9PR06nU5v+5EjR0qtOCIiIiJjMDgABQQEYOPGjejbty/c3d15J2giIiKqcAwOQFu2bMG2bdvw5ptvGqMeIiIiIqMzeA2QpaUlGjdubIxaiIiIiMqEwQFo+vTp+PLLL8H7JxIREVFFZfApsBMnTiAyMhL79+9H8+bNYWFhobd9165dpVYcERERkTEYHIBq1KiBAQMGGKMWIiIiojJhcADasGGDMeogIiIiKjMv/TT4u3fvIj4+HgDQpEkT1KlTp9SKIiIiIjImgxdBP3z4EGPHjoVGo0GXLl3QpUsXODo6Yty4cfj777+NUSMRERFRqTI4AAUGBuLYsWPYs2cPMjMzkZmZiV9++QXHjh3D9OnTjVEjERERUalSCAOvZ7ezs8OOHTvQrVs3vfbIyEgMHjwYd+/eLc36TCI7Oxu2trbIysqCjY2NqcshIiKiEjDk97fBM0B///031Gp1oXZ7e/uXPgW2cuVKuLi4wMrKCl5eXoiOjn5u/8zMTPj7+0Oj0UClUsHV1RX79u2Ttru4uEChUBR6+fv7v1R9REREVLkYHIC8vb0RHByMx48fS22PHj3C3Llz4e3tbXABW7duRWBgIIKDgxETE4OWLVvC19cX6enpRfbPy8tDz549kZSUhB07diA+Ph5r166Fk5OT1Of06dNISUmRXocOHQIADBo0yOD6iIiIqPIx+BRYbGwsfH19kZubi5YtWwIALly4ACsrKxw4cADNmzc3qAAvLy+0bdsWK1asAADodDo4Ozvjgw8+wKxZswr1Dw0NxZIlSxAXF1foJozFmTp1Kn799VdcvXq1RA9v5SkwIiKiiseop8Dc3d1x9epVhISEoFWrVmjVqhUWLVqEq1evGhx+8vLycPbsWfj4+Py3IDMz+Pj4ICoqqsh9wsLC4O3tDX9/f6jVari7u2PhwoXQarXFfsamTZswduxYPrmeiIiIALzkfYCqVKmC8ePHv/KH37t3D1qtttCaIrVajbi4uCL3uX79Oo4cOYLhw4dj3759SExMxKRJk5Cfn4/g4OBC/X/++WdkZmZi9OjRxdaRm5uL3Nxc6X12dvbLDYiIiIgqhBIFoLCwMPTp0wcWFhYICwt7bt+33367VAorjk6ng729PdasWQOlUglPT0/cvn0bS5YsKTIArVu3Dn369IGjo2OxxwwJCcHcuXONWTYRERGVIyUKQP3790dqairs7e3Rv3//YvspFIpiT0UVxc7ODkqlEmlpaXrtaWlpcHBwKHIfjUYDCwsLKJVKqa1Zs2ZITU1FXl4eLC0tpfabN2/i8OHDL3xAa1BQEAIDA6X32dnZcHZ2LvE4iIiIqGIp0RqgglmXgj8X9zIk/ACApaUlPD09ERERofdZERERxV5R1rFjRyQmJkKn00ltCQkJ0Gg0euEHePrcMnt7e/Tt2/e5dahUKtjY2Oi9iIiIqPIyeBH0999/r7depkBeXh6+//57gwsIDAzE2rVr8d133+HKlSuYOHEiHj58iDFjxgAA/Pz8EBQUJPWfOHEiMjIyEBAQgISEBOzduxcLFy4sdI8fnU6HDRs2YNSoUTA3f+lHnhEREVElZHAAGjNmDLKysgq15+TkSKHFEEOGDMHnn3+OOXPmoFWrVjh//jzCw8OlhdHJyclISUmR+js7O+PAgQM4ffo0PDw8MGXKFAQEBBS6ZP7w4cNITk7G2LFjDa6JiIiIKjeD7wNkZmaGtLS0Qk9/v3DhArp3746MjIxSLdAUeB
8gIiKiiseQ398lPjfUunVr6ZESPXr00DutpNVqcePGDfTu3fvlqyYiIiIqIyUOQAVXf50/fx6+vr6oVq2atM3S0hIuLi4YOHBgqRdIREREVNpKHIAK7rHj4uKCIUOGwMrKymhFERERERmTwZdHjRo1yhh1EBEREZUZgwOQVqvFF198gW3btiE5ORl5eXl62yvDImgiIiKq3Ay+DH7u3LlYtmwZhgwZgqysLAQGBuLdd9+FmZkZPvnkEyOUSERERFS6DA5Amzdvxtq1azF9+nSYm5tj2LBh+PbbbzFnzhz88ccfxqiRiIiIqFQZHIBSU1PRokULAEC1atWkmyK+9dZb2Lt3b+lWR0RERGQEBgegunXrSndmbtSoEQ4ePAgAOH36NFQqVelWR0RERGQEBgegAQMGSA8v/eCDDzB79my89tpr8PPz42MniIiIqEIw+FEYz4qKikJUVBRee+019OvXr7TqMik+CoOIiKjiMcqjMIrj7e0Nb2/vVz0MERERUZkpUQAKCwsr8QHffvvtly6GiIiIqCyUKAAVPAesgEKhwLNnzhQKBYCnN0okIiIiKs9KtAhap9NJr4MHD6JVq1bYv38/MjMzkZmZif379+P1119HeHi4seslIiIiemUGrwGaOnUqQkND0alTJ6nN19cXVapUwYQJE3DlypVSLZCIiIiotBl8Gfy1a9dQo0aNQu22trZISkoqhZKIiIiIjMvgANS2bVsEBgYiLS1NaktLS8OMGTPQrl27Ui2OiIiIyBgMDkDr169HSkoK6tWrh8aNG6Nx48aoV68ebt++jXXr1hmjRiIiIqJSZfAaoMaNG+PixYs4dOgQ4uLiAADNmjWDj4+PdCUYERERUXn2yneCrox4J2giIqKKp9TvBP3VV19hwoQJsLKywldfffXcvlOmTCl5pUREREQmUKIZoAYNGuDMmTOoXbs2GjRoUPzBFApcv369VAs0Bc4AERERVTylPgN048aNIv9MREREVBEZfBUYERERUUVXohmgwMDAEh9w2bJlL10MERERUVkoUQA6d+5ciQ7Gy+CJiIioIihRAIqMjDR2HURERERlhmuAiIiISHYMvhM0AJw5cwbbtm1DcnIy8vLy9Lbt2rWrVAojIiIiMhaDZ4C2bNmCDh064MqVK9i9ezfy8/Pxn//8B0eOHIGtra0xaiQiIiIqVQYHoIULF+KLL77Anj17YGlpiS+//BJxcXEYPHgw6tWrZ3ABK1euhIuLC6ysrODl5YXo6Ojn9s/MzIS/vz80Gg1UKhVcXV2xb98+vT63b9/GiBEjULt2bVhbW6NFixY4c+aMwbURERFR5WRwALp27Rr69u0LALC0tMTDhw+hUCgwbdo0rFmzxqBjbd26FYGBgQgODkZMTAxatmwJX19fpKenF9k/Ly8PPXv2RFJSEnbs2IH4+HisXbsWTk5OUp/79++jY8eOsLCwwP79+3H58mUsXboUNWvWNHSoREREVEkZvAaoZs2ayMnJAQA4OTkhNjYWLVq0QGZmJv7++2+DjrVs2TKMHz8eY8aMAQCEhoZi7969WL9+PWbNmlWo//r165GRkYGTJ0/CwsICAODi4qLXZ/HixXB2dsaGDRuktuc9voOIiIjkx+AZoC5duuDQoUMAgEGDBiEgIADjx4/HsGHD0KNHjxIfJy8vD2fPnoWPj89/izEzg4+PD6KioorcJywsDN7e3vD394darYa7uzsWLlwIrVar16dNmzYYNGgQ7O3t0bp1a6xdu/a5teTm5iI7O1vvRURERJVXiQNQbGwsAGDFihUYOnQoAODjjz9GYGAg0tLSMHDgQKxbt67EH3zv3j1otVqo1Wq9drVajdTU1CL3uX79Onbs2AGtVot9+/Zh9uzZWLp0KebPn6/XZ9WqVXjttddw4MABTJw4EVOmTMF3331XbC0hISGwtbWVXs7OziUeBxEREVU8JXoaPPB0dqZt27Z47733MHToUFSvXv2VPvjOnTtwcnLCyZMn4e3tLbV/9NFHOHbsGE6dOlVoH1dXVzx+/Bg3btyAUqkE8PQ02pIlS5CSkgLg6bqkNm3a4OTJk9J+U6ZMwenTp4udWcrNzUVubq70Pjs7G87OznwaPBERUQViyNPgSzwDdOzYMTRv3hzTp0+HRqPBqFGjcPz48Zcu0s7ODkqlEmlpaXrtaWlpcHBwKHIfjUYDV1dXKfwAQLNmzZCamirdj0ij0cDNzU1vv2bNmiE5ObnYWlQqFWxsbPReREREVHmVOAB17twZ69evR0pKCr7++mskJSWha9eucHV1xeLFi4s9bVUcS0tLeHp6IiIiQmrT6XSIiIjQmxH6p44dOyIxMRE6nU5qS0hIgEajgaWlpdQnPj5eb7+EhATUr1/foPqIiIioEhOv4OrVq+J///d/hbOzs7CwsBD9+vUzaP8tW7YIlUolNm7cKC5fviwmTJggatSoIVJTU4UQQowcOVLMmjVL6p+cnCyqV68uJk+eLOLj48Wvv/4q7O3txfz586U+0dHRwtzcXCxYsEBcvXpVbN68WVSpUkVs2rSpxHVlZWUJACIrK8ug8RAREZHpGPL7+5UCkBBCPHjwQKxevVrUqlVLmJmZGbz/119/LerVqycsLS1Fu3btxB9//CFt69q1qxg1apRe/5MnTwovLy+hUqlEw4YNxYIFC8STJ0/0+uzZs0e4u7sLlUolmjZtKtasWWNQTQxAREREFY8hv79LvAj6Wb/99hvWr1+PnTt3wszMDIMHD8a4cePQvn370pygMglDFlERERFR+WDI72+DboR4584dbNy4ERs3bkRiYiI6dOiAr776CoMHD0bVqlVfqWgiIiKislLiANSnTx8cPnwYdnZ28PPzw9ixY9GkSRNj1kZERERkFCUOQBYWFtixYwfeeustvcvQiYiIiCqaEgegsLAwY9ZBREREVGYMfhYYERERUUXHAERERESywwBEREREssMARERERLLDAERERESywwBEREREssMARERERLLDAERERESywwBEREREssMARERERLLDAERERESywwBEREREssMARERERLLDAERERESywwBEREREssMARERERLLDAERERESywwBEREREssMARERERLLDAERERESywwBEREREssMARERERLLDAERERESywwBEREREssMARERERLLDAERERESyUy4C0MqVK+Hi4gIrKyt4eXkhOjr6uf0zMzPh7+8PjUYDlUoFV1dX7Nu3T9r+ySefQKFQ6L2aNm1q7GEQERFRBWFu6gK2bt2KwMBAhIaGwsvLC8uXL4evry/i4+Nhb29fqH9eXh569uwJe3t77NixA05OTrh58yZq1Kih16958+Y4fPiw9N7c3ORDJSIionLC5Klg2bJlGD9+PMaMGQMACA0Nxd69e7F+/XrMmjWrUP/169cjIyMDJ0+ehIWFBQDAxcWlUD9zc3M4ODgYtXYiIiKqmEx6CiwvLw9nz56Fj4+P1GZmZgYfHx9ERUUVuU9YWBi8vb3h7+8PtVoNd3d3LFy4EFqtVq/f1atX4ejoiIYNG2L48OFIT
k426liIiIio4jDpDNC9e/eg1WqhVqv12tVqNeLi4orc5/r16zhy5AiGDx+Offv2ITExEZMmTUJ+fj6Cg4MBAF5eXti4cSOaNGmClJQUzJ07F507d0ZsbCyqV69e6Ji5ubnIzc2V3mdnZ5fiKImIiKi8MfkpMEPpdDrY29tjzZo1UCqV8PT0xO3bt7FkyRIpAPXp00fq7+HhAS8vL9SvXx/btm3DuHHjCh0zJCQEc+fOLbMxEBERkWmZ9BSYnZ0dlEol0tLS9NrT0tKKXb+j0Wjg6uoKpVIptTVr1gypqanIy8srcp8aNWrA1dUViYmJRW4PCgpCVlaW9Lp169ZLjoiIiIgqApMGIEtLS3h6eiIiIkJq0+l0iIiIgLe3d5H7dOzYEYmJidDpdFJbQkICNBoNLC0ti9znwYMHuHbtGjQaTZHbVSoVbGxs9F5ERERUeZn8PkCBgYFYu3YtvvvuO1y5cgUTJ07Ew4cPpavC/Pz8EBQUJPWfOHEiMjIyEBAQgISEBOzduxcLFy6Ev7+/1OfDDz/EsWPHkJSUhJMnT2LAgAFQKpUYNmxYmY+PiIiIyh+TrwEaMmQI7t69izlz5iA1NRWtWrVCeHi4tDA6OTkZZmb/zWnOzs44cOAApk2bBg8PDzg5OSEgIAAzZ86U+vz5558YNmwY/vrrL9SpUwedOnXCH3/8gTp16pT5+IiIiKj8UQghhKmLKG+ys7Nha2uLrKwsng4jIiKqIAz5/W3yU2BEREREZY0BiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZIcBiIiIiGSHAYiIiIhkhwGIiIiIZKdcBKCVK1fCxcUFVlZW8PLyQnR09HP7Z2Zmwt/fHxqNBiqVCq6urti3b1+RfRctWgSFQoGpU6caoXIiIiKqiMxNXcDWrVsRGBiI0NBQeHl5Yfny5fD19UV8fDzs7e0L9c/Ly0PPnj1hb2+PHTt2wMnJCTdv3kSNGjUK9T19+jRWr14NDw+PMhgJERERVRQmnwFatmwZxo8fjzFjxsDNzQ2hoaGoUqUK1q9fX2T/9evXIyMjAz///DM6duwIFxcXdO3aFS1bttTr9+DBAwwfPhxr165FzZo1y2IoREREVEGYNADl5eXh7Nmz8PHxkdrMzMzg4+ODqKioIvcJCwuDt7c3/P39oVar4e7ujoULF0Kr1er18/f3R9++ffWOXZzc3FxkZ2frvYiIiKjyMukpsHv37kGr1UKtVuu1q9VqxMXFFbnP9evXceTIEQwfPhz79u1DYmIiJk2ahPz8fAQHBwMAtmzZgpiYGJw+fbpEdYSEhGDu3LmvNhgiIiKqMEx+CsxQOp0O9vb2WLNmDTw9PTFkyBB8/PHHCA0NBQDcunULAQEB2Lx5M6ysrEp0zKCgIGRlZUmvW7duGXMIREREZGImnQGys7ODUqlEWlqaXntaWhocHByK3Eej0cDCwgJKpVJqa9asGVJTU6VTaunp6Xj99del7VqtFr/99htWrFiB3NxcvX0BQKVSQaVSleLIiIiIqDwz6QyQpaUlPD09ERERIbXpdDpERETA29u7yH06duyIxMRE6HQ6qS0hIQEajQaWlpbo0aMHLl26hPPnz0uvNm3aYPjw4Th//nyh8ENERETyY/LL4AMDAzFq1Ci0adMG7dq1w/Lly/Hw4UOMGTMGAODn5wcnJyeEhIQAACZOnIgVK1YgICAAH3zwAa5evYqFCxdiypQpAIDq1avD3d1d7zOqVq2K2rVrF2onIiIieTJ5ABoyZAju3r2LOXPmIDU1Fa1atUJ4eLi0MDo5ORlmZv+dqHJ2dsaBAwcwbdo0eHh4wMnJCQEBAZg5c6aphkBEREQVjEIIIUxdRHmTnZ0NW1tbZGVlwcbGxtTlEBERUQkY8vu7wl0FRkRERPSqGICIiIhIdhiAiIiISHYYgIiIiEh2GICIiIhIdhiAiIiISHYYgIiIiEh2GICIiIhIdhiAiIiISHYYgIiIiEh2GICIiIhIdhiAiIiISHYYgIiIiEh2zE1dABEREcmHVicQfSMD6TmPYV/dCu0a1ILSTFHmdTAAERERUZkIj03B3D2XkZL1WGrT2FohuJ8bertryrQWngIjIiIiowuPTcHETTF64QcAUrMeY+KmGITHppRpPQxAREREZFRancDcPZchithW0DZ3z2VodUX1MA4GICIiIjKq6BsZhWZ+/kkASMl6jOgbGWVWEwMQERERGVV6TvHh52X6lQYGICIiIjIq++pWpdqvNDAAERERkVG1a1ALGlsrFHexuwJPrwZr16BWmdXEAERERERGpTRTILifGwAUCkEF74P7uZXp/YAYgIiIiMjoertrsGrE63Cw1T/N5WBrhVUjXi/z+wDxRohERERUJnq7a9DTzYF3giYiIiJ5UZop4N2otqnL4CkwIiIikh8GICIiIpIdBiAiIiKSHQYgIiIikh0GICIiIpKdchGAVq5cCRcXF1hZWcHLywvR0dHP7Z+ZmQl/f39oNBqoVCq4urpi37590vZVq1bBw8MDNjY2sLGxgbe3N/bv32/sYRAREVEFYfLL4Ldu3YrAwECEhobCy8sLy5cvh6+vL+Lj42Fvb1+of15eHnr27Al7e3vs2LEDTk5OuHnzJmrUqCH1qVu3LhYtWoTXXnsNQgh89913eOedd3Du3Dk0b968DEdHRERE5ZFCCCFMWYCXlxfatm2LFStWAAB0Oh2cnZ3xwQcfYNasWYX6h4aGYsmSJYiLi4OFhUWJP6dWrVpYsmQJxo0b98K+2dnZsLW1RVZWFmxsbEo+GCIiIjIZQ35/m/QUWF5eHs6ePQsfHx+pzczMDD4+PoiKiipyn7CwMHh7e8Pf3x9qtRru7u5YuHAhtFptkf21Wi22bNmChw8fwtvb2yjjICIioorFpKfA7t27B61WC7VardeuVqsRFxdX5D7Xr1/HkSNHMHz4cOzbtw+JiYmYNGkS8vPzERwcLPW7dOkSvL298fjxY1SrVg27d++Gm5tbkcfMzc1Fbm6u9D4rKwvA0yRJREREFUPB7+2SnNwy+RogQ+l0Otjb22PNmjVQKpXw9PTE7du3sWTJEr0A1KRJE5w/fx5ZWVnYsWMHRo0ahWPHjhUZgkJCQjB37txC7c7OzkYdCxEREZW+nJwc2NraPrePSQOQnZ0dlEol0tLS9NrT0tLg4OBQ5D4ajQYWFhZQKpVSW7NmzZCamoq8vDxYWloCACwtLdG4cWMAgKenJ06fPo0vv/wSq1evLnTMoKAgBAYGSu91Oh0yMjJQu3ZtKBRl/4C20pCdnQ1nZ2fcunVLVuuY5DhuOY4ZkOe4OWZ5jBmQ57hLY8xCCOTk
5MDR0fGFfU0agCwtLeHp6YmIiAj0798fwNPwERERgcmTJxe5T8eOHfHjjz9Cp9PBzOzpEqaEhARoNBop/BRFp9Ppneb6J5VKBZVKpdf2z6vKKrKCWwHIjRzHLccxA/IcN8csH3Ic96uO+UUzPwVMfh+gwMBArF27Ft999x2uXLmCiRMn4uHDhxgzZgwAwM/PD0FBQVL/iRMnIiMjAwEBAUhISMDevXuxcOFC+Pv7S32CgoLw22+/ISkpCZcuXUJQUBCOHj2K4cOHl/n4iIiIqPwx+RqgIUOG4O7du5gzZw5SU1PRqlUrhIeHSwujk5OTpZke4Om6nAMHDmDatGnw8PCAk5MTAgICMHPmTKlPeno6/Pz8kJKSAltbW3h4eODAgQPo2bNnmY+PiIiIyh+TByAAmDx5crGnvI4ePVqozdvbG3/88Uexx1u3bl1plVZhqVQqBAcHFzq1V9nJcdxyHDMgz3FzzPIhx3GX9ZhNfiNEIiIiorJm8jVARERERGWNAYiIiIhkhwGIiIiIZIcBiIiIiGSHAagCW7RoERQKBaZOnSq1PX78GP7+/qhduzaqVauGgQMHFrrTdnJyMvr27YsqVarA3t4eM2bMwJMnT8q4esPcvn0bI0aMQO3atWFtbY0WLVrgzJkz0nYhBObMmQONRgNra2v4+Pjg6tWresfIyMjA8OHDYWNjgxo1amDcuHF48OBBWQ+lRLRaLWbPno0GDRrA2toajRo1wrx58/Seb1MZxvzbb7+hX79+cHR0hEKhwM8//6y3vbTGePHiRXTu3BlWVlZwdnbGZ599ZuyhFet5Y87Pz8fMmTPRokULVK1aFY6OjvDz88OdO3f0jlGZxvysf//731AoFFi+fLlee0UbM1CycV+5cgVvv/02bG1tUbVqVbRt2xbJycnS9or2nf6iMT948ACTJ09G3bp1YW1tDTc3N4SGhur1KbMxC6qQoqOjhYuLi/Dw8BABAQFS+7///W/h7OwsIiIixJkzZ0T79u1Fhw4dpO1PnjwR7u7uwsfHR5w7d07s27dP2NnZiaCgIBOMomQyMjJE/fr1xejRo8WpU6fE9evXxYEDB0RiYqLUZ9GiRcLW1lb8/PPP4sKFC+Ltt98WDRo0EI8ePZL69O7dW7Rs2VL88ccf4vjx46Jx48Zi2LBhphjSCy1YsEDUrl1b/Prrr+LGjRti+/btolq1auLLL7+U+lSGMe/bt098/PHHYteuXQKA2L17t9720hhjVlaWUKvVYvjw4SI2Nlb89NNPwtraWqxevbqshqnneWPOzMwUPj4+YuvWrSIuLk5ERUWJdu3aCU9PT71jVKYx/9OuXbtEy5YthaOjo/jiiy/0tlW0MQvx4nEnJiaKWrVqiRkzZoiYmBiRmJgofvnlF5GWlib1qWjf6S8a8/jx40WjRo1EZGSkuHHjhli9erVQKpXil19+kfqU1ZgZgCqgnJwc8dprr4lDhw6Jrl27SgEoMzNTWFhYiO3bt0t9r1y5IgCIqKgoIcTTv5xmZmYiNTVV6rNq1SphY2MjcnNzy3QcJTVz5kzRqVOnYrfrdDrh4OAglixZIrVlZmYKlUolfvrpJyGEEJcvXxYAxOnTp6U++/fvFwqFQty+fdt4xb+kvn37irFjx+q1vfvuu2L48OFCiMo55me/LEtrjN98842oWbOm3t/vmTNniiZNmhh5RC/2vDBQIDo6WgAQN2/eFEJU3jH/+eefwsnJScTGxor69evrBaCKPmYhih73kCFDxIgRI4rdp6J/pxc15ubNm4tPP/1Ur+31118XH3/8sRCibMfMU2AVkL+/P/r27QsfHx+99rNnzyI/P1+vvWnTpqhXrx6ioqIAAFFRUWjRooV0p20A8PX1RXZ2Nv7zn/+UzQAMFBYWhjZt2mDQoEGwt7dH69atsXbtWmn7jRs3kJqaqjduW1tbeHl56Y27Ro0aaNOmjdTHx8cHZmZmOHXqVNkNpoQ6dOiAiIgIJCQkAAAuXLiAEydOoE+fPgAq55ifVVpjjIqKQpcuXfSeFejr64v4+Hjcv3+/jEbz8rKysqBQKKTnE1bGMet0OowcORIzZsxA8+bNC22vrGPeu3cvXF1d4evrC3t7e3h5eemdMqqM3+kdOnRAWFgYbt++DSEEIiMjkZCQgF69egEo2zEzAFUwW7ZsQUxMDEJCQgptS01NhaWlZaEHuarVaqSmpkp9/vmXpmB7wbby6Pr161i1ahVee+01HDhwABMnTsSUKVPw3XffAfhv3UWN65/jtre319tubm6OWrVqlctxz5o1C0OHDkXTpk1hYWGB1q1bY+rUqdLz7CrjmJ9VWmOsiH/nCzx+/BgzZ87EsGHDpIdDVsYxL168GObm5pgyZUqR2yvjmNPT0/HgwQMsWrQIvXv3xsGDBzFgwAC8++67OHbsGIDK+Z3+9ddfw83NDXXr1oWlpSV69+6NlStXokuXLgDKdszl4lEYVDK3bt1CQEAADh06BCsrK1OXU2Z0Oh3atGmDhQsXAgBat26N2NhYhIaGYtSoUSauzji2bduGzZs348cff0Tz5s1x/vx5TJ06FY6OjpV2zKQvPz8fgwcPhhACq1atMnU5RnP27Fl8+eWXiImJgUKhMHU5ZUan0wEA3nnnHUybNg0A0KpVK5w8eRKhoaHo2rWrKcszmq+//hp//PEHwsLCUL9+ffz222/w9/eHo6NjobMaxsYZoArk7NmzSE9Px+uvvw5zc3OYm5vj2LFj+Oqrr2Bubg61Wo28vDxkZmbq7ZeWlgYHBwcAgIODQ6HV9AXvC/qUNxqNBm5ubnptzZo1k66UKKi7qHH9c9zp6el62588eYKMjIxyOe4ZM2ZIs0AtWrTAyJEjMW3aNGnmrzKO+VmlNcaK+He+IPzcvHkThw4dkmZ/gMo35uPHjyM9PR316tWTvtdu3ryJ6dOnw8XFBUDlGzMA2NnZwdzc/IXfbZXpO/3Ro0f43//9Xyxbtgz9+vWDh4cHJk+ejCFDhuDzzz8HULZjZgCqQHr06IFLly7h/Pnz0qtNmzYYPny49GcLCwtERERI+8THxyM5ORne3t4Anj5I9tKlS3pfJgVfsM/+j1hedOzYEfHx8XptCQkJqF+/PgCgQYMGcHBw0Bt3dnY2Tp06pTfuzMxMnD17Vupz5MgR6HQ6eHl5lcEoDPP333/DzEz/f0+lUin9q7EyjvlZpTVGb29v/Pbbb8jPz5f6HDp0CE2aNEHNmjXLaDQlVxB+rl69isOHD6N27dp62yvbmEeOHImLFy/qfa85OjpixowZOHDgAIDKN2YAsLS0RNu2bZ/73ebp6VmpvtPz8/ORn5//3O+2Mh2zYWu6qbz551VgQjy9fLBevXriyJEj4syZM8Lb21t4e3tL2wsuH+zVq5c4f/68CA8PF3Xq1CnXl8FHR0cLc3NzsWDBAnH16lWxefNmUaVKFbFp0yapz6JFi0SNGjXEL7/8Ii5evCjeeeedIi+Xbt26tTh16pQ4ceKEeO2118rVJeH/NGrUKOHk5CRdBr9r1y5hZ2cnPvroI6lPZRhzTk6OOHf
unDh37pwAIJYtWybOnTsnXfFUGmPMzMwUarVajBw5UsTGxootW7aIKlWqmOzy6OeNOS8vT7z99tuibt264vz58yIlJUV6/fPqlso05qI8exWYEBVvzEK8eNy7du0SFhYWYs2aNeLq1avi66+/FkqlUhw/flw6RkX7Tn/RmLt27SqaN28uIiMjxfXr18WGDRuElZWV+Oabb6RjlNWYGYAquGcD0KNHj8SkSZNEzZo1RZUqVcSAAQNESkqK3j5JSUmiT58+wtraWtjZ2Ynp06eL/Pz8Mq7cMHv27BHu7u5CpVKJpk2bijVr1uht1+l0Yvbs2UKtVguVSiV69Ogh4uPj9fr89ddfYtiwYaJatWrCxsZGjBkzRuTk5JTlMEosOztbBAQEiHr16gkrKyvRsGFD8fHHH+v9EqwMY46MjBQACr1GjRolhCi9MV64cEF06tRJqFQq4eTkJBYtWlRWQyzkeWO+ceNGkdsAiMjISOkYlWnMRSkqAFW0MQtRsnGvW7dONG7cWFhZWYmWLVuKn3/+We8YFe07/UVjTklJEaNHjxaOjo7CyspKNGnSRCxdulTodDrpGGU1ZoUQ/7i1LBEREZEMcA0QERERyQ4DEBEREckOAxARERHJDgMQERERyQ4DEBEREckOAxARERHJDgMQERERyQ4DEBEREckOAxARvbKjR49CoVBIDzDcuHEjatSo8crHLa3jGOt4ANCtWzdMnTq1VI9piC5duuDHH38sUd/27dtj586dRq6IqGJgACKSkdDQUFSvXh1PnjyR2h48eAALCwt069ZNr29BqLl27ZrR6omMjMSbb76J2rVro0qVKnBzc8P06dNx+/Zto31mSSUlJUGhUDz3tXHjRuzatQvz5s0zSY1hYWFIS0vD0KFDS9T///7v/zBr1izpwZNEcsYARCQj3bt3x4MHD3DmzBmp7fjx43BwcMCpU6fw+PFjqT0yMhL16tVDo0aNjFLL6tWr4ePjAwcHB+zcuROXL19GaGgosrKysHTpUqN8piGcnZ2RkpIivaZPn47mzZvrtQ0ZMgS1atVC9erVTVLjV199hTFjxhR6unZx+vTpg5ycHOzfv9/IlRGVfwxARDLSpEkTaDQaHD16VGo7evQo3nnnHTRo0AB//PGHXnv37t0BAD/88APatGmD6tWrw8HBAf/617+Qnp7+0nX8+eefmDJlCqZMmYL169ejW7ducHFxQZcuXfDtt99izpw5xe67atUqNGrUCJaWlmjSpAl++OEHve2ZmZl4//33oVarYWVlBXd3d/z6669FHuvu3bto06YNBgwYgNzcXL1tSqUSDg4O0qtatWowNzfXa7O2ti50CszFxQXz58+Hn58fqlWrhvr16yMsLAx3797FO++8g2rVqsHDw0MvhALAiRMn0LlzZ1hbW8PZ2RlTpkzBw4cPi/053L17F0eOHEG/fv2kNiEEPvnkE9SrVw8qlQqOjo6YMmWK3pjefPNNbNmypdjjEskFAxCRzHTv3h2RkZHS+8jISHTr1g1du3aV2h89eoRTp05JASg/Px/z5s3DhQsX8PPPPyMpKQmjR49+6Rq2b9+OvLw8fPTRR0VuL26dzu7duxEQEIDp06cjNjYW77//PsaMGSPVrdPp0KdPH/z+++/YtGkTLl++jEWLFkGpVBY61q1bt9C5c2e4u7tjx44dUKlULz2eZ33xxRfo2LEjzp07h759+2LkyJHw8/PDiBEjEBMTg0aNGsHPzw8Fz6K+du0aevfujYEDB+LixYvYunUrTpw4gcmTJxf7GSdOnECVKlXQrFkzqW3nzp344osvsHr1aly9ehU///wzWrRoobdfu3btcPz48VIbK1GF9UrPvSeiCmft2rWiatWqIj8/X2RnZwtzc3ORnp4ufvzxR9GlSxchhBARERECgLh582aRxzh9+rQAIHJycoQQQkRGRgoA4v79+0IIITZs2CBsbW2LrWHixInCxsbmhbU+e5wOHTqI8ePH6/UZNGiQePPNN4UQQhw4cECYmZmJ+Pj45x4vLi5OODs7iylTpgidTvfCOoQQIjg4WLRs2bJQe9euXUVAQID0vn79+mLEiBHS+5SUFAFAzJ49W2qLiooSAERKSooQQohx48aJCRMm6B33+PHjwszMTDx69KjIer744gvRsGFDvbalS5cKV1dXkZeXV+w4fvnlF2FmZia0Wm2xfYjkgDNARDLTrVs3PHz4EKdPn8bx48fh6uqKOnXqoGvXrtI6oKNHj6Jhw4aoV68eAODs2bPo168f6tWrh+rVq6Nr164AgOTk5JeqQQgBhUJh8H5XrlxBx44d9do6duyIK1euAADOnz+PunXrwtXVtdhjPHr0CJ07d8a7776LL7/88qXqeBEPDw/pz2q1GgD0ZmIK2gpOI164cAEbN25EtWrVpJevry90Oh1u3LhR7DisrKz02gYNGoRHjx6hYcOGGD9+PHbv3q234B0ArK2todPpCp3yI5IbBiAimWncuDHq1q2LyMhIREZGSmHG0dERzs7OOHnyJCIjI/HGG28AAB4+fAhfX1/Y2Nhg8+bNOH36NHbv3g0AyMvLe6kaXF1dkZWVhZSUlNIZ1P9nbW39wj4qlQo+Pj749ddfjXa1mYWFhfTngoBVVFvB1VgPHjzA+++/j/Pnz0uvCxcu4OrVq8UuQrezs8P9+/f12pydnREfH49vvvkG1tbWmDRpErp06YL8/HypT0ZGBqpWrVqinxVRZcYARCRD3bt3x9GjR3H06FG9y9+7dOmC/fv3Izo6Wlr/ExcXh7/++guLFi1C586d0bRp01daAA0A//M//wNLS0t89tlnRW4vuJ/Qs5o1a4bff/9dr+3333+Hm5sbgKczL3/++ScSEhKK/WwzMzP88MMP8PT0RPfu3XHnzp2XG0Qpev3113H58mU0bty40MvS0rLIfVq3bo3U1NRCIcja2hr9+vXDV199haNHjyIqKgqXLl2StsfGxqJ169ZGHQ9RRWBu6gKIqOx1794d/v7+yM/Pl2aAAKBr166YPHky8vLypABUr149WFpa4uuvv8a///1vxMbGvvJ9b5ydnfHFF19g8uTJyM7Ohp+fH1xcXPDnn3/i+++/R7Vq1Yq8FH7GjBkYPHgwWrduDR8fH+zZswe7du3C4cOHpfq7dOmCgQMHYtmyZWjcuDHi4uKgUCjQu3dv6ThKpRKbN2/GsGHD8MYbb+Do0aNwcHB4pTG9ipkzZ6J9+/aYPHky3nvvPVStWhWXL1/GoUOHsGLFiiL3ad26Nezs7PD777/jrbfeAvD0Ro9arRZeXl6oUqUKNm3aBGtra9SvX1/a7/jx4+jVq1eZjIuoPOMMEJEMde/eHY8ePULjxo2l9SjA0wCRk5MjXS4PAHXq1MHGjRuxfft2uLm5YdGiRfj8889fuYZJkybh4MGDuH37NgYMGICmTZvivffeg42NDT788MMi9+nfvz++/PJLfP7552jevDlWr16NDRs26M1i7dy5E23btsWwYcPg5uaGjz76CFqtttCxzM3N8dNPP6F58+Z44403XnlW61V4eHjg2L
FjSEhIQOfOndG6dWvMmTMHjo6Oxe6jVCoxZswYbN68WWqrUaMG1q5di44dO8LDwwOHDx/Gnj17ULt2bQDA7du3cfLkSYwZM8boYyIq7xRC/P/rMImIqEJJTU1F8+bNERMTozfLU5yZM2fi/v37WLNmTRlUR1S+cQaIiKiCcnBwwLp160p8NZ69vb3JHttBVN5wBoiIiIhkhzNAREREJDsMQERERCQ7DEBEREQkOwxAREREJDsMQERERCQ7DEBEREQkOwxAREREJDsMQERERCQ7DEBEREQkO/8P/7nVHkughiIAAAAASUVORK5CYII=",
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from flaml.automl.data import get_output_from_log\n",
"time_history, best_valid_loss_history, valid_loss_history, config_history, metric_history = \\\n",
" get_output_from_log(filename=automl_settings['log_file_name'], time_budget=3000)\n",
"for config in config_history:\n",
" print(config)\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"plt.title('Learning Curve')\n",
"plt.xlabel('Wall Clock Time (s)')\n",
"plt.ylabel('Validation Accuracy')\n",
"print(len(valid_loss_history))\n",
"plt.scatter(time_history, 1 - np.array(valid_loss_history))\n",
"plt.step(time_history, 1 - np.array(best_valid_loss_history), where='post')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "664qCdihTjhJ"
},
"source": [
"### 4.2 Text Summarization Example"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "kmB4kaF_TjhJ"
},
"source": [
"The text summarization task summarizes a long text into a short sentence. For example:\n",
"\n",
"- Document: Army explosives experts were called out to deal with a suspect package at the offices on the Newtownards Road on Friday night. Roads were sealed off and traffic diverted as a controlled explosion was carried out. The premises, used by East Belfast MP Naomi Long, have been targeted a number of times. Most recently, petrol bomb attacks were carried out on the offices on consecutive nights in April and May. The attacks began following a Belfast City Council vote in December 2012 restricting the flying of the union flag at the City Hall. Condemning the latest hoax, Alliance MLA Chris Lyttle said: \"It is a serious incident for the local area, it causes serious disruption, it puts people's lives at risk, it can prevent emergency services reaching the area. \"Ultimately we need people with information to share that with the police in order for them to do their job and bring these people to justice.\n",
"\n",
"- Summary: A suspicious package left outside an Alliance Party office in east Belfast has been declared a hoax.\n",
"\n",
"In this example, we use FLAML to perform *abstractive summarization* using the t5-small language model, i.e., the summary is generated word-by-word. "
]
},
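  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To make \"abstractive\" concrete, the following sketch (illustrative only, not part of the tuning pipeline) shows what the pretrained t5-small checkpoint produces out of the box via the Hugging Face `pipeline` API; the generation arguments (`max_length`, `min_length`) are example values. FLAML will fine-tune this same model and search its hyperparameters in the cells below.\n",
    "\n",
    "```python\n",
    "from transformers import pipeline\n",
    "\n",
    "# Load the pretrained t5-small checkpoint as a summarization pipeline.\n",
    "summarizer = pipeline(\"summarization\", model=\"t5-small\")\n",
    "\n",
    "document = (\n",
    "    \"Army explosives experts were called out to deal with a suspect package at the \"\n",
    "    \"offices on the Newtownards Road on Friday night. Roads were sealed off and \"\n",
    "    \"traffic diverted as a controlled explosion was carried out.\"\n",
    ")\n",
    "\n",
    "# The decoder generates the summary token by token (abstractive), rather than\n",
    "# copying sentences verbatim from the document (extractive).\n",
    "print(summarizer(document, max_length=30, min_length=5)[0][\"summary_text\"])\n",
    "```"
   ]
  },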
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "amlQnvcxTjhK",
"outputId": "e9c0c7fc-25af-4f71-f10d-2ad49bbdf0f7"
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "a8a74fbdcfb0446bbd3bed5ff20e019a",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading builder script: 0%| | 0.00/5.76k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "24f2f0cbb85047869a0482cd53e16794",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading readme: 0%| | 0.00/6.24k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Downloading and preparing dataset xsum/default to /root/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71...\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e316310cc7e043c4b90e376c7a75aaf0",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading data files: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e9583654a3fe40dc83b314b98befcdc4",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading data: 0%| | 0.00/255M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "bed7b214c8cf4d6ead7ac1e2efe576f9",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading data: 0%| | 0.00/1.00M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "790331deea9b47919ceb5ad53d5f9c40",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Generating train split: 0%| | 0/204045 [00:00<?, ? examples/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "ae35414d84a844a5bcf936650efb8c34",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Generating validation split: 0%| | 0/11332 [00:00<?, ? examples/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "7089718c04724ccaa86034d0b8b7130e",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Generating test split: 0%| | 0/11334 [00:00<?, ? examples/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Dataset xsum downloaded and prepared to /root/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71. Subsequent calls will reuse this data.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:datasets.builder:Found cached dataset xsum (/root/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71)\n",
"WARNING:datasets.builder:Found cached dataset xsum (/root/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"1000\n",
"400\n"
]
}
],
"source": [
"from datasets import load_dataset\n",
"\n",
"train_dataset = load_dataset(\"xsum\", split=\"train\").to_pandas()[:1000]\n",
"valid_dataset = load_dataset(\"xsum\", split=\"validation\").to_pandas()[:400]\n",
"test_dataset = load_dataset(\"xsum\", split=\"test\").to_pandas()\n",
"\n",
"custom_sent_keys = [\"document\"] # specify the column names of the input sentences\n",
"label_key = \"summary\" # specify the column name of the label \n",
"\n",
"X_train, y_train = train_dataset[custom_sent_keys], train_dataset[label_key]\n",
"X_val, y_val = valid_dataset[custom_sent_keys], valid_dataset[label_key]\n",
"X_test = test_dataset[custom_sent_keys]\n",
"\n",
"print(len(train_dataset))\n",
"print(len(valid_dataset))"
]
},
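  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Next, FLAML fine-tunes t5-small on this data and searches its hyperparameters; the actual settings are defined in the cell below. Purely as an illustration of the shape of such a call (the time budget, log file name, and output directory here are placeholder values, not necessarily the ones used for the logged run), a summarization configuration typically looks like:\n",
    "\n",
    "```python\n",
    "from flaml import AutoML\n",
    "\n",
    "automl = AutoML()\n",
    "automl_settings = {\n",
    "    \"task\": \"summarization\",   # fine-tune a seq2seq transformer\n",
    "    \"metric\": \"rouge1\",        # FLAML minimizes the error 1 - ROUGE-1\n",
    "    \"time_budget\": 3000,       # seconds of wall-clock search time\n",
    "    \"gpu_per_trial\": 1,\n",
    "    \"log_file_name\": \"xsum_summarization.log\",\n",
    "    \"fit_kwargs_by_estimator\": {\n",
    "        \"transformer\": {\"model_path\": \"t5-small\", \"output_dir\": \"data/output/\"}\n",
    "    },\n",
    "}\n",
    "automl.fit(X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings)\n",
    "```"
   ]
  },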
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "aYq8XAtxTjhK",
"outputId": "3fb9a111-ba6f-4d75-d0d8-4d4c76173163"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 15:52:13] {1768} INFO - task = summarization\n",
"[flaml.automl.logger: 04-12 15:52:13] {1775} INFO - Data split method: uniform\n",
"[flaml.automl.logger: 04-12 15:52:13] {1778} INFO - Evaluation method: holdout\n",
"[flaml.automl.logger: 04-12 15:52:13] {1891} INFO - Minimizing error metric: rouge1\n",
"[flaml.automl.logger: 04-12 15:52:13] {2011} INFO - List of ML learners in AutoML Run: ['transformer']\n",
"[flaml.automl.logger: 04-12 15:52:13] {2341} INFO - iteration 0, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/flaml/automl/data.py:297: SettingWithCopyWarning: \n",
"A value is trying to be set on a copy of a slice from a DataFrame.\n",
"Try using .loc[row_indexer,col_indexer] = value instead\n",
"\n",
"See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
" X[str_columns] = X[str_columns].astype(\"string\")\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "a35ff1cffae842c0acc37707e4a541ea",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)lve/main/config.json: 0%| | 0.00/1.21k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "0a316b24169342caaccd5bd9fefc9a3b",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)ve/main/spiece.model: 0%| | 0.00/792k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "50969066597c4ca9ab4bd2dae2c17c0b",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)/main/tokenizer.json: 0%| | 0.00/1.39M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "29a315f149a04f7887e345e6006b101c",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading pytorch_model.bin: 0%| | 0.00/242M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "969f654854874beaacf59257ea6df20e",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading (…)neration_config.json: 0%| | 0.00/147 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Unzipping tokenizers/punkt.zip.\n",
"/usr/local/lib/python3.9/dist-packages/flaml/automl/ml.py:209: FutureWarning: load_metric is deprecated and will be removed in the next major version of datasets. Use 'evaluate.load' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate\n",
" metric = datasets.load_metric(datasets_metric_name)\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1ac587d53608479a9789a65ffad25ef8",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading builder script: 0%| | 0.00/2.17k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 4.139829158782959, 'eval_automl_metric': 0.861043247562474, 'eval_runtime': 88.7601, 'eval_samples_per_second': 4.507, 'eval_steps_per_second': 4.507, 'epoch': 0.12}\n",
"{'train_runtime': 93.8448, 'train_samples_per_second': 1.066, 'train_steps_per_second': 0.043, 'train_loss': 4.210696220397949, 'epoch': 0.12}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 15:55:25] {2479} INFO - Estimated sufficient time budget=1915790s. Estimated necessary time budget=1916s.\n",
"[flaml.automl.logger: 04-12 15:55:25] {2526} INFO - at 191.6s,\testimator transformer's best error=0.8610,\tbest estimator transformer's best error=0.8610\n",
"[flaml.automl.logger: 04-12 15:55:25] {2341} INFO - iteration 1, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 4.153659820556641, 'eval_automl_metric': 0.8610390521300715, 'eval_runtime': 83.9022, 'eval_samples_per_second': 4.767, 'eval_steps_per_second': 4.767, 'epoch': 0.12}\n",
"{'train_runtime': 88.4476, 'train_samples_per_second': 1.131, 'train_steps_per_second': 0.023, 'train_loss': 4.123888969421387, 'epoch': 0.12}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 15:58:24] {2526} INFO - at 370.6s,\testimator transformer's best error=0.8610,\tbest estimator transformer's best error=0.8610\n",
"[flaml.automl.logger: 04-12 15:58:24] {2341} INFO - iteration 2, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 4.139829158782959, 'eval_automl_metric': 0.861043247562474, 'eval_runtime': 81.3582, 'eval_samples_per_second': 4.917, 'eval_steps_per_second': 4.917, 'epoch': 0.12}\n",
"{'train_runtime': 85.9738, 'train_samples_per_second': 1.163, 'train_steps_per_second': 0.047, 'train_loss': 4.210696220397949, 'epoch': 0.12}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 16:01:21] {2526} INFO - at 547.4s,\testimator transformer's best error=0.8610,\tbest estimator transformer's best error=0.8610\n",
"[flaml.automl.logger: 04-12 16:01:21] {2341} INFO - iteration 3, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 4.143656253814697, 'eval_automl_metric': 0.8608808953259102, 'eval_runtime': 80.6625, 'eval_samples_per_second': 4.959, 'eval_steps_per_second': 4.959, 'epoch': 0.12}\n",
"{'train_runtime': 85.1755, 'train_samples_per_second': 1.174, 'train_steps_per_second': 0.023, 'train_loss': 4.254851341247559, 'epoch': 0.12}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 16:04:17] {2526} INFO - at 723.4s,\testimator transformer's best error=0.8609,\tbest estimator transformer's best error=0.8609\n",
"[flaml.automl.logger: 04-12 16:04:17] {2341} INFO - iteration 4, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 4.041061878204346, 'eval_automl_metric': 0.8605258085686105, 'eval_runtime': 81.8821, 'eval_samples_per_second': 4.885, 'eval_steps_per_second': 4.885, 'epoch': 1.0}\n",
"{'train_runtime': 100.4684, 'train_samples_per_second': 9.953, 'train_steps_per_second': 0.159, 'train_loss': 4.15610408782959, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 16:07:29] {2526} INFO - at 915.9s,\testimator transformer's best error=0.8605,\tbest estimator transformer's best error=0.8605\n",
"[flaml.automl.logger: 04-12 16:07:29] {2341} INFO - iteration 5, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 3.9432952404022217, 'eval_automl_metric': 0.8609624645900873, 'eval_runtime': 80.9141, 'eval_samples_per_second': 4.944, 'eval_steps_per_second': 4.944, 'epoch': 1.0}\n",
"{'train_runtime': 99.1635, 'train_samples_per_second': 10.084, 'train_steps_per_second': 0.161, 'train_loss': 4.104092121124268, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 16:10:40] {2526} INFO - at 1106.4s,\testimator transformer's best error=0.8605,\tbest estimator transformer's best error=0.8605\n",
"[flaml.automl.logger: 04-12 16:10:40] {2341} INFO - iteration 6, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 4.097936630249023, 'eval_automl_metric': 0.8612426478909745, 'eval_runtime': 80.6458, 'eval_samples_per_second': 4.96, 'eval_steps_per_second': 4.96, 'epoch': 1.0}\n",
"{'train_runtime': 99.0009, 'train_samples_per_second': 10.101, 'train_steps_per_second': 0.162, 'train_loss': 4.184399127960205, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 16:13:50] {2526} INFO - at 1296.4s,\testimator transformer's best error=0.8605,\tbest estimator transformer's best error=0.8605\n",
"[flaml.automl.logger: 04-12 16:13:50] {2341} INFO - iteration 7, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 3.8940911293029785, 'eval_automl_metric': 0.8598968257660171, 'eval_runtime': 80.9135, 'eval_samples_per_second': 4.944, 'eval_steps_per_second': 4.944, 'epoch': 1.0}\n",
"{'train_runtime': 99.2612, 'train_samples_per_second': 10.074, 'train_steps_per_second': 0.161, 'train_loss': 4.087018013000488, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 16:17:00] {2526} INFO - at 1486.5s,\testimator transformer's best error=0.8599,\tbest estimator transformer's best error=0.8599\n",
"[flaml.automl.logger: 04-12 16:17:00] {2341} INFO - iteration 8, current learner transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'eval_loss': 4.041061878204346, 'eval_automl_metric': 0.8605258085686105, 'eval_runtime': 81.0644, 'eval_samples_per_second': 4.934, 'eval_steps_per_second': 4.934, 'epoch': 1.0}\n",
"{'train_runtime': 98.6821, 'train_samples_per_second': 10.134, 'train_steps_per_second': 0.162, 'train_loss': 4.15610408782959, 'epoch': 1.0}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.9/dist-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-small automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py:3586: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
" warnings.warn(\n",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[flaml.automl.logger: 04-12 16:20:09] {2526} INFO - at 1675.4s,\testimator transformer's best error=0.8599,\tbest estimator transformer's best error=0.8599\n",
"[flaml.automl.logger: 04-12 16:20:09] {2642} INFO - selected model: None\n",
"[flaml.automl.logger: 04-12 16:20:09] {2041} INFO - fit succeeded\n",
"[flaml.automl.logger: 04-12 16:20:09] {2042} INFO - Time taken to find the best model: 1486.497179031372\n",
"[flaml.automl.logger: 04-12 16:20:09] {2054} WARNING - Time taken to find the best model is 83% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n"
]
}
],
"source": [
"''' import AutoML class from flaml package '''\n",
"from flaml import AutoML\n",
"automl = AutoML()\n",
"\n",
"import ray\n",
"\n",
"\n",
"automl_settings = {\n",
" \"time_budget\": 1800, # setting the time budget\n",
" \"task\": \"summarization\", # setting the task as summarization\n",
" \"fit_kwargs_by_estimator\": { # if model_path is not set, the default model is t5-small: https://huggingface.co/t5-small\n",
" \"transformer\": {\n",
" \"output_dir\": \"data/output/\", # setting the output directory\n",
" \"model_path\": \"t5-small\",\n",
" \"pad_to_max_length\": True,\n",
" }\n",
" },\n",
" \"gpu_per_trial\": 1, # set to 0 if no GPU is available\n",
" \"log_file_name\": \"seqclass.log\", # set the file to save the log for HPO\n",
" \"log_type\": \"all\", # the log type for trials: \"all\" if logging all the trials, \"better\" if only keeping the better trials\n",
" \"use_ray\": False, # set whether to use Ray\n",
" \"metric\": \"rouge1\",\n",
" \"n_concurrent_trials\": 1, \n",
" \"fp16\": False\n",
"}\n",
"\n",
"from flaml import tune\n",
"custom_hp = {\n",
" \"transformer\": {\n",
" \"num_train_epochs\": {\n",
" \"domain\": tune.choice([0.1, 1, 2, 3, 4, 5]),\n",
" \"init_value\": 0.1, \n",
" \"low_cost_init_value\": 0.1,\n",
" },\n",
" }\n",
"}\n",
"\n",
"\n",
"'''The main flaml automl API'''\n",
"automl.fit(X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, custom_hp=custom_hp, **automl_settings)"
]
},
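{
"cell_type": "markdown",
"metadata": {},
"source": [
"After `fit` returns, the best hyperparameter configuration and the fitted transformer can be retrieved from the `automl` object. The cell below is a minimal sketch using FLAML's standard `AutoML` attributes (`best_config`, `best_loss`, `predict`); `X_test` is a hypothetical held-out dataframe in the same format as `X_train`.\n",
"\n",
"```python\n",
"print(\"Best hyperparameter config:\", automl.best_config)\n",
"print(\"Best validation error:\", automl.best_loss)\n",
"\n",
"# Generate summaries for unseen documents; X_test is assumed to share X_train's schema.\n",
"predictions = automl.predict(X_test)\n",
"print(predictions[:3])\n",
"```"
]
},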
{
"cell_type": "markdown",
"metadata": {
"id": "xRWfyDdSJZRT"
},
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xPy67MBFTjhK",
"outputId": "265348a5-20a2-4a49-a73e-0969688abb36"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'Current Learner': 'transformer', 'Current Sample': 1000, 'Current Hyper-parameters': {'learning_rate': 9.999999999999999e-06, 'num_train_epochs': 0.1, 'per_device_train_batch_size': 32, 'seed': 20, 'global_max_steps': 4}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 9.999999999999999e-06, 'num_train_epochs': 0.1, 'per_device_train_batch_size': 32, 'seed': 20, 'global_max_steps': 4}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 1000, 'Current Hyper-parameters': {'learning_rate': 9.711865003865157e-06, 'num_train_epochs': 0.1, 'per_device_train_batch_size': 64, 'seed': 14, 'global_max_steps': 2}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 9.711865003865157e-06, 'num_train_epochs': 0.1, 'per_device_train_batch_size': 64, 'seed': 14, 'global_max_steps': 2}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 1000, 'Current Hyper-parameters': {'learning_rate': 9.999999999999997e-06, 'num_train_epochs': 0.1, 'per_device_train_batch_size': 32, 'seed': 20, 'global_max_steps': 4}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 9.711865003865157e-06, 'num_train_epochs': 0.1, 'per_device_train_batch_size': 64, 'seed': 14, 'global_max_steps': 2}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 1000, 'Current Hyper-parameters': {'learning_rate': 1.3959402525606234e-05, 'num_train_epochs': 0.1, 'per_device_train_batch_size': 64, 'seed': 13, 'global_max_steps': 2}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 1.3959402525606234e-05, 'num_train_epochs': 0.1, 'per_device_train_batch_size': 64, 'seed': 13, 'global_max_steps': 2}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 1000, 'Current Hyper-parameters': {'learning_rate': 9.711865003865157e-06, 'num_train_epochs': 1, 'per_device_train_batch_size': 64, 'seed': 14, 'global_max_steps': 16}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 9.711865003865157e-06, 'num_train_epochs': 1, 'per_device_train_batch_size': 64, 'seed': 14, 'global_max_steps': 16}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 1000, 'Current Hyper-parameters': {'learning_rate': 1.6876495255790516e-05, 'num_train_epochs': 1, 'per_device_train_batch_size': 64, 'seed': 20, 'global_max_steps': 16}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 9.711865003865157e-06, 'num_train_epochs': 1, 'per_device_train_batch_size': 64, 'seed': 14, 'global_max_steps': 16}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 1000, 'Current Hyper-parameters': {'learning_rate': 5.588857190057775e-06, 'num_train_epochs': 1, 'per_device_train_batch_size': 64, 'seed': 8, 'global_max_steps': 16}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 9.711865003865157e-06, 'num_train_epochs': 1, 'per_device_train_batch_size': 64, 'seed': 14, 'global_max_steps': 16}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 1000, 'Current Hyper-parameters': {'learning_rate': 2.0896439261730886e-05, 'num_train_epochs': 1, 'per_device_train_batch_size': 64, 'seed': 12, 'global_max_steps': 16}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 2.0896439261730886e-05, 'num_train_epochs': 1, 'per_device_train_batch_size': 64, 'seed': 12, 'global_max_steps': 16}}\n",
"{'Current Learner': 'transformer', 'Current Sample': 1000, 'Current Hyper-parameters': {'learning_rate': 9.711865003865154e-06, 'num_train_epochs': 1, 'per_device_train_batch_size': 64, 'seed': 14, 'global_max_steps': 16}, 'Best Learner': 'transformer', 'Best Hyper-parameters': {'learning_rate': 2.0896439261730886e-05, 'num_train_epochs': 1, 'per_device_train_batch_size': 64, 'seed': 12, 'global_max_steps': 16}}\n",
"9\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAlEAAAHHCAYAAACfqw0dAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABSyklEQVR4nO3de1yUVeI/8M8MyHBzBpSbKBeR9YKKIgiSGWokmuElN81VQXPbdLVUypSvG+TWLmyxid3UdNcsM81NLd0EDaS8sKIgJpBapmnIRRcZEOQ2c35/+ONZJ0DhERgYP+/Xa14v5zzneZ5zBpj5eJ7znFEIIQSIiIiIqEWUxm4AERERUWfEEEVEREQkA0MUERERkQwMUUREREQyMEQRERERycAQRURERCQDQxQRERGRDAxRRERERDIwRBERERHJwBBFRA80T09PzJ0719jNIKJOiCGKiO7bhx9+CIVCgZMnTxq7KZ1OVVUV1qxZg6CgIGg0GlhaWqJv375YvHgxzp8/b+zmEdFdmBu7AURExnTu3Dkolcb5/+T169cxfvx4ZGZm4oknnsDvfvc72Nra4ty5c9i+fTs++OAD1NTUGKVtRHRvDFFEZDLq6uqg1+thYWHR7H1UKlUbtuju5s6di1OnTuFf//oXpk2bZrDttddew6pVq1rlPHJeFyK6N17OI6J2k5+fj2eeeQbOzs5QqVQYOHAg/vnPfxrUqampQUxMDPz9/aHRaGBjY4NRo0bh0KFDBvUuXboEhUKBhIQEJCYmok+fPlCpVMjLy8Orr74KhUKBH3/8EXPnzoWdnR00Gg3mzZuHyspKg+P8ek5U/aXJo0ePIioqCo6OjrCxscHUqVNx7do1g331ej1effVVuLq6wtraGmPGjEFeXl6z5lkdP34c//73vzF//vwGAQq4He4SEhKk56NHj8bo0aMb1Js7dy48PT3v+bqcOnUK5ubmWL16dYNjnDt3DgqFAu+++65UVlpaiqVLl8LNzQ0qlQre3t7429/+Br1ef9d+ET1IOBJFRO2iqKgII0aMgEKhwOLFi+Ho6Ij9+/dj/vz5KCsrw9KlSwEAZWVl2LRpE2bOnIlnn30W5eXl+Mc//oGwsDBkZGRg6NChBsfdvHkzqqqq8Ic//AEqlQrdunWTtk2fPh29e/dGXFwcsrKysGnTJjg5OeFvf/vbPdv7/PPPw97eHrGxsbh06RISExOxePFi7NixQ6oTHR2NN954A+Hh4QgLC8Pp06cRFhaGqqqqex7/yy+/BADMmTOnGa9ey/36denRowdCQkLw2WefITY21qDujh07YGZmhqeeegoAUFlZiZCQEOTn5+O5556Du7s7jh07hujoaBQUFCAxMbFN2kzU6Qgiovu0efNmAUCcOHGiyTrz588XPXr0ENevXzcof/rpp4VGoxGVlZVCCCHq6upEdXW1QZ0bN24IZ2dn8cwzz0hlFy9eFACEWq0WxcXFBvVjY2MFAIP6QggxdepU0b17d4MyDw8PERkZ2aAvoaGhQq/XS+XLli0TZmZmorS0VAghRGFhoTA3NxdTpkwxON6rr74qABgcszFTp04VAMSNGzfuWq9eSEiICAkJaVAeGRkpPDw8pOd3e102bNggAIgzZ84YlPv4+IixY8dKz1977TVhY2Mjzp8/b1Bv5cqVwszMTFy+fLlZbSYydbycR0RtTgiBzz//HOHh4RBC4Pr169IjLCwMWq0WWVlZAAAzMzNp7o5er0dJSQnq6uoQEBAg1bnTtGnT4Ojo2Oh5FyxYYPB81KhR+O9//4uysrJ7tvkPf/gDFAqFwb46nQ4///wzACAlJQV1dXX44x//aLDf888/f89jA5Da0LVr12bVb6nGXpcnn3wS5ubmBqNpOTk5yMvLw4wZM6SynTt3YtSoUbC3tzf4WYWGhkKn0+Hbb79tkzYTdTa8nEdEbe7atWsoLS3FBx98gA8++KDROsXFxdK/t2zZgr///e84e/YsamtrpfLevXs32K+xsnru7u4Gz+3t7QEAN27cgFqtvmub77YvAClMeXt7G9Tr1q2bVPdu6s9fXl4OOzu7e9ZvqcZeFwcHBzz66KP47LPP8NprrwG4fSnP3NwcTz75pFTvhx9+wHfffddkOL3zZ0X0IGOIIqI2Vz8Zefbs2YiMjGy0jq+vLwBg69atmDt3LqZMmYLly5fDyckJZmZmiIuLw4ULFxrsZ2Vl1eR5zczMGi0XQtyzzfezb3P0798fAHDmzBmMGjXqnvUVCkWj59bpdI3Wb+p1efrppzFv3jxkZ2dj6NCh+Oyzz/Doo4/CwcFBqqPX6/HYY4/h5ZdfbvQYffv2vWd7iR4EDFFE1OYcHR3RtWtX6HQ6hIaG3rXuv/71L3h5eWHXrl0Gl9N+PRna2Dw8PAAAP/74o8Goz3//+19ptOpuwsPDERcXh61btzYrRNnb2+Onn35qUF4/ItZcU6ZMwXPPPSdd0jt//jyio6MN6vTp0wc3b96858+K6EHHOVFE1ObMzMwwbdo0fP7558jJyWmw/c6lA+pHgO4cdTl+/DjS09PbvqEt8Oijj8Lc3Bzr1q0zKL9zmYC7CQ4Oxvjx47Fp0ybs2bOnwfaamhq89NJL0vM+ffrg7NmzBq/V6dOncfTo0Ra1287ODmFhYfjss8+wfft2WFhYYMqUKQZ1pk+fjvT0dCQnJzfYv7S0FHV1dS06J5Gp4kgUEbWaf/7zn0hKSmpQvmTJEsTHx+PQoUMICgrCs88+Cx8fH5SUlCArKwtff/01SkpKAABPPPEEdu3ahalTp2LixIm4ePEi1q9fDx8fH9y8ebO9u9QkZ2dnLFmyBH//+98xadIkjB8/HqdPn8b+/fvh4OBgMIrWlI8++gjjxo3Dk08+ifDwcDz66KOwsbHBDz/8gO3bt6OgoEBaK+qZZ57BW2+9hbCwMMyfPx/FxcVYv349Bg4c2KyJ8neaMWMGZs+ejffffx9hYWEN5mQtX74cX375JZ544gnMnTsX/v7+qKiowJkzZ/Cvf/0Lly5dMrj8R/SgYogiolbz61GZenPnzkWvXr2QkZGBP//5z9i1axfef/99dO/eHQMHDjRYt2nu3LkoLCzEhg0bkJycDB8fH2zduhU7d+5EWlpaO/Wkef72t7/B2toaGzduxNdff43g4GAcOHAADz/8MCwtLe+5v6OjI44dO4b3338fO3bswKpVq1BTUwMPDw9MmjQJS5YskeoOGDAAH330EWJiYhAVFQUfHx98/PHH2LZtW4tfl0mTJsHKygrl5eUGd+XVs7a2xjfffIO//vWv2LlzJz766COo1Wr07dsXq1evhkajadH5iEyVQrTWLEkiIkJpaSns7e3x+uuvt9rXthBRx8Q5UUREMt26datBWf1q3o19RQsRmRZeziMikmnHjh348MMP8fjjj8PW1hZHjhzBp59+inHjxmHkyJHGbh4RtTGGKCIimXx9fWFubo433ngDZWVl0mTz119/3dhNI6J2wDlRRERERDJwThQRERGRDAxRRERERDJwTlQb0uv1uHr1Krp27
dqshfeIiIjI+IQQKC8vh6urK5TKpsebGKLa0NWrV+Hm5mbsZhAREZEMV65cQa9evZrczhDVhrp27Qrg9g9BrVYbuTVERETUHGVlZXBzc5M+x5vCENWG6i/hqdVqhigiIqJO5l5TcTixnIiIiEgGhigiIiIiGRiiiIiIiGRgiCIiIiKSgSGKiIiISAaGKCIiIiIZGKKIiIiIZGCIIiIiIpKBIYqIiIhIBq5YTkREZEJ0eoGMiyUoLq+CU1dLBPbuBjPl3VfeJnkYooiIiExEUk4BVu/NQ4G2SirrobFEbLgPxg/qYcSWmSZeziMiIjIBSTkFWLg1yyBAAUChtgoLt2YhKafASC0zXRyJIiIi6uR0eoHVe/MgGtlWXxb7ZS5GejuY3KU9qy5m9/yi4LbCEEVERNTJZVwsaTAC9WtFZdUY/OqBdmpR+8n7cxisLYwTZ3g5j4iIqJMrLr97gKK2wZEoIiKiTs6pq2Wz6m2eOxxBXt3auDXty6qLmdHOzRBFRETUyQX27oYeGksUaqsanRelAOCiscQjfR1Nbk6UMfFyHhERUSdnplQgNtyn0W31kSk23IcBqpUxRBEREZmA8YN6YN3sYXBWqwzKXTSWWDd7GNeJagO8nEdERGQixg/qgZHeDtJdeJvnDuclvDbEkSgiIiITcmdgCvLiV760JYYoIiIiIhkYooiIiIhkYIgiIiIikoEhioiIiEgGhigiIiIiGRiiiIiIiGRgiCIiIiKSgSGKiIiISAaGKCIiIiIZGKKIiIiIZGCIIiIiIpKBIYqIiIhIBoYoIiIiIhkYooiIiIhk6BAh6r333oOnpycsLS0RFBSEjIyMJuvm5uZi2rRp8PT0hEKhQGJi4l2PHR8fD4VCgaVLlxqUV1VVYdGiRejevTtsbW0xbdo0FBUVGdS5fPkyJk6cCGtrazg5OWH58uWoq6uT200iIiIyIUYPUTt27EBUVBRiY2ORlZWFIUOGICwsDMXFxY3Wr6yshJeXF+Lj4+Hi4nLXY584cQIbNmyAr69vg23Lli3D3r17sXPnTnzzzTe4evUqnnzySWm7TqfDxIkTUVNTg2PHjmHLli348MMPERMTc38dJiIiItMgjCwwMFAsWrRIeq7T6YSrq6uIi4u7574eHh5izZo1jW4rLy8Xv/nNb8TBgwdFSEiIWLJkibSttLRUdOnSRezcuVMq+/777wUAkZ6eLoQQ4quvvhJKpVIUFhZKddatWyfUarWorq5uVt+0Wq0AILRabbPqExER3a+K6lrhsWKf8FixT1RU1xq7OZ1Scz+/jToSVVNTg8zMTISGhkplSqUSoaGhSE9Pv69jL1q0CBMnTjQ4dr3MzEzU1tYabOvfvz/c3d2l86anp2Pw4MFwdnaW6oSFhaGsrAy5ubn31TYiIiLq/MyNefLr169Dp9MZBBUAcHZ2xtmzZ2Ufd/v27cjKysKJEyca3V5YWAgLCwvY2dk1OG9hYaFUp7F21W9rTHV1Naqrq6XnZWVlcrtAREREHZzR50S1titXrmDJkiX45JNPYGlp2a7njouLg0ajkR5ubm7ten4iIiJqP0YNUQ4ODjAzM2twV1xRUdE9J403JTMzE8XFxRg2bBjMzc1hbm6Ob775Bm+//TbMzc2h0+ng4uKCmpoalJaWNnleFxeXRttVv60x0dHR0Gq10uPKlSuy+kBEREQdn1FDlIWFBfz9/ZGSkiKV6fV6pKSkIDg4WNYxH330UZw5cwbZ2dnSIyAgALNmzUJ2djbMzMzg7++PLl26GJz33LlzuHz5snTe4OBgnDlzxuAuwYMHD0KtVsPHx6fRc6tUKqjVaoMHERERmSajzokCgKioKERGRiIgIACBgYFITExERUUF5s2bBwCIiIhAz549ERcXB+D2ZPS8vDzp3/n5+cjOzoatrS28vb3RtWtXDBo0yOAcNjY26N69u1Su0Wgwf/58REVFoVu3blCr1Xj++ecRHByMESNGAADGjRsHHx8fzJkzB2+88QYKCwvxpz/9CYsWLYJKpWqvl4eIiIg6KKOHqBkzZuDatWuIiYlBYWEhhg4diqSkJGkS9+XLl6FU/m/A7OrVq/Dz85OeJyQkICEhASEhIUhLS2v2edesWQOlUolp06ahuroaYWFheP/996XtZmZm2LdvHxYuXIjg4GDY2NggMjISf/7zn++/00RERNTpKYQQwtiNMFVlZWXQaDTQarW8tEdERO2isqYOPjHJAIC8P4fB2sLo4yWdTnM/v03u7jwiIiKi9sAQRURERCQDQxQRERGRDAxRRERERDIwRBERERHJwBBFREREJANDFBEREZEMDFFEREREMjBEEREREcnAEEVEREQkA0MUERERkQwMUUREREQyMEQRERERycAQRURERCQDQxQRERGRDAxRRERERDIwRBERERHJwBBFREREJANDFBEREZEMDFFEREREMjBEEREREcnAEEVEREQkA0MUERERkQwMUUREREQyMEQRERERycAQRURERCQDQxQRERGRDAxRRERERDIwRBERERHJwBBFREREJANDFBEREZEMDFFEREREMjBEEREREcnAEEVEREQkA0MUERERkQwMUUREREQyMEQRERERycAQRURERCQDQxQRERGRDEYPUe+99x48PT1haWmJoKAgZGRkNFk3NzcX06ZNg6enJxQKBRITExvUWbduHXx9faFWq6FWqxEcHIz9+/cb1Llw4QKmTp0KR0dHqNVqTJ8+HUVFRQZ1zp8/j8mTJ8PBwQFqtRoPP/wwDh061Cp9JiIios7PqCFqx44diIqKQmxsLLKysjBkyBCEhYWhuLi40fqVlZXw8vJCfHw8XFxcGq3Tq1cvxMfHIzMzEydPnsTYsWMxefJk5ObmAgAqKiowbtw4KBQKpKam4ujRo6ipqUF4eDj0er10nCeeeAJ1dXVITU1FZmYmhgwZgieeeAKFhYWt/0IQERFR5yOMKDAwUCxatEh6rtPphKurq4iLi7vnvh4eHmLNmjXNOo+9vb3YtGmTEEKI5ORkoVQqhVarlbaXlpYKhUIhDh48KIQQ4tq1awKA+Pbbb6U6ZWVlAoBUpzm0Wq0AYHAuIiKitlRRXSs8VuwTHiv2iYrqWmM3p1Nq7ue30UaiampqkJmZidDQUKlMqVQiNDQU6enprXIOnU6H7du3o6KiAsHBwQCA6upqKBQKqFQqqZ6lpSWUSiWOHDkCAOjevTv69euHjz76CBUVFairq8OGDRvg5OQEf3//VmkbERERdW7mxjrx9evXodPp4OzsbFDu7OyMs2fP3texz5w5g+DgYFRVVcHW1ha7d++Gj48PAGDEiBGwsbHBihUr8Ne//hVCCKxcuRI6nQ4FBQUAAIVCga+//hpTpkxB165doVQq4eTkhKSkJNjb2zd53urqalRXV0vPy8rK7qsfRERE1HEZfWJ5W+jXrx+ys7Nx/PhxLFy4EJGRkcjLywMAODo6YufOndi7dy9sbW2h0WhQWlqKYcOGQam8/XIIIbBo0SI4OTnh8OHDyMjIwJQpUxAeHi4FrcbExcVBo9FIDzc3
t3bpLxEREbU/o41EOTg4wMzMrMFdcUVFRU1OGm8uCwsLeHt7AwD8/f1x4sQJrF27Fhs2bAAAjBs3DhcuXMD169dhbm4OOzs7uLi4wMvLCwCQmpqKffv24caNG1Cr1QCA999/HwcPHsSWLVuwcuXKRs8bHR2NqKgo6XlZWRmDFBERkYky2kiUhYUF/P39kZKSIpXp9XqkpKRI85dai16vN7jMVs/BwQF2dnZITU1FcXExJk2aBOD2XYAApJGpekql0uAOvl9TqVTS0gr1DyIiIjJNRhuJAoCoqChERkYiICAAgYGBSExMREVFBebNmwcAiIiIQM+ePREXFwfg9mT0+styNTU1yM/PR3Z2NmxtbaWRp+joaEyYMAHu7u4oLy/Htm3bkJaWhuTkZOm8mzdvxoABA+Do6Ij09HQsWbIEy5YtQ79+/QAAwcHBsLe3R2RkJGJiYmBlZYWNGzfi4sWLmDhxYnu+RERERNRBGTVEzZgxA9euXUNMTAwKCwsxdOhQJCUlSZPNL1++bDAadPXqVfj5+UnPExISkJCQgJCQEKSlpQEAiouLERERgYKCAmg0Gvj6+iI5ORmPPfaYtN+5c+cQHR2NkpISeHp6YtWqVVi2bJm03cHBAUlJSVi1ahXGjh2L2tpaDBw4EF988QWGDBnSxq8KERERdQYKIYQwdiNMVVlZGTQaDbRaLS/tERFRu6isqYNPzO2rL3l/DoO1hVHHSzql5n5+m+TdeURERERtjSGKiIiISAaGKCIiIiIZGKKIiIiIZGCIIiIiIpKBIYqIiIhIBoYoIiIiIhkYooiIiIhkYIgiIiIikoEhioiIiEgGhigiIiIiGRiiiIiIiGRgiCIiIiKSgSGKiIiISAaGKCIiIiIZGKKIiIiIZGCIIiIiIpKBIYqIiIhIBoYoIiIiIhkYooiIiIhkYIgiIiIikoEhioiIiEgGhigiIiIiGRiiiIiIiGRgiCIiIiKSgSGKiIiISAaGKCIiIiIZGKKIiIiIZGCIIiIiIpKBIYqIiIhIBoYoIiIiIhkYooiIiIhkYIgiIiIikoEhioiIiEgGhigiIiIiGRiiiIiIiGRgiCIiIiKSgSGKiIiISAaGKCIiIiIZjB6i3nvvPXh6esLS0hJBQUHIyMhosm5ubi6mTZsGT09PKBQKJCYmNqizbt06+Pr6Qq1WQ61WIzg4GPv37zeoc+HCBUydOhWOjo5Qq9WYPn06ioqKGhzr3//+N4KCgmBlZQV7e3tMmTLlfrtLREREJsKoIWrHjh2IiopCbGwssrKyMGTIEISFhaG4uLjR+pWVlfDy8kJ8fDxcXFwardOrVy/Ex8cjMzMTJ0+exNixYzF58mTk5uYCACoqKjBu3DgoFAqkpqbi6NGjqKmpQXh4OPR6vXSczz//HHPmzMG8efNw+vRpHD16FL/73e9a/0UgIiKiTkkhhBDGOnlQUBCGDx+Od999FwCg1+vh5uaG559/HitXrrzrvp6enli6dCmWLl16z/N069YNb775JubPn48DBw5gwoQJuHHjBtRqNQBAq9XC3t4eBw4cQGhoKOrq6uDp6YnVq1dj/vz5svtXVlYGjUYDrVYrnYuIiKgtVdbUwScmGQCQ9+cwWFuYG7lFnU9zP7+NNhJVU1ODzMxMhIaG/q8xSiVCQ0ORnp7eKufQ6XTYvn07KioqEBwcDACorq6GQqGASqWS6llaWkKpVOLIkSMAgKysLOTn50OpVMLPzw89evTAhAkTkJOT0yrtIiIios7PaCHq+vXr0Ol0cHZ2Nih3dnZGYWHhfR37zJkzsLW1hUqlwoIFC7B79274+PgAAEaMGAEbGxusWLEClZWVqKiowEsvvQSdToeCggIAwE8//QQAePXVV/GnP/0J+/btg729PUaPHo2SkpImz1tdXY2ysjKDBxEREZkmo08sbwv9+vVDdnY2jh8/joULFyIyMhJ5eXkAAEdHR+zcuRN79+6Fra0tNBoNSktLMWzYMCiVt1+O+rlRq1atwrRp0+Dv74/NmzdDoVBg586dTZ43Li4OGo1Geri5ubV9Z4mIiMgojHah1MHBAWZmZg3uiisqKmpy0nhzWVhYwNvbGwDg7++PEydOYO3atdiwYQMAYNy4cbhw4QKuX78Oc3Nz2NnZwcXFBV5eXgCAHj16AIA0egUAKpUKXl5euHz5cpPnjY6ORlRUlPS8rKyMQYqIiMhEGW0kysLCAv7+/khJSZHK9Ho9UlJSpPlLrUWv16O6urpBuYODA+zs7JCamori4mJMmjQJwO3gpVKpcO7cOalubW0tLl26BA8PjybPo1KppKUV6h9ERERkmow6ZT8qKgqRkZEICAhAYGAgEhMTUVFRgXnz5gEAIiIi0LNnT8TFxQG4PRm9/rJcTU0N8vPzkZ2dDVtbW2nkKTo6GhMmTIC7uzvKy8uxbds2pKWlITk5WTrv5s2bMWDAADg6OiI9PR1LlizBsmXL0K9fPwCAWq3GggULEBsbCzc3N3h4eODNN98EADz11FPt9voQERFRx2XUEDVjxgxcu3YNMTExKCwsxNChQ5GUlCRNNr98+bI0TwkArl69Cj8/P+l5QkICEhISEBISgrS0NABAcXExIiIiUFBQAI1GA19fXyQnJ+Oxxx6T9jt37hyio6NRUlICT09PrFq1CsuWLTNo25tvvglzc3PMmTMHt27dQlBQEFJTU2Fvb9+GrwgRERF1FkZdJ8rUcZ0oIiJqb1wn6v51+HWiiIiIiDozhigiIiIiGRiiiIiIiGRgiCIiIiKSgSGKiIiISAaGKCIiIiIZGKKIiIiIZGCIIiIiIpKBIYqIiIhIBoYoIiIiIhlaLURduXIFzzzzTGsdjoiIiKhDa7UQVVJSgi1btrTW4YiIiIg6tGZ/K+GXX3551+0//fTTfTeGiIiIqLNodoiaMmUKFAoFhBBN1lEoFK3SKCIiIqKOrtmX83r06IFdu3ZBr9c3+sjKymrLdhIRERF1KM0OUf7+/sjMzGxy+71GqYiIiIhMSbMv5y1fvhwVFRVNbvf29sahQ4dapVFEREREHV2zQ9SoUaPuut3GxgYhISH33SAiIiKizoCLbRIRERHJwBBFREREJEOzL+cRET1odHqBjIslKC6vglNXSwT27gYzJZdyITK2jvK3yRBFRNSIpJwCrN6bhwJtlVTWQ2OJ2HAfjB/Uw4gtI3qwdaS/TV7OIyL6laScAizcmmXwJg0AhdoqLNyahaScAiO1jOjB1tH+NmWNRH388cdYv349Ll68iPT0dHh4eCAxMRG9e/fG5MmTW7uNRETtRqcXWL03D42teldfFvtlLkZ6O/DSHnVIlTU6YzehTdzrb1MBYPXePDzm49Juf5stDlHr1q1DTEwMli5dir/85S/Q6W7/sOzs7JCYmMgQRUSdWsbFkgb/y/21orJqDH71QDu1iIiAe/9tCgAF2ipkXCxBcJ/u7dKmFl/Oe+edd7Bx40asWrUKZmZmUnlAQADOnDnTqo0jImpvxeV3D1BEnUWAhz2supjdu2In0dy/zfb8G27xSNTFixfh5+fXoFylUt1
1RXMios7Aqatls+ptnjscQV7d2rg1RPJZdTGDQmE6l5yb+7fZ3HqtocUhqnfv3sjOzoaHh4dBeVJSEgYMGNBqDSMiMobA3t3QQ2OJQm1Vo3MvFABcNJZ4pK8j50QRtaPm/m0G9m6//9y0+HJeVFQUFi1ahB07dkAIgYyMDPzlL39BdHQ0Xn755bZoIxFRuzFTKhAb7tPotvrIFBvuwwBF1M7u/Nv89V+fsf42FUKIxgLdXX3yySd49dVXceHCBQCAq6srVq9ejfnz57d6AzuzsrIyaDQaaLVaqNVqYzeHiFogKacAsV/moqisWirjOlFExtce60Q19/NbVoiqV1lZiZs3b8LJyUnuIUwaQxRR51ZeVSvdhbd57nBewiPqINp6xfLmfn7f14rl1tbWsLa2vp9DEBF1WHe+KQd58StfiDoKM6Wi3ZYxuJsWhyg/P79GZ/srFApYWlrC29sbc+fOxZgxY1qlgUREREQdUYsnlo8fPx4//fQTbGxsMGbMGIwZMwa2tra4cOEChg8fjoKCAoSGhuKLL75oi/YSERERdQgtHom6fv06XnzxRbzyyisG5a+//jp+/vlnHDhwALGxsXjttde4ejkRERGZrBaPRH322WeYOXNmg/Knn34an332GQBg5syZOHfu3P23joiIiKiDanGIsrS0xLFjxxqUHzt2DJaWt1cJ1ev10r+JiIiITFGLL+c9//zzWLBgATIzMzF8+HAAwIkTJ7Bp0yb83//9HwAgOTkZQ4cObdWGEhEREXUkshfbfPfdd6VLdv369cPzzz+P3/3udwCAW7duSXfrPci4ThRR51ZZUwefmGQAQN6fw2BtcV+rwhBRJ9Hcz+8WX84DgFmzZiE9PR0lJSUoKSlBenq6FKAAwMrKqkUB6r333oOnpycsLS0RFBSEjIyMJuvm5uZi2rRp8PT0hEKhQGJiYoM669atg6+vL9RqNdRqNYKDg7F//36DOhcuXMDUqVPh6OgItVqN6dOno6ioqNFzVldXY+jQoVAoFMjOzm52v4iIiMh0yQpRrWnHjh2IiopCbGwssrKyMGTIEISFhaG4uLjR+pWVlfDy8kJ8fDxcXFwardOrVy/Ex8cjMzMTJ0+exNixYzF58mTk5uYCACoqKjBu3DgoFAqkpqbi6NGjqKmpQXh4OPR6fYPjvfzyy3B1dW29ThMREVHnJ1pIoVAIpVLZ5KOlAgMDxaJFi6TnOp1OuLq6iri4uHvu6+HhIdasWdOs89jb24tNmzYJIYRITk4WSqVSaLVaaXtpaalQKBTi4MGDBvt99dVXon///iI3N1cAEKdOnWrW+YQQQqvVCgAG5yGizqOiulZ4rNgnPFbsExXVtcZuDhG1k+Z+frf4Av/u3bsNntfW1uLUqVPYsmULVq9e3aJj1dTUIDMzE9HR0VKZUqlEaGgo0tPTW9q0Rul0OuzcuRMVFRUIDg4GcPvynEKhgEqlkupZWlpCqVTiyJEjCA0NBQAUFRXh2WefxZ49e5r19TbV1dWorv7fl5WWlZW1Sh+IiIio42lxiGpsAc3f/va3GDhwIHbs2IH58+c3+1jXr1+HTqeDs7OzQbmzszPOnj3b0qYZOHPmDIKDg1FVVQVbW1vs3r0bPj4+AIARI0bAxsYGK1aswF//+lcIIbBy5UrodDoUFBQAAIQQmDt3LhYsWICAgABcunTpnueMi4trcZAkIiKizqnV5kSNGDECKSkprXW4+9avXz9kZ2fj+PHjWLhwISIjI5GXlwcAcHR0xM6dO7F3717Y2tpCo9GgtLQUw4YNg1J5+yV55513UF5ebjBKdi/R0dHQarXS48qVK23SNyIiIjK+Vrlf99atW3j77bfRs2fPFu3n4OAAMzOzBnfFFRUVNTlpvLksLCzg7e0NAPD398eJEyewdu1abNiwAQAwbtw4XLhwAdevX4e5uTns7Ozg4uICLy8vAEBqairS09MNLvkBQEBAAGbNmoUtW7Y0OKdKpWpQn4iIiExTi0OUvb09FAqF9FwIgfLyclhbW2Pr1q0tOpaFhQX8/f2RkpKCKVOmALi92nlKSgoWL17c0qbdlV6vN5ivVM/BwQHA7dBUXFyMSZMmAQDefvttvP7661K9q1evIiwsDDt27EBQUFCrto2IiIg6nxaHqF+vy6RUKuHo6IigoCDY29u3uAFRUVGIjIxEQEAAAgMDkZiYiIqKCsybNw8AEBERgZ49eyIuLg7A7cno9ZflampqkJ+fj+zsbNja2kojT9HR0ZgwYQLc3d1RXl6Obdu2IS0tDcnJydJ5N2/ejAEDBsDR0RHp6elYsmQJli1bhn79+gEA3N3dDdppa2sLAOjTpw969erV4n4SERGRaWlxiIqMjGzVBsyYMQPXrl1DTEwMCgsLMXToUCQlJUmTzS9fvizNUwJujwj5+flJzxMSEpCQkICQkBCkpaUBAIqLixEREYGCggJoNBr4+voiOTkZjz32mLTfuXPnEB0djZKSEnh6emLVqlVYtmxZq/aNiIiITJesr30pLS3FP/7xD3z//fcAgIEDB+KZZ56BRqNp9QZ2ZvzaF6LOjV/7QvRgarOvfTl58iT69OmDNWvWSF/78tZbb6FPnz7Iysq6r0YTERERdRYt/m/VsmXLMGnSJGzcuBHm5rd3r6urw+9//3ssXboU3377bas3koiIiKijaXGIOnnypEGAAgBzc3O8/PLLCAgIaNXGEREREXVULb6cp1arcfny5QblV65cQdeuXVulUUREREQdXYtD1IwZMzB//nzs2LEDV65cwZUrV7B9+3b8/ve/x8yZM9uijUREREQdTosv5yUkJEChUCAiIgJ1dXUAgC5dumDhwoWIj49v9QYSERERdUQtDlEWFhZYu3Yt4uLicOHCBQC3F6C0trbGrVu3Wr2BRERERB2R7C8gtra2xuDBgzF48GCYmZnhrbfeQu/evVuzbUREREQdVrNDVHV1NaKjoxEQEICHHnoIe/bsAXD761N69+6NNWvWcMVvIiIiemA0+3JeTEwMNmzYgNDQUBw7dgxPPfUU5s2bh//85z9466238NRTT8HMzKwt20pERETUYTQ7RO3cuRMfffQRJk2ahJycHPj6+qKurg6nT5+GQqFoyzYSERERdTjNvpz3yy+/wN/fHwAwaNAgqFQqLFu2jAGKiIiIHkjNDlE6nQ4WFhbSc3Nzc9ja2rZJo4iIiIg6umZfzhNCYO7cuVCpVACAqqoqLFiwADY2Ngb1du3a1botJCIiIuqAmh2iIiMjDZ7Pnj271RtDRERE1Fk0O0Rt3ry5LdtBRERE1KnIXmyTiIiI6EHGEEVEREQkA0MUERERkQwMUUREREQyMEQRERERycAQRURERCQDQxQRERGRDAxRRERERDIwRBERERHJwBBFREREJANDFBEREZEMDFFEREREMjBEEREREcnAEEVEREQkA0MUERERkQwMUUREREQyMEQRERERycAQRURERCQDQxQRERGRDAxRRERERDIwRBERERHJwBBFREREJANDFBEREZEMHSJEvffee/D09ISlpSWCgoKQkZ
HRZN3c3FxMmzYNnp6eUCgUSExMbFBn3bp18PX1hVqthlqtRnBwMPbv329Q58KFC5g6dSocHR2hVqsxffp0FBUVSdsvXbqE+fPno3fv3rCyskKfPn0QGxuLmpqaVus3ERERdV5GD1E7duxAVFQUYmNjkZWVhSFDhiAsLAzFxcWN1q+srISXlxfi4+Ph4uLSaJ1evXohPj4emZmZOHnyJMaOHYvJkycjNzcXAFBRUYFx48ZBoVAgNTUVR48eRU1NDcLDw6HX6wEAZ8+ehV6vx4YNG5Cbm4s1a9Zg/fr1+L//+7+2eSGIiIioU1EIIYQxGxAUFIThw4fj3XffBQDo9Xq4ubnh+eefx8qVK++6r6enJ5YuXYqlS5fe8zzdunXDm2++ifnz5+PAgQOYMGECbty4AbVaDQDQarWwt7fHgQMHEBoa2ugx3nzzTaxbtw4//fRTs/pWVlYGjUYDrVYrnYeIOo/Kmjr4xCQDAPL+HAZrC3Mjt4iI2kNzP7+NOhJVU1ODzMxMg9CiVCoRGhqK9PT0VjmHTqfD9u3bUVFRgeDgYABAdXU1FAoFVCqVVM/S0hJKpRJHjhxp8lharRbdunVrcnt1dTXKysoMHkRERGSajBqirl+/Dp1OB2dnZ4NyZ2dnFBYW3texz5w5A1tbW6hUKixYsAC7d++Gj48PAGDEiBGwsbHBihUrUFlZiYqKCrz00kvQ6XQoKCho9Hg//vgj3nnnHTz33HNNnjMuLg4ajUZ6uLm53VcfiIiIqOMy+pyottKvXz9kZ2fj+PHjWLhwISIjI5GXlwcAcHR0xM6dO7F3717Y2tpCo9GgtLQUw4YNg1LZ8CXJz8/H+PHj8dRTT+HZZ59t8pzR0dHQarXS48qVK23WPyIiIjIuo17gd3BwgJmZmcFdcQBQVFTU5KTx5rKwsIC3tzcAwN/fHydOnMDatWuxYcMGAMC4ceNw4cIFXL9+Hebm5rCzs4OLiwu8vLwMjnP16lWMGTMGDz30ED744IO7nlOlUhlcIiQiIiLTZdSRKAsLC/j7+yMlJUUq0+v1SElJkeYvtRa9Xo/q6uoG5Q4ODrCzs0NqaiqKi4sxadIkaVt+fj5Gjx4Nf39/bN68udFRKiIiInowGf1Wk6ioKERGRiIgIACBgYFITExERUUF5s2bBwCIiIhAz549ERcXB+D2ZPT6y3I1NTXIz89HdnY2bG1tpZGn6OhoTJgwAe7u7igvL8e2bduQlpaG5ORk6bybN2/GgAED4OjoiPT0dCxZsgTLli1Dv379APwvQHl4eCAhIQHXrl2T9r3fUTIiIiLq/IweombMmIFr164hJiYGhYWFGDp0KJKSkqTJ5pcvXzYYAbp69Sr8/Pyk5wkJCUhISEBISAjS0tIAAMXFxYiIiEBBQQE0Gg18fX2RnJyMxx57TNrv3LlziI6ORklJCTw9PbFq1SosW7ZM2n7w4EH8+OOP+PHHH9GrVy+DNht5VQgiIiLqAIy+TpQp4zpRRJ0b14kiejB1inWiiIiIiDorhigiIiIiGRiiiIiIiGRgiCIiIiKSgSGKiIiISAaGKCIiIiIZGKKIiIiIZGCIIiIiIpKBIYqIiIhIBi6/S0QtptMLZFwsQXF5FZy6WiKwdzeYKRXGbhYRUbtiiCKiFknKKcDqvXko0FZJZT00logN98H4QT2M2DIiovbFy3lE1GxJOQVYuDXLIEABQKG2Cgu3ZiEpp8BILSMian8ciSKiZtHpBVbvzUNj31heXxb7ZS5GejuYzKW9yhqdsZtARB0YQxQRNUvGxZIGI1C/VlRWjcGvHminFhERGRcv5xFRsxSX3z1AmbIAD3tYdTEzdjOIqIPhSBQRNYtTV8tm1ds8dziCvLq1cWval1UXMygUpnGJkohaD0MUETVLYO9u6KGxRKG2qtF5UQoALhpLPNLX0WTmRBER3Q0v5xFRs5gpFYgN92l0W31kig33YYAiogcGQxQRNdv4QT2wbvYwOKtVBuUuGkusmz2M60QR0QOFl/OIqEXGD+qBkd4O0l14m+cO5yU8InogcSSKiFrszsAU5MWvfCGiBxNDFBEREZEMDFFEREREMjBEEREREcnAEEVEREQkA0MUERERkQwMUUREREQyMEQRERERycAQRURERCQDVywnInrA6fQCGRdLUFxeBaeulgjszQVUiZqDIYqI6AGWlFOA1XvzUKCtksp6aCwRG+7D70IkugdeziMiekAl5RRg4dYsgwAFAIXaKizcmoWknAIjtYyoc2CIIiJ6AOn0Aqv35kE0sq2+bPXePOj0jdUgIoAhiojogZRxsaTBCNSdBIACbRUyLpa0X6OIOhmGKCKiB1BxedMBSk49ogcRQxQR0QPIqatlq9YjehAxRBERPYACe3dDD40lmlrIQIHbd+kF9u7Wns0i6lQYooiIHkBmSgViw30AoEGQqn8eG+7D9aKI7oIhiojoATV+UA+smz0MLhrDS3YuGkusmz2M60QR3UOHCFHvvfcePD09YWlpiaCgIGRkZDRZNzc3F9OmTYOnpycUCgUSExMb1Fm3bh18fX2hVquhVqsRHByM/fv3G9S5cOECpk6dCkdHR6jVakyfPh1FRUUGdUpKSjBr1iyo1WrY2dlh/vz5uHnzZqv0mYioIxg/qAeOrBiLT58dgbVPD8Wnz47AkRVjGaCImsHoIWrHjh2IiopCbGwssrKyMGTIEISFhaG4uLjR+pWVlfDy8kJ8fDxcXFwardOrVy/Ex8cjMzMTJ0+exNixYzF58mTk5uYCACoqKjBu3DgoFAqkpqbi6NGjqKmpQXh4OPR6vXScWbNmITc3FwcPHsS+ffvw7bff4g9/+EPrvwhEREZkplQguE93TB7aE8F9uvMSHlFzCSMLDAwUixYtkp7rdDrh6uoq4uLi7rmvh4eHWLNmTbPOY29vLzZt2iSEECI5OVkolUqh1Wql7aWlpUKhUIiDBw8KIYTIy8sTAMSJEyekOvv37xcKhULk5+c365xarVYAMDgPkSmoqK4VHiv2CY8V+0RFda2xm0NE1Kqa+/lt1JGompoaZGZmIjQ0VCpTKpUIDQ1Fenp6q5xDp9Nh+/btqKioQHBwMACguroaCoUCKpVKqmdpaQmlUokjR44AANLT02FnZ4eAgACpTmhoKJRKJY4fP97ouaqrq1FWVmbwICIiItNk1BB1/fp16HQ6ODs7G5Q7OzujsLDwvo595swZ2NraQqVSYcGCBdi9ezd8fG7fiTJixAjY2NhgxYoVqKysREVFBV566SXodDoUFNz+rqjCwkI4OTkZHNPc3BzdunVrsm1xcXHQaDTSw83N7b76QERERB2X0edEtZV+/fohOzsbx48fx8KFCxEZGYm8vDwAgKOjI3bu3Im9e/fC1tYWGo0GpaWlGDZsGJRK+S9JdHQ0tFqt9Lhy5UprdYeIiIg6GHNjntzBwQFmZmYN7oorKipqctJ4c1lYWMDb2xsA4O/vjxMnTmDt2rXYsGEDAGDcuHG4cOECrl+/DnNzc9jZ2cHFxQVeXl4AABcXlwaT2+vq6lBSUtJk21QqlcElQ
iIiIjJdRh2JsrCwgL+/P1JSUqQyvV6PlJQUaf5Sa9Hr9aiurm5Q7uDgADs7O6SmpqK4uBiTJk0CAAQHB6O0tBSZmZlS3dTUVOj1egQFBbVq24iIiKjzMepIFABERUUhMjISAQEBCAwMRGJiIioqKjBv3jwAQEREBHr27Im4uDgAtyej11+Wq6mpQX5+PrKzs2FrayuNPEVHR2PChAlwd3dHeXk5tm3bhrS0NCQnJ0vn3bx5MwYMGABHR0ekp6djyZIlWLZsGfr16wcAGDBgAMaPH49nn30W69evR21tLRYvXoynn34arq6u7fkSERERUQdk9BA1Y8YMXLt2DTExMSgsLMTQoUORlJQkTTa/fPmywTylq1evws/PT3qekJCAhIQEhISEIC0tDQBQXFyMiIgIFBQUQKPRwNfXF8nJyXjsscek/c6dO4fo6GiUlJTA09MTq1atwrJlywza9sknn2Dx4sV49NFHoVQqMW3aNLz99ttt+Go82HR6gYyLJSgur4JT19vf2cX1aoiIqKNSCCGEsRthqsrKyqDRaKDVaqFWq1vlmKYaNJJyCrB6bx4KtFVSWQ+NJWLDfTr1ysmm+vOqrKmDT8ztkd28P4fB2sLo/x8jImo1zf385jtfJ2KqQSMppwALt2bh12m+UFuFhVuzOu13eJnqz4uIiG5jiOokTDVo6PQCq/fmNegXAKks9stcjPR26FQjOAfzCrFk++kG5QXaKizYmoW1Tw/BYz73dweqMVXW6IzdBCIio2OI6gRMNWgAwPGfSgxGahpTVFaNwa8eaKcWtY/bAathyCIios6DIaoTyLj4YAYN6vgCPOxh1cXM2M0gIjIKhqhOoLj87gHqQbB57nAEeXUzdjOaZd93V/Hyv87cs94bvx2MJ3w793IZVl3MoFB0rtFPIqLWwhDVCTh1tWxWvc4UNOrp9AKhb32DorKGC6ECgAKAi8YSj/R17DSXKt3sbZpdj3e1ERF1XnwH7wQCe3dDD40lCrVVjc6L6oxB406rJw3Ewq1ZAGDQv/qexIb7dKp+NffnFdi7cwVeIiIyZLJfQGxKzJQKxIb7APhfsKjXWYPGncYP6oF1s4fBRWM44uaiseyUdx2a+s+LiIhu42Kbbai1F9s09XWHTG1hSlP/eRERmarmfn4zRLUhrlhO/HkREXU+XLHcRJkpFQju093YzaBm4s+LiMh0cU4UERERkQwMUUREREQyMEQRERERycAQRURERCQDQxQRERGRDAxRRERERDIwRBERERHJwBBFREREJANDFBEREZEMDFFEREREMjBEEREREcnAEEVEREQkA0MUERERkQwMUUREREQyMEQRERERycAQRURERCQDQxQRERGRDAxRRERERDIwRBERERHJwBBFREREJANDFBEREZEMDFFEREREMjBEEREREcnAEEVEREQkA0MUERERkQwMUUREREQyMEQRERERycAQRURERCSD0UPUe++9B09PT1haWiIoKAgZGRlN1s3NzcW0adPg6ekJhUKBxMTEBnXWrVsHX19fqNVqqNVqBAcHY//+/QZ1CgsLMWfOHLi4uMDGxgbDhg3D559/blDn/PnzmDx5MhwcHKBWq/Hwww/j0KFDrdJnIiIi6vyMGqJ27NiBqKgoxMbGIisrC0OGDEFYWBiKi4sbrV9ZWQkvLy/Ex8fDxcWl0Tq9evVCfHw8MjMzcfLkSYwdOxaTJ09Gbm6uVCciIgLnzp3Dl19+iTNnzuDJJ5/E9OnTcerUKanOE088gbq6OqSmpiIzMxNDhgzBE088gcLCwtZ9EYiIiKhzEkYUGBgoFi1aJD3X6XTC1dVVxMXF3XNfDw8PsWbNmmadx97eXmzatEl6bmNjIz766CODOt26dRMbN24UQghx7do1AUB8++230vaysjIBQBw8eLBZ5xRCCK1WKwAIrVbb7H2IiIjIuJr7+W20kaiamhpkZmYiNDRUKlMqlQgNDUV6enqrnEOn02H79u2oqKhAcHCwVP7QQw9hx44dKCkpgV6vx/bt21FVVYXRo0cDALp3745+/frho48+QkVFBerq6rBhwwY4OTnB39+/yfNVV1ejrKzM4EFERESmydxYJ75+/Tp0Oh2cnZ0Nyp2dnXH27Nn7OvaZM2cQHByMqqoq2NraYvfu3fDx8ZG2f/bZZ5gxYwa6d+8Oc3NzWFtbY/fu3fD29gYAKBQKfP3115gyZQq6du0KpVIJJycnJCUlwd7evsnzxsXFYfXq1ffVdiIiIuocjD6xvC3069cP2dnZOH78OBYuXIjIyEjk5eVJ21955RWUlpbi66+/xsmTJxEVFYXp06fjzJkzAAAhBBYtWgQnJyccPnwYGRkZmDJlCsLDw1FQUNDkeaOjo6HVaqXHlStX2ryvREREZBxGG4lycHCAmZkZioqKDMqLioqanDTeXBYWFtKokr+/P06cOIG1a9diw4YNuHDhAt59913k5ORg4MCBAIAhQ4bg8OHDeO+997B+/XqkpqZi3759uHHjBtRqNQDg/fffx8GDB7FlyxasXLmy0fOqVCqoVKr7ajsRERF1DkYbibKwsIC/vz9SUlKkMr1ej5SUFIP5S61Br9ejuroawO07/IDb86/uZGZmBr1ef9c6SqVSqkNEREQPNqONRAFAVFQUIiMjERAQgMDAQCQmJqKiogLz5s0DcHspgp49eyIuLg7A7cno9ZflampqkJ+fj+zsbNja2kojT9HR0ZgwYQLc3d1RXl6Obdu2IS0tDcnJyQCA/v37w9vbG8899xwSEhLQvXt37NmzBwcPHsS+ffsAAMHBwbC3t0dkZCRiYmJgZWWFjRs34uLFi5g4cWJ7v0xERETUARk1RM2YMQPXrl1DTEwMCgsLMXToUCQlJUmTzS9fvmwwGnT16lX4+flJzxMSEpCQkICQkBCkpaUBAIqLixEREYGCggJoNBr4+voiOTkZjz32GACgS5cu+Oqrr7By5UqEh4fj5s2b8Pb2xpYtW/D4448DuH2pMSkpCatWrcLYsWNRW1uLgQMH4osvvsCQIUPa6dUhIiKijkwhhBDGboSpKisrg0ajgVarleZWERGRcej0AhkXS1BcXgWnrpYI7N0NZkqFsZtFHVBzP7+NOhJFRETUHpJyCrB6bx4KtFVSWQ+NJWLDfTB+UA8jtow6M5Nc4oCIiKheUk4BFm7NMghQAFCorcLCrVlIyml66Rqiu2GIIiIik6XTC6zem4fG5q3Ul63emwednjNbqOUYooiIyGRlXCxpMAJ1JwGgQFuFjIsl7dcoMhkMUUREZLKKy5sOUHLqEd2JIYqIiEyWU1fLVq1HdCeGKCIiMlmBvbuhh8YSTS1koMDtu/QCe3drz2aRiWCIIiIik2WmVCA23AcAGgSp+uex4T5cL4pkYYgiIiKTNn5QD6ybPQwuGsNLdi4aS6ybPYzrRJFsXGyTiIhM3vhBPfCYjwtXLKdWxRBFREQPBDOlAsF9uhu7GWRCeDmPiIiISAaGKCIiIiIZGKKIiIiIZGCIIiIiIpKBIYqIiIhIBoYoIiIiIhkYooiIiIhkYIgi
IiIikoEhioiIiEgGrljehoQQAICysjIjt4SIiIiaq/5zu/5zvCkMUW2ovLwcAODm5mbklhAREVFLlZeXQ6PRNLldIe4Vs0g2vV6Pq1evomvXrlAo2uZLLsvKyuDm5oYrV65ArVa3yTk6mgetzw9afwH2mX02XQ9anztrf4UQKC8vh6urK5TKpmc+cSSqDSmVSvTq1atdzqVWqzvVL2hreND6/KD1F2CfHxTss+nrjP292whUPU4sJyIiIpKBIYqIiIhIBoaoTk6lUiE2NhYqlcrYTWk3D1qfH7T+Auzzg4J9Nn2m3l9OLCciIiKSgSNRRERERDIwRBERERHJwBBFREREJANDFBEREZEMDFEdUFxcHIYPH46uXbvCyckJU6ZMwblz5wzqVFVVYdGiRejevTtsbW0xbdo0FBUVGdS5fPkyJk6cCGtrazg5OWH58uWoq6trz67IEh8fD4VCgaVLl0plptjf/Px8zJ49G927d4eVlRUGDx6MkydPStuFEIiJiUGPHj1gZWWF0NBQ/PDDDwbHKCkpwaxZs6BWq2FnZ4f58+fj5s2b7d2VZtHpdHjllVfQu3dvWFlZoU+fPnjttdcMvpuqs/f522+/RXh4OFxdXaFQKLBnzx6D7a3Vv++++w6jRo2CpaUl3Nzc8MYbb7R115p0tz7X1tZixYoVGDx4MGxsbODq6oqIiAhcvXrV4Bim1OdfW7BgARQKBRITEw3KO1Ofm9Pf77//HpMmTYJGo4GNjQ2GDx+Oy5cvS9tN8T0cACCowwkLCxObN28WOTk5Ijs7Wzz++OPC3d1d3Lx5U6qzYMEC4ebmJlJSUsTJkyfFiBEjxEMPPSRtr6urE4MGDRKhoaHi1KlT4quvvhIODg4iOjraGF1qtoyMDOHp6Sl8fX3FkiVLpHJT629JSYnw8PAQc+fOFcePHxc//fSTSE5OFj/++KNUJz4+Xmg0GrFnzx5x+vRpMWnSJNG7d29x69Ytqc748ePFkCFDxH/+8x9x+PBh4e3tLWbOnGmMLt3TX/7yF9G9e3exb98+cfHiRbFz505ha2sr1q5dK9Xp7H3+6quvxKpVq8SuXbsEALF7926D7a3RP61WK5ydncWsWbNETk6O+PTTT4WVlZXYsGFDe3XTwN36XFpaKkJDQ8WOHTvE2bNnRXp6uggMDBT+/v4GxzClPt9p165dYsiQIcLV1VWsWbPGYFtn6vO9+vvjjz+Kbt26ieXLl4usrCzx448/ii+++EIUFRVJdUztPbweQ1QnUFxcLACIb775Rghx+42pS5cuYufOnVKd77//XgAQ6enpQojbv/RKpVIUFhZKddatWyfUarWorq5u3w40U3l5ufjNb34jDh48KEJCQqQQZYr9XbFihXj44Yeb3K7X64WLi4t48803pbLS0lKhUqnEp59+KoQQIi8vTwAQJ06ckOrs379fKBQKkZ+f33aNl2nixInimWeeMSh78sknxaxZs4QQptfnX3/YtFb/3n//fWFvb2/we71ixQrRr1+/Nu7Rvd0tUNTLyMgQAMTPP/8shDDdPv/yyy+iZ8+eIicnR3h4eBiEqM7c58b6O2PGDDF79uwm9zHF9/B6vJzXCWi1WgBAt27dAACZmZmora1FaGioVKd///5wd3dHeno6ACA9PR2DBw+Gs7OzVCcsLAxlZWXIzc1tx9Y336JFizBx4kSDfgGm2d8vv/wSAQEBeOqpp+Dk5AQ/Pz9s3LhR2n7x4kUUFhYa9Fmj0SAoKMigz3Z2dggICJDqhIaGQqlU4vjx4+3XmWZ66KGHkJKSgvPnzwMATp8+jSNHjmDChAkATLPPd2qt/qWnp+ORRx6BhYWFVCcsLAznzp3DjRs32qk38mm1WigUCtjZ2QEwzT7r9XrMmTMHy5cvx8CBAxtsN6U+6/V6/Pvf/0bfvn0RFhYGJycnBAUFGVzyM8X38HoMUR2cXq/H0qVLMXLkSAwaNAgAUFhYCAsLC+lNqJ6zszMKCwulOnf+MtZvr9/W0Wzfvh1ZWVmIi4trsM0U+/vTTz9h3bp1+M1vfoPk5GQsXLgQL7zwArZs2QLgf21urE939tnJyclgu7m5Obp169Yh+7xy5Uo8/fTT6N+/P7p06QI/Pz8sXboUs2bNAmCafb5Ta/Wvs/2u36mqqgorVqzAzJkzpS+jNcU+/+1vf4O5uTleeOGFRrebUp+Li4tx8+ZNxMfHY/z48Thw4ACmTp2KJ598Et988w0A03wPr2du7AbQ3S1atAg5OTk4cuSIsZvSZq5cuYIlS5bg4MGDsLS0NHZz2oVer0dAQAD++te/AgD8/PyQk5OD9evXIzIy0sitaxufffYZPvnkE2zbtg0DBw5EdnY2li5dCldXV5PtM/1PbW0tpk+fDiEE1q1bZ+zmtJnMzEysXbsWWVlZUCgUxm5Om9Pr9QCAyZMnY9myZQCAoUOH4tixY1i/fj1CQkKM2bw2x5GoDmzx4sXYt28fDh06hF69eknlLi4uqKmpQWlpqUH9oqIiuLi4SHV+fedD/fP6Oh1FZmYmiouLMWzYMJibm8Pc3BzffPMN3n77bZibm8PZ2dmk+gsAPXr0gI+Pj0HZgAEDpLtZ6tvcWJ/u7HNxcbHB9rq6OpSUlHTIPi9fvlwajRo8eDDmzJmDZcuWSaOPptjnO7VW/zrb7zrwvwD1888/4+DBg9IoFGB6fT58+DCKi4vh7u4uvZ/9/PPPePHFF+Hp6QnAtPrs4OAAc3Pze76fmdp7eD2GqA5ICIHFixdj9+7dSE1NRe/evQ22+/v7o0uXLkhJSZHKzp07h8uXLyM4OBgAEBwcjDNnzhj8oda/ef36l93YHn30UZw5cwbZ2dnSIyAgALNmzZL+bUr9BYCRI0c2WLbi/Pnz8PDwAAD07t0bLi4uBn0uKyvD8ePHDfpcWlqKzMxMqU5qair0ej2CgoLaoRctU1lZCaXS8C3HzMxM+p+sKfb5Tq3Vv+DgYHz77beora2V6hw8eBD9+vWDvb19O/Wm+eoD1A8//ICvv/4a3bt3N9huan2eM2cOvvvuO4P3M1dXVyxfvhzJyckATKvPFhYWGD58+F3fz0ztM8uAsWe2U0MLFy4UGo1GpKWliYKCAulRWVkp1VmwYIFwd3cXqamp4uTJkyI4OFgEBwdL2+tvFx03bpzIzs4WSUlJwtHRscPfLlrvzrvzhDC9/mZkZAhzc3Pxl7/8Rfzwww/ik08+EdbW1mLr1q1Snfj4eGFnZye++OIL8d1334nJkyc3eju8n5+fOH78uDhy5Ij4zW9+02Fu9/+1yMhI0bNnT2mJg127dgkHBwfx8ssvS3U6e5/Ly8vFqVOnxKlTpwQA8dZbb4lTp05Jd6K1Rv9KS0uFs7OzmDNnjsjJyRHbt28X1tbWRrvd/259rqmpEZMmTRK9evUS2dnZBu9nd95xZUp9bsyv784TonP1+V793bVrl+jSpYv44IMPxA8//CDeeecdYWZmJg4fPiwdw9Tew+sxRHVAABp9bN68Wapz69Yt8cc//lHY29sLa2trMXXqVFFQUGBwnEuXLokJEyY
IKysr4eDgIF588UVRW1vbzr2R59chyhT7u3fvXjFo0CChUqlE//79xQcffGCwXa/Xi1deeUU4OzsLlUolHn30UXHu3DmDOv/973/FzJkzha2trVCr1WLevHmivLy8PbvRbGVlZWLJkiXC3d1dWFpaCi8vL7Fq1SqDD9PO3udDhw41+rcbGRkphGi9/p0+fVo8/PDDQqVSiZ49e4r4+Pj26mIDd+vzxYsXm3w/O3TokHQMU+pzYxoLUZ2pz83p7z/+8Q/h7e0tLC0txZAhQ8SePXsMjmGK7+FCCKEQ4o7lgomIiIioWTgnioiIiEgGhigiIiIiGRiiiIiIiGRgiCIiIiKSgSGKiIiISAaGKCIiIiIZGKKIiIiIZGCIIiIiIpKBIYqIOoy0tDQoFArpi0o//PBD2NnZ3fdxW+s4bXU8ABg9ejSWLl3aqsdsiUceeQTbtm1rVt0RI0bg888/b+MWEXV8DFFE1GLr169H165dUVdXJ5XdvHkTXbp0wejRow3q1gejCxcutFl7Dh06hMcffxzdu3eHtbU1fHx88OKLLyI/P7/Nztlcly5dgkKhuOvjww8/xK5du/Daa68ZpY1ffvklioqK8PTTTzer/p/+9CesXLlS+vJoogcVQxQRtdiYMWNw8+ZNnDx5Uio7fPgwXFxccPz4cVRVVUnlhw4dgru7O/r06dMmbdmwYQNCQ0Ph4uKCzz//HHl5eVi/fj20Wi3+/ve/t8k5W8LNzQ0FBQXS48UXX8TAgQMNymbMmIFu3bqha9euRmnj22+/jXnz5kGpbN5HwoQJE1BeXo79+/e3ccuIOjaGKCJqsX79+qFHjx5IS0uTytLS0jB58mT07t0b//nPfwzKx4wZAwD4+OOPERAQgK5du8LFxQW/+93vUFxcLLsdv/zyC1544QW88MIL+Oc//4nRo0fD09MTjzzyCDZt2oSYmJgm9123bh369OkDCwsL9OvXDx9//LHB9tLSUjz33HNwdnaGpaUlBg0ahH379jV6rGvXriEgIABTp05FdXW1wTYzMzO4uLhID1tbW5ibmxuUWVlZNbic5+npiddffx0RERGwtbWFh4cHvvzyS1y7dg2TJ0+Gra0tfH19DYIsABw5cgSjRo2ClZUV3Nzc8MILL6CioqLJ1+HatWtITU1FeHi4VCaEwKuvvgp3d3eoVCq4urrihRdeMOjT448/ju3btzd5XKIHAUMUEckyZswYHDp0SHp+6NAhjB49GiEhIVL5rVu3cPz4cSlE1dbW4rXXXsPp06exZ88eXLp0CXPnzpXdhp07d6KmpgYvv/xyo9ubmre0e/duLFmyBC+++CJycnLw3HPPYd68eVK79Xo9JkyYgKNHj2Lr1q3Iy8tDfHw8zMzMGhzrypUrGDVqFAYNGoR//etfUKlUsvvza2vWrMHIkSNx6tQpTJw4EXPmzEFERARmz56NrKws9OnTBxEREaj/HvkLFy5g/PjxmDZtGr777jvs2LEDR44cweLFi5s8x5EjR2BtbY0BAwZIZZ9//jnWrFmDDRs24IcffsCePXswePBgg/0CAwNx+PDhVusrUackiIhk2Lhxo7CxsRG1tbWirKxMmJubi+LiYrFt2zbxyCOPCCGESElJEQDEzz//3OgxTpw4IQCI8vJyIYQQhw4dEgDEjRs3hBBCbN68WWg0mibbsHDhQqFWq+/Z1l8f56GHHhLPPvusQZ2nnnpKPP7440IIIZKTk4VSqRTnzp276/HOnj0r3NzcxAsvvCD0ev092yGEELGxsWLIkCENykNCQsSSJUuk5x4eHmL27NnS84KCAgFAvPLKK1JZenq6ACAKCgqEEELMnz9f/OEPfzA47uHDh4VSqRS3bt1qtD1r1qwRXl5eBmV///vfRd++fUVNTU2T/fjiiy+EUqkUOp2uyTpEpo4jUUQky+jRo1FRUYETJ07g8OHD6Nu3LxwdHRESEiLNi0pLS4OXlxfc3d0BAJmZmQgPD4e7uzu6du2KkJAQAMDly5dltUEIAYVC0eL9vv/+e4wcOdKgbOTIkfj+++8BANnZ2ejVqxf69u3b5DFu3bqFUaNG4cknn8TatWtlteNefH19pX87OzsDgMGIUH1Z/SXR06dP48MPP4Stra30CAsLg16vx8WLF5vsh6WlpUHZU089hVu3bsHLywvPPvssdu/ebXATAQBYWVlBr9c3uHxJ9CBhiCIiWby9vdGrVy8cOnQIhw4dkgKRq6sr3NzccOzYMRw6dAhjx44FAFRUVCAsLAxqtRqffPIJTpw4gd27dwMAampqZLWhb9++0Gq1KCgoaJ1O/X9WVlb3rKNSqRAaGop9+/a12V2AXbp0kf5dH9IaK6u/S+7mzZt47rnnkJ2dLT1Onz6NH374ocmJ/Q4ODrhx44ZBmZubG86dO4f3338fVlZW+OMf/4hHHnkEtbW1Up2SkhLY2Ng067UiMlUMUUQk25gxY5CWloa0tDSDpQ0eeeQR7N+/HxkZGdJ8qLNnz+K///0v4uPjMWrUKPTv3/++JpUDwG9/+1tYWFjgjTfeaHR7/XpTvzZgwAAcPXrUoOzo0aPw8fEBcHsE6JdffsH58+ebPLdSqcTHH38Mf39/jBkzBlevXpXXiVY0bNgw5OXlwdvbu8HDwsKi0X38/PxQWFjYIEhZWVkhPDwcb7/9NtLS0pCeno4zZ85I23NycuDn59em/SHq6MyN3QAi6rzGjBmDRYsWoba2VhqJAoCQkBAsXrwYNTU1Uohyd3eHhYUF3nnnHSxYsAA5OTn3vS6Sm5sb1qxZg8WLF6OsrAwRERHw9PTEL7/8go8++gi2traNLnOwfPlyTJ8+HX5+fggNDcXevXuxa9cufP3111L7H3nkEUybNg1vvfUWvL29cfbsWSgUCowfP146jpmZGT755BPMnDkTY8eORVpaGlxcXO6rT/djxYoVGDFiBBYvXozf//73sLGxQV5eHg4ePIh333230X38/Pzg4OCAo0eP4oknngBwezFRnU6HoKAgWFtbY+vWrbCysoKHh4e03+HDhzFu3Lh26RdRR8WRKCKSbcyYMbh16xa8vb2l+TnA7RBSXl4uLYUAAI6Ojvjwww+xc+dO+Pj4ID4+HgkJCffdhj/+8Y84cOAA8vPzMXXqVPTv3x+///3voVar8dJLLzW6z5QpU7B27VokJCRg4MCB2LBhAzZv3mwwmvb5559j+PDhmDlzJnx8fPDyyy9Dp9M1OJa5uTk+/fRTDBw4EGPHjr3v0bX74evri2+++Qbnz5/HqFGj4Ofnh5iYGLi6uja5j5mZGebNm4dPPvlEKrOzs8PGjRsxcuRI+Pr64uuvv8bevXvRvXt3AEB+fj6OHTuGefPmtXmfiDoyhRD//95YIiJ6IBUWFmLgwIHIysoyGG1qyooVK3Djxg188MEH7dA6oo6LI1FERA84FxcX/OMf/2j2XZJOTk5G+4oaoo6EI1FEREREMnAkioiIiEgGhigiIiIiGRiiiIiIiGRgiCIiIiKSgSGKiIiISAaGKCIiIiIZGKKIiIiIZGCIIiIiIpKBIYqIiIhIhv
8HNjKMT+aFRdUAAAAASUVORK5CYII=",
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"\n",
"from flaml.automl.data import get_output_from_log\n",
"time_history, best_valid_loss_history, valid_loss_history, config_history, metric_history = \\\n",
" get_output_from_log(filename=automl_settings['log_file_name'], time_budget=3000)\n",
"for config in config_history:\n",
" print(config)\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"plt.title('Learning Curve')\n",
"plt.xlabel('Wall Clock Time (s)')\n",
"plt.ylabel('Rouge 1')\n",
"print(len(valid_loss_history))\n",
"plt.scatter(time_history, 1 - np.array(valid_loss_history))\n",
"plt.step(time_history, 1 - np.array(best_valid_loss_history), where='post')\n",
"plt.show()"
]
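},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell below is a minimal sketch, not part of the original run: it reuses the lists returned by `get_output_from_log` above to report the best trial, assuming (as in the plot) that the logged validation loss equals 1 - ROUGE-1."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch (assumptions: the lists from the previous cell are still in memory\n",
"# and the logged validation loss is 1 - ROUGE-1, as plotted above).\n",
"import numpy as np\n",
"\n",
"best_idx = int(np.argmin(valid_loss_history))\n",
"print(f\"Best validation ROUGE-1: {1 - valid_loss_history[best_idx]:.4f}\")\n",
"print(f\"Reached after {time_history[best_idx]:.1f} s of wall clock time\")\n",
"print(\"Corresponding log record:\")\n",
"print(config_history[best_idx])"
]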
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"provenance": []
},
"gpuClass": "standard",
"interpreter": {
"hash": "e9d36fc5b7c3dd4177ff1b60184dd696c0acc18150a44682abca4d769811bd46"
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.0"
}
},
"nbformat": 4,
"nbformat_minor": 0
}