From 323448d59da824629b260653bafad00040854a9a Mon Sep 17 00:00:00 2001 From: Sergey Kolesnikov Date: Sun, 18 Oct 2020 15:14:57 +0300 Subject: [PATCH 1/3] notebooks update --- .../notebooks/classification-tutorial.ipynb | 203 +---------- .../customizing_what_happens_in_train.ipynb | 25 +- examples/notebooks/demo.ipynb | 144 ++++---- .../notebooks/segmentation-tutorial.ipynb | 158 +-------- examples/notebooks/table-data-tutorial.ipynb | 334 ------------------ 5 files changed, 101 insertions(+), 763 deletions(-) delete mode 100644 examples/notebooks/table-data-tutorial.ipynb diff --git a/examples/notebooks/classification-tutorial.ipynb b/examples/notebooks/classification-tutorial.ipynb index e465b63153..195eda959f 100644 --- a/examples/notebooks/classification-tutorial.ipynb +++ b/examples/notebooks/classification-tutorial.ipynb @@ -33,8 +33,7 @@ "outputs": [], "source": [ "# this variable will be used in `runner.train` and by default we disable FP16 mode\n", - "is_fp16_used = False\n", - "is_alchemy_used = False" + "is_fp16_used = False" ] }, { @@ -45,20 +44,15 @@ }, "outputs": [], "source": [ + "# Catalyst \n", + "!pip install catalyst==20.10.1\n", + "\n", "# for augmentations\n", - "!pip install -U albumentations\n", + "!pip install albumentations==0.4.1\n", "\n", "# for pretrained models for PyTorch\n", "!pip install pretrainedmodels\n", "\n", - "\n", - "################\n", - "# Catalyst itself\n", - "!pip install catalyst==20.07\n", - "# For specific version of catalyst, uncomment:\n", - "# ! pip install git+http://github.com/catalyst-team/catalyst.git@{master/commit_hash}\n", - "################\n", - "\n", "# for TTA\n", "# !pip install ttach\n", "\n", @@ -66,50 +60,12 @@ "!pip install tensorflow\n", "%load_ext tensorboard\n", "\n", - "# for alchemy experiment logging integration, uncomment this 2 lines below\n", - "# !pip install -U alchemy\n", - "# is_alchemy_used = True\n", - "\n", "# if Your machine support Apex FP16, uncomment this 3 lines below\n", "# !git clone https://github.com/NVIDIA/apex\n", "# !pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./apex\n", "# is_fp16_used = True" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Colab extras – Plotly\n", - "\n", - "To intergate visualization library `plotly` to colab, run" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import IPython\n", - "\n", - "def configure_plotly_browser_state():\n", - " display(IPython.core.display.HTML('''\n", - " \n", - " \n", - " '''))\n", - "\n", - "\n", - "IPython.get_ipython().events.register('pre_run_cell', configure_plotly_browser_state)" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -140,63 +96,6 @@ "utils.prepare_cudnn(deterministic=True)" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Reproducibility\n", - "\n", - "[![Alchemy logo](https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master/pics/alchemy_logo.png)](https://github.com/catalyst-team/alchemy)\n", - "\n", - "To make your research more reproducible and easy to monitor, Catalyst has an integration with [Alchemy](https://alchemy.host) – experiment tracking tool for deep learning.\n", - "\n", - "To use monitoring, goto [Alchemy](https://alchemy.host/) and get your personal token." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "# for alchemy experiment logging integration, uncomment this 2 lines below\n", - "# !pip install -U alchemy\n", - "# is_alchemy_used = True" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "if is_alchemy_used:\n", - " monitoring_params = {\n", - " \"token\": None, # insert your personal token here\n", - " \"project\": \"classification_example\",\n", - " \"group\": \"first_trials\",\n", - " \"experiment\": \"first_experiment\",\n", - " }\n", - " assert monitoring_params[\"token\"] is not None\n", - "else:\n", - " monitoring_params = None" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "-------" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -240,7 +139,7 @@ "source": [ "%%bash\n", "\n", - "download-gdrive 1eAk36MEMjKPKL5j9VWLvNTVKk4ube9Ml artworks.tar.gz\n", + "download-gdrive 1eAk36MEMjKPKL5j9VWLvNTVKk4ube9Ml artworks.tar.gz &>/dev/null\n", "extract-archive artworks.tar.gz &>/dev/null" ] }, @@ -255,6 +154,7 @@ "ROOT = \"artworks/\"\n", "ALL_IMAGES = list(Path(ROOT).glob(\"**/*.jpg\"))\n", "ALL_IMAGES = list(filter(lambda x: not x.name.startswith(\".\"), ALL_IMAGES))\n", + "assert len(ALL_IMAGES) == 8446\n", "print(\"Number of images:\", len(ALL_IMAGES))" ] }, @@ -415,9 +315,8 @@ "metadata": {}, "outputs": [], "source": [ - "from catalyst.contrib.data.cv import ImageReader\n", - "from catalyst.dl import utils\n", - "from catalyst.data import ScalarReader, ReaderCompose\n", + "from catalyst import utils\n", + "from catalyst.data import ScalarReader, ReaderCompose, ImageReader\n", "\n", "num_classes = len(tag_to_label)\n", "\n", @@ -889,7 +788,7 @@ "outputs": [], "source": [ "# as we are working on classification task\n", - "from catalyst.dl.callbacks import AccuracyCallback, AUCCallback, F1ScoreCallback\n", + "from catalyst.dl import AccuracyCallback, AUCCallback, F1ScoreCallback\n", "\n", "callbacks = [\n", " AccuracyCallback(num_classes=num_classes),\n", @@ -904,10 +803,6 @@ " )\n", "]\n", "\n", - "if is_alchemy_used:\n", - " from catalyst.dl import AlchemyLogger\n", - " callbacks.append(AlchemyLogger(**monitoring_params))\n", - "\n", "runner.train(\n", " model=model,\n", " criterion=criterion,\n", @@ -922,7 +817,7 @@ " logdir=logdir,\n", " num_epochs=num_epochs,\n", " # save our best checkpoint by AUC metric\n", - " main_metric=\"auc/_mean\",\n", + " main_metric=\"auc/mean\",\n", " # AUC needs to be maximized.\n", " minimize_metric=False,\n", " # for FP16. It uses the variable from the very first cell\n", @@ -932,32 +827,6 @@ ")" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Training analysis\n", - "\n", - "The `utils.plot_metrics` method reads tensorboard logs from the logdir and plots beautiful metrics with `plotly` package." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# tensorboard should be enough, uncomment to check plotly version\n", - "# it can take a while (colab issue)\n", - "# utils.plot_metrics(\n", - "# logdir=logdir, \n", - "# # specify which metrics we want to plot\n", - "# metrics=[\"loss\", \"accuracy01\", \"auc/_mean\", \"f1_score\", \"_base/lr\"]\n", - "# )" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -1088,7 +957,7 @@ }, "outputs": [], "source": [ - "from catalyst.dl.utils import trace\n", + "from catalyst.utils import trace\n", "\n", "if is_fp16_used:\n", " model = trace.load_traced_model(\n", @@ -1228,7 +1097,7 @@ "\n", "For example,\n", "\n", - "[catalyst.contrib.criterion](https://catalyst-team.github.io/catalyst/api/contrib.html#module-catalyst.contrib.criterion.ce):\n", + "[catalyst.contrib.criterion](https://catalyst-team.github.io/catalyst/api/contrib.html):\n", "- HuberLoss\n", "- CenterLoss\n", "- FocalLossMultiClass\n", @@ -1237,10 +1106,10 @@ "- LovaszLossBinary / LovaszLossMultiClass / LovaszLossMultiLabel\n", "- WingLoss\n", "\n", - "Lr scheduler in [catalyst.contrib.schedulers](https://catalyst-team.github.io/catalyst/api/contrib.html#module-catalyst.contrib.schedulers.base):\n", + "Lr scheduler in [catalyst.contrib.schedulers](https://catalyst-team.github.io/catalyst/api/contrib.html):\n", "- OneCycleLRWithWarmup\n", "\n", - "Moreover, in [catalyst.contrib.models](https://catalyst-team.github.io/catalyst/api/contrib.html#models) you can find various models for segmentation:\n", + "Moreover, in [catalyst.contrib.models](https://catalyst-team.github.io/catalyst/api/contrib.html) you can find various models for segmentation:\n", "- Unet / ResnetUnet\n", "- Linknet / ResnetLinknet\n", "- FPNUnet / ResnetFPNUnet\n", @@ -1248,7 +1117,7 @@ "- MobileUnet\n", "\n", "\n", - "Finally, several handwritten modules in [catalyst.contrib.modules](https://catalyst-team.github.io/catalyst/api/contrib.html#module-catalyst.contrib.modules.common):\n", + "Finally, several handwritten modules in [catalyst.contrib.modules](https://catalyst-team.github.io/catalyst/api/contrib.html):\n", "- Flatten\n", "- TemporalAttentionPooling\n", "- LamaPooling\n", @@ -1324,30 +1193,13 @@ " ],\n", " logdir=logdir,\n", " num_epochs=num_epochs,\n", - " main_metric=\"auc/_mean\",\n", + " main_metric=\"auc/mean\",\n", " minimize_metric=False,\n", " fp16=fp16_params,\n", " verbose=True\n", ")" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "# it can take a while (colab issue)\n", - "utils.plot_metrics(\n", - " logdir=logdir, \n", - " metrics=[\"loss\", \"accuracy01\", \"auc/_mean\", \"f1_score\", \"_base/lr\"]\n", - ")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -1440,30 +1292,13 @@ " ],\n", " logdir=logdir,\n", " num_epochs=num_epochs,\n", - " main_metric=\"auc/_mean\",\n", + " main_metric=\"auc/mean\",\n", " minimize_metric=False,\n", " fp16=fp16_params,\n", " verbose=True\n", ")" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "# it can take a while (colab issue)\n", - "utils.plot_metrics(\n", - " logdir=logdir, \n", - " metrics=[\"loss\", \"accuracy01\", \"auc/_mean\", \"f1_score\", \"_base/lr\"]\n", - ")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -1497,7 +1332,7 @@ "name": 
"python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.7.7" } }, "nbformat": 4, diff --git a/examples/notebooks/customizing_what_happens_in_train.ipynb b/examples/notebooks/customizing_what_happens_in_train.ipynb index 3500fdf3ea..439b9eb3b8 100644 --- a/examples/notebooks/customizing_what_happens_in_train.ipynb +++ b/examples/notebooks/customizing_what_happens_in_train.ipynb @@ -66,7 +66,7 @@ }, "outputs": [], "source": [ - "!pip install catalyst==20.6\n", + "!pip install catalyst==20.10.1\n", "# don't forget to restart runtime for correct `PIL` work with Colab" ] }, @@ -96,8 +96,7 @@ "outputs": [], "source": [ "import catalyst\n", - "from catalyst import dl\n", - "from catalyst.dl import utils\n", + "from catalyst import dl, utils\n", "catalyst.__version__" ] }, @@ -603,7 +602,7 @@ "import torch\n", "from torch import nn\n", "from torch.nn import functional as F\n", - "from catalyst.contrib.nn.modules import GlobalMaxPool2d, Flatten, Lambda\n", + "from catalyst.contrib.nn import GlobalMaxPool2d, Flatten, Lambda\n", "\n", "# Create the discriminator\n", "discriminator = nn.Sequential(\n", @@ -665,14 +664,9 @@ "source": [ "class GANRunner(dl.Runner):\n", " \n", - " def __init__(\n", - " self, \n", - " latent_dim: int, \n", - " model = None, \n", - " device = None, \n", - " ):\n", - " super().__init__(model, device)\n", + " def _init(self, latent_dim: int):\n", " self.latent_dim = latent_dim\n", + " self.experiment = None # spoiler for next lesson ;)\n", "\n", " def predict_batch(self, batch):\n", " random_latent_vectors = torch.randn(1, self.latent_dim).to(self.device)\n", @@ -738,7 +732,8 @@ "source": [ "import os\n", "from torch.utils.data import DataLoader\n", - "from catalyst.contrib.data.dataset import MNIST, ToTensor\n", + "from catalyst.data.cv import ToTensor\n", + "from catalyst.contrib.datasets import MNIST\n", "\n", "loaders = {\n", " \"train\": DataLoader(\n", @@ -832,15 +827,15 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.7.7" }, "pycharm": { "stem_cell": { "cell_type": "raw", - "source": [], "metadata": { "collapsed": false - } + }, + "source": [] } } }, diff --git a/examples/notebooks/demo.ipynb b/examples/notebooks/demo.ipynb index d7641ee600..2f2a187dca 100644 --- a/examples/notebooks/demo.ipynb +++ b/examples/notebooks/demo.ipynb @@ -20,7 +20,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install -U torch==1.4.0 torchvision==0.5.0 torchtext==0.5.0 catalyst==20.05 pandas==1.0.1 tqdm==4.43" + "! 
pip install -U torch==1.4.0 torchvision==0.5.0 torchtext==0.5.0 catalyst==20.10.1 pandas==1.0.1 tqdm==4.43" ] }, { @@ -133,19 +133,15 @@ "import torch\n", "from torch.nn import functional as F\n", "from torch.utils.data import DataLoader\n", - "from torchvision.datasets import MNIST\n", - "from torchvision import transforms\n", + "from catalyst.data.cv import ToTensor\n", + "from catalyst.contrib.datasets import MNIST\n", "\n", "model = torch.nn.Linear(28 * 28, 10)\n", "optimizer = torch.optim.Adam(model.parameters(), lr=0.02)\n", "\n", "loaders = {\n", - " \"train\": DataLoader(\n", - " MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), \n", - " batch_size=32),\n", - " \"valid\": DataLoader(\n", - " MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), \n", - " batch_size=32),\n", + " \"train\": DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32),\n", + " \"valid\": DataLoader(MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32),\n", "}" ] }, @@ -155,8 +151,8 @@ "metadata": {}, "outputs": [], "source": [ - "from catalyst import dl\n", - "from catalyst.utils import metrics\n", + "from catalyst import dl, metrics\n", + "\n", "\n", "class CustomRunner(dl.Runner):\n", " def _handle_batch(self, batch):\n", @@ -224,8 +220,8 @@ "from torch import nn\n", "from torch.nn import functional as F\n", "from torch.utils.data import DataLoader\n", - "from torchvision.datasets import MNIST\n", - "from torchvision import transforms\n", + "from catalyst.data.cv import ToTensor\n", + "from catalyst.contrib.datasets import MNIST\n", "\n", "class ClassifyAE(nn.Module):\n", " def __init__(self, in_features, hid_features, out_features):\n", @@ -245,12 +241,8 @@ "optimizer = torch.optim.Adam(model.parameters(), lr=0.02)\n", "\n", "loaders = {\n", - " \"train\": DataLoader(\n", - " MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), \n", - " batch_size=32),\n", - " \"valid\": DataLoader(\n", - " MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), \n", - " batch_size=32),\n", + " \"train\": DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32),\n", + " \"valid\": DataLoader(MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32),\n", "}" ] }, @@ -260,8 +252,8 @@ "metadata": {}, "outputs": [], "source": [ - "from catalyst import dl\n", - "from catalyst.utils import metrics\n", + "from catalyst import dl, metrics\n", + "\n", "\n", "class CustomRunner(dl.Runner):\n", " def _handle_batch(self, batch):\n", @@ -333,8 +325,8 @@ "from torch import nn\n", "from torch.nn import functional as F\n", "from torch.utils.data import DataLoader\n", - "from torchvision.datasets import MNIST\n", - "from torchvision import transforms\n", + "from catalyst.data.cv import ToTensor\n", + "from catalyst.contrib.datasets import MNIST\n", "\n", "\n", "LOG_SCALE_MAX = 2\n", @@ -388,12 +380,8 @@ "optimizer = torch.optim.Adam(model.parameters(), lr=0.02)\n", "\n", "loaders = {\n", - " \"train\": DataLoader(\n", - " MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), \n", - " batch_size=32),\n", - " \"valid\": DataLoader(\n", - " MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), \n", - " batch_size=32),\n", + " \"train\": DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32),\n", + " 
\"valid\": DataLoader(MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32),\n", "}" ] }, @@ -403,8 +391,8 @@ "metadata": {}, "outputs": [], "source": [ - "from catalyst import dl\n", - "from catalyst.utils import metrics\n", + "from catalyst import dl, metrics\n", + "\n", "\n", "class CustomRunner(dl.Runner):\n", " def _handle_batch(self, batch):\n", @@ -479,6 +467,11 @@ { "cell_type": "code", "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import os\n", @@ -486,8 +479,8 @@ "from torch import nn\n", "from torch.nn import functional as F\n", "from torch.utils.data import DataLoader\n", - "from torchvision.datasets import MNIST\n", - "from torchvision import transforms\n", + "from catalyst.data.cv import ToTensor\n", + "from catalyst.contrib.datasets import MNIST\n", "\n", "class ClassifyUnet(nn.Module):\n", " def __init__(self, in_channels, in_hw, out_features):\n", @@ -508,28 +501,23 @@ "optimizer = torch.optim.Adam(model.parameters(), lr=0.02)\n", "\n", "loaders = {\n", - " \"train\": DataLoader(\n", - " MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), \n", - " batch_size=32),\n", - " \"valid\": DataLoader(\n", - " MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), \n", - " batch_size=32),\n", + " \"train\": DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32),\n", + " \"valid\": DataLoader(MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32),\n", "}" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ - "from catalyst import dl\n", - "from catalyst.utils import metrics\n", + "from catalyst import dl, metrics\n", + "\n", "\n", "class CustomRunner(dl.Runner):\n", " def _handle_batch(self, batch):\n", @@ -558,17 +546,16 @@ " self.optimizer.step()\n", " self.optimizer.zero_grad()\n", " " - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "runner = CustomRunner()\n", @@ -581,31 +568,21 @@ " timeit=False,\n", " logdir=\"./logs_custom_unet\"\n", ")" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "---" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "# GAN" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", @@ -616,7 +593,7 @@ "import torch\n", "from torch import nn\n", "from torch.nn import functional as F\n", - "from catalyst.contrib.nn.modules import GlobalMaxPool2d, Flatten, Lambda\n", + "from catalyst.contrib.nn import GlobalMaxPool2d, Flatten, Lambda\n", "\n", "# Create the discriminator\n", "discriminator = nn.Sequential(\n", @@ -711,16 +688,14 @@ "metadata": {}, "outputs": [], "source": [ - "\n", "import os\n", - "import torchvision.transforms as transforms\n", "from torch.utils.data import DataLoader\n", - "from torchvision.datasets import MNIST\n", + "from catalyst.data.cv import ToTensor\n", + "from catalyst.contrib.datasets import MNIST\n", + "\n", "\n", "loaders = {\n", - " \"train\": DataLoader(\n", - " MNIST(os.getcwd(), 
train=True, download=True, transform=transforms.ToTensor()), \n", - " batch_size=64),\n", + " \"train\": DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32),\n", "}\n", "\n", "runner = CustomRunner()\n", @@ -1242,13 +1217,20 @@ " ]\n", ")" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python [conda env:py37-dev]", "language": "python", - "name": "python3" + "name": "conda-env-py37-dev-py" }, "language_info": { "codemirror_mode": { @@ -1260,9 +1242,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.7.7" } }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/examples/notebooks/segmentation-tutorial.ipynb b/examples/notebooks/segmentation-tutorial.ipynb index f2f7edcbad..9e11b42a50 100644 --- a/examples/notebooks/segmentation-tutorial.ipynb +++ b/examples/notebooks/segmentation-tutorial.ipynb @@ -40,8 +40,7 @@ "outputs": [], "source": [ "# this variable will be used in `runner.train` and by default we disable FP16 mode\n", - "is_fp16_used = False\n", - "is_alchemy_used = False" + "is_fp16_used = False" ] }, { @@ -57,6 +56,9 @@ }, "outputs": [], "source": [ + "# Catalyst\n", + "!pip install catalyst==20.10.1\n", + "\n", "# for augmentations\n", "!pip install albumentations==0.4.3\n", "\n", @@ -66,67 +68,15 @@ "# for TTA\n", "!pip install ttach==0.0.2\n", "\n", - "################\n", - "# Catalyst itself\n", - "!pip install catalyst==20.07\n", - "# For specific version of catalyst, uncomment:\n", - "# ! pip install git+http://github.com/catalyst-team/catalyst.git@{master/commit_hash}\n", - "################\n", - "\n", "# for tensorboard\n", "!pip install tensorflow\n", "\n", - "# for alchemy experiment logging integration, uncomment this 2 lines below\n", - "# !pip install -U alchemy\n", - "# is_alchemy_used = True\n", - "\n", "# if Your machine support Apex FP16, uncomment this 3 lines below\n", "# !git clone https://github.com/NVIDIA/apex\n", "# !pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./apex\n", "# is_fp16_used = True" ] }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "MncoA-G0Y25p" - }, - "source": [ - "### Colab extras – Plotly\n", - "\n", - "To intergate visualization library `plotly` to colab, run cell below" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "import IPython\n", - "\n", - "def configure_plotly_browser_state():\n", - " display(IPython.core.display.HTML('''\n", - " \n", - " \n", - " '''))\n", - "\n", - "\n", - "IPython.get_ipython().events.register('pre_run_cell', configure_plotly_browser_state)" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -150,8 +100,7 @@ "import os\n", "import torch\n", "import catalyst\n", - "\n", - "from catalyst.dl import utils\n", + "from catalyst import utils\n", "\n", "print(f\"torch: {torch.__version__}, catalyst: {catalyst.__version__}\")\n", "\n", @@ -162,63 +111,6 @@ "utils.prepare_cudnn(deterministic=True)" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Reproducibility\n", - "\n", - "[![Alchemy 
logo](https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master/pics/alchemy_logo.png)](https://github.com/catalyst-team/alchemy)\n", - "\n", - "To make your research more reproducible and easy to monitor, Catalyst has an integration with [Alchemy](https://alchemy.host) – experiment tracking tool for deep learning.\n", - "\n", - "To use monitoring, goto [Alchemy](https://alchemy.host/) and get your personal token." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "# for alchemy experiment logging integration, uncomment this 2 lines below\n", - "# !pip install -U alchemy\n", - "# is_alchemy_used = True" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "if is_alchemy_used:\n", - " monitoring_params = {\n", - " \"token\": None, # insert your personal token here\n", - " \"project\": \"segmentation_example\",\n", - " \"group\": \"first_trials\",\n", - " \"experiment\": \"first_experiment\",\n", - " }\n", - " assert monitoring_params[\"token\"] is not None\n", - "else:\n", - " monitoring_params = None" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "-------" - ] - }, { "cell_type": "markdown", "metadata": { @@ -910,7 +802,7 @@ }, "outputs": [], "source": [ - "from catalyst.dl.callbacks import DiceCallback, IouCallback, \\\n", + "from catalyst.dl import DiceCallback, IouCallback, \\\n", " CriterionCallback, MetricAggregationCallback\n", "\n", "callbacks = [\n", @@ -944,10 +836,6 @@ " IouCallback(input_key=\"mask\"),\n", "]\n", "\n", - "if is_alchemy_used:\n", - " from catalyst.dl import AlchemyLogger\n", - " callbacks.append(AlchemyLogger(**monitoring_params))\n", - "\n", "runner.train(\n", " model=model,\n", " criterion=criterion,\n", @@ -971,34 +859,6 @@ ")" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Training analysis\n", - "\n", - "The `utils.plot_metrics` method reads tensorboard logs from the logdir and plots beautiful metrics with `plotly` package." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "# tensorboard should be enought, uncomment to check plotly version\n", - "# it can take a while (colab's issue)\n", - "# utils.plot_metrics(\n", - "# logdir=logdir, \n", - "# # specify which metrics we want to plot\n", - "# metrics=[\"loss\", \"accuracy01\", \"auc/_mean\", \"f1_score\", \"_base/lr\"]\n", - "# )" - ] - }, { "cell_type": "markdown", "metadata": { @@ -1116,7 +976,7 @@ "metadata": {}, "outputs": [], "source": [ - "from catalyst.dl.utils import trace\n", + "from catalyst.utils import trace\n", "\n", "if is_fp16_used:\n", " model = trace.load_traced_model(\n", @@ -1204,7 +1064,7 @@ "outputs": [], "source": [ "from collections import OrderedDict\n", - "from catalyst.dl.callbacks import CheckpointCallback\n", + "from catalyst.dl import CheckpointCallback\n", "\n", "\n", "infer_loaders = {\"infer\": loaders[\"valid\"]}\n", @@ -1365,7 +1225,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.7.7" } }, "nbformat": 4, diff --git a/examples/notebooks/table-data-tutorial.ipynb b/examples/notebooks/table-data-tutorial.ipynb deleted file mode 100644 index fae8aadf8e..0000000000 --- a/examples/notebooks/table-data-tutorial.ipynb +++ /dev/null @@ -1,334 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Catalyst example on table-data\n", - "@DBusAI" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from collections import OrderedDict\n", - "import numpy as np\n", - "from matplotlib.pylab import plt\n", - "%matplotlib inline\n", - "from sklearn.datasets.california_housing import fetch_california_housing\n", - "from sklearn.model_selection import train_test_split\n", - "from sklearn.preprocessing import StandardScaler\n", - "from sklearn.metrics import mean_squared_error\n", - "\n", - "import torch\n", - "from torch import nn\n", - "import torch.nn.functional as F\n", - "from torch.utils.data import TensorDataset, DataLoader\n", - "\n", - "from catalyst.dl import SupervisedRunner\n", - "from catalyst.utils import set_global_seed" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Reproduce all\n", - "Catalyst provides a special utils for research results reproducibility." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "SEED=42\n", - "set_global_seed(SEED)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Get some data\n", - "In this tutorial we will use \n", - "[California dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_california_housing.html )
\n", - "Also, we split all data: 75/25 - for training /validation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "X, y = fetch_california_housing(return_X_y=True)\n", - "x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=SEED)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Dataset definition\n", - "\n", - "We have to normalize all X-data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mscl = StandardScaler()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "x_train = mscl.fit_transform(x_train)\n", - "x_test = mscl.transform(x_test)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And prepare PyTorch Datasets" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "train_ds = TensorDataset(torch.FloatTensor(x_train), torch.FloatTensor(y_train.reshape(-1,1)))\n", - "valid_ds = TensorDataset(torch.FloatTensor(x_test), torch.FloatTensor(y_test.reshape(-1,1)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### DataLoader definition\n", - "\n", - "We have to define bacth size and shuffle train data: " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "batch = 128\n", - "\n", - "train_dl = DataLoader(train_ds, batch_size=batch, shuffle=True, num_workers=1)\n", - "valid_dl = DataLoader(valid_ds, batch_size=batch, shuffle=False, num_workers=1)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Catalyst loader:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "data = OrderedDict()\n", - "data[\"train\"] = train_dl\n", - "data[\"valid\"] = valid_dl" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Define model\n", - "\n", - "Our Neural Network structure will be very simple. Just MLP with 40,20,1 linear layers. Also, default initialization. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class Net(nn.Module):\n", - " def __init__(self, num_features):\n", - " super(Net,self).__init__()\n", - " layers = [40, 20]\n", - " self.L1 = nn.Linear(num_features, layers[0])\n", - " torch.nn.init.xavier_uniform_(self.L1.weight) \n", - " torch.nn.init.zeros_(self.L1.bias)\n", - " \n", - " self.L2 = nn.Linear(layers[0], layers[1])\n", - " torch.nn.init.xavier_uniform_(self.L2.weight) \n", - " torch.nn.init.zeros_(self.L2.bias)\n", - " \n", - " self.L3 = nn.Linear(layers[1], 1)\n", - " torch.nn.init.xavier_uniform_(self.L3.weight) \n", - " torch.nn.init.zeros_(self.L3.bias)\n", - " def forward(self, x):\n", - " x = F.relu(self.L1(x))\n", - " x = F.relu(self.L2(x))\n", - " x = F.relu(self.L3(x))\n", - " return x" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model = Net(x_train.shape[1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Default optimizer and L2 loss" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n", - "criterion = nn.MSELoss()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For model training we need SupervisedRunner and train method:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "runner = SupervisedRunner()\n", - "runner.train(\n", - " model=model,\n", - " criterion=criterion,\n", - " optimizer=optimizer,\n", - " loaders=data,\n", - " logdir=\"run\",\n", - " load_best_on_end=True,\n", - " num_epochs=20)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Inference\n", - "\n", - "Inference part is mush easier:
\n", - "/checkpoints/best.pth - is default dir for checkpoints
\n", - "run - our logdir" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "predictions = np.vstack(list(map(\n", - " lambda x: x[\"logits\"].cpu().numpy(), \n", - " runner.predict_loader(loader=data[\"valid\"])\n", - ")))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Results\n", - "\n", - "Let's calculate MSE error " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mean_squared_error(y_test, predictions)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Prediction Viz\n", - "\n", - "And finally - show scatterplot for our predictions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plt.scatter(y_test, predictions.flatten())" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - }, - "pycharm": { - "stem_cell": { - "cell_type": "raw", - "source": [], - "metadata": { - "collapsed": false - } - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file From ded665d4b5afb1c6f37f3389bf84a4dc3fe74193 Mon Sep 17 00:00:00 2001 From: Sergey Kolesnikov Date: Sun, 18 Oct 2020 15:43:44 +0300 Subject: [PATCH 2/3] docs update --- .github/ISSUE_TEMPLATE/bug_report.md | 4 ++++ .github/ISSUE_TEMPLATE/documentation.md | 4 ++++ .github/ISSUE_TEMPLATE/feature_request.md | 4 ++++ .github/ISSUE_TEMPLATE/question.md | 4 ++++ .github/PULL_REQUEST_TEMPLATE.md | 6 +++++- .github/workflows/greetings.yml | 4 ++-- CHANGELOG.md | 5 +++-- README.md | 18 ++++++++++-------- 8 files changed, 36 insertions(+), 13 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 60213c3e43..beab074bb2 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -59,3 +59,7 @@ cuDNN version: No CUDA ### Additional context + + +PS +- [x] I know, that I could [join slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion. diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md index df7cce86dc..fa195e189d 100644 --- a/.github/ISSUE_TEMPLATE/documentation.md +++ b/.github/ISSUE_TEMPLATE/documentation.md @@ -16,3 +16,7 @@ For typos and documentation fixes, please go ahead and: 3. Submit a PR. Thank you! + + +PS +- [x] I know, that I could [join slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 96757363c2..dca79b4c7b 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -25,3 +25,7 @@ assignees: Scitator, bagxi, Ditwoo ### Additional context + + +PS +- [x] I know, that I could [join slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion. 
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index d2594ce841..b1e3ddeaa2 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -52,3 +52,7 @@ cuDNN version: No CUDA ### Additional context + + +PS +- [x] I know, that I could [join slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 69234d9d37..3603747701 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -39,4 +39,8 @@ Anyone in the community is free to review the PR once the tests have passed. If we didn't discuss your PR in Github issues there's a high chance it will not be merged. - \ No newline at end of file + + + +PS +- [x] I know, that I could [join slack](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for pull request discussion. diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index c871c2dd2e..8842483cc6 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -9,5 +9,5 @@ jobs: - uses: actions/first-interaction@v1 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - issue-message: 'Hi! Thank you for your contribution! Great first issue!' - pr-message: 'Hey thanks for the pull request! Please give us a bit of time to review it!' + issue-message: 'Hi! Thank you for your contribution! Great first issue! Please [join our slack](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion (#__questions channel).' + pr-message: 'Hey thanks for the pull request! Please [join our slack](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for pull request discussion (#__questions channel).' diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b101c613d..0e361378cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,11 +13,12 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed -- +- update Catalyst version to `20.10.1` for tutorials ([#967](https://github.com/catalyst-team/catalyst/pull/967)) +- added link to dl-course ([#967](https://github.com/catalyst-team/catalyst/pull/967)) ### Removed -- +- several deprecated tutorials ([#967](https://github.com/catalyst-team/catalyst/pull/967)) ### Fixed diff --git a/README.md b/README.md index 5471786fad..ce35aa75d5 100644 --- a/README.md +++ b/README.md @@ -108,14 +108,16 @@ traced_model = runner.trace(loader=loaders["valid"]) ### Step by step guide 1. Start with [Catalyst 101 — Accelerated PyTorch](https://medium.com/pytorch/catalyst-101-accelerated-pytorch-bd766a556d92?source=friends_link&sk=d3dd9b2b23500eca046361187b4619ff) introduction. -2. Check [minimal examples](#minimal-examples). -3. Try [notebook tutorials with Google Colab](#tutorials). -4. Read [blogposts](#blogposts) with use-cases and guides (and Config API intro). -5. Go through advanced [classification](https://github.com/catalyst-team/classification), [detection](https://github.com/catalyst-team/detection) and [segmentation](https://github.com/catalyst-team/segmentation) pipelines with Config API. More pipelines available under [projects section](#projects). -6. Want more? See [Alchemy](https://github.com/catalyst-team/alchemy) and [Reaction](https://github.com/catalyst-team/reaction) packages. -7. 
For Catalyst.RL introduction, please follow [Catalyst.RL repo](https://github.com/catalyst-team/catalyst-rl).
-8. If you would like to contribute to the project, follow our [contribution guidelines](https://github.com/catalyst-team/catalyst/blob/master/CONTRIBUTING.md).
-If you want to support the project, feel free to donate on [patreon page](https://patreon.com/catalyst_team) or [write us]((#user-feedback)) with your proposals.
+1. Check [minimal examples](#minimal-examples).
+1. Try [notebook tutorials with Google Colab](#tutorials).
+1. Read [blogposts](#blogposts) with use-cases and guides.
+1. Learn machine learning with our ["Deep Learning with Catalyst" course](https://github.com/catalyst-team/dl-course).
+1. Or go directly to advanced [classification](https://github.com/catalyst-team/classification), [detection](https://github.com/catalyst-team/detection) or [segmentation](https://github.com/catalyst-team/segmentation) pipelines with Config API.
+1. Want more? See [Alchemy](https://github.com/catalyst-team/alchemy) and [Reaction](https://github.com/catalyst-team/reaction) packages.
+1. RL fan? Please follow [Catalyst.RL repo](https://github.com/catalyst-team/catalyst-rl).
+1. If you would like to contribute to the project, follow our [contribution guidelines](https://github.com/catalyst-team/catalyst/blob/master/CONTRIBUTING.md).
+1. If you want to support the project, feel free to donate on our [Patreon page](https://patreon.com/catalyst_team) or [write us](#user-feedback) with your proposals.
+1. **Finally, do not forget to [join our slack](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for collaboration**.
 
 ## Table of Contents
 
From 1458a8c3fb0a3f689e5a593895db1c7dae60d809 Mon Sep 17 00:00:00 2001
From: Sergey Kolesnikov
Date: Sun, 18 Oct 2020 15:45:01 +0300
Subject: [PATCH 3/3] docs update

---
 .github/ISSUE_TEMPLATE/bug_report.md      | 2 +-
 .github/ISSUE_TEMPLATE/documentation.md   | 2 +-
 .github/ISSUE_TEMPLATE/feature_request.md | 2 +-
 .github/ISSUE_TEMPLATE/question.md        | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index beab074bb2..58b471ab88 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -62,4 +62,4 @@ cuDNN version: No CUDA
 
 
 PS
-- [x] I know, that I could [join slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion.
+- [x] I know that I can [join the Catalyst slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion.
diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md
index fa195e189d..b115b60bba 100644
--- a/.github/ISSUE_TEMPLATE/documentation.md
+++ b/.github/ISSUE_TEMPLATE/documentation.md
@@ -19,4 +19,4 @@ Thank you!
 
 
 PS
-- [x] I know, that I could [join slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion.
+- [x] I know that I can [join the Catalyst slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index dca79b4c7b..ad0eed26f8 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -28,4 +28,4 @@ assignees: Scitator, bagxi, Ditwoo
 
 
 PS
-- [x] I know, that I could [join slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion.
+- [x] I know that I can [join the Catalyst slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion.
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md
index b1e3ddeaa2..b9b8e8c8dd 100644
--- a/.github/ISSUE_TEMPLATE/question.md
+++ b/.github/ISSUE_TEMPLATE/question.md
@@ -55,4 +55,4 @@ cuDNN version: No CUDA
 
 
 PS
-- [x] I know, that I could [join slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion.
+- [x] I know that I can [join the Catalyst slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion.