This is an automated email from the ASF dual-hosted git repository.

mykolabodnar pushed a commit to branch DATALAB-2409
in repository https://gitbox.apache.org/repos/asf/incubator-datalab.git

commit 7c06edf11311f977f53933aee00244f8272739a0
Merge: 645e7b6 46b6956
Author: bodnarmykola <[email protected]>
AuthorDate: Thu Sep 9 13:05:38 2021 +0300

    Merge branch 'develop' into DATALAB-2409
    
    # Conflicts:
    #   infrastructure-provisioning/scripts/deploy_datalab.py
    #   infrastructure-provisioning/src/general/lib/os/debian/common_lib.py
    #   infrastructure-provisioning/src/general/lib/os/fab.py
    #   infrastructure-provisioning/src/general/scripts/aws/common_create_role_policy.py
    #   infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
    #   infrastructure-provisioning/src/ssn/scripts/configure_docker.py

 .../scripts/deploy_datalab.py                      |  28 +-
 .../scripts/deploy_repository/deploy_repository.py |   2 +-
 .../scripts/jenkins/sonar.py                       |   4 +-
 .../scripts/configure_keycloak.py                  |  16 +-
 .../src/base/scripts/install_user_key.py           |   2 +-
 .../src/dataengine/scripts/configure_dataengine.py |   5 +-
 .../src/general/conf/datalab.ini                   |   8 +
 .../files/azure/deeplearning_description.json      |   2 +-
 .../src/general/lib/aws/actions_lib.py             |   7 +-
 .../src/general/lib/azure/actions_lib.py           |  77 ++-
 .../src/general/lib/os/debian/common_lib.py        | 215 ++++--
 .../src/general/lib/os/fab.py                      |  17 +-
 .../scripts/aws/common_create_role_policy.py       |   3 +-
 .../scripts/aws/dataengine-service_create.py       |   6 +-
 .../scripts/aws/dataengine-service_prepare.py      |   2 +
 .../src/general/scripts/aws/edge_configure.py      |   2 +-
 .../src/general/scripts/aws/project_prepare.py     |   4 +
 .../src/general/scripts/aws/ssn_configure.py       |  12 +
 .../src/general/scripts/aws/ssn_prepare.py         |   2 +
 .../src/general/scripts/aws/ssn_terminate.py       |  38 ++
 .../scripts/azure/common_prepare_notebook.py       |  35 +-
 .../general/scripts/azure/dataengine_configure.py  |   4 +-
 .../src/general/scripts/azure/edge_configure.py    |   2 +-
 .../src/general/scripts/azure/ssn_configure.py     |  21 +
 .../src/general/scripts/azure/ssn_terminate.py     |  38 ++
 .../src/general/scripts/gcp/edge_configure.py      |   2 +-
 .../src/general/scripts/gcp/ssn_configure.py       |  21 +
 .../src/general/scripts/gcp/ssn_terminate.py       |  22 +
 .../general/scripts/os/common_clean_instance.py    |   2 +
 .../scripts/os/update_inactivity_on_start.py       |   2 +
 .../src/ssn/scripts/configure_docker.py            |  36 +-
 .../src/ssn/scripts/configure_ui.py                |  12 +-
 .../zeppelin/scripts/configure_zeppelin_node.py    |   2 +-
 .../com/epam/datalab/dto/ResourceSysBaseDTO.java   |   8 +-
 .../com/epam/datalab/dto/base/edge/EdgeInfo.java   |   7 +-
 .../dto/exploratory/ExploratoryActionDTO.java      |   1 -
 .../response/handlers/EdgeCallbackHandler.java     |   3 +-
 .../response/handlers/ProjectCallbackHandler.java  |   1 -
 .../response/handlers/ResourceCallbackHandler.java |   1 -
 .../handlers/ResourcesStatusCallbackHandler.java   |  21 +-
 .../backendapi/domain/NotebookTemplate.java        |   5 +-
 .../backendapi/resources/ProjectResource.java      |   1 +
 .../resources/callback/ProjectCallback.java        |   6 +-
 .../resources/dto/ProjectActionFormDTO.java        |   2 +
 .../service/impl/ExploratoryServiceImpl.java       |   2 +-
 .../service/impl/LibraryServiceImpl.java           |   6 +-
 .../service/impl/ProjectServiceImpl.java           |   5 +-
 .../configuration/configuration.component.html     | 104 ++-
 .../configuration/configuration.component.scss     |  48 +-
 .../configuration/configuration.component.ts       | 203 +++---
 .../backup-dilog/backup-dilog.component.html       |  41 +-
 .../backup-dilog/backup-dilog.component.ts         |  41 +-
 .../management/endpoints/endpoints.component.html  | 122 +++-
 .../management/endpoints/endpoints.component.scss  |  24 +-
 .../management/endpoints/endpoints.component.ts    | 103 +--
 .../manage-environment-dilog.component.html        |  99 ++-
 .../manage-environment-dilog.component.scss        |   8 +-
 .../manage-environment-dilog.component.ts          |  34 +-
 .../management/management-data.service.ts          |   6 +-
 .../management-grid/management-grid.component.html | 337 +++++++---
 .../management-grid/management-grid.component.scss | 164 +++--
 .../management-grid/management-grid.component.ts   | 116 ++--
 .../management/management.component.html           |  59 +-
 .../management/management.component.ts             | 128 ++--
 .../administration/management/management.model.ts  |   1 -
 .../ssn-monitor/ssn-monitor.component.html         |   9 +-
 .../ssn-monitor/ssn-monitor.component.scss         |   8 +-
 .../ssn-monitor/ssn-monitor.component.ts           |   7 +-
 .../administration/project/project-data.service.ts |   7 -
 .../project-form/project-form.component.html       | 155 ++++-
 .../project-form/project-form.component.scss       |   8 +-
 .../project/project-form/project-form.component.ts |  15 +-
 .../project-list/project-list.component.html       | 193 +++---
 .../project-list/project-list.component.scss       |  68 +-
 .../project/project-list/project-list.component.ts |  88 +--
 .../administration/project/project.component.html  |  27 +-
 .../administration/project/project.component.ts    |  56 +-
 .../app/administration/roles/roles.component.html  | 137 ++--
 .../app/administration/roles/roles.component.scss  |  77 ++-
 .../app/administration/roles/roles.component.ts    | 259 +++++---
 .../resources/webapp/src/app/app.routing.module.ts | 153 +++--
 .../app/core/directives/click-outside.directive.ts |   9 +-
 .../src/app/core/directives/scrollTo.directive.ts  |   2 +-
 .../src/app/core/interceptors/error.interceptor.ts |   5 +-
 .../core/interceptors/http.token.interceptor.ts    |  12 +-
 ...mputationalResourceApplicationTemplate.model.ts |   7 +-
 .../models/computationalResourceImage.model.ts     |  17 +-
 .../app/core/models/resourceShapeTypes.model.ts    |   3 +-
 .../webapp/src/app/core/pipes/highlight.pipe.ts    |   7 +-
 .../app/core/pipes/lib-sort-pipe/lib-sort.pipe.ts  |   2 +-
 .../local-currency-pipe/local-currency.pipe.ts     |   4 +-
 .../src/app/core/pipes/local-date-pipe/index.ts    |   2 +-
 .../core/pipes/local-date-pipe/local-date.pipe.ts  |   6 +-
 .../core/pipes/long-date-pipe/long-date.pipe.ts    |   8 +-
 .../core/services/applicationSecurity.service.ts   |   3 +-
 .../services/applicationServiceFacade.service.ts   |   5 +-
 .../webapp/src/app/core/services/audit.service.ts  |   6 +
 .../src/app/core/services/authorization.guard.ts   |   6 +-
 .../app/core/services/bucket-browser.service.ts    |  11 +-
 .../src/app/core/services/checkParams.guard.ts     |   6 +-
 .../app/core/services/configutration.service.ts    |   3 +-
 .../src/app/core/services/healthStatus.service.ts  |  14 +-
 .../core/services/librariesInstallation.service.ts |   3 +-
 .../app/core/services/odahu-deployment.service.ts  |   8 +-
 .../src/app/core/services/progress-bar.service.ts  |   5 +-
 .../src/app/core/services/project.service.ts       |   3 +-
 .../src/app/core/services/userAccessKey.service.ts |  15 +-
 .../src/app/core/services/userResource.service.ts  |  15 +-
 .../webapp/src/app/core/util/checkUtils.ts         |   1 +
 .../webapp/src/app/core/util/compareUtils.ts       |   3 -
 .../webapp/src/app/core/util/dateUtils.ts          |   4 +-
 .../webapp/src/app/core/util/http-status-codes.ts  | 106 +--
 .../webapp/src/app/core/util/sortUtils.ts          |   3 +-
 .../webapp/src/app/login/login.component.css       |  38 +-
 .../audit/audit-grid/audit-grid.component.html     | 150 ++++-
 .../audit/audit-grid/audit-grid.component.scss     |  70 +-
 .../audit/audit-grid/audit-grid.component.ts       |  70 +-
 .../audit-toolbar/audit-toolbar.component.scss     |  11 +-
 .../audit/audit-toolbar/audit-toolbar.component.ts |   9 +-
 .../src/app/reports/audit/audit.component.ts       |   7 +-
 .../reporting-grid/reporting-grid.component.html   | 256 ++++++--
 .../reporting-grid/reporting-grid.component.scss   | 173 ++---
 .../reporting-grid/reporting-grid.component.ts     |  74 ++-
 .../app/reports/reporting/reporting.component.ts   |  39 +-
 .../src/app/reports/reporting/reporting.module.ts  |   4 +-
 .../reporting/toolbar/toolbar.component.html       |  18 +-
 .../reporting/toolbar/toolbar.component.scss       |  11 +-
 .../reports/reporting/toolbar/toolbar.component.ts |  16 +-
 .../bucket-browser/bucket-browser.component.html   | 276 ++++----
 .../bucket-browser/bucket-browser.component.scss   | 502 +++++++-------
 .../bucket-browser/bucket-browser.component.ts     | 165 ++---
 .../bucket-browser/bucket-browser.module.ts        |  46 +-
 .../bucket-confirmation-dialog.component.html      |  98 ++-
 .../bucket-confirmation-dialog.component.scss      |  93 +--
 .../bucket-confirmation-dialog.component.ts        |  13 +-
 .../bucket-browser/bucket-data.service.ts          |  83 ++-
 .../buckets-tree/bucket-tree.component.html        |  26 +-
 .../buckets-tree/bucket-tree.component.scss        |  44 +-
 .../buckets-tree/bucket-tree.component.ts          |   4 +-
 .../folder-tree/folder-tree.component.html         |  56 +-
 .../folder-tree/folder-tree.component.scss         |  45 +-
 .../folder-tree/folder-tree.component.ts           | 108 +--
 .../bucket-browser/upload-window.component.scss    |  94 +--
 .../cluster-details/cluster-details.component.html | 123 ++--
 .../cluster-details/cluster-details.component.scss |   6 +-
 .../cluster-details/cluster-details.component.ts   |  38 +-
 .../computational/cluster-details/index.ts         |  10 +-
 ...utational-resource-create-dialog.component.html | 378 ++++++-----
 ...utational-resource-create-dialog.component.scss |  43 +-
 ...mputational-resource-create-dialog.component.ts |  33 +-
 .../computational-resources-list.component.html    |  46 +-
 .../computational-resources-list.component.scss    | 208 +++---
 .../computational-resources-list.component.ts      |   2 +-
 .../ami-create-dialog.component.html               |  20 +-
 .../ami-create-dialog.component.scss               |   4 +-
 .../ami-create-dialog.component.ts                 |  15 +-
 .../cost-details-dialog.component.html             |  41 +-
 .../cost-details-dialog.component.scss             |  18 +-
 .../create-environment.component.html              | 311 +++++----
 .../create-environment.component.scss              |  11 +-
 .../create-environment.component.ts                |  31 +-
 .../detail-dialog/detail-dialog.component.html     | 378 ++++++-----
 .../detail-dialog/detail-dialog.component.scss     |  41 +-
 .../detail-dialog/detail-dialog.component.ts       |  53 +-
 .../install-libraries.component.html               | 730 +++++++++++----------
 .../install-libraries.component.scss               | 118 ++--
 .../install-libraries.component.ts                 |  68 +-
 .../install-libraries/install-libraries.model.ts   |  15 +-
 .../libraries-info.component.scss                  |  43 +-
 .../manage-ungit/manage-ungit.component.html       |  78 ++-
 .../manage-ungit/manage-ungit.component.scss       |  18 +-
 .../src/app/resources/resources-grid/index.ts      |  38 +-
 .../resources-grid/resources-grid.component.html   | 229 ++++---
 .../resources-grid/resources-grid.component.scss   | 119 ++--
 .../resources-grid/resources-grid.component.ts     | 123 ++--
 .../resources-grid/resources-grid.model.ts         |   4 +-
 .../src/app/resources/resources.component.html     |  42 +-
 .../src/app/resources/resources.component.ts       |  24 +-
 .../webapp/src/app/resources/resources.module.ts   |  30 +-
 .../resources/scheduler/scheduler.component.html   | 317 +++++----
 .../resources/scheduler/scheduler.component.scss   |  18 +-
 .../app/resources/scheduler/scheduler.component.ts |  21 +-
 .../src/app/shared/bubble/bubble.component.css     |   4 +-
 .../src/app/shared/bubble/bubble.component.ts      |  15 +-
 .../dropdown-list/dropdown-list.component.html     |   8 +-
 .../shared/form-controls/dropdowns.component.scss  |  36 +-
 .../multi-level-select-dropdown.component.html     | 167 +++--
 .../multi-level-select-dropdown.component.scss     |  72 +-
 .../multi-level-select-dropdown.component.ts       |  17 +-
 .../multi-select-dropdown.component.html           |  51 +-
 .../inform-message/inform-message.component.css    |  10 +-
 .../inform-message/inform-message.component.ts     |  12 +-
 .../src/app/shared/navbar/navbar.component.html    | 129 ++--
 .../src/app/shared/navbar/navbar.component.scss    |  56 +-
 .../src/app/shared/navbar/navbar.component.ts      |  17 +-
 .../src/app/shared/time-picker/ticker.component.ts |   9 +-
 .../app/shared/time-picker/time-cover.component.ts |   2 +-
 .../shared/time-picker/time-picker.component.scss  |  44 +-
 .../shared/time-picker/time-picker.component.ts    |  20 +-
 .../src/app/webterminal/webterminal.component.ts   |   3 +-
 .../webapp/src/assets/styles/_dialogs.scss         | 139 ++--
 .../webapp/src/assets/styles/_general.scss         | 161 +++--
 .../resources/webapp/src/assets/styles/_reset.scss |  12 +-
 .../resources/webapp/src/assets/styles/_theme.scss | 271 ++++----
 .../src/main/resources/webapp/src/styles.scss      | 116 ++--
 .../backendapi/resources/ProjectResourceTest.java  |   4 +-
 206 files changed, 6951 insertions(+), 4797 deletions(-)

diff --cc infrastructure-provisioning/scripts/deploy_datalab.py
index 5932d34,7e9541d..efeeaca
--- a/infrastructure-provisioning/scripts/deploy_datalab.py
+++ b/infrastructure-provisioning/scripts/deploy_datalab.py
@@@ -63,194 -24,165 +63,196 @@@
  import argparse
  import os
  import subprocess
 -from fabric import *
 -from invoke import task
 -
 -parser = argparse.ArgumentParser()
 -parser.add_argument('--conf_service_base_name', type=str, help='unique name for DataLab environment')
 -parser.add_argument('--conf_network_type', type=str, default='',
 -                    help='Define in which network DataLab will be deployed. '
 -                         'Possible options: public|private')
 -parser.add_argument('--conf_vpc_cidr', type=str, default='', help='CIDR of VPC')
 -parser.add_argument('--conf_vpc2_cidr', type=str, default='', help='CIDR of secondary VPC')
 -parser.add_argument('--conf_allowed_ip_cidr', type=str, default='', help='Comma-separated CIDR of IPs which will have '
 -                                                                         'access to SSN')
 -parser.add_argument('--conf_user_subnets_range', type=str, default='', help='Range of subnets which will be using for '
 -                                                                            'users environments. For example: '
 -                                                                            '10.10.0.0/24 - 10.10.10.0/24')
 -parser.add_argument('--conf_private_subnet_prefix', type=str, default='24', help='Private subnet prefix')
 -parser.add_argument('--conf_additional_tags', type=str, default='', help='Additional tags in format '
 -                                                                         '"Key1:Value1;Key2:Value2"')
 -parser.add_argument('--conf_image_enabled', type=str, default='', help='Enable or Disable creating image at first time')
 -parser.add_argument('--aws_user_predefined_s3_policies', type=str, default='', help='Predefined policies for users '
 -                                                                                    'instances')
 -parser.add_argument('--aws_access_key', type=str, default='', help='AWS Access Key ID')
 -parser.add_argument('--aws_secret_access_key', type=str, default='', help='AWS Secret Access Key')
 -parser.add_argument('--aws_region', type=str, default='', help='AWS region')
 -parser.add_argument('--aws_zone', type=str, default='', help='AWS zone')
 -parser.add_argument('--azure_region', type=str, default='', help='Azure region')
 -parser.add_argument('--gcp_region', type=str, default='', help='GCP region')
 -parser.add_argument('--gcp_zone', type=str, default='', help='GCP zone')
 -parser.add_argument('--conf_os_family', type=str, default='',
 -                    help='Operating system type. Available options: debian, redhat')
 -parser.add_argument('--conf_cloud_provider', type=str, default='',
 -                    help='Where DataLab should be deployed. Available options: aws, azure, gcp')
 -parser.add_argument('--ssn_hosted_zone_name', type=str, default='', help='Name of hosted zone')
 -parser.add_argument('--ssn_hosted_zone_id', type=str, default='', help='ID of hosted zone')
 -parser.add_argument('--ssn_subdomain', type=str, default='', help='Subdomain name')
 -parser.add_argument('--ssn_assume_role_arn', type=str, default='', help='Role ARN for creating Route53 record in '
 -                                                                        'different AWS account')
 -parser.add_argument('--ssl_cert_path', type=str, default='', help='Full path to SSL certificate')
 -parser.add_argument('--ssl_key_path', type=str, default='', help='Full path to key for SSL certificate')
 -parser.add_argument('--aws_vpc_id', type=str, default='', help='AWS VPC ID')
 -parser.add_argument('--conf_duo_vpc_enable', type=str, default='false', help='Duo VPC scheme enable(true|false)')
 -parser.add_argument('--aws_vpc2_id', type=str, default='', help='Secondary AWS VPC ID')
 -parser.add_argument('--aws_peering_id', type=str, default='', help='Amazon peering connection id')
 -parser.add_argument('--azure_vpc_name', type=str, default='', help='Azure VPC Name')
 -parser.add_argument('--gcp_vpc_name', type=str, default='', help='GCP VPC Name')
 -parser.add_argument('--aws_subnet_id', type=str, default='', help='AWS Subnet ID')
 -parser.add_argument('--azure_subnet_name', type=str, default='', help='Azure Subnet Name')
 -parser.add_argument('--gcp_subnet_name', type=str, default='', help='GCP Subnet Name')
 -parser.add_argument('--aws_security_groups_ids', type=str, default='', help='One of more comma-separated Security '
 -                                                                            'groups IDs for SSN')
 -parser.add_argument('--azure_security_group_name', type=str, default='', help='One Security'
 -                                                                              'group name for SSN')
 -parser.add_argument('--azure_edge_security_group_name', type=str, default='', help='One Security '
 -                                                                              'group name for Edge node')
 -parser.add_argument('--gcp_firewall_name', type=str, default='', help='One of more comma-separated GCP Firewall rules '
 -                                                                      'for SSN')
 -parser.add_argument('--key_path', type=str, default='', help='Path to admin key (WITHOUT KEY NAME)')
 -parser.add_argument('--conf_key_name', type=str, default='', help='Admin key name (WITHOUT ".pem")')
 -parser.add_argument('--workspace_path', type=str, default='', help='Admin key name (WITHOUT ".pem")')
 -parser.add_argument('--conf_tag_resource_id', type=str, default='datalab', help='The name of user tag')
 -parser.add_argument('--conf_billing_tag', type=str, default='datalab', help='Billing tag')
 -parser.add_argument('--aws_ssn_instance_size', type=str, default='t2.large', help='The SSN instance shape')
 -parser.add_argument('--azure_ssn_instance_size', type=str, default='Standard_DS2_v2', help='The SSN instance shape')
 -parser.add_argument('--gcp_ssn_instance_size', type=str, default='n1-standard-2', help='The SSN instance shape')
 -parser.add_argument('--aws_account_id', type=str, default='', help='The ID of Amazon account')
 -parser.add_argument('--aws_billing_bucket', type=str, default='', help='The name of S3 bucket where billing reports '
 -                                                                       'will be placed.')
 -parser.add_argument('--aws_job_enabled', type=str, default='false', help='Billing format. Available options: '
 -                                                                         'true (aws), false(epam)')
 -parser.add_argument('--aws_report_path', type=str, default='', help='The path to billing reports directory in S3 '
 -                                                                    'bucket')
 -parser.add_argument('--aws_permissions_boundary_arn', type=str, default='', help='Permission boundary to be attached to new roles')
 -parser.add_argument('--azure_resource_group_name', type=str, default='', help='Name of Resource group in Azure')
 -parser.add_argument('--azure_auth_path', type=str, default='', help='Full path to Azure credentials JSON file')
 -parser.add_argument('--azure_datalake_enable', type=str, default='', help='Provision DataLake storage account')
 -parser.add_argument('--azure_ad_group_id', type=str, default='', help='ID of Azure AD group')
 -parser.add_argument('--azure_offer_number', type=str, default='', help='Azure offer number')
 -parser.add_argument('--azure_currency', type=str, default='', help='Azure currency code')
 -parser.add_argument('--azure_locale', type=str, default='', help='Azure locale')
 -parser.add_argument('--azure_application_id', type=str, default='', help='Azure login application ID')
 -parser.add_argument('--azure_validate_permission_scope', type=str, default='true', help='Azure permission scope '
 -                                                                                        'validation(true|false).')
 -parser.add_argument('--azure_oauth2_enabled', type=str, default='false', help='Using OAuth2 for logging in DataLab')
 -parser.add_argument('--azure_region_info', type=str, default='', help='Azure region info')
 -parser.add_argument('--azure_source_vpc_name', type=str, default='', help='Azure VPC source Name')
 -parser.add_argument('--azure_source_resource_group_name', type=str, default='', help='Azure source resource group')
 -parser.add_argument('--gcp_project_id', type=str, default='', help='The project ID in Google Cloud Platform')
 -parser.add_argument('--gcp_service_account_path', type=str, default='', help='The project ID in Google Cloud Platform')
 -parser.add_argument('--datalab_id', type=str, default="'resource_tags_user_user_tag'",
 -                    help='Column name in report file that contains '
 -                         'datalab id tag')
 -parser.add_argument('--usage_date', type=str, default='line_item_usage_start_date',
 -                    help='Column name in report file that contains '
 -                         'usage date tag')
 -parser.add_argument('--product', type=str, default='product_product_name',
 -                    help='Column name in report file that contains '
 -                         'product name tag')
 -parser.add_argument('--usage_type', type=str, default='line_item_usage_type',
 -                    help='Column name in report file that contains '
 -                         'usage type tag')
 -parser.add_argument('--usage', type=str, default='line_item_usage_amount',
 -                    help='Column name in report file that contains '
 -                         'usage tag')
 -parser.add_argument('--cost', type=str, default='line_item_blended_cost',
 -                    help='Column name in report file that contains cost tag')
 -parser.add_argument('--resource_id', type=str, default='line_item_resource_id',
 -                    help='Column name in report file that contains '
 -                         'datalab resource id tag')
 -parser.add_argument('--ldap_hostname', type=str, default='localhost', help='Ldap instance hostname')
 -parser.add_argument('--ldap_dn', type=str, default='dc=example,dc=com',
 -                    help='Ldap distinguished name')
 -parser.add_argument('--ldap_ou', type=str, default='ou=People', help='Ldap organisation unit')
 -parser.add_argument('--ldap_service_username', type=str, default='cn=service-user', help='Ldap service user name')
 -parser.add_argument('--ldap_service_password', type=str, default='service-user-password',
 -                    help='Ldap password for admin user')
 -parser.add_argument('--keycloak_realm_name', type=str, default='datalab', help='Keycloak Realm name')
 -parser.add_argument('--keycloak_auth_server_url', type=str, default='datalab', help='Keycloak auth server URL')
 -parser.add_argument('--keycloak_client_name', type=str, help='Keycloak client name')
 -parser.add_argument('--keycloak_client_secret', type=str, help='Keycloak client secret')
 -parser.add_argument('--keycloak_user', type=str, default='datalab', help='Keycloak user')
 -parser.add_argument('--keycloak_user_password', type=str, default='keycloak-user-password',
 -                    help='Keycloak user password')
 -parser.add_argument('--tags', type=str, default='line_item_operation,line_item_line_item_description',
 -                    help='Column name in report file that '
 -                         'contains tags')
 -parser.add_argument('--billing_dataset_name', type=str, default='', help='Name of GCP dataset (BigQuery service)'
 -                                                                         ' for billing')
 -parser.add_argument('--default_endpoint_name', type=str, default='local', help='Name of localhost provisioning service,'
 -                                                                               'that created by default')
 -parser.add_argument('--conf_stepcerts_enabled', type=str, default='false', help='Enable or disable step certificates')
 -parser.add_argument('--conf_stepcerts_root_ca', type=str, default='', help='Step root CA')
 -parser.add_argument('--conf_stepcerts_kid', type=str, default='', help='Step KID')
 -parser.add_argument('--conf_stepcerts_kid_password', type=str, default='', help='Step KID password')
 -parser.add_argument('--conf_stepcerts_ca_url', type=str, default='', help='Step CA URL')
 -parser.add_argument('--conf_letsencrypt_enabled', type=str, default='false',
 -                    help='Enable or disable Let`s Encrypt certificates')
 -parser.add_argument('--conf_repository_user', type=str, default='',
 -                    help='user to access repository (used for jars download)')
 -parser.add_argument('--conf_release_tag', type=str, default='2.5',
 -                    help='tag used for jars download')
 -parser.add_argument('--conf_repository_pass', type=str, default='',
 -                    help='password to access repository (used for jars download)')
 -parser.add_argument('--conf_repository_address', type=str, default='',
 -                    help='address to access repository (used for jars download)')
 -parser.add_argument('--conf_repository_port', type=str, default='',
 -                    help='port to access repository (used for jars download)')
 -parser.add_argument('--conf_download_jars', type=str, default='false',
 -                    help='whether to download jars from repository (True) or to build from sources (False)')
 -parser.add_argument('--conf_download_docker_images', type=str, default='false',
 -                    help='whether to download docker images from repository (True) or to build from sources (False)')
 -parser.add_argument('--conf_letsencrypt_domain_name', type=str, default='', help='Domain names to apply. '
 -                                                                                 'For multiple domains enter a comma separated list of domains as a parameter'
 -                                                                                 'ssn.domain_name will be used for ssn_node, DNS A record have to exist during deployment')
 -parser.add_argument('--conf_letsencrypt_email', type=str, default='', help='Email that will be entered during '
 -                                                                           'certificate obtaining and can be user for urgent renewal and security notices. '
 -                                                                           'Use comma to register multiple emails, e.g. [email protected],[email protected].')
 -parser.add_argument('--action', required=True, type=str, default='', choices=['build', 'deploy', 'create', 'terminate'],
 -                    help='Available options: build, deploy, create, terminate')
 -args = parser.parse_args()
 +import sys
 +
 +BOOL_CHOICES_LIST = ['true', 'false']
 +OS_DISTRO_LIST = ['debian', 'redhat']
 +NETWORK_TYPE_LIST = ['public', 'private']
 +
 +
 +def build_parser():
 +    parser = argparse.ArgumentParser(description='DataLab Self-Service Node deployment',
 +                                     prog='deploy_datalab')
 +    # optional arguments
 +    parser.add_argument('--conf_network_type', type=str, default='public',
 +                        help='''Type of network. Define in which network DataLab will be deployed.
 +                        (valid choices: %s)''' % NETWORK_TYPE_LIST,
 +                        choices=NETWORK_TYPE_LIST)
 +    parser.add_argument('--conf_vpc_cidr', type=str, default='172.31.0.0/16', help='CIDR of VPC')
 +    parser.add_argument('--conf_vpc2_cidr', type=str, help='CIDR of secondary VPC')
 +    parser.add_argument('--conf_allowed_ip_cidr', type=str, default='0.0.0.0/0',
 +                        help='Comma-separated CIDR of IPs which will have access to SSN')
 +    parser.add_argument('--conf_user_subnets_range', type=str,
 +                        help='''Range of subnets which will be used for users' environments.
 +                        For example: 10.10.0.0/24 - 10.10.10.0/24''')
 +    parser.add_argument('--conf_private_subnet_prefix', type=str, default='24', help='Private subnet prefix')
 +    parser.add_argument('--conf_additional_tags', type=str,
 +                        help='Additional tags in format "Key1:Value1;Key2:Value2"')
 +    parser.add_argument('--conf_image_enabled', type=str,
 +                        help='Enable or disable creating image at first time')
 +    parser.add_argument('--conf_os_family', type=str, default='debian', choices=OS_DISTRO_LIST,
 +                        help='Operating system distribution. (valid choices: %s)' % OS_DISTRO_LIST)
 +    parser.add_argument('--ssn_hosted_zone_name', type=str, help='Name of hosted zone')
 +    parser.add_argument('--ssn_hosted_zone_id', type=str, help='ID of hosted zone')
 +    parser.add_argument('--ssn_subdomain', type=str, help='Subdomain name')
 +    parser.add_argument('--ssl_cert_path', type=str, help='Full path to SSL certificate')
 +    parser.add_argument('--ssl_key_path', type=str, help='Full path to key for SSL certificate')
 +    parser.add_argument('--workspace_path', type=str, default='', help='Docker workspace path')
 +    parser.add_argument('--conf_tag_resource_id', type=str, default='datalab', help='The name of user tag')
 +    parser.add_argument('--conf_billing_tag', type=str, default='datalab', help='Billing tag')
 +    parser.add_argument('--datalab_id', type=str, default='resource_tags_user_user_tag',
 +                        help='Column name in report file that contains datalab id tag')
 +    parser.add_argument('--usage_date', type=str, default='line_item_usage_start_date',
 +                        help='Column name in report file that contains usage date tag')
 +    parser.add_argument('--product', type=str, default='product_product_name',
 +                        help='Column name in report file that contains product name tag')
 +    parser.add_argument('--usage_type', type=str, default='line_item_usage_type',
 +                        help='Column name in report file that contains usage type tag')
 +    parser.add_argument('--usage', type=str, default='line_item_usage_amount',
 +                        help='Column name in report file that contains usage tag')
 +    parser.add_argument('--cost', type=str, default='line_item_blended_cost',
 +                        help='Column name in report file that contains cost tag')
 +    parser.add_argument('--resource_id', type=str, default='line_item_resource_id',
 +                        help='Column name in report file that contains datalab resource id tag')
 +
 +    parser.add_argument('--tags', type=str, default='line_item_operation,line_item_line_item_description',
 +                        help='Column name in report file that contains tags')
 +    parser.add_argument('--conf_stepcerts_enabled', type=str, default='false',
 +                        help='Enable or disable step certificates. (valid choices: %s)' % BOOL_CHOICES_LIST,
 +                        choices=BOOL_CHOICES_LIST)
 +    parser.add_argument('--conf_stepcerts_root_ca', type=str, help='Step root CA')
 +    parser.add_argument('--conf_stepcerts_kid', type=str, help='Step KID')
 +    parser.add_argument('--conf_stepcerts_kid_password', type=str, help='Step KID password')
 +    parser.add_argument('--conf_stepcerts_ca_url', type=str, help='Step CA URL')
 +    parser.add_argument('--conf_letsencrypt_enabled', type=str, default='false',
 +                        help='Enable or disable Let`s Encrypt certificates. (valid choices: %s)' % BOOL_CHOICES_LIST,
 +                        choices=BOOL_CHOICES_LIST)
 +    parser.add_argument('--conf_letsencrypt_domain_name', type=str,
 +                        help='''Domain names to apply. For multiple domains enter a comma separated list of domains
 +        as a parameter. ssn.domain_name will be used for ssn_node; a DNS A record has to exist during deployment''')
 +    parser.add_argument('--conf_letsencrypt_email', type=str, help='''Email that will be entered during
 +        certificate obtaining and can be used for urgent renewal and security notices. Use comma to register
 +        multiple emails, e.g. [email protected],[email protected].''')
 +    parser.add_argument('--conf_repository_user', type=str, default='',
 +                        help='user to access repository (used for jars download)')
 +    parser.add_argument('--conf_release_tag', type=str, default='2.5',
 +                        help='tag used for jars download')
 +    parser.add_argument('--conf_repository_pass', type=str, default='',
 +                        help='password to access repository (used for jars download)')
 +    parser.add_argument('--conf_repository_address', type=str, default='',
 +                        help='address to access repository (used for jars download)')
 +    parser.add_argument('--default_endpoint_name', type=str, default='local',
 +                        help='Name of localhost provisioning service that is created by default')
 +
 +    required_args = parser.add_argument_group('Required arguments')
 +    required_args.add_argument('--conf_service_base_name', type=str,
 +                               help='Unique name for DataLab environment', required=True)
 +    required_args.add_argument('--action', type=str, help='Action to perform',
 +                               choices=['build', 'deploy', 'create', 'terminate'], required=True)
 +    required_args.add_argument('--key_path', type=str, help='Path to admin key (WITHOUT KEY NAME)', required=True)
 +    required_args.add_argument('--conf_key_name', type=str, help='Admin key name (WITHOUT ".pem")', required=True)
 +    required_args.add_argument('--keycloak_auth_server_url', type=str, default='datalab',
 +                               help='Keycloak auth server URL', required=True)
 +    required_args.add_argument('--keycloak_realm_name', type=str, help='Keycloak Realm name', required=True)
 +    required_args.add_argument('--keycloak_client_name', type=str, default='datalab',
 +                               help='Keycloak client name', required=True)
 +    required_args.add_argument('--keycloak_client_secret', type=str, default='datalab',
 +                               help='Keycloak client secret', required=True)
 +    required_args.add_argument('--keycloak_user', type=str, default='datalab', help='Keycloak user', required=True)
 +    required_args.add_argument('--keycloak_user_password', type=str, default='keycloak-user-password',
 +                               help='Keycloak user password', required=True)
 +
 +    # subparsers
 +    subparsers = parser.add_subparsers(dest='conf_cloud_provider', required=True, help='sub-command help',
 +                                       description='''These are the subcommands for deploying resources
 +                                       in a specific cloud provider''')
 +
 +    # --------- aws subcommand ----------------------
 +    aws_parser = subparsers.add_parser('aws')
 +    aws_parser.add_argument('--aws_user_predefined_s3_policies', type=str,
 +                            help='Predefined policies for users instances')
 +    aws_parser.add_argument('--aws_access_key', type=str,
 +                            help='''AWS Access Key ID. Required in case of deployment with IAM user: the DataLab
 +                            deployment script is executed on a local machine and uses
 +                            IAM user permissions to create resources in AWS.''')
 +    aws_parser.add_argument('--aws_secret_access_key', type=str, help='AWS Secret Access Key')
 +    aws_parser.add_argument('--aws_ssn_instance_size', type=str, default='t2.large',
 +                            help='The SSN instance shape')
 +    aws_parser.add_argument('--ssn_assume_role_arn', type=str,
 +                            help='Role ARN for creating Route53 record in different AWS account')
 +    aws_parser.add_argument('--aws_vpc_id', type=str, help='AWS VPC ID')
 +    aws_parser.add_argument('--conf_duo_vpc_enable', type=str, default='false',
 +                            help='Duo VPC scheme enable. (valid choices: %s)' % BOOL_CHOICES_LIST,
 +                            choices=BOOL_CHOICES_LIST)
 +    aws_parser.add_argument('--aws_vpc2_id', type=str, help='Secondary AWS VPC ID')
 +    aws_parser.add_argument('--aws_peering_id', type=str, help='Amazon peering connection id')
 +    aws_parser.add_argument('--aws_subnet_id', type=str, help='AWS Subnet ID')
 +    aws_parser.add_argument('--aws_security_groups_ids', type=str,
 +                            help='One or more comma-separated Security group IDs for SSN')
 +    aws_parser.add_argument('--aws_billing_bucket', type=str,
 +                            help='The name of S3 bucket where billing reports will be placed.')
 +    aws_parser.add_argument('--aws_job_enabled', type=str, default='false', choices=BOOL_CHOICES_LIST,
 +                            help='Billing format. (valid choices: %s)' % BOOL_CHOICES_LIST)
 +    aws_parser.add_argument('--aws_report_path', type=str, help='The path to billing reports directory in S3 bucket')
++    aws_parser.add_argument('--aws_permissions_boundary_arn', type=str, default='',
++                            help='Permission boundary to be attached to new roles')
 +
 +    aws_required_args = aws_parser.add_argument_group('Required arguments')
 +    aws_required_args.add_argument('--aws_region', type=str, required=True, help='AWS region')
 +    aws_required_args.add_argument('--aws_zone', type=str, required=True, help='AWS zone')
 +    aws_required_args.add_argument('--aws_account_id', type=str, required=True, help='The ID of Amazon account')
 +
 +    # --------azure subcommand -------------------------
 +    azure_parser = subparsers.add_parser('azure')
 +    azure_parser.add_argument('--azure_vpc_name', type=str, help='Azure VPC Name')
 +    azure_parser.add_argument('--azure_subnet_name', type=str, help='Azure Subnet Name')
 +    azure_parser.add_argument('--azure_security_group_name', type=str, help='One Security group name for SSN')
 +    azure_parser.add_argument('--azure_edge_security_group_name', type=str,
 +                              help='One Security group name for Edge node')
 +    azure_parser.add_argument('--azure_resource_group_name', type=str, help='Name of Resource group in Azure')
 +    azure_parser.add_argument('--azure_datalake_enable', type=str, default='false', choices=BOOL_CHOICES_LIST,
 +                              help='Provision DataLake storage account. (valid choices: %s)' % BOOL_CHOICES_LIST)
 +    azure_parser.add_argument('--azure_ad_group_id', type=str, help='ID of Azure AD group')
 +    azure_parser.add_argument('--azure_offer_number', type=str, help='Azure offer number')
 +    azure_parser.add_argument('--azure_currency', type=str, help='Azure currency code')
 +    azure_parser.add_argument('--azure_locale', type=str, help='Azure locale')
 +    azure_parser.add_argument('--azure_application_id', type=str, help='Azure login application ID')
 +    azure_parser.add_argument('--azure_validate_permission_scope', type=str, default='true',
 +                              choices=BOOL_CHOICES_LIST,
 +                              help='Azure permission scope validation. (valid choices: %s)' % BOOL_CHOICES_LIST)
 +    azure_parser.add_argument('--azure_oauth2_enabled', type=str, default='false', choices=BOOL_CHOICES_LIST,
 +                              help='Using OAuth2 for logging in DataLab. (valid choices: %s)' % BOOL_CHOICES_LIST)
 +    azure_parser.add_argument('--azure_region_info', type=str, help='Azure region info')
 +    azure_parser.add_argument('--azure_source_vpc_name', type=str, help='Azure VPC source Name')
 +    azure_parser.add_argument('--azure_source_resource_group_name', type=str, help='Azure source resource group')
 +
 +    azure_required_args = azure_parser.add_argument_group('Required arguments')
 +    azure_required_args.add_argument('--azure_region', type=str, required=True, help='Azure region')
 +    azure_required_args.add_argument('--azure_ssn_instance_size', type=str, default='Standard_DS2_v2', required=True,
 +                                     help='The SSN instance shape')
 +    azure_required_args.add_argument('--azure_auth_path', type=str, required=True,
 +                                     help='Full path to Azure credentials JSON file')
 +
 +    # --------gcp subcommand -----------------------------
 +    gcp_parser = subparsers.add_parser('gcp')
 +    gcp_parser.add_argument('--billing_dataset_name', type=str,
 +                            help='Name of GCP dataset (BigQuery service) for billing')
 +    gcp_parser.add_argument('--gcp_subnet_name', type=str, help='GCP Subnet Name')
 +    gcp_parser.add_argument('--gcp_vpc_name', type=str, help='GCP VPC Name')
 +    gcp_parser.add_argument('--gcp_firewall_name', type=str,
 +                            help='One or more comma-separated GCP Firewall rules for SSN')
 +
 +    gcp_required_args = gcp_parser.add_argument_group('Required arguments')
 +    gcp_required_args.add_argument('--gcp_region', type=str, required=True, help='GCP region')
 +    gcp_required_args.add_argument('--gcp_zone', type=str, required=True, help='GCP zone')
 +    gcp_required_args.add_argument('--gcp_ssn_instance_size', type=str, required=True, default='n1-standard-2',
 +                                   help='The SSN instance shape')
 +    gcp_required_args.add_argument('--gcp_project_id', type=str, required=True,
 +                                   help='The project ID in Google Cloud Platform')
 +    gcp_required_args.add_argument('--gcp_service_account_path', type=str, required=True,
 +                                   help='Full path to GCP service account JSON file')
 +    return parser
  
  
  def generate_docker_command():
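
A note on the deploy_datalab.py hunk above: the flat argument list is replaced by per-cloud subcommands ('aws', 'azure', 'gcp'), with the chosen provider stored via dest='conf_cloud_provider'. Below is a minimal sketch (not part of the commit) of how the new parser is driven; all values are placeholders, only the flag names come from build_parser() above. Cloud-agnostic flags go before the subcommand, cloud-specific flags after it.

    # Hypothetical smoke test for the new subcommand-style CLI (placeholder values).
    parser = build_parser()
    args = parser.parse_args([
        '--conf_service_base_name', 'datalab-test',
        '--action', 'deploy',
        '--key_path', '/path/to/key_dir/',
        '--conf_key_name', 'admin_key',
        '--keycloak_auth_server_url', 'https://keycloak.example.com/auth',
        '--keycloak_realm_name', 'datalab',
        '--keycloak_client_name', 'datalab-client',
        '--keycloak_client_secret', 'client-secret',
        '--keycloak_user', 'datalab',
        '--keycloak_user_password', 'keycloak-user-password',
        'aws',                                  # provider subcommand
        '--aws_region', 'us-west-2',
        '--aws_zone', 'us-west-2a',
        '--aws_account_id', '123456789012',
    ])
    assert args.conf_cloud_provider == 'aws'

Note that argparse only accepts required=True on add_subparsers() from Python 3.7 on.
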
diff --cc infrastructure-provisioning/src/general/lib/os/debian/common_lib.py
index 04fa905,1263904..37e47e5
--- a/infrastructure-provisioning/src/general/lib/os/debian/common_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/debian/common_lib.py
@@@ -29,8 -29,114 +29,116 @@@ import os
  import time
  import subprocess
  import datalab.fab
 +from datalab.logger import logging
 +
+ def handle_dpkg_lock(error, rerun=False):
+     try:
+         count = 0
+         if 'E: Could not get lock ' in error and 'It is held by process ' in error:
+             log = datalab.fab.conn.sudo('cat /tmp/dpkg.log | grep "E: Could not get lock"').stdout
+             lock_path = log.split('\n')[0][22:log.find('.')]
+             pid = log.split('\n')[0][log.find('It is held by process ') + 22:].split(' ')[0]
+             datalab.fab.conn.sudo('kill -9 {}'.format(pid))
+             datalab.fab.conn.sudo('rm -f {}'.format(lock_path))
+         while 'no_lock' not in error and count < 10:
+             pid = datalab.fab.conn.sudo('lsof /var/lib/dpkg/lock-frontend | grep dpkg | awk \'{print $2}\'').stdout.replace('\n', '')
+             if pid != '':
+                 datalab.fab.conn.sudo('kill -9 {}'.format(pid))
+                 datalab.fab.conn.sudo('rm -f /var/lib/dpkg/lock-frontend')
+ 
+             pid = datalab.fab.conn.sudo('lsof /var/lib/dpkg/lock | grep dpkg | awk \'{print $2}\'').stdout.replace('\n', '')
+             if pid != '':
+                 datalab.fab.conn.sudo('kill -9 {}'.format(pid))
+                 datalab.fab.conn.sudo('rm -f /var/lib/dpkg/lock')
+ 
+             if rerun:
+                 datalab.fab.conn.sudo('dpkg --configure -a 2>&1 | tee /tmp/tee.tmp; '
+                                       'if ! grep -w -E "({0})" /tmp/tee.tmp; '
+                                       'then echo "no_lock" > /tmp/dpkg.log; '
+                                       'else cat /tmp/tee.tmp > /tmp/dpkg.log;fi; '
+                                       'if ! grep -w -E "({1})" /tmp/tee.tmp; '
+                                       'then echo "no_error" >> /tmp/dpkg.log; '
+                                       'else cat /tmp/tee.tmp >> /tmp/dpkg.log;fi'.format(lock_parser,
+                                                                                         error_parser))
+                 error = datalab.fab.conn.sudo('cat /tmp/dpkg.log').stdout
+             else:
+                 error = 'no_lock'
+             count = count + 1
+         if 'no_error' not in error:
+             raise Exception
+     except:
+         sys.exit(1)
+ 
+ def handle_apt_lock(error, rerun=False):
+     try:
+         count = 0
+         if 'E: Could not get lock ' in error and 'It is held by process ' in error:
+             log = datalab.fab.conn.sudo('cat /tmp/apt.log | grep "E: Could not get lock"').stdout
+             lock_path = log.split('\n')[0][22:log.find('.')]
+             pid = log.split('\n')[0][log.find('It is held by process ') + 22:].split(' ')[0]
+             datalab.fab.conn.sudo('kill -9 {}'.format(pid))
+             datalab.fab.conn.sudo('rm -f {}'.format(lock_path))
+         while 'no_lock' not in error and count < 10:
+             pid = datalab.fab.conn.sudo('lsof /var/lib/apt/lists/lock | grep apt | awk \'{print $2}\'').stdout.replace('\n', '')
+             if pid != '':
+                 datalab.fab.conn.sudo('kill -9 {}'.format(pid))
+                 datalab.fab.conn.sudo('rm -f /var/lib/apt/lists/lock')
+ 
+             if rerun:
+                 datalab.fab.conn.sudo('apt update 2>&1 | tee /tmp/tee.tmp; '
+                                       'if ! grep -w -E "({0})" /tmp/tee.tmp; '
+                                       'then echo "no_lock" > /tmp/apt.log; '
+                                       'else cat /tmp/tee.tmp > /tmp/apt.log;fi; '
+                                       'if ! grep -w -E "({1})" /tmp/tee.tmp; '
+                                       'then echo "no_error" >> /tmp/apt.log; '
+                                       'else cat /tmp/tee.tmp >> /tmp/apt.log;fi'.format(lock_parser,
+                                                                                        error_parser))
+                 error = datalab.fab.conn.sudo('cat /tmp/apt.log').stdout
+             else:
+                 error = 'no_lock'
+             count = count + 1
+         if 'no_error' not in error:
+             raise Exception
+     except:
+         sys.exit(1)
+ 
+ def handle_apt_get_lock(error, rerun=False):
+     try:
+         count = 0
+         if 'E: Could not get lock ' in error and 'It is held by process ' in error:
+             log = datalab.fab.conn.sudo('cat /tmp/apt.log | grep "E: Could not get lock"').stdout
+             lock_path = log.split('\n')[0][22:log.find('.')]
+             pid = log.split('\n')[0][log.find('It is held by process ') + 22:].split(' ')[0]
+             datalab.fab.conn.sudo('kill -9 {}'.format(pid))
+             datalab.fab.conn.sudo('rm -f {}'.format(lock_path))
+         while 'no_lock' not in error and count < 10:
+             datalab.fab.conn.sudo('lsof /var/lib/dpkg/lock')
+             datalab.fab.conn.sudo('lsof /var/lib/apt/lists/lock')
+             datalab.fab.conn.sudo('lsof /var/cache/apt/archives/lock')
+             datalab.fab.conn.sudo('rm -f /var/lib/apt/lists/lock')
+             datalab.fab.conn.sudo('rm -f /var/cache/apt/archives/lock')
+             datalab.fab.conn.sudo('rm -f /var/lib/dpkg/lock')
+ 
+             if rerun:
+                 datalab.fab.conn.sudo('apt-get {0} {1} 2>&1 | tee /tmp/tee.tmp; '
+                                       'if ! grep -w -E "({2})" /tmp/tee.tmp; '
+                                       'then echo "no_lock" > /tmp/apt_get.log; '
+                                       'else cat /tmp/tee.tmp > /tmp/apt_get.log;fi; '
+                                       'if ! grep -w -E "({3})" /tmp/tee.tmp; '
+                                       'then echo "no_error" >> /tmp/apt_get.log; '
+                                       'else cat /tmp/tee.tmp >> /tmp/apt_get.log;fi'.format(command,
+                                                                                            requisites,
+                                                                                            lock_parser,
+                                                                                            error_parser))
+                 error = datalab.fab.conn.sudo('cat /tmp/apt_get.log').stdout
+             else:
+                 error = 'no_lock'
+             count = count + 1
+         if 'no_error' not in error:
+             raise Exception
+     except:
+         sys.exit(1)
+ 
  def manage_pkg(command, environment, requisites):
      try:
          allow = False
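
The three handle_*_lock helpers in this hunk share one recovery pattern: extract the PID from apt/dpkg's "Could not get lock ... It is held by process N" message (or look the lock file up with lsof), kill -9 the holder, remove the stale lock file, and, when rerun is set, retry the package command up to ten times while watching for the no_lock/no_error markers in the log. A condensed, self-contained sketch of that loop, for illustration only ('run' stands in for datalab.fab.conn.sudo and is not the committed API):

    import time

    def free_lock(run, lock_file, attempts=10):
        """Kill whatever holds lock_file and remove it; True once the lock is free."""
        for _ in range(attempts):
            # lsof prints a header line first; column 2 of the remaining
            # lines is the PID. Empty output means nothing holds the lock.
            pids = run("lsof {0} | awk 'NR>1 {{print $2}}'".format(lock_file)).split()
            if not pids:
                return True
            run('kill -9 {0}'.format(' '.join(pids)))
            run('rm -f {0}'.format(lock_file))
            time.sleep(1)
        return False

For example, free_lock(run, '/var/lib/dpkg/lock-frontend') would clear the frontend lock before an apt-get retry.
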
diff --cc infrastructure-provisioning/src/general/lib/os/fab.py
index 707bc60,435efd8..c14a867
--- a/infrastructure-provisioning/src/general/lib/os/fab.py
+++ b/infrastructure-provisioning/src/general/lib/os/fab.py
@@@ -38,275 -38,6 +38,278 @@@ from fabric import *
  from patchwork.files import exists
  from patchwork import files
  
 +
 +# general functions for all resources
 +def init_datalab_connection(hostname, username, keyfile):
 +    try:
 +        global conn
 +        attempt = 0
 +        while attempt < 15:
 +            logging.info('connection attempt {}'.format(attempt))
 +            conn = Connection(host=hostname, user=username, connect_kwargs={'banner_timeout': 200,
 +                                                                            'key_filename': keyfile})
 +            conn.config.run.echo = True
 +            try:
-                 conn.run('ls')
++                conn.run('hostname')
 +                conn.config.run.echo = True
 +                return conn
 +            except:
 +                attempt += 1
 +                time.sleep(10)
++        if attempt == 15:
++            logging.info('Unable to establish connection')
++            raise Exception
 +    except Exception as err:
 +        logging.error('Function init_datalab_connection error: ' + str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def ensure_pip(requisites):
 +    try:
 +        if not exists(conn, '/home/{}/.ensure_dir/pip_path_added'.format(os.environ['conf_os_user'])):
 +            conn.sudo('bash -l -c "echo PATH=$PATH:/usr/local/bin/:/opt/spark/bin/ >> /etc/profile"')
 +            conn.sudo('bash -l -c "echo export PATH >> /etc/profile"')
 +            conn.sudo('pip3 install -UI pip=={} --no-cache-dir'.format(os.environ['conf_pip_version']))
 +            conn.sudo('pip3 install -U setuptools=={}'.format(os.environ['notebook_setuptools_version']))
 +            conn.sudo('pip3 install -UI {} --no-cache-dir'.format(requisites))
 +            conn.sudo('touch /home/{}/.ensure_dir/pip_path_added'.format(os.environ['conf_os_user']))
 +    except Exception as err:
 +        logging.error('Function ensure_pip error: ' + str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def id_generator(size=10, chars=string.digits + string.ascii_letters):
 +    return ''.join(random.choice(chars) for _ in range(size))
 +
 +
 +def replace_multi_symbols(string, symbol, symbol_cut=False):
 +    try:
 +        symbol_amount = 0
 +        for i in range(len(string)):
 +            if string[i] == symbol:
 +                symbol_amount = symbol_amount + 1
 +        while symbol_amount > 1:
 +            string = string.replace(symbol + symbol, symbol)
 +            symbol_amount = symbol_amount - 1
 +        if symbol_cut and string[-1] == symbol:
 +            string = string[:-1]
 +        return string
 +    except Exception as err:
 +        logging.error('Function replace_multi_symbols error: ' + str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def append_result(error, exception=''):
 +    try:
 +        ts = time.time()
 +        st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
 +        if exception:
 +            error_message = "[Error-{}]: {}. Exception: {}".format(st, error, str(exception))
 +            logging.error(error_message)
 +        else:
 +            error_message = "[Error-{}]: {}.".format(st, error)
 +            logging.error(error_message)
 +        with open('/root/result.json', 'a+') as f:
 +            text = f.read()
 +        if len(text) == 0:
 +            res = '{"error": ""}'
 +            with open('/root/result.json', 'w') as f:
 +                f.write(res)
 +        with open("/root/result.json") as f:
 +            data = json.load(f)
 +        data['error'] = data['error'] + error_message
 +        with open("/root/result.json", 'w') as f:
 +            json.dump(data, f)
 +        logging.error(data)
 +    except Exception as err:
 +        logging.error('Function append_result error: ' + str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def put_resource_status(resource, status, datalab_path, os_user, hostname):
 +    try:
 +        keyfile = os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem"
 +        init_datalab_connection(hostname, os_user, keyfile)
 +        conn.sudo(
 +            'python3 ' + datalab_path + 'tmp/resource_status.py --resource {} --status {}'.format(resource, status))
 +        conn.close()
 +    except Exception as err:
 +        logging.error('Function put_resource_status error: ' + str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def ensure_ciphers():
 +    try:
 +        conn.sudo(
 +            '''bash -c "echo -e '\nKexAlgorithms [email protected],diffie-hellman-group-exchange-sha256' >> /etc/ssh/sshd_config"''')
 +        conn.sudo(
 +            '''bash -c "echo -e 'Ciphers [email protected],[email protected],[email protected],aes256-ctr,aes192-ctr,aes128-ctr' >> /etc/ssh/sshd_config"''')
 +        conn.sudo(
 +            '''bash -c "echo -e '\tKexAlgorithms [email protected],diffie-hellman-group-exchange-sha256' >> /etc/ssh/ssh_config"''')
 +        conn.sudo(
 +            '''bash -c "echo -e '\tCiphers [email protected],[email protected],[email protected],aes256-ctr,aes192-ctr,aes128-ctr' >> /etc/ssh/ssh_config"''')
 +        try:
 +            conn.sudo('service ssh reload')
 +        except:
 +            conn.sudo('service sshd reload')
 +    except Exception as err:
 +        logging.error('Function ensure_ciphers error: ' + str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def manage_npm_pkg(command):
 +    try:
 +        npm_count = 0
 +        installed = False
 +        npm_registry = ['https://registry.npmjs.org/', 
'https://registry.npmjs.com/']
 +        while not installed:
 +            if npm_count > 60:
 +                logging.error("NPM registry is not available, please try 
later")
 +                sys.exit(1)
 +            else:
 +                try:
 +                    if npm_count % 2 == 0:
 +                        conn.sudo('npm config set registry 
{}'.format(npm_registry[0]))
 +                    else:
 +                        conn.sudo('npm config set registry 
{}'.format(npm_registry[1]))
 +                    conn.sudo('{}'.format(command))
 +                    installed = True
 +                except:
 +                    npm_count += 1
 +                    time.sleep(50)
 +    except Exception as err:
 +        logging.error('Function manage_npm_pkg error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def update_hosts_file(os_user):
 +    try:
 +        if not exists(conn, 
'/home/{}/.ensure_dir/hosts_file_updated'.format(os_user)):
 +            conn.sudo('sed -i "s/^127.0.0.1 localhost/127.0.0.1 localhost 
localhost.localdomain/g" /etc/hosts')
 +            conn.sudo('touch 
/home/{}/.ensure_dir/hosts_file_updated'.format(os_user))
 +    except Exception as err:
 +        logging.error('Function update_hosts_file error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def install_certbot(os_user):
 +    try:
 +        if not exists(datalab.fab.conn, 
'/home/{}/.ensure_dir/certbot_ensured'.format(os_user)):
 +            datalab.fab.conn.sudo('snap install core')
 +            datalab.fab.conn.sudo('snap refresh core')
 +            datalab.fab.conn.sudo('snap install --classic certbot')
 +            datalab.fab.conn.sudo('ln -s /snap/bin/certbot /usr/bin/certbot')
 +            datalab.fab.conn.sudo('touch 
/home/{}/.ensure_dir/certbot_ensured'.format(os_user))
 +    except Exception as err:
 +        logging.error('Installing Certbot error: ' + str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def run_certbot(domain_name, node, email=''):
 +    try:
 +        if node == 'ssn':
 +            datalab.fab.conn.sudo('service nginx stop')
 +        else:
 +            datalab.fab.conn.sudo('service openresty stop')
 +        if email != '':
 +            datalab.fab.conn.sudo('certbot certonly --standalone -n -d {}.{} -m {} '
 +                                  '--agree-tos'.format(node, domain_name, email))
 +        else:
 +            datalab.fab.conn.sudo('certbot certonly --standalone -n -d {}.{} 
--register-unsafely-without-email '
 +                                  '--agree-tos'.format(node, domain_name))
 +    except Exception as err:
 +        logging.error('Running Certbot error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def configure_nginx_LE(domain_name, node):
 +    try:
 +        server_name_line = '    server_name {}.{};'.format(node, domain_name)
 +        cert_path_line = '    ssl_certificate  
/etc/letsencrypt/live/{}.{}/fullchain.pem;'.format(node, domain_name)
 +        cert_key_line = '    ssl_certificate_key 
/etc/letsencrypt/live/{}.{}/privkey.pem;'.format(node, domain_name)
 +        #certbot_service = "ExecStart = /usr/bin/certbot -q renew --pre-hook 
'service nginx stop' --post-hook 'service nginx start'"
 +        #certbot_service_path = '/lib/systemd/system/certbot.service'
 +        if node == 'ssn':
 +            nginx_config_path = '/etc/nginx/conf.d/nginx_proxy.conf'
 +        else:
 +            nginx_config_path = 
'/usr/local/openresty/nginx/conf/conf.d/proxy.conf'
 +        datalab.fab.conn.sudo('sed -i "s|.*    server_name .*|{}|" 
{}'.format(server_name_line, nginx_config_path))
 +        datalab.fab.conn.sudo('sed -i "s|.*    ssl_certificate .*|{}|" 
{}'.format(cert_path_line, nginx_config_path))
 +        datalab.fab.conn.sudo('sed -i "s|.*    ssl_certificate_key .*|{}|" 
{}'.format(cert_key_line, nginx_config_path))
 +        #datalab.fab.conn.sudo('sed -i "s|.*ExecStart.*|{}|" 
{}'.format(certbot_service, certbot_service_path))
 +        if node == 'ssn':
 +            datalab.fab.conn.sudo('systemctl restart nginx')
 +        else:
 +            datalab.fab.conn.sudo('systemctl restart openresty')
 +    except Exception as err:
 +        logging.error('Configuring Nginx Let’s Encrypt certs error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +# function for edge node only
 +def configure_http_proxy_server(config):
 +    try:
 +        if not exists(datalab.fab.conn,'/tmp/http_proxy_ensured'):
 +            manage_pkg('-y install', 'remote', 'squid')
 +            template_file = config['template_file']
 +            proxy_subnet = config['exploratory_subnet']
 +            conn.put(template_file, '/tmp/squid.conf')
 +            conn.sudo('\cp /tmp/squid.conf /etc/squid/squid.conf')
 +            conn.sudo('sed -i "s|PROXY_SUBNET|{}|g" 
/etc/squid/squid.conf'.format(proxy_subnet))
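 +            # Expand each CIDR list into ACL lines and substitute the placeholders in the squid template.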
 +            replace_string = ''
 +            for cidr in config['vpc_cidrs']:
 +                replace_string += 'acl AWS_VPC_CIDR dst {}\\n'.format(cidr)
 +            conn.sudo('sed -i "s|VPC_CIDRS|{}|g" 
/etc/squid/squid.conf'.format(replace_string))
 +            replace_string = ''
 +            for cidr in config['allowed_ip_cidr']:
 +                replace_string += 'acl AllowedCIDRS src {}\\n'.format(cidr)
 +            conn.sudo('sed -i "s|ALLOWED_CIDRS|{}|g" 
/etc/squid/squid.conf'.format(replace_string))
 +            conn.sudo('systemctl restart squid')
 +            conn.sudo('touch /tmp/http_proxy_ensured')
 +    except Exception as err:
 +        logging.error('Failed to install and configure squid: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def configure_nftables(config):
 +    try:
 +        if not exists(datalab.fab.conn,'/tmp/nftables_ensured'):
 +            manage_pkg('-y install', 'remote', 'nftables')
 +            conn.sudo('systemctl enable nftables.service')
 +            conn.sudo('systemctl start nftables')
 +            conn.sudo('sysctl net.ipv4.ip_forward=1')
 +            if os.environ['conf_cloud_provider'] == 'aws':
 +                interface = 'eth0'
 +            elif os.environ['conf_cloud_provider'] == 'gcp':
 +                interface = 'ens4'
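 +            # NOTE: only 'aws' and 'gcp' are handled; on other providers 'interface' would be undefined below.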
 +            conn.sudo('sed -i 
\'s/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g\' /etc/sysctl.conf')
 +            conn.sudo('sed -i \'s/EDGE_IP/{}/g\' 
/opt/datalab/templates/nftables.conf'.format(config['edge_ip']))
 +            conn.sudo('sed -i "s|INTERFACE|{}|g" 
/opt/datalab/templates/nftables.conf'.format(interface))
 +            conn.sudo(
 +                'sed -i "s|SUBNET_CIDR|{}|g" 
/opt/datalab/templates/nftables.conf'.format(config['exploratory_subnet']))
 +            conn.sudo('cp /opt/datalab/templates/nftables.conf /etc/')
 +            conn.sudo('systemctl restart nftables')
 +            conn.sudo('touch /tmp/nftables_ensured')
 +    except Exception as err:
 +        logging.error('Failed to configure nftables: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +# functions for all computation resources
  def ensure_python_venv(python_venv_version):
      try:
          if not exists(conn, 
'/opt/python/python{}'.format(python_venv_version)):
@@@ -737,274 -791,7 +740,274 @@@ def install_inactivity_checker(os_user
              conn.sudo('systemctl start inactive.timer')
              conn.sudo('touch 
/home/{}/.ensure_dir/inactive_ensured'.format(os_user))
          except Exception as err:
 -            print('Failed to setup inactivity check service!', str(err))
 +            logging.error('Function install_inactivity_checker error: %s', str(err))
 +            traceback.print_exc()
 +            sys.exit(1)
 +
 +
 +def get_spark_memory(creds=False, os_user='', hostname='', keyfile=''):
 +    try:
 +        if creds:
 +            con = init_datalab_connection(hostname, os_user, keyfile)
 +            mem = con.sudo('free -m | grep Mem | tr -s " " ":" | cut -f 2 -d 
":"').stdout.replace('\n', '')
 +            instance_memory = int(mem)
 +        else:
 +            mem = conn.sudo('free -m | grep Mem | tr -s " " ":" | cut -f 2 -d 
":"').stdout.replace('\n', '')
 +            instance_memory = int(mem)
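 +        # Allocate ~90% of instance memory to Spark, leaving the rest for the OS.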
 +        spark_memory = round(instance_memory * 90 / 100)
 +        return spark_memory
 +    except Exception as err:
 +        logging.error('Function get_spark_memory error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +# functions for dataengine/dataengine-service resources
 +def ensure_dataengine_service_devtools():
 +    try:
 +        if not exists(conn, 
'/home/{}/dataengine-service-devtools-ensured'.format(os.environ['conf_os_user'])):
 +            if os.environ['conf_cloud_provider'] == 'aws':
 +                manage_pkg('-y install', 'remote', 'libcurl libcurl-devel')
 +            elif (os.environ['conf_cloud_provider'] == 'gcp') and (
 +                    '-w-' in conn.sudo('hostname').stdout.replace('\n', '')):
 +                # manage_pkg('-y build-dep', 'remote', 'libcurl4-gnutls-dev 
libxml2-dev')
 +                manage_pkg('-y install', 'remote', 'libxml2-dev 
libcurl4-openssl-dev pkg-config')
 +            conn.sudo('R -e "install.packages(\'devtools\', repos = 
\'cloud.r-project.org\')"')
 +            if (os.environ['conf_cloud_provider'] == 'gcp') and (
 +                    "R_LIBS_SITE" not in conn.sudo('cat 
/opt/conda/miniconda3/lib/R/etc/Renviron').stdout):
 +                conn.sudo(
 +                    '''bash -l -c 'echo 
"R_LIBS_SITE=${R_LIBS_SITE-'/usr/local/lib/R/site-library:/usr/lib/R/site-library:/usr/lib/R/library'}"
 >> /opt/conda/miniconda3/lib/R/etc/Renviron' ''')
 +            conn.sudo('touch 
/home/{}/dataengine-service-devtools-ensured'.format(os.environ['conf_os_user']))
 +    except Exception as err:
 +        logging.error('Function ensure_dataengine_service_devtools error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def configure_data_engine_service_livy(hostname, os_user, keyfile):
 +    try:
 +        init_datalab_connection(hostname, os_user, keyfile)
 +        if exists(conn, '/usr/local/lib/livy'):
 +            conn.sudo('rm -r /usr/local/lib/livy')
-         conn.sudo('wget -P /tmp/  --user={} --password={} '
-                   '{}/repository/packages/livy.tar.gz --no-check-certificate'
-                   .format(os.environ['conf_repository_user'],
-                           os.environ['conf_repository_pass'], 
os.environ['conf_repository_address']))
++        conn.sudo('wget -P /tmp/ 
https://nexus.develop.dlabanalytics.com/repository/packages-public/livy.tar.gz '
++                  '--no-check-certificate')
 +        conn.sudo('tar -xzvf /tmp/livy.tar.gz -C /usr/local/lib/')
 +        conn.sudo('ln -s /usr/local/lib/incubator-livy /usr/local/lib/livy')
 +        conn.put('/root/templates/dataengine-service_livy-env.sh', 
'/usr/local/lib/livy/conf/livy-env.sh')
 +        conn.put('/root/templates/dataengine-service_livy.service', 
'/tmp/livy.service')
 +        conn.sudo("sed -i 's|OS_USER|{}|' /tmp/livy.service".format(os_user))
 +        conn.sudo('mv /tmp/livy.service /etc/systemd/system/livy.service')
 +        conn.sudo('systemctl daemon-reload')
 +        conn.sudo('systemctl enable livy.service')
 +        conn.sudo('systemctl start livy.service')
 +        conn.close()
 +    except Exception as err:
 +        logging.error('Function configure_data_engine_service_livy error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def remove_rstudio_dataengines_kernel(cluster_name, os_user):
 +    try:
 +        cluster_re = ['-{}"'.format(cluster_name),
 +                      '-{}-'.format(cluster_name),
 +                      '-{}/'.format(cluster_name)]
 +        conn.get('/home/{}/.Rprofile'.format(os_user), 'Rprofile')
 +        data = open('Rprofile').read()
 +        conf = filter(None, data.split('\n'))
 +        # Filter config from any match of cluster_name in line,
 +        # separated by defined symbols to avoid partial matches
 +        conf = [i for i in conf if not any(x in i for x in cluster_re)]
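 +        # Comment out the active master= lines; the most recent Spark
 +        # configuration block is re-activated further below.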
 +        comment_all = lambda x: x if x.startswith('#master') else 
'#{}'.format(x)
 +        uncomment = lambda x: x[1:] if not x.startswith('#master') else x
 +        conf = [comment_all(i) for i in conf]
 +        conf = [uncomment(i) for i in conf]
 +        last_spark = max([conf.index(i) for i in conf if 'master=' in i] or 
[0])
 +        active_cluster = conf[last_spark].split('"')[-2] if last_spark != 0 
else None
 +        conf = conf[:last_spark] + [conf[l][1:] for l in range(last_spark, 
len(conf)) if conf[l].startswith("#")] \
 +               + [conf[l] for l in range(last_spark, len(conf)) if not 
conf[l].startswith('#')]
 +        with open('.Rprofile', 'w') as f:
 +            for line in conf:
 +                f.write('{}\n'.format(line))
 +        conn.put('.Rprofile', '/home/{}/.Rprofile'.format(os_user))
 +        conn.get('/home/{}/.Renviron'.format(os_user), 'Renviron')
 +        data = open('Renviron').read()
 +        conf = filter(None, data.split('\n'))
 +        comment_all = lambda x: x if x.startswith('#') else '#{}'.format(x)
 +        conf = [comment_all(i) for i in conf]
 +        # Filter config from any match of cluster_name in line,
 +        # separated by defined symbols to avoid partial matches
 +        conf = [i for i in conf if not any(x in i for x in cluster_re)]
 +        if active_cluster:
 +            activate_cluster = lambda x: x[1:] if active_cluster in x else x
 +            conf = [activate_cluster(i) for i in conf]
 +        else:
 +            last_spark = max([conf.index(i) for i in conf if 'SPARK_HOME' in 
i])
 +            conf = conf[:last_spark] + [conf[l][1:] for l in 
range(last_spark, len(conf)) if conf[l].startswith("#")]
 +        with open('.Renviron', 'w') as f:
 +            for line in conf:
 +                f.write('{}\n'.format(line))
 +        conn.put('.Renviron', '/home/{}/.Renviron'.format(os_user))
 +        if len(conf) == 1:
 +            conn.sudo('rm -f 
/home/{}/.ensure_dir/rstudio_dataengine_ensured'.format(os_user))
 +            conn.sudo('rm -f 
/home/{}/.ensure_dir/rstudio_dataengine-service_ensured'.format(os_user))
 +        conn.sudo('''R -e "source('/home/{}/.Rprofile')"'''.format(os_user))
 +    except Exception as err:
 +        logging.error('Function remove_rstudio_dataengines_kernel error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +# The following function should be reviewed to confirm it is still needed.
 +def configure_data_engine_service_pip(hostname, os_user, keyfile, emr=False):
 +    try:
 +        init_datalab_connection(hostname, os_user, keyfile)
 +        # datalab.common_lib.manage_pkg('-y install', 'remote', 'python3-pip')
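 +        # Symlink /usr/bin/pip3 to the pip matching whichever Python 3.x is present on the cluster.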
 +        if not exists(conn, '/usr/bin/pip3') and conn.sudo("python3.9 -V 
2>/dev/null | awk '{print $2}'").stdout:
 +            conn.sudo('ln -s /usr/bin/pip-3.9 /usr/bin/pip3')
 +        elif not exists(conn, '/usr/bin/pip3') and conn.sudo("python3.8 -V 
2>/dev/null | awk '{print $2}'").stdout:
 +            conn.sudo('ln -s /usr/bin/pip-3.8 /usr/bin/pip3')
 +        elif not exists(conn, '/usr/bin/pip3') and conn.sudo("python3.7 -V 
2>/dev/null | awk '{print $2}'").stdout:
 +            conn.sudo('ln -s /usr/bin/pip-3.7 /usr/bin/pip3')
 +        elif not exists(conn, '/usr/bin/pip3') and conn.sudo("python3.6 -V 
2>/dev/null | awk '{print $2}'").stdout:
 +            conn.sudo('ln -s /usr/bin/pip-3.6 /usr/bin/pip3')
 +        elif not exists(conn, '/usr/bin/pip3') and conn.sudo("python3.5 -V 
2>/dev/null | awk '{print $2}'").stdout:
 +            conn.sudo('ln -s /usr/bin/pip-3.5 /usr/bin/pip3')
 +        if emr:
 +            conn.sudo('pip3 install -U 
pip=={}'.format(os.environ['conf_pip_version']))
 +            conn.sudo('ln -s /usr/local/bin/pip3.7 /bin/pip3.7')
 +        conn.sudo('''bash -c -l 'echo "export PATH=$PATH:/usr/local/bin" >> 
/etc/profile' ''')
 +        conn.sudo('bash -c -l "source /etc/profile"')
 +        conn.run('bash -c -l "source /etc/profile"')
 +        conn.close()
 +    except Exception as err:
 +        logging.error('Function configure_data_engine_service_pip error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +# The following function should be removed once kernel connections via Livy are configured for all computation resources:
 +def dataengine_dir_prepare(cluster_dir):
 +    try:
 +        subprocess.run('mkdir -p ' + cluster_dir, shell=True, check=True)
 +    except Exception as err:
 +        logging.error('Function dataengine_dir_prepare error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def ensure_dataengine_tensorflow_jars(jars_dir):
 +    subprocess.run('wget 
https://dl.bintray.com/spark-packages/maven/tapanalyticstoolkit/spark-tensorflow-connector/'
 +                   '1.0.0-s_2.11/spark-tensorflow-connector-1.0.0-s_2.11.jar'
 +                   ' -O 
{}spark-tensorflow-connector-1.0.0-s_2.11.jar'.format(jars_dir), shell=True, 
check=True)
 +
 +
 +def prepare(dataengine_service_dir, yarn_dir):
 +    try:
 +        subprocess.run('mkdir -p ' + dataengine_service_dir, shell=True, 
check=True)
 +        subprocess.run('mkdir -p ' + yarn_dir, shell=True, check=True)
 +        subprocess.run('sudo mkdir -p /opt/python/', shell=True, check=True)
 +        result = os.path.exists(dataengine_service_dir + 'usr/')
 +        return result
 +    except Exception as err:
 +        logging.error('Function prepare error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def configuring_notebook(dataengine_service_version):
 +    try:
 +        jars_path = '/opt/' + dataengine_service_version + '/jars/'
 +        subprocess.run("""sudo bash -c "find """ + jars_path + """ -name 
'*netty*' | xargs rm -f" """, shell=True,
 +                       check=True)
 +    except Exception as err:
 +        logging.error('Function configuring_notebook error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +def find_cluster_kernels():
 +    try:
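 +        # Collect dataengine (-de-) and dataengine-service (-des-) kernel directories under /opt.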
 +        de = [i for i in conn.sudo(
 +            '''bash -l -c 'find /opt/ -maxdepth 1 -name "*-de-*" -type d | 
rev | cut -f 1 -d "/" | rev | xargs -r' ''').stdout.replace(
 +            '\n', '').split(' ') if i != '']
 +        des = [i for i in conn.sudo(
 +            '''bash -l -c 'find /opt/ -maxdepth 2 -name "*-des-*" -type d | 
rev | cut -f 1,2 -d "/" | rev | xargs -r' ''').stdout.replace(
 +            '\n', '').split(' ') if i != '']
 +        return (de, des)
 +    except Exception as err:
 +        logging.error('Function find_cluster_kernels error: %s', str(err))
 +        traceback.print_exc()
 +        sys.exit(1)
 +
 +
 +# functions for jupyter deeplearning and tensor notebooks
 +def configure_jupyter(os_user, jupyter_conf_file, templates_dir, 
jupyter_version, exploratory_name):
 +    if not exists(conn, '/home/' + os_user + '/.ensure_dir/jupyter_ensured'):
 +        try:
 +            if os.environ['conf_deeplearning_cloud_ami'] == 'false' or 
os.environ['application'] != 'deeplearning':
 +                conn.sudo('pip3 install notebook=={} 
--no-cache-dir'.format(jupyter_version))
 +                conn.sudo('pip3 install jupyter --no-cache-dir')
 +                conn.sudo('rm -rf {}'.format(jupyter_conf_file))
 +            conn.run('jupyter notebook --generate-config --config 
{}'.format(jupyter_conf_file))
 +            conn.run('mkdir -p ~/.jupyter/custom/')
 +            conn.run('echo "#notebook-container { width: auto; }" > 
~/.jupyter/custom/custom.css')
 +            conn.sudo('echo "c.NotebookApp.ip = \'0.0.0.0\'" >> 
{}'.format(jupyter_conf_file))
 +            conn.sudo('echo "c.NotebookApp.base_url = \'/{0}/\'" >> 
{1}'.format(exploratory_name, jupyter_conf_file))
 +            conn.sudo('echo c.NotebookApp.open_browser = False >> 
{}'.format(jupyter_conf_file))
 +            conn.sudo('echo \'c.NotebookApp.cookie_secret = b"{0}"\' >> 
{1}'.format(id_generator(), jupyter_conf_file))
 +            conn.sudo('''echo "c.NotebookApp.token = u''" >> 
{}'''.format(jupyter_conf_file))
 +            conn.sudo('echo \'c.KernelSpecManager.ensure_native_kernel = 
False\' >> {}'.format(jupyter_conf_file))
 +            if os.environ['conf_deeplearning_cloud_ami'] == 'true' and 
os.environ['application'] == 'deeplearning':
 +                conn.sudo(
 +                    '''echo "c.NotebookApp.kernel_spec_manager_class = 
'environment_kernels.EnvironmentKernelSpecManager'" >> {}'''.format(
 +                        jupyter_conf_file))
 +                conn.sudo(
 +                    '''echo 
"c.EnvironmentKernelSpecManager.conda_env_dirs=['/home/ubuntu/anaconda3/envs']" 
>> {}'''.format(
 +                        jupyter_conf_file))
 +            conn.put(templates_dir + 'jupyter-notebook.service', 
'/tmp/jupyter-notebook.service')
 +            conn.sudo("chmod 644 /tmp/jupyter-notebook.service")
 +            if os.environ['application'] == 'tensor':
 +                conn.sudo(
 +                    "sed -i '/ExecStart/s|-c \"|-c \"export 
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:/usr/local/cuda/lib64; |g' 
/tmp/jupyter-notebook.service")
 +            elif os.environ['application'] == 'deeplearning' and 
os.environ['conf_deeplearning_cloud_ami'] == 'false':
 +                conn.sudo(
 +                    "sed -i '/ExecStart/s|-c \"|-c \"export 
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/cudnn/lib64:/usr/local/cuda/lib64:/usr/lib64/openmpi/lib:
 ; export PYTHONPATH=/home/" + os_user +
 +                    "/caffe/python:/home/" + os_user + 
"/pytorch/build:$PYTHONPATH ; |g' /tmp/jupyter-notebook.service")
 +            conn.sudo("sed -i 's|CONF_PATH|{}|' 
/tmp/jupyter-notebook.service".format(jupyter_conf_file))
 +            conn.sudo("sed -i 's|OS_USR|{}|' 
/tmp/jupyter-notebook.service".format(os_user))
-             java_home = conn.run(
-                 "update-alternatives --query java | grep -o --color=never 
\'/.*/java-8.*/jre\'").stdout.splitlines()[0]
++            if os.environ['application'] == 'deeplearning' and 
os.environ['conf_cloud_provider'] == 'azure':
++                java_home = conn.run("update-alternatives --query java | grep 
-o --color=never \'/.*/java-11.*/bin/java\'").stdout.splitlines()[0]
++            else:
++                java_home = conn.run("update-alternatives --query java | grep 
-o --color=never \'/.*/java-8.*/jre\'").stdout.splitlines()[0]
 +            conn.sudo('sed -i \'/\[Service\]/ 
a\Environment=\"JAVA_HOME={}\"\'  /tmp/jupyter-notebook.service'.format(
 +                java_home))
 +            conn.sudo('\cp /tmp/jupyter-notebook.service 
/etc/systemd/system/jupyter-notebook.service')
 +            conn.sudo('chown -R {0}:{0} /home/{0}/.local'.format(os_user))
 +            conn.sudo('mkdir -p /mnt/var')
 +            conn.sudo('chown {0}:{0} /mnt/var'.format(os_user))
 +            if os.environ['application'] == 'jupyter' or 
os.environ['application'] == 'deeplearning':
 +                try:
 +                    conn.sudo('jupyter-kernelspec remove -f python3 || echo "Such kernel does not exist"')
 +                    conn.sudo('jupyter-kernelspec remove -f python2 || echo "Such kernel does not exist"')
 +                except Exception as err:
 +                    logging.error('Error: %s', str(err))
 +            conn.sudo("systemctl daemon-reload")
 +            conn.sudo("systemctl enable jupyter-notebook")
 +            conn.sudo("systemctl start jupyter-notebook")
 +            conn.sudo('touch 
/home/{}/.ensure_dir/jupyter_ensured'.format(os_user))
 +        except Exception as err:
 +            logging.error('Function configure_jupyter error: %s', str(err))
 +            traceback.print_exc()
 +            sys.exit(1)
 +    else:
 +        try:
 +            conn.sudo(
 +                'sed -i "s/c.NotebookApp.base_url =.*/c.NotebookApp.base_url 
= \'\/{0}\/\'/" {1}'.format(
 +                    exploratory_name, jupyter_conf_file))
 +            conn.sudo("systemctl restart jupyter-notebook")
 +        except Exception as err:
 +            logging.error('Function configure_jupyter error: %s', str(err))
 +            traceback.print_exc()
              sys.exit(1)
  
  
diff --cc 
infrastructure-provisioning/src/general/scripts/aws/common_create_role_policy.py
index c8135a2,94489da..7c0901f
--- 
a/infrastructure-provisioning/src/general/scripts/aws/common_create_role_policy.py
+++ 
b/infrastructure-provisioning/src/general/scripts/aws/common_create_role_policy.py
@@@ -47,13 -47,13 +48,13 @@@ if __name__ == "__main__"
              if role_name == '':
                  tag = {"Key": args.infra_tag_name, "Value": 
args.infra_tag_value}
                  user_tag = {"Key": "user:tag", "Value": args.user_tag_value}
 -                print("Creating role {0}, profile name 
{1}".format(args.role_name, args.role_profile_name))
 +                logging.info("Creating role {0}, profile name 
{1}".format(args.role_name, args.role_profile_name))
-                 create_iam_role(args.role_name, args.role_profile_name, 
args.region, tag=tag, user_tag=user_tag)
+                 create_iam_role(args.role_name, args.role_profile_name, 
args.region, args.permissions_boundary_arn, tag=tag, user_tag=user_tag)
              else:
 -                print("ROLE AND ROLE PROFILE ARE ALREADY CREATED")
 -            print("ROLE {} created. IAM group {} 
created".format(args.role_name, args.role_profile_name))
 +                logging.info("ROLE AND ROLE PROFILE ARE ALREADY CREATED")
 +            logging.info("ROLE {} created. IAM group {} 
created".format(args.role_name, args.role_profile_name))
  
 -            print("ATTACHING POLICIES TO ROLE")
 +            logging.info("ATTACHING POLICIES TO ROLE")
              if args.policy_file_name != '':
                  create_attach_policy(args.policy_name, args.role_name, 
args.policy_file_name)
              else:
diff --cc infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
index 6a22967,43261d6..06a873e
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
@@@ -31,23 -31,60 +31,24 @@@ import o
  import sys
  import traceback
  import subprocess
+ import uuid
  from fabric import *
  
 -if __name__ == "__main__":
 -    local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], 
os.environ['request_id'])
 -    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + 
local_log_filename
 -    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
 -                        level=logging.DEBUG,
 -                        filename=local_log_filepath)
 -
 -    ssn_conf = dict()
 -    ssn_conf['instance'] = 'ssn'
 -
 -    def clear_resources():
 -        if ssn_conf['domain_created']:
 -            
datalab.actions_lib.remove_route_53_record(os.environ['ssn_hosted_zone_id'],
 -                                                       
os.environ['ssn_hosted_zone_name'],
 -                                                       
os.environ['ssn_subdomain'])
 -        datalab.actions_lib.remove_ec2(ssn_conf['tag_name'], 
ssn_conf['instance_name'])
 -        datalab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
 -        datalab.actions_lib.remove_s3(ssn_conf['instance'])
 -        if ssn_conf['pre_defined_sg']:
 -            datalab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
 -        if ssn_conf['pre_defined_subnet']:
 -            
datalab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], 
ssn_conf['tag_name'],
 -                                                         
ssn_conf['service_base_name'])
 -            datalab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
 -        if ssn_conf['pre_defined_vpc']:
 -            datalab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
 -            datalab.actions_lib.remove_route_tables(ssn_conf['tag_name'], 
True)
 -            datalab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
 -        if ssn_conf['pre_defined_vpc2']:
 -            datalab.actions_lib.remove_peering('*')
 -            try:
 -                
datalab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
 -            except:
 -                print("There are no VPC Endpoints")
 -            datalab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], 
True)
 -            datalab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
 +def cleanup_aws_resources(tag_name, service_base_name):
 +    try:
 +        params = "--tag_name {} --service_base_name {}".format(tag_name, 
service_base_name)
 +        subprocess.run("~/scripts/{}.py 
{}".format('ssn_terminate_aws_resources', params), shell=True, check=True)
 +    except:
 +        traceback.print_exc()
 +        raise Exception
  
 +if __name__ == "__main__":
 +    # deriving variables for ssn node deployment
      try:
          logging.info('[DERIVING NAMES]')
 -        print('[DERIVING NAMES]')
 +        ssn_conf = dict()
          ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] 
= datalab.fab.replace_multi_symbols(
 -            os.environ['conf_service_base_name'][:20], '-', True)
 -        if 'ssn_hosted_zone_id' in os.environ and 'ssn_hosted_zone_name' in 
os.environ and \
 -                'ssn_subdomain' in os.environ:
 -            ssn_conf['domain_created'] = True
 -        else:
 -            ssn_conf['domain_created'] = False
 -        ssn_conf['pre_defined_vpc'] = False
 -        ssn_conf['pre_defined_subnet'] = False
 -        ssn_conf['pre_defined_sg'] = False
 -        ssn_conf['billing_enabled'] = True
 +                    os.environ['conf_service_base_name'][:20], '-', True)
          ssn_conf['role_name'] = 
'{}-ssn-role'.format(ssn_conf['service_base_name'])
          ssn_conf['role_profile_name'] = 
'{}-ssn-profile'.format(ssn_conf['service_base_name'])
          ssn_conf['policy_name'] = 
'{}-ssn-policy'.format(ssn_conf['service_base_name'])
@@@ -62,67 -98,96 +63,71 @@@
          ssn_conf['sg_name'] = 
'{}-ssn-sg'.format(ssn_conf['service_base_name'])
          ssn_conf['network_type'] = os.environ['conf_network_type']
          ssn_conf['datalab_ssh_user'] = os.environ['conf_os_user']
 -
 -        try:
 -            if os.environ['aws_vpc_id'] == '':
 -                raise KeyError
 -        except KeyError:
 -            ssn_conf['tag'] = {"Key": ssn_conf['tag_name'], "Value": 
"{}-subnet".format(ssn_conf['service_base_name'])}
 -            os.environ['aws_vpc_id'] = 
datalab.meta_lib.get_vpc_by_tag(ssn_conf['tag_name'],
 -                                                                       
ssn_conf['service_base_name'])
 -            ssn_conf['pre_defined_vpc'] = True
 -        try:
 -            if os.environ['aws_subnet_id'] == '':
 -                raise KeyError
 -        except KeyError:
 -            ssn_conf['tag'] = {"Key": ssn_conf['tag_name'], "Value": 
"{}-subnet".format(ssn_conf['service_base_name'])}
 -            os.environ['aws_subnet_id'] = 
datalab.meta_lib.get_subnet_by_tag(ssn_conf['tag'], True)
 -            ssn_conf['pre_defined_subnet'] = True
 -        try:
 -            if os.environ['conf_duo_vpc_enable'] == 'true' and not 
os.environ['aws_vpc2_id']:
 -                raise KeyError
 -        except KeyError:
 -            ssn_conf['tag'] = {"Key": ssn_conf['tag2_name'], "Value": 
"{}-subnet".format(ssn_conf['service_base_name'])}
 -            os.environ['aws_vpc2_id'] = 
datalab.meta_lib.get_vpc_by_tag(ssn_conf['tag2_name'],
 -                                                                        
ssn_conf['service_base_name'])
 -            ssn_conf['pre_defined_vpc2'] = True
 -        try:
 -            if os.environ['conf_duo_vpc_enable'] == 'true' and not 
os.environ['aws_peering_id']:
 -                raise KeyError
 -        except KeyError:
 -            os.environ['aws_peering_id'] = 
datalab.meta_lib.get_peering_by_tag(ssn_conf['tag_name'],
 -                                                                              
 ssn_conf['service_base_name'])
 -            ssn_conf['pre_defined_peering'] = True
 -        try:
 -            if os.environ['aws_security_groups_ids'] == '':
 -                raise KeyError
 -        except KeyError:
 -            os.environ['aws_security_groups_ids'] = 
datalab.meta_lib.get_security_group_by_name(ssn_conf['sg_name'])
 -            ssn_conf['pre_defined_sg'] = True
 -        try:
 -            if os.environ['aws_account_id'] == '':
 -                raise KeyError
 -            if os.environ['aws_billing_bucket'] == '':
 -                raise KeyError
 -        except KeyError:
 +        ssn_conf['ssn_datalab_path'] = os.environ['ssn_datalab_path']
 +        ssn_conf['conf_tag_resource_id'] = os.environ['conf_tag_resource_id']
 +        ssn_conf['instance_hostname'] = (lambda x: 
datalab.meta_lib.get_instance_ip_address(
 +            ssn_conf['tag_name'], ssn_conf['instance_name']).get(
 +            'Private') if x == 'private' else 
datalab.meta_lib.get_instance_hostname(
 +            ssn_conf['tag_name'], 
ssn_conf['instance_name']))(ssn_conf['network_type'])
 +        ssn_conf['initial_user'] = (lambda x: 'ubuntu' if x == 'debian' else 
'ec2-user')(os.environ['conf_os_family'])
 +        ssn_conf['sudo_group'] = (lambda x: 'sudo' if x == 'debian' else 
'wheel')(os.environ['conf_os_family'])
 +        ssn_conf['step_cert_sans'] = (lambda x: (lambda x: ' --san {0} --san 
{1}'.format(
 +            datalab.meta_lib.get_instance_hostname(ssn_conf['tag_name'], 
ssn_conf['instance_name']),
 +            datalab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'], 
ssn_conf['instance_name']).get(
 +                'Public')) if x == 'public' else ' --san 
{0}'.format(datalab.meta_lib.get_instance_ip_address(
 +            ssn_conf['tag_name'], ssn_conf['instance_name']).get('Private')))(
 +            ssn_conf['network_type']) if x == 'true' else 
'')(os.environ['conf_stepcerts_enabled'])
 +        if 'aws_vpc_id' in os.environ and os.environ['aws_vpc_id'] != '':
 +            ssn_conf['aws_vpc_id'] = os.environ['aws_vpc_id']
 +        else:
 +            ssn_conf['aws_vpc_id'] = 
datalab.meta_lib.get_vpc_by_tag(ssn_conf['tag_name'],
 +                                                                     
ssn_conf['service_base_name'])
 +        if os.environ['conf_duo_vpc_enable'] == 'true' and 'aws_vpc2_id' in 
os.environ\
 +                and os.environ['aws_vpc2_id'] != '':
 +            ssn_conf['aws_vpc2_id'] = os.environ['aws_vpc2_id']
 +        else:
 +            ssn_conf['aws_vpc2_id'] = 
datalab.meta_lib.get_vpc_by_tag(ssn_conf['tag2_name'],
 +                                                                      
ssn_conf['service_base_name'])
 +        if os.environ['conf_duo_vpc_enable'] == 'true' and not 
os.environ['aws_peering_id']:
 +            ssn_conf['aws_peering_id'] = 
datalab.meta_lib.get_peering_by_tag(ssn_conf['tag_name'],
 +                                                                           
ssn_conf['service_base_name'])
 +        elif os.environ['conf_duo_vpc_enable'] == 'true' and 'aws_peering_id' in os.environ \
 +                and os.environ['aws_peering_id'] != '':
 +            ssn_conf['aws_peering_id'] = os.environ['aws_peering_id']
 +        else:
 +            ssn_conf['aws_peering_id'] = None
 +        if 'aws_subnet_id' in os.environ and os.environ['aws_subnet_id'] != 
'':
 +            ssn_conf['aws_subnet_id'] = os.environ['aws_subnet_id']
 +        else:
 +            ssn_conf['aws_subnet_id'] = 
datalab.meta_lib.get_subnet_by_tag(ssn_conf['subnet_tag'], True)
 +        if 'aws_security_groups_ids' in os.environ and 
os.environ['aws_security_groups_ids'] != '':
 +            ssn_conf['aws_security_groups_ids'] = 
os.environ['aws_security_groups_ids']
 +        else:
 +            ssn_conf['aws_security_groups_ids'] = 
datalab.meta_lib.get_security_group_by_name(ssn_conf['sg_name'])
 +        if 'aws_billing_bucket' in os.environ and os.environ['aws_billing_bucket'] != '':
 +            ssn_conf['billing_enabled'] = True
 +            ssn_conf['aws_billing_bucket'] = os.environ['aws_billing_bucket']
 +        else:
              ssn_conf['billing_enabled'] = False
 -        if not ssn_conf['billing_enabled']:
 -            os.environ['aws_account_id'] = 'None'
 -            os.environ['aws_billing_bucket'] = 'None'
 -        try:
 -            if not os.environ['aws_report_path']:
 -                raise KeyError
 -        except KeyError:
 -            os.environ['aws_report_path'] = ''
 -
 +            ssn_conf['aws_billing_bucket'] = 'None'
 +        if 'aws_report_path' in os.environ and os.environ['aws_report_path'] != '':
 +            ssn_conf['aws_report_path'] = os.environ['aws_report_path']
 +        else:
 +            ssn_conf['aws_report_path'] = ''
+         if 'keycloak_client_name' not in os.environ:
+             os.environ['keycloak_client_name'] = 
'{}-ui'.format(ssn_conf['service_base_name'])
+         if 'keycloak_client_secret' not in os.environ:
+             os.environ['keycloak_client_secret'] = str(uuid.uuid4())
      except Exception as err:
 +        logging.error('Error: {0}'.format(err))
          datalab.fab.append_result("Failed to generate variables dictionary.", 
str(err))
 -        clear_resources()
 +        cleanup_aws_resources(ssn_conf['tag_name'], 
ssn_conf['service_base_name'])
 +        traceback.print_exc()
          sys.exit(1)
  
 +    # creating datalab ssh user
      try:
 -        if os.environ['conf_os_family'] == 'debian':
 -            ssn_conf['initial_user'] = 'ubuntu'
 -            ssn_conf['sudo_group'] = 'sudo'
 -        if os.environ['conf_os_family'] == 'redhat':
 -            ssn_conf['initial_user'] = 'ec2-user'
 -            ssn_conf['sudo_group'] = 'wheel'
 -
 -        if ssn_conf['network_type'] == 'private':
 -            ssn_conf['instance_hostname'] = 
datalab.meta_lib.get_instance_ip_address(
 -                ssn_conf['tag_name'], 
ssn_conf['instance_name']).get('Private')
 -        else:
 -            ssn_conf['instance_hostname'] = 
datalab.meta_lib.get_instance_hostname(
 -                ssn_conf['tag_name'], ssn_conf['instance_name'])
 -
 -        if os.environ['conf_stepcerts_enabled'] == 'true':
 -            ssn_conf['step_cert_sans'] = ' --san {0} 
'.format(datalab.meta_lib.get_instance_ip_address(
 -                ssn_conf['tag_name'], 
ssn_conf['instance_name']).get('Private'))
 -            if ssn_conf['network_type'] == 'public':
 -                ssn_conf['step_cert_sans'] += ' --san {0} --san {1}'.format(
 -                    
datalab.meta_lib.get_instance_hostname(ssn_conf['tag_name'], 
ssn_conf['instance_name']),
 -                    
datalab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'],
 -                                                             
ssn_conf['instance_name']).get('Public'))
 -        else:
 -            ssn_conf['step_cert_sans'] = ''
 -
          logging.info('[CREATING DATALAB SSH USER]')
 -        print('[CREATING DATALAB SSH USER]')
          params = "--hostname {} --keyfile {} --initial_user {} --os_user {} 
--sudo_group {}".format(
              ssn_conf['instance_hostname'], os.environ['conf_key_dir'] + 
os.environ['conf_key_name'] + ".pem",
              ssn_conf['initial_user'], ssn_conf['datalab_ssh_user'], 
ssn_conf['sudo_group'])
@@@ -213,23 -274,11 +218,30 @@@
              traceback.print_exc()
              raise Exception
      except Exception as err:
 -        datalab.fab.append_result("Unable to configure docker.", str(err))
 -        clear_resources()
 +        logging.error('Error: {0}'.format(err))
 +        datalab.fab.append_result("Failed to configure docker.", str(err))
 +        cleanup_aws_resources(ssn_conf['tag_name'], 
ssn_conf['service_base_name'])
 +        sys.exit(1)
 +
 +    # configuring keycloak client for ui
 +    try:
 +        logging.info('[CONFIGURE KEYCLOAK CLIENT FOR DATALAB UI]')
++        keycloak_params = "--service_base_name {} --keycloak_auth_server_url 
{} --keycloak_realm_name {} " \
++                          "--keycloak_user {} --keycloak_user_password {} 
--instance_public_ip {} --keycloak_client_secret {} " \
++            .format(ssn_conf['service_base_name'], 
os.environ['keycloak_auth_server_url'],
++                    os.environ['keycloak_realm_name'], 
os.environ['keycloak_user'],
++                    os.environ['keycloak_user_password'], 
datalab.meta_lib.get_instance_hostname(
++                ssn_conf['tag_name'], ssn_conf['instance_name']), 
os.environ['keycloak_client_secret'])
++        subprocess.run("~/scripts/{}.py {}".format('configure_keycloak', 
keycloak_params), shell=True, check=True)
 +    except Exception as err:
 +        logging.error('Error: {0}'.format(err))
 +        datalab.fab.append_result("Failed to configure Keycloak client for 
DataLab UI.", str(err))
 +        cleanup_aws_resources(ssn_conf['tag_name'], 
ssn_conf['service_base_name'])
          sys.exit(1)
  
 +    # configuring UI
      try:
 +        logging.info('[CONFIGURE SSN INSTANCE UI]')
          cloud_params = [
              {
                  'key': 'KEYCLOAK_REDIRECT_URI',
diff --cc infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
index 61bcf72,1482297..7e21cb1
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
@@@ -248,21 -279,36 +248,23 @@@ if __name__ == "__main__"
              except:
                  traceback.print_exc()
                  raise Exception
 -            with open('/tmp/ssn_sg_id', 'r') as f:
 -                os.environ['aws_security_groups_ids'] = f.read()
 -        except Exception as err:
 -            datalab.gab_lib.append_result("Failed creating security group for 
SSN.", str(err))
 -            if ssn_conf['pre_defined_vpc']:
 -                
datalab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], 
ssn_conf['tag_name'],
 -                                                             
ssn_conf['service_base_name'])
 -                datalab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
 -                datalab.actions_lib.remove_route_tables(ssn_conf['tag_name'], 
True)
 -                datalab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
 -            if ssn_conf['pre_defined_vpc2']:
 -                datalab.actions_lib.remove_peering('*')
 -                try:
 -                    
datalab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
 -                except:
 -                    print("There are no VPC Endpoints")
 -                
datalab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
 -                datalab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
 -            sys.exit(1)
 +            ssn_conf['aws_security_groups_ids'] = 
datalab.meta_lib.get_security_group_by_name(ssn_conf['sg_name'])
 +    except Exception as err:
 +        logging.error('Error: {0}'.format(err))
 +        datalab.fab.append_result("Failed to create security group for SSN", 
str(err))
 +        cleanup_aws_resources(ssn_conf['tag_name'], 
ssn_conf['service_base_name'])
 +        sys.exit(1)
  
 +    # creating roles
      try:
          logging.info('[CREATE ROLES]')
 -        print('[CREATE ROLES]')
          params = "--role_name {} --role_profile_name {} --policy_name {} 
--policy_file_name {} --region {} " \
 -                 "--infra_tag_name {} --infra_tag_value {} --user_tag_value 
{}".\
 +                 "--infra_tag_name {} --infra_tag_value {} --user_tag_value 
{}". \
              format(ssn_conf['role_name'], ssn_conf['role_profile_name'], 
ssn_conf['policy_name'],
 -                   ssn_conf['policy_path'], os.environ['aws_region'], 
ssn_conf['tag_name'],
 +                   ssn_conf['policy_path'], ssn_conf['region'], 
ssn_conf['tag_name'],
                     ssn_conf['service_base_name'], ssn_conf['user_tag'])
+         if 'aws_permissions_boundary_arn' in os.environ:
+             params = '{} --permissions_boundary_arn {}'.format(params, 
os.environ['aws_permissions_boundary_arn'])
          try:
              subprocess.run("~/scripts/{}.py 
{}".format('common_create_role_policy', params), shell=True, check=True)
          except:
diff --cc infrastructure-provisioning/src/ssn/scripts/configure_docker.py
index 8c0e4cb,3a1f062..1dd8673
--- a/infrastructure-provisioning/src/ssn/scripts/configure_docker.py
+++ b/infrastructure-provisioning/src/ssn/scripts/configure_docker.py
@@@ -111,23 -116,39 +111,37 @@@ def build_docker_images(image_list, reg
          host_string = '{}@{}'.format(args.os_user, args.hostname)
          if os.environ['conf_cloud_provider'] == 'azure':
              conn.local('scp -i {} /root/azure_auth.json 
{}:{}sources/infrastructure-provisioning/src/base/'
-                   'azure_auth.json'.format(args.keyfile, host_string, 
args.datalab_path))
+                        'azure_auth.json'.format(args.keyfile, host_string, 
args.datalab_path))
              conn.sudo('cp 
{0}sources/infrastructure-provisioning/src/base/azure_auth.json '
 -                      
'/home/{1}/keys/azure_auth.json'.format(args.datalab_path, args.os_user))
 -        if region == 'cn-north-1':
 -            add_china_repository(datalab_path)
 +                 '/home/{1}/keys/azure_auth.json'.format(args.datalab_path, 
args.os_user))
-         for image in image_list:
-             name = image['name']
-             tag = image['tag']
-             conn.sudo('cp 
{0}sources/infrastructure-provisioning/src/general/files/{1}/{2}_description.json
 '
+         if 'conf_repository_user' in os.environ and 'conf_repository_port' in 
os.environ and 'conf_repository_pass' in os.environ and 
'conf_repository_address' in os.environ and 
os.environ['conf_download_docker_images'] == 'true':
+             conn.sudo('docker login -u {0} -p {1} {2}:{3}'
+                       .format(os.environ['conf_repository_user'], 
os.environ['conf_repository_pass'], os.environ['conf_repository_address'], 
os.environ['conf_repository_port']))
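+             # Pull each image from the private registry, retag it with its local name, then remove the registry-qualified tag.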
+             for image in image_list:
+                 name = image['name']
+                 tag = image['tag']
+                 conn.sudo('docker pull {0}:{4}/docker.datalab-{2}-{1}:{3}'
+                           .format(os.environ['conf_repository_address'], 
os.environ['conf_cloud_provider'], name, tag, 
os.environ['conf_repository_port']))
+                 conn.sudo('docker image tag 
{0}:{4}/docker.datalab-{2}-{1}:{3} docker.datalab-{2}:{3}'
+                           .format(os.environ['conf_repository_address'], 
os.environ['conf_cloud_provider'], name, tag, 
os.environ['conf_repository_port']))
+                 conn.sudo('docker image rm {0}:{4}/docker.datalab-{2}-{1}:{3}'
+                           .format(os.environ['conf_repository_address'], 
os.environ['conf_cloud_provider'], name, tag, 
os.environ['conf_repository_port']))
+             return True
+         else:
+             for image in image_list:
+                 name = image['name']
+                 tag = image['tag']
+                 conn.sudo('cp 
{0}sources/infrastructure-provisioning/src/general/files/{1}/{2}_description.json
 '
 -                     
'{0}sources/infrastructure-provisioning/src/{2}/description.json'.format(args.datalab_path,
 args.cloud_provider, name))
 +                 
'{0}sources/infrastructure-provisioning/src/{2}/description.json'.format(args.datalab_path,
 args.cloud_provider, name))
-             if name == 'base':
-                 conn.sudo("bash -c 'cd 
{4}sources/infrastructure-provisioning/src/; docker build --build-arg OS={2} "
+                 if name == 'base':
+                     conn.sudo("bash -c 'cd 
{4}sources/infrastructure-provisioning/src/; docker build --build-arg OS={2} "
 -                              "--build-arg SRC_PATH=\"\" --file 
general/files/{3}/{0}_Dockerfile -t docker.datalab-{0}:{1} "
 -                              ".'".format(name, tag, args.os_family, 
args.cloud_provider, args.datalab_path))
 +                          "--build-arg SRC_PATH=\"\" --file 
general/files/{3}/{0}_Dockerfile -t docker.datalab-{0}:{1} "
 +                          ".'".format(name, tag, args.os_family, 
args.cloud_provider, args.datalab_path))
-             else:
-                 conn.sudo("bash -c 'cd 
{4}sources/infrastructure-provisioning/src/; docker build --build-arg OS={2} "
+                 else:
+                     conn.sudo("bash -c 'cd 
{4}sources/infrastructure-provisioning/src/; docker build --build-arg OS={2} "
 -                              "--file general/files/{3}/{0}_Dockerfile -t 
docker.datalab-{0}:{1} .'".format(name, tag, args.os_family, 
args.cloud_provider, args.datalab_path))
 +                          "--file general/files/{3}/{0}_Dockerfile -t 
docker.datalab-{0}:{1} .'".format(name, tag, args.os_family, 
args.cloud_provider, args.datalab_path))
-         conn.sudo('rm -f 
{}sources/infrastructure-provisioning/src/base/azure_auth.json'.format(args.datalab_path))
-         return True
+             conn.sudo('rm -f 
{}sources/infrastructure-provisioning/src/base/azure_auth.json'.format(args.datalab_path))
+             return True
      except:
          return False
  
