- Dump node listing on unusable
- Update Gluster to 4.1 on Ubuntu/Debian/RemoteFS
- Update Python to 3.6.6 in Windows Docker images
- Update dependencies
- Minor doc updates
- Fix appveyor sha256 artifact upload
This commit is contained in:
Fred Park 2018-06-28 08:30:26 -07:00
Родитель 03aad7dd94
Коммит 0c3d492f1a
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: 3C4D545F457737EB
12 изменённых файлов: 39 добавлений и 22 удалений

Просмотреть файл

@ -226,7 +226,7 @@ deploy:
description: 'Batch Shipyard release'
auth_token:
secure: +f4N6Qsv3HvJyii0Bs+8qBx3YS7+7FJUWbFSiAdEIUDubFQnNkJgFnBw0Ew2SLkv
artifact: /.*\.exe/,/.*\.exe.sha256/
artifact: /.*\.exe.*/
draft: true
prerelease: true
force_update: true

Просмотреть файл

@ -45,4 +45,4 @@ monitoring:
port: 9090
scrape_interval: 10s
grafana:
additional_dashboards: null
additional_dashboards: {}

Просмотреть файл

@ -686,6 +686,8 @@ def _block_for_nodes_ready(
suppress_confirm=True)
unusable_delete = True
else:
# list nodes to dump exact error
list_nodes(batch_client, config, pool_id=pool_id, nodes=nodes)
raise RuntimeError(
('Unusable nodes detected in pool {}. You can delete '
'unusable nodes with "pool nodes del --all-unusable" '

Просмотреть файл

@ -59,7 +59,7 @@ from .version import __version__
logger = logging.getLogger(__name__)
util.setup_logger(logger)
# global defines
_BLOBXFER_VERSION = '1.3.0'
_BLOBXFER_VERSION = '1.3.1'
_MEGABYTE = 1048576
_MAX_READ_BLOCKSIZE_BYTES = 4194304
_FILE_SPLIT_PREFIX = '_shipyard-'

Просмотреть файл

@ -1,7 +1,7 @@
# Dockerfile for Azure/batch-shipyard Cargo (Windows)
# Adapted from: https://github.com/StefanScherer/dockerfiles-windows/blob/master/python/Dockerfile
FROM python:3.6.5-windowsservercore-ltsc2016
FROM python:3.6.6-windowsservercore-ltsc2016
MAINTAINER Fred Park <https://github.com/Azure/batch-shipyard>
ENV chocolateyUseWindowsCompression false
@ -30,8 +30,8 @@ COPY --from=0 /batch-shipyard/cargo /batch-shipyard
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
ENV PYTHON_VERSION 3.6.5
ENV PYTHON_PIP_VERSION 9.0.3
ENV PYTHON_VERSION 3.6.6
ENV PYTHON_PIP_VERSION 10.0.1
RUN $env:PATH = 'C:\Python;C:\Python\Scripts;{0}' -f $env:PATH ; \
Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager\Environment\' -Name Path -Value $env:PATH ; \

Просмотреть файл

@ -1,7 +1,7 @@
# Dockerfile for Azure/batch-shipyard CLI (Windows)
# Adapted from: https://github.com/StefanScherer/dockerfiles-windows/blob/master/python/Dockerfile
FROM python:3.6.5-windowsservercore-ltsc2016
FROM python:3.6.6-windowsservercore-ltsc2016
MAINTAINER Fred Park <https://github.com/Azure/batch-shipyard>
ENV chocolateyUseWindowsCompression false
@ -34,8 +34,8 @@ COPY --from=0 /batch-shipyard /batch-shipyard
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
ENV PYTHON_VERSION 3.6.5
ENV PYTHON_PIP_VERSION 9.0.3
ENV PYTHON_VERSION 3.6.6
ENV PYTHON_PIP_VERSION 10.0.1
RUN $env:PATH = 'C:\Python;C:\Python\Scripts;{0}' -f $env:PATH ; \
Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager\Environment\' -Name Path -Value $env:PATH ; \

Просмотреть файл

@ -448,7 +448,13 @@ to both compute node A and B. However, if `job-1`:`task-3` is then run on
compute node A after `job-1`:`task-1`, then the `input_data` is not
transferred again. This object currently supports `azure_batch` and
`azure_storage` as members.
* `azure_batch` contains the following members:
* `azure_batch` will transfer data from a compute node that has run the
specified task. Note that there is no implied dependency. It is
recommended to specify a `depends_on` in order to ensure that the
specified task runs before this one (note that `depends_on` requires
that the upstream task must exist within the same job). Additionally,
the compute node which ran the task must not have been deleted or
resized out of the pool.
* (required) `job_id` the job id of the task
* (required) `task_id` the id of the task to fetch files from
* (optional) `include` is an array of include filters
@ -669,7 +675,13 @@ ingressed for this specific task. This object currently supports
`azure_batch` and `azure_storage` as members. Note for multi-instance
tasks, transfer of `input_data` is only applied to the task running the
application command.
* `azure_batch` contains the following members:
* `azure_batch` will transfer data from a compute node that has run the
specified task. Note that there is no implied dependency. It is
recommended to specify a `depends_on` in order to ensure that the
specified task runs before this one (note that `depends_on` requires
that the upstream task must exist within the same job). Additionally,
the compute node which ran the task must not have been deleted or
resized out of the pool.
* (required) `job_id` the job id of the task
* (required) `task_id` the id of the task to fetch files from
* (optional) `include` is an array of include filters

Просмотреть файл

@ -46,7 +46,7 @@ monitoring:
port: 9090
scrape_interval: 10s
grafana:
additional_dashboards: null
additional_dashboards: {}
```
The `monitoring` property has the following members:
@ -113,7 +113,8 @@ resource monitor.
be exclusive to the resource monitor and cannot be shared with other
resources, including Batch compute nodes. Batch compute nodes and storage
clusters can co-exist on the same virtual network, but should be in
separate subnets.
separate subnets. It's recommended that the monitor VM be in a separate
subnet as well.
* (required) `name` is the subnet name.
* (required) `address_prefix` is the subnet address prefix to use for
allocating the resource monitor virtual machine.
@ -126,7 +127,7 @@ to the resource monitoring virtual machine.
* (optional) `grafana` rule allows grafana HTTPS (443) server port to be
exposed to the specified address prefix. Multiple address prefixes
can be specified.
* (optional) `prometheus` rule allows the Prometheus server pot to be
* (optional) `prometheus` rule allows the Prometheus server port to be
exposed to the specified address prefix. Multiple address prefixes
can be specified.
* (optional) `custom_inbound_rules` are custom inbound rules for other

Просмотреть файл

@ -278,10 +278,12 @@ shipyard monitor add --poolid mybatchpool
```
After the monitor is added, you can point your web browser at the
monitoring resource FQDN emitted above. You can remove individual
resources to monitor with the command `shipyard monitor remove`.
Once you have no need for your monitoring resource, you can either suspend
it or remove it altogether.
monitoring resource FQDN emitted above. Note that there will be a delay
between `monitor add` and the resource showing up in Grafana.
You can remove individual resources to monitor with the command
`monitor remove`. Once you have no need for your monitoring resource, you
can either suspend it or destroy it entirely.
```shell
# remove the prior Batch pool monitor

Просмотреть файл

@ -1,7 +1,7 @@
adal==1.0.2
azure-batch==4.1.3
azure-cosmosdb-table==1.0.3
azure-keyvault==1.0.0b1
azure-keyvault==1.0.0
azure-mgmt-authorization==0.50.0
azure-mgmt-batch==5.0.1
azure-mgmt-compute==3.1.0rc3
@ -11,7 +11,7 @@ azure-mgmt-storage==2.0.0rc3
azure-storage-blob==1.3.0
azure-storage-common==1.3.0
azure-storage-file==1.3.0
blobxfer==1.3.0
blobxfer==1.3.1
click==6.7
future==0.16.0
futures==3.2.0; python_version < '3'

Просмотреть файл

@ -10,7 +10,7 @@ DOCKER_CE_VERSION_DEBIAN=18.03.1
DOCKER_CE_VERSION_CENTOS=18.03.1
DOCKER_CE_VERSION_SLES=17.09.1
NVIDIA_DOCKER_VERSION=2.0.3
GLUSTER_VERSION_DEBIAN=4.0
GLUSTER_VERSION_DEBIAN=4.1
GLUSTER_VERSION_CENTOS=40
# consts

Просмотреть файл

@ -6,7 +6,7 @@ set -o pipefail
export DEBIAN_FRONTEND=noninteractive
# constants
GLUSTER_VERSION=4.0
GLUSTER_VERSION=4.1
gluster_brick_mountpath=/gluster/brick
gluster_brick_location=$gluster_brick_mountpath/brick0
ipaddress=$(ip addr list eth0 | grep "inet " | cut -d' ' -f6 | cut -d/ -f1)