# qsub -q ftp -I -l walltime=10:01:00 -l mem=16gb
# datavisu0 has no public DNS: add '134.246.184.10 datavisu0' to /etc/hosts to reach the jupyter URL
%load_ext autoreload
%autoreload 2
import numpy as np
import xsar
import logging
import time
import xarray as xr
import pandas as pd
import glob
import os
from dask.distributed import Client, progress
from dask_jobqueue import PBSCluster
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('xsar')
logger.setLevel(logging.INFO)
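# optional: the DEBUG root level above also enables very verbose output from
# third-party libraries (rasterio, asyncio, dask_jobqueue), as visible in the
# outputs below. A minimal sketch to quieten them while keeping xsar at INFO
# (logger names taken from the DEBUG lines shown further down):
for noisy_logger in ('rasterio', 'asyncio', 'dask_jobqueue'):
    logging.getLogger(noisy_logger).setLevel(logging.WARNING)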
# close any client left over from a previous run of this notebook
try:
    client.close()
except:
    pass
nprocs = 1
mem = 16  # in GB, for one proc
memory = "%dGB" % (mem * nprocs)  # total per job
cluster = PBSCluster(cores=4,
                     memory=memory,
                     project='xsar',
                     queue='sequentiel',
                     processes=nprocs,
                     resource_spec='select=1:ncpus=%d:mem=%s' % (nprocs, memory),
                     local_directory=os.path.expandvars("$TMPDIR"),
                     interface='ib1',  # workers interface
                     walltime='01:00:00',
                     scheduler_options={'interface': 'ib0'})  # if scheduler is on queue 'ftp'
cluster.scale(jobs=10)
client = Client(cluster)
client
DEBUG:dask_jobqueue.pbs:Job script:
#!/usr/bin/env bash
#PBS -N dask-worker
#PBS -q sequentiel
#PBS -A xsar
#PBS -l select=1:ncpus=1:mem=16GB
#PBS -l walltime=01:00:00
/home1/datahome/oarcher/conda-env/xsar/bin/python3.8 -m distributed.cli.dask_worker tcp://10.149.1.150:54215 --nthreads 4 --memory-limit 16.00GB --name 0 --nanny --death-timeout 60 --local-directory /dev/shm/pbs.2415415.datarmor0 --interface ib1 --protocol tcp://
[identical job scripts generated for workers 1-9; dask_jobqueue then wrote each script to the local directory and submitted it with qsub, starting PBS jobs 2415880-2415889]
[Client / Cluster summary widget (scheduler: tcp://10.149.1.150:54215)]
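# optional: block until at least one PBS worker job has started before submitting
# work, and print the dashboard URL. A minimal sketch using the standard
# dask.distributed Client API (10 worker jobs were requested above).
client.wait_for_workers(n_workers=1)
print(client.dashboard_link)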
from dask import delayed
import dask.array as da
import dask.dataframe as dd
def open_mfdataset(datasets_paths, *args, client=None, **kwargs):
    """
    Open multiple datasets. See `xsar.open_dataset` for more information.

    Parameters
    ----------
    datasets_paths: iterable
        iterable of dataset paths
    client: object with a `.map` method, optional
        if None, the builtin `map` is used

    Returns
    -------
    list
        list of xarray datasets (or list of futures if client is a `dask.distributed.Client`)
    """
    map_func = map
    if client is not None:
        map_func = client.map

    def _open_dataset(filename):
        return xsar.open_dataset(filename, *args, **kwargs)[0]

    map_res = map_func(_open_dataset, datasets_paths)
    return list(map_res)
listing = 'shoc.txt'
safes_path = open(listing).read().splitlines()
sar_list = open_mfdataset(safes_path[:10], pol_dim=True, client=client)
# client.gather(sar_list)  # would evaluate all futures (takes some time if there are many SAFEs)
sar_list
[<Future: pending, key: _open_dataset-d70d689ba72ad4df0cfacfef55b8b8f6>, <Future: pending, key: _open_dataset-37fbe947209b43daedfa7eb069a51858>, <Future: pending, key: _open_dataset-9e45ee1c77a103e9986796ea22e61dfe>, <Future: pending, key: _open_dataset-0544934647869350c4a44eb2ab565c7a>, <Future: pending, key: _open_dataset-ef31eadb411f4265a82ce4088189ba18>, <Future: pending, key: _open_dataset-330abfc89890cb6a603f93cf10213b4c>, <Future: pending, key: _open_dataset-38e88dd73e618d92314b55a1bddc71af>, <Future: pending, key: _open_dataset-9bd4d0645910339b9de948381dfbead9>, <Future: pending, key: _open_dataset-0c1116e5a70a7afb66aa2954c4bc02b7>, <Future: pending, key: _open_dataset-d6fd138815ef6ca028b44282523966f1>]
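# optional: consume the futures as they finish, instead of waiting on the whole
# list. A minimal sketch with dask.distributed.as_completed (sar_list holds
# futures because a distributed client was passed to open_mfdataset).
from dask.distributed import as_completed
for future in as_completed(sar_list):
    ds = future.result()
    print(ds.attrs['start_date'], dict(ds.dims))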
sar_list[0].result()
<xarray.Dataset>
Dimensions:         (atrack: 16088, pol: 1, xtrack: 16794)
Coordinates:
  * pol             (pol) object 'HH'
  * atrack          (atrack) int64 0 1 2 3 4 5 ... 16083 16084 16085 16086 16087
  * xtrack          (xtrack) int64 0 1 2 3 4 5 ... 16789 16790 16791 16792 16793
Data variables:
    digital_number  (pol, atrack, xtrack) uint16 dask.array<chunksize=(1, 1000, 1000), meta=np.ndarray>
    time            (atrack) datetime64[ns] dask.array<chunksize=(1000,), meta=np.ndarray>
    longitude       (atrack, xtrack) float32 dask.array<chunksize=(1000, 1000), meta=np.ndarray>
    latitude        (atrack, xtrack) float32 dask.array<chunksize=(1000, 1000), meta=np.ndarray>
    incidence       (atrack, xtrack) float32 dask.array<chunksize=(1000, 1000), meta=np.ndarray>
    elevation       (atrack, xtrack) float32 dask.array<chunksize=(1000, 1000), meta=np.ndarray>
    sigma0_raw      (pol, atrack, xtrack) float64 dask.array<chunksize=(1, 1000, 1000), meta=np.ndarray>
    nesz            (pol, atrack, xtrack) float64 dask.array<chunksize=(1, 1000, 1000), meta=np.ndarray>
    gamma0_raw      (pol, atrack, xtrack) float64 dask.array<chunksize=(1, 1000, 1000), meta=np.ndarray>
    negz            (pol, atrack, xtrack) float64 dask.array<chunksize=(1, 1000, 1000), meta=np.ndarray>
Attributes:
    footprint:       POLYGON ((-62.93737801452085 35.56607048873561, -67.5119...
    coverage:        418km * 406km (xtrack * atrack)
    pixel_xtrack_m:  24.9
    pixel_atrack_m:  25.3
    ipf_version:     2.36
    swath_type:      EW
    polarizations:   HH
    product_type:    GRD
    mission:         SENTINEL-1
    satellite:       A
    start_date:      2014-10-17 10:25:53.114391
    stop_date:       2014-10-17 10:26:53.110488
    path:            /home/datawork-cersat-public/cache/project/mpc-sentinel1...
    denoised:        False
    subdataset:      EW_HH
    geometry:        ge...
    Conventions:     CF-1.7
def my_proc(ds, client=None):
    # coarsen by a factor 20 in both directions and store the result as zarr
    ds_coars = ds.coarsen(atrack=20, xtrack=20, boundary='trim').mean()
    zarr_file = '/home1/scratch/oarcher/tmp/%s.zarr' % os.path.basename(ds.attrs['path'])
    return ds_coars.to_zarr(zarr_file, compute=True)
t1 = time.time()
lowres = client.map(my_proc, sar_list)
lowres_s0 = lowres[0].result()
time.time() - t1
98.06193971633911
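# optional: monitor and gather all 10 coarsen+zarr jobs, not only the first one.
# A minimal sketch: `progress` was imported from dask.distributed above, and
# `client.gather` blocks until every future is done.
progress(lowres)
zarr_stores = client.gather(lowres)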
lowres[0].result()
<xarray.backends.zarr.ZarrStore at 0x2aaaf136ae20>
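# check that the coarsened dataset can be read back from disk; the path below
# mirrors the pattern used in my_proc, assuming the first SAFE path in the
# listing matches ds.attrs['path'] (a minimal sketch, not verified here)
xr.open_zarr('/home1/scratch/oarcher/tmp/%s.zarr' % os.path.basename(safes_path[0]))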
xsar.open_dataset(safes_path[0], pol_dim=True)
# check ok: same processing run locally, without client.map
t1 = time.time()
my_proc(xsar.open_dataset(safes_path[0])[0], client=client)
time.time() - t1
[rasterio / GDAL DEBUG output while opening S1A_EW_GRDH_1SSH_20141017T102553_20141017T102653_002868_0033E8_185C.SAFE and its EW_HH subdataset, truncated]
55.442471981048584
# open 50 SAFEs without the dask client: open_mfdataset then returns a plain list
# of datasets (nothing to .compute()), so time the call itself
t1 = time.time()
safes_ = open_mfdataset(safes_path[:50])
print("elapsed: %.1f" % (time.time() - t1))
safes_
# mean open time per SAFE, assuming each dataset records an 'open_time' attribute
np.nanmean([ds.attrs.get('open_time', np.nan) for ds in safes_])
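# optional cleanup: release the PBS worker jobs and the scheduler when done
client.close()
cluster.close()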