## pick one file at random from a raadtools file collection, keeping just its date and path
sample_file <- function(x, ...) {
  dplyr::sample_n(x, 1L) %>% dplyr::select(date, fullname)
}

## expand a hyperslab array into a long-format tibble: one row per cell, a column
## for the active variable and one per dimension in the transform tables
as_tib_slab <- function(trans, slab, activ) {
  tib <- list()
  tib[[activ]] <- as.vector(slab)
  tib <- as_tibble(tib)
  prod_dims <- 1
  total_prod <- prod(dim(slab))
  
  for (i in seq_along(trans)) {
    nm <- names(trans)[i]
    nr <- nrow(trans[[i]])
    ## repeat each dimension's coordinates so they line up with column-major array order
    tib[[nm]] <- rep(trans[[nm]][[nm]], each = prod_dims, length.out = total_prod)
    prod_dims <- prod_dims * nr
  }
  tib
}


library(ncdump)
library(tibble)
library(dplyr, warn.conflicts = FALSE)
library(maps)
library(raadtools)
## Loading required package: raster
## Loading required package: sp
## 
## Attaching package: 'raster'
## The following object is masked from 'package:dplyr':
## 
##     select
library(ggplot2)
set.seed(1)

files <- lapply(list(
  sstfiles(time.resolution = "daily")
  , sstfiles(time.resolution = "monthly")
  , chlafiles()
  , currentsfiles()
  , derivicefiles()
  , derivaadcfiles("si_200_interpolated_summer_climatology") %>% mutate(date = as.POSIXct(NA))
  #, icefiles(product = "amsr")
  , ocfiles(time.resolution = "monthly", product = "SeaWiFS", varname = "CHL", type = "L3m")
  , sshfiles(ssha = TRUE)
  , (windfiles() %>% dplyr::transmute(fullname = ufullname, date))
), sample_file) %>% bind_rows()
f <-  files$fullname[1]
print(gsub(getOption("default.datadir"), "", f))
## [1] "/data/eclipse.ncdc.noaa.gov/pub/OI-daily-v2/NetCDF/1991/AVHRR/avhrr-only-v2.19910221.nc"
## the idea is to find out what's in the file
NetCDF(f) %>% dimension_values() %>% group_by(name) %>% summarize_at("vals", funs(min, max))
## Joining, by = ".variable_"
## # A tibble: 4 × 3
##    name      min      max
##   <chr>    <dbl>    <dbl>
## 1   lat  -89.875   89.875
## 2   lon    0.125  359.875
## 3  time 4799.000 4799.000
## 4  zlev    0.000    0.000
(x <- NetCDF(f) )
## Variables: sst, (anom, err, ice) 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 4 × 5
##   variable_name .variable_ .dimension_ dimension_name dimension_length
##           <chr>      <dbl>       <int>          <chr>            <int>
## 1           sst          4           3            lon             1440
## 2           sst          4           2            lat              720
## 3           sst          4           1           zlev                1
## 4           sst          4           0           time                1
## then build up a query, returning a subsetted set of tables of each dimension's coordinates
trans <- x %>% filtrate(lon = lon > 100, lat = between(lat, -30, 20))
## Joining, by = ".variable_"
## extract the hyperslab index from the transform tables
hslab <- bind_rows(lapply(trans, function(x) tibble(name = x$name[1], start = min(x$step), count = length(x$step))))

## open the file and pull out that slab 
con <- ncdf4::nc_open(x$file$filename[1])
slab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)

## (this will be wrapped up in an easy function, sketched below, but it's important to keep things general)
image(trans$lon$lon, trans$lat$lat, slab, col = viridis::viridis(100))
map("world2", add = TRUE)
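
## a minimal sketch of what that "easy function" might look like: take the
## NetCDF object plus the filtered transform tables, build the start/count
## index, and pull the slab in one call (the name read_slab is hypothetical,
## not part of ncdump; it re-uses nctive() from above)
read_slab <- function(x, trans) {
  hslab <- bind_rows(lapply(trans, function(tab) {
    tibble(name = tab$name[1], start = min(tab$step), count = length(tab$step))
  }))
  con <- ncdf4::nc_open(x$file$filename[1])
  on.exit(ncdf4::nc_close(con), add = TRUE)
  ncdf4::ncvar_get(con, nctive(x), start = hslab$start, count = hslab$count)
}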

f <-  files$fullname[2]
print(gsub(getOption("default.datadir"), "", f))
## [1] "/data/ftp.cdc.noaa.gov/Datasets/noaa.oisst.v2/sst.mnmean.nc"
(x <- NetCDF(f) )
## Variables: sst, (time_bnds) 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 3 × 5
##   variable_name .variable_ .dimension_ dimension_name dimension_length
##           <chr>      <dbl>       <int>          <chr>            <int>
## 1           sst          2           1            lon              360
## 2           sst          2           0            lat              180
## 3           sst          2           2           time              424
## the idea is to find out what's in the file
x %>% dimension_values() %>% group_by(name) %>% summarize_at("vals", funs(min, max))
## Joining, by = ".variable_"
## # A tibble: 3 × 3
##    name     min     max
##   <chr>   <dbl>   <dbl>
## 1   lat   -89.5    89.5
## 2   lon     0.5   359.5
## 3  time 66443.0 79317.0
## then build up a query, returning a subsetted set of tables of each dimension's coordinates
(trans <- x %>% filtrate(lon = lon > 250, lat = lat < -10))
## Joining, by = ".variable_"
## $lon
## # A tibble: 110 × 5
##    .dimension_    id   lon  name  step
##          <int> <int> <dbl> <chr> <int>
##  1           1     1 250.5   lon   251
##  2           1     1 251.5   lon   252
##  3           1     1 252.5   lon   253
##  4           1     1 253.5   lon   254
##  5           1     1 254.5   lon   255
##  6           1     1 255.5   lon   256
##  7           1     1 256.5   lon   257
##  8           1     1 257.5   lon   258
##  9           1     1 258.5   lon   259
## 10           1     1 259.5   lon   260
## # ... with 100 more rows
## 
## $lat
## # A tibble: 80 × 5
##    .dimension_    id   lat  name  step
##          <int> <int> <dbl> <chr> <int>
##  1           0     0 -10.5   lat   101
##  2           0     0 -11.5   lat   102
##  3           0     0 -12.5   lat   103
##  4           0     0 -13.5   lat   104
##  5           0     0 -14.5   lat   105
##  6           0     0 -15.5   lat   106
##  7           0     0 -16.5   lat   107
##  8           0     0 -17.5   lat   108
##  9           0     0 -18.5   lat   109
## 10           0     0 -19.5   lat   110
## # ... with 70 more rows
## 
## $time
## # A tibble: 424 × 5
##    .dimension_    id  time  name  step
##          <int> <int> <dbl> <chr> <int>
##  1           2     2 66443  time     1
##  2           2     2 66474  time     2
##  3           2     2 66505  time     3
##  4           2     2 66533  time     4
##  5           2     2 66564  time     5
##  6           2     2 66594  time     6
##  7           2     2 66625  time     7
##  8           2     2 66655  time     8
##  9           2     2 66686  time     9
## 10           2     2 66717  time    10
## # ... with 414 more rows
## extract the hyperslab index from the transform tables
## notice how this and the next block are completely devoid of references to the dimension or variable names
hslab <- bind_rows(lapply(trans, function(x) tibble(name = x$name[1], start = min(x$step), count = length(x$step))))

## open the file and pull out that slab 
con <- ncdf4::nc_open(x$file$filename[1])
slab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)

## (this will be wrapped up in an easy function but it's important to keep things general)
## note that this file stores latitude north-to-south, so we flip it here for image()
image(trans$lon$lon, rev(trans$lat$lat), slab[,ncol(slab):1,1], col = viridis::viridis(100))
map("world2", add = TRUE)

## see how we got a 3D slab this time, because we didn't subset on time
dim(slab)
## [1] 110  80 424
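
## with time left un-subset we can summarize along it; a quick sketch of a
## time-mean field from the slab already in memory (base R only)
mean_sst <- apply(slab, c(1, 2), mean, na.rm = TRUE)
image(trans$lon$lon, rev(trans$lat$lat), mean_sst[, ncol(mean_sst):1], col = viridis::viridis(100))
map("world2", add = TRUE)
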
## this time the slab will be 2D, in latitude ~ time (longitude is fixed at a single value)
(trans <- x %>% filtrate(lon = lon == 147.5, lat = lat < 0 & lat > -70))
## Joining, by = ".variable_"
## $lon
## # A tibble: 1 × 5
##   .dimension_    id   lon  name  step
##         <int> <int> <dbl> <chr> <int>
## 1           1     1 147.5   lon   148
## 
## $lat
## # A tibble: 70 × 5
##    .dimension_    id   lat  name  step
##          <int> <int> <dbl> <chr> <int>
##  1           0     0  -0.5   lat    91
##  2           0     0  -1.5   lat    92
##  3           0     0  -2.5   lat    93
##  4           0     0  -3.5   lat    94
##  5           0     0  -4.5   lat    95
##  6           0     0  -5.5   lat    96
##  7           0     0  -6.5   lat    97
##  8           0     0  -7.5   lat    98
##  9           0     0  -8.5   lat    99
## 10           0     0  -9.5   lat   100
## # ... with 60 more rows
## 
## $time
## # A tibble: 424 × 5
##    .dimension_    id  time  name  step
##          <int> <int> <dbl> <chr> <int>
##  1           2     2 66443  time     1
##  2           2     2 66474  time     2
##  3           2     2 66505  time     3
##  4           2     2 66533  time     4
##  5           2     2 66564  time     5
##  6           2     2 66594  time     6
##  7           2     2 66625  time     7
##  8           2     2 66655  time     8
##  9           2     2 66686  time     9
## 10           2     2 66717  time    10
## # ... with 414 more rows
## extract the hyperslab index from the transform tables
## notice how this and the next block are completely devoid of references to the dimension or variable names
hslab <- bind_rows(lapply(trans, function(x) tibble(name = x$name[1], start = min(x$step), count = length(x$step))))

## open the file and pull out that slab 
con <- ncdf4::nc_open(x$file$filename[1])
slab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)


image(rev(trans$lat$lat), trans$time$time, slab[nrow(slab):1, ], col = viridis::viridis(100))
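
## the time axis in that plot is in the file's raw units; a sketch of turning it
## into dates by reading the units attribute from the open connection
## (this assumes a conventional "days since <origin>" units string)
tunit <- ncdf4::ncatt_get(con, "time", "units")$value
origin <- sub("^days since ", "", tunit)
range(as.Date(trans$time$time, origin = as.Date(origin)))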

f <-  files$fullname[3]
print(gsub(getOption("default.datadir"), "", f))
## [1] "/data_local/chl/johnson/modis/8d/A20081852008192.L3m_8D_SO_Chl_9km.Johnson_SO_Chl.nc"
(x <- NetCDF(f) )
## Variables: chlorophyll, (originalFileAttributes) 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 3 × 5
##   variable_name .variable_ .dimension_ dimension_name dimension_length
##           <chr>      <dbl>       <int>          <chr>            <int>
## 1   chlorophyll          0           0      longitude             4320
## 2   chlorophyll          0           1       latitude              720
## 3   chlorophyll          0           2           time                1
x %>% dimension_values() %>% group_by(name) %>% summarize_at("vals", funs(min, max))
## Joining, by = ".variable_"
## # A tibble: 3 × 3
##        name        min        max
##       <chr>      <dbl>      <dbl>
## 1  latitude  -89.95834  -30.04167
## 2 longitude -179.95833  179.95835
## 3      time 6758.00000 6758.00000
(trans <- x %>% filtrate(longitude = longitude > 0, latitude = latitude > -50))
## Joining, by = ".variable_"
## $longitude
## # A tibble: 2,160 × 5
##    .dimension_    id  longitude      name  step
##          <int> <int>      <dbl>     <chr> <int>
##  1           0     0 0.04167712 longitude  2161
##  2           0     0 0.12501045 longitude  2162
##  3           0     0 0.20834379 longitude  2163
##  4           0     0 0.29167712 longitude  2164
##  5           0     0 0.37501046 longitude  2165
##  6           0     0 0.45834380 longitude  2166
##  7           0     0 0.54167713 longitude  2167
##  8           0     0 0.62501047 longitude  2168
##  9           0     0 0.70834380 longitude  2169
## 10           0     0 0.79167714 longitude  2170
## # ... with 2,150 more rows
## 
## $latitude
## # A tibble: 240 × 5
##    .dimension_    id  latitude     name  step
##          <int> <int>     <dbl>    <chr> <int>
##  1           1     1 -49.95833 latitude   481
##  2           1     1 -49.87500 latitude   482
##  3           1     1 -49.79167 latitude   483
##  4           1     1 -49.70833 latitude   484
##  5           1     1 -49.62500 latitude   485
##  6           1     1 -49.54167 latitude   486
##  7           1     1 -49.45833 latitude   487
##  8           1     1 -49.37500 latitude   488
##  9           1     1 -49.29167 latitude   489
## 10           1     1 -49.20833 latitude   490
## # ... with 230 more rows
## 
## $time
## # A tibble: 1 × 5
##   .dimension_    id  time  name  step
##         <int> <int> <dbl> <chr> <int>
## 1           2     2  6758  time     1
## extract the hyperslab index from the transform tables
## notice how this and the next block are completely devoid of references to the dimension or variable names
hslab <- bind_rows(lapply(trans, function(x) tibble(name = x$name[1], start = min(x$step), count = length(x$step))))

## open the file and pull out that slab 
con <- ncdf4::nc_open(x$file$filename[1])
slab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)

## (this will be wrapped up in an easy function but it's important to keep things general)
## note that this file uses south-up convention
image(trans$longitude$longitude, trans$latitude$latitude, log(slab), col = viridis::viridis(100))
map("world2", add = TRUE)

f <-  files$fullname[4]
print(gsub(getOption("default.datadir"), "", f))
## [1] "/data/ftp.aviso.altimetry.fr/global/delayed-time/grids/madt/all-sat-merged/uv/2015/dt_global_allsat_madt_uv_20150213_20150914.nc"
## now we really want to activate a different variable, because the one chosen by default is not of interest
(x <- NetCDF(f) )
## Variables: lat_bnds, (lon_bnds, crs, u, v) 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 2 × 5
##   variable_name .variable_ .dimension_ dimension_name dimension_length
##           <chr>      <dbl>       <int>          <chr>            <int>
## 1      lat_bnds          2           3             nv                2
## 2      lat_bnds          2           1            lat              720
(x <- NetCDF(f) %>% activate("u"))
## Variables: u, (lat_bnds, lon_bnds, crs, v) 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 3 × 5
##   variable_name .variable_ .dimension_ dimension_name dimension_length
##           <chr>      <dbl>       <int>          <chr>            <int>
## 1             u          7           2            lon             1440
## 2             u          7           1            lat              720
## 3             u          7           0           time                1
x %>% dimension_values() %>% group_by(name) %>% summarize_at("vals", funs(min, max))
## Joining, by = ".variable_"
## # A tibble: 3 × 3
##    name       min       max
##   <chr>     <dbl>     <dbl>
## 1   lat   -89.875    89.875
## 2   lon     0.125   359.875
## 3  time 23784.000 23784.000
(trans <- x %>% filtrate(lon = between(lon, 100, 150), lat = lat > -50 & lat < 20))
## Joining, by = ".variable_"
## $lon
## # A tibble: 200 × 5
##    .dimension_    id     lon  name  step
##          <int> <int>   <dbl> <chr> <int>
##  1           2     2 100.125   lon   401
##  2           2     2 100.375   lon   402
##  3           2     2 100.625   lon   403
##  4           2     2 100.875   lon   404
##  5           2     2 101.125   lon   405
##  6           2     2 101.375   lon   406
##  7           2     2 101.625   lon   407
##  8           2     2 101.875   lon   408
##  9           2     2 102.125   lon   409
## 10           2     2 102.375   lon   410
## # ... with 190 more rows
## 
## $lat
## # A tibble: 280 × 5
##    .dimension_    id     lat  name  step
##          <int> <int>   <dbl> <chr> <int>
##  1           1     1 -49.875   lat   161
##  2           1     1 -49.625   lat   162
##  3           1     1 -49.375   lat   163
##  4           1     1 -49.125   lat   164
##  5           1     1 -48.875   lat   165
##  6           1     1 -48.625   lat   166
##  7           1     1 -48.375   lat   167
##  8           1     1 -48.125   lat   168
##  9           1     1 -47.875   lat   169
## 10           1     1 -47.625   lat   170
## # ... with 270 more rows
## 
## $time
## # A tibble: 1 × 5
##   .dimension_    id  time  name  step
##         <int> <int> <dbl> <chr> <int>
## 1           0     0 23784  time     1
## extract the hyperslab index from the transform tables
## notice how this and the next block are completely devoid of references to the dimension or variable names
hslab <- bind_rows(lapply(trans, function(x) tibble(name = x$name[1], start = min(x$step), count = length(x$step))))

## open the file and pull out that slab 
con <- ncdf4::nc_open(x$file$filename[1])
uslab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)

## get the other slab while we are at it
x <- activate(x, "v")
vslab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)

## (this will be wrapped up in an easy function but it's important to keep things general)
## note that this file uses south-up convention
image(trans$lon$lon, trans$lat$lat, uslab, col = viridis::viridis(100))

## use u and v together
image(trans$lon$lon, trans$lat$lat, sqrt(uslab^2 + vslab^2), col = viridis::viridis(100))
map("world2", add = TRUE)
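
## a rough sketch of overlaying thinned current vectors on the speed field;
## the thinning step and arrow scale are arbitrary choices here
ii <- seq(1, length(trans$lon$lon), by = 10)
jj <- seq(1, length(trans$lat$lat), by = 10)
lon0 <- rep(trans$lon$lon[ii], times = length(jj))
lat0 <- rep(trans$lat$lat[jj], each = length(ii))
u0 <- as.vector(uslab[ii, jj]); v0 <- as.vector(vslab[ii, jj])
ok <- !is.na(u0) & !is.na(v0)
arrows(lon0[ok], lat0[ok], lon0[ok] + 2 * u0[ok], lat0[ok] + 2 * v0[ok], length = 0.02)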

f <-  files$fullname[5]
print(gsub(getOption("default.datadir"), "", f))
## [1] "/data/webdav.data.aad.gov.au/data/environmental/smmr_ssmi_nasateam/time_since_melt/daily/1986/time_since_melt_19860514.nc"
(nc_object <- NetCDF(f) %>% activate("days_since_ice_melt"))
## Variables: days_since_ice_melt, (polar_stereographic) 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 2 × 5
##         variable_name .variable_ .dimension_ dimension_name
##                 <chr>      <dbl>       <int>          <chr>
## 1 days_since_ice_melt          0           0              x
## 2 days_since_ice_melt          0           1              y
## # ... with 1 more variables: dimension_length <int>
nc_object %>% dimension_values() %>% group_by(name) %>% summarize_at("vals", funs(min, max))
## Joining, by = ".variable_"
## # A tibble: 2 × 3
##    name      min     max
##   <chr>    <dbl>   <dbl>
## 1     x -3937500 3937500
## 2     y -3937500 4337500
## what's wrong here??

#(trans <- nc_object %>% filtrate(x = x > 0, y = y < 0))
f <-  files$fullname[6]
print(gsub(getOption("default.datadir"), "", f))
## [1] "/data/webdav.data.aad.gov.au/data/environmental/derived/antarctic/netcdf/si_200_interpolated_summer_climatology.nc"
## the idea is to find out what's in the file
NetCDF(f) %>% dimension_values() %>% group_by(name) %>% summarize_at("vals", funs(min, max))
## Joining, by = ".variable_"
## # A tibble: 2 × 3
##    name   min   max
##   <chr> <dbl> <dbl>
## 1   lat   -80   -30
## 2   lon  -180   180
(x <- NetCDF(f) )
## Variables: silicate 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 2 × 5
##   variable_name .variable_ .dimension_ dimension_name dimension_length
##           <chr>      <dbl>       <int>          <chr>            <int>
## 1      silicate          2           1            lon             3601
## 2      silicate          2           0            lat              501
## then build up a query, returning a subsetted set of tables of each dimension's coordinates
trans <- x %>% filtrate(lon = lon > 100, lat = between(lat, -50, -30))
## Joining, by = ".variable_"
## extract the hyperslab index from the transform tables
hslab <- bind_rows(lapply(trans, function(x) tibble(name = x$name[1], start = min(x$step), count = length(x$step))))

## open the file and pull out that slab 
con <- ncdf4::nc_open(x$file$filename[1])
slab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)

## (this will be wrapped up in an easy function but it's important to keep things general)
image(trans$lon$lon, trans$lat$lat, slab, col = viridis::viridis(100))
map("world2", add = TRUE)

ggplot(as_tib_slab(trans, slab, "silicate"), aes(lon, lat, fill = silicate)) + geom_raster()

f <-  files$fullname[7]
print(gsub(getOption("default.datadir"), "", f))
## [1] "/data/oceandata.sci.gsfc.nasa.gov/SeaWiFS/Mapped/Monthly/9km/chlor/S20100912010120.L3m_MO_CHL_chlor_a_9km.nc"
## the idea is to find out what's in the file
NetCDF(f) %>% dimension_values() %>% group_by(name) %>% summarize_at("vals", funs(min, max))
## Joining, by = ".variable_"
## # A tibble: 2 × 3
##    name        min       max
##   <chr>      <dbl>     <dbl>
## 1   lat  -89.95834  89.95834
## 2   lon -179.95833 179.95836
(x <- NetCDF(f) )
## Variables: chlor_a, (palette) 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 2 × 5
##   variable_name .variable_ .dimension_ dimension_name dimension_length
##           <chr>      <dbl>       <int>          <chr>            <int>
## 1       chlor_a          0           1            lon             4320
## 2       chlor_a          0           0            lat             2160
## then build up a query, returning a subsetted set of tables of each dimension's coordinates
trans <- x %>% filtrate(lon = lon > 100, lat = between(lat, -50, -30))
## Joining, by = ".variable_"
## extract the hyperslab index from the transform tables
hslab <- bind_rows(lapply(trans, function(x) tibble(name = x$name[1], start = min(x$step), count = length(x$step))))

## open the file and pull out that slab 
con <- ncdf4::nc_open(x$file$filename[1])
slab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)

cpal <- palr::chlPal(palette = TRUE)
image(trans$lon$lon, rev(trans$lat$lat), slab[,ncol(slab):1], col = cpal$cols[-1], breaks = cpal$breaks)
map("world2", add = TRUE)

f <-  files$fullname[8]
print(gsub(getOption("default.datadir"), "", f))
## [1] "/data/ftp.aviso.altimetry.fr/global/delayed-time/grids/msla/all-sat-merged/h/2008/dt_global_allsat_msla_h_20081024_20140106.nc"
## the idea is to find out what's in the file
NetCDF(f) %>% dimension_values() %>% group_by(name) %>% summarize_at("vals", funs(min, max))
## Joining, by = ".variable_"
## # A tibble: 2 × 3
##    name     min    max
##   <chr>   <dbl>  <dbl>
## 1   lat -89.875 89.875
## 2    nv   0.000  1.000
(x <- NetCDF(f) %>% activate("sla") )
## Variables: sla, (lat_bnds, lon_bnds, crs, err) 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 3 × 5
##   variable_name .variable_ .dimension_ dimension_name dimension_length
##           <chr>      <dbl>       <int>          <chr>            <int>
## 1           sla          7           2            lon             1440
## 2           sla          7           1            lat              720
## 3           sla          7           0           time                1
## then build up a query, returning a subsetted set of tables of each dimension's coordinates
trans <- x %>% filtrate(lon = lon > 100 & lon < 340, lat = between(lat, 0, 50))
## Joining, by = ".variable_"
## extract the hyperslab index from the transform tables
hslab <- bind_rows(lapply(trans, function(x) tibble(name = x$name[1], start = min(x$step), count = length(x$step))))

## open the file and pull out that slab 
con <- ncdf4::nc_open(x$file$filename[1])
slab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)


image(trans$lon$lon, trans$lat$lat, slab, col = viridis::viridis(100), asp = 1/cos(-40 * pi/180), xlim = c(0, 360))
map("world2", add = TRUE)

f <-  files$fullname[9]
print(gsub(getOption("default.datadir"), "", f))
## [1] "/data/ftp.cdc.noaa.gov/Datasets/ncep.reanalysis2/gaussian_grid/uwnd.10m.gauss.2002.nc"
## the idea is to find out what's in the file
NetCDF(f) %>% dimension_values() %>% group_by(name) %>% summarize_at("vals", funs(min, max))
## Joining, by = ".variable_"
## # A tibble: 4 × 3
##    name         min         max
##   <chr>       <dbl>       <dbl>
## 1   lat     -88.542      88.542
## 2 level      10.000      10.000
## 3   lon       0.000     358.125
## 4  time 1770696.000 1779450.000
(x <- NetCDF(f) )
## Variables: uwnd 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 4 × 5
##   variable_name .variable_ .dimension_ dimension_name dimension_length
##           <chr>      <dbl>       <int>          <chr>            <int>
## 1          uwnd          4           0            lon              192
## 2          uwnd          4           1            lat               94
## 3          uwnd          4           2          level                1
## 4          uwnd          4           3           time             1460
## then build up a query, returning a subsetted set of tables of each dimension's coordinates
trans <- x %>% filtrate(lon = lon > 300, lat = between(lat, 0, 50), time = time < 1770732)
## Joining, by = ".variable_"
## extract the hyperslab index from the transform tables
hslab <- bind_rows(lapply(trans, function(x) tibble(name = x$name[1], start = min(x$step), count = length(x$step))))

## open the file and pull out that slab 
con <- ncdf4::nc_open(x$file$filename[1])
slab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)

for (i in seq(nrow(trans$time))) {
image(trans$lon$lon, rev(trans$lat$lat), slab[,ncol(slab):1,i], col = viridis::viridis(100), asp = 1/cos(-40 * pi/180))
map("world2", add = TRUE)
}

ggplot(as_tib_slab(trans, slab, "uwnd"), aes(lon, lat, fill = uwnd)) + geom_raster() + facet_wrap(~time)

f <-  file.path(getOption("default.datadir"), "data_local/acecrc.org.au/ROMS/s_corney/cpolar/ocean_his_3101.nc")

(x <- NetCDF(f) %>% activate("salt"))
## Variables: salt, (ntimes, ndtfast, dt, dtfast, dstart, nHIS, ndefHIS, nRST, Falpha, Fbeta, Fgamma, nl_tnu2, nl_visc2, Akt_bak, Akv_bak, rdrg, rdrg2, Zob, Zos, Znudg, M2nudg, M3nudg, Tnudg, FSobc_in, FSobc_out, M2obc_in, M2obc_out, Tobc_in, Tobc_out, M3obc_in, M3obc_out, rho0, gamma2, spherical, xl, el, Vtransform, Vstretching, theta_s, theta_b, Tcline, hc, Cs_r, Cs_w, h, zice, f, pm, pn, lon_rho, lat_rho, lon_u, lat_u, lon_v, lat_v, lon_psi, lat_psi, angle, mask_rho, mask_u, mask_v, mask_psi, zeta, ubar, vbar, u, v, w, temp, rho, Hsbl, AKv, AKt, AKs, shflux, ssflux, swrad, sustr, svstr, bustr, bvstr) 
## Dimensions:
## Joining, by = ".dimension_"
## # A tibble: 4 × 5
##   variable_name .variable_ .dimension_ dimension_name dimension_length
##           <chr>      <dbl>       <int>          <chr>            <int>
## 1          salt         72           0         xi_rho             1443
## 2          salt         72           4        eta_rho              392
## 3          salt         72           8          s_rho               31
## 4          salt         72          12     ocean_time               31
x %>% dimension_values() %>% group_by(name) %>% summarize_at("vals", funs(min, max))
## Joining, by = ".variable_"
## # A tibble: 4 × 3
##         name           min           max
##        <chr>         <dbl>         <dbl>
## 1    eta_rho  1.000000e+00  3.920000e+02
## 2 ocean_time  9.465984e+08  9.491904e+08
## 3      s_rho -9.838710e-01 -1.612903e-02
## 4     xi_rho  1.000000e+00  1.443000e+03
## then build up a query, returning a subsetted set of tables of each dimension's coordinates
trans <- x %>% filtrate(eta_rho = eta_rho < 100, xi_rho = between(xi_rho, 500, 600), ocean_time = ocean_time < 9.465984e+08 + 12, s_rho = s_rho > -0.17741935)
## Joining, by = ".variable_"
## extract the hyperslab index from the transform tables
(hslab <- bind_rows(lapply(trans, function(x) tibble(name = x$name[1], start = min(x$step), count = length(x$step)))))
## # A tibble: 4 × 3
##         name start count
##        <chr> <int> <int>
## 1     xi_rho   500   101
## 2    eta_rho     1    99
## 3      s_rho    27     5
## 4 ocean_time     1     1
## open the file and pull out that slab 
con <- ncdf4::nc_open(x$file$filename[1])
slab <- ncdf4::ncvar_get(con, nctive(x), 
                         start = hslab$start, 
                         count = hslab$count)
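
## a quick look at one vertical level of the ROMS slab (ncvar_get drops the
## degenerate ocean_time dimension, leaving xi_rho x eta_rho x s_rho); the axes
## here are just index space, not longitude/latitude
image(seq_len(dim(slab)[1]), seq_len(dim(slab)[2]), slab[, , dim(slab)[3]],
      col = viridis::viridis(100))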

## much more to think about ...