library(tidyverse)
library(jsonlite)
EXPERIMENT_ROOT <- 'data/codex/parsed'
experiment_file <- function(filename) file.path(EXPERIMENT_ROOT, filename)
# The experiment metadata is a single JSON document stored alongside the parsed
# data; we locate it by glob rather than hard-coding the file name.
experiment_meta <- jsonlite::read_json(fs::dir_ls(EXPERIMENT_ROOT, glob = '*.jsonl'))
n_leechers <- length(experiment_meta$nodes$nodes) - experiment_meta$seeders
cat(paste0("Network has ", length(experiment_meta$nodes$nodes), " nodes with ", experiment_meta$seeders, " seeders and ", n_leechers, " leechers."))
Network has 10 nodes with 4 seeders and 6 leechers.
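For reference, these are the metadata fields the rest of the notebook relies on (names taken from how they are used below; the metadata file may well contain more):
# Metadata fields used throughout this notebook (inferred from usage below).
str(experiment_meta[c('seeders', 'seeder_sets', 'repetitions',
                      'file_size', 'download_metric_unit_bytes')])
# Total number of nodes in the network:
length(experiment_meta$nodes$nodes)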
We read the download records and extract the run id and seed set from the dataset name.
downloads <- read_csv(
  fs::dir_ls(EXPERIMENT_ROOT, glob = '*download*'),
  show_col_types = FALSE
) |>
  mutate(
    temp = str_remove(dataset_name, '^dataset-'),
    seed_set = as.numeric(str_extract(temp, '^\\d+')),
    run = as.numeric(str_extract(temp, '\\d+$')),
    # Convert the reported metric to bytes.
    value = value * experiment_meta$download_metric_unit_bytes
  ) |>
  select(-temp, -name)
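As a quick illustration of the extraction above, here is how the two regexes behave on a made-up dataset name (the real naming scheme may differ; only the leading and trailing digit groups matter):
# Hypothetical dataset name, purely for illustration.
example_name <- 'dataset-2-download-run-5'
temp <- str_remove(example_name, '^dataset-')
as.numeric(str_extract(temp, '^\\d+'))  # seed_set -> 2
as.numeric(str_extract(temp, '\\d+$'))  # run -> 5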
Since what we get are piece indices, which may arrive out of order, we need to count how many pieces a node has downloaded up to a given instant:
downloads <- downloads |>
  group_by(node, seed_set, run) |>
  arrange(timestamp) |>
  mutate(
    # Cumulative number of pieces received by this node so far.
    piece_count = seq_along(timestamp)
  ) |>
  ungroup() |>
  mutate(completed = value / experiment_meta$file_size)
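To make this concrete, here is a toy example (made-up timestamps and piece indices) where pieces arrive out of order but the running count still reflects download progress:
# Toy illustration: pieces 3, 1, 2 arrive out of order, yet the running count
# at each timestamp is simply how many pieces have been seen so far.
tibble(timestamp = c(1.0, 1.4, 2.1), piece = c(3, 1, 2)) |>
  arrange(timestamp) |>
  mutate(piece_count = seq_along(timestamp))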
We can have a brief look at the data to see that it makes sense.
ggplot(downloads |>
         filter(seed_set < 3) |>
         group_by(seed_set, run) |>
         mutate(timestamp = as.numeric(timestamp - min(timestamp))) |>
         ungroup()) +
  geom_line(aes(x = timestamp, y = completed, col = node), linewidth = 0.7) +
  scale_y_continuous(labels = scales::percent_format()) +
  facet_grid(run ~ seed_set, labeller = labeller(
    run = as_labeller(function(x) paste0("run: ", x)),
    seed_set = as_labeller(function(x) paste0("seed set: ", x)))) +
  xlab('elapsed time (seconds)') +
  ylab('download completion (%)') +
  theme_bw(base_size = 15)
The data looks reasonable: at the left of each panel we see the “download times” for the seeders, which are nearly instantaneous since seeders already hold the file, followed by the downloads for the leechers. There is some variability across experiments, with some nodes seemingly struggling to complete their downloads at times.
Have any nodes failed to download the entire file?
downloads |>
  group_by(node, seed_set, run) |>
  summarise(completed = max(completed), .groups = 'drop') |>
  filter(completed < 1.0)
Do we have as many runs and seed sets as we expect?
downloads |>
  select(seed_set, node, run) |>
  distinct() |>
  group_by(seed_set, node) |>
  count() |>
  filter(n != experiment_meta$repetitions)
We define the download time for a Deluge node \(d\) as the time elapsed between the client’s response to an \(\addtorrent\) request and the time at which the client reports having received the last piece of the downloaded file. Since seeders are already in possession of the file by construction, we only measure download times at leechers.
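In symbols (notation introduced here just for clarity), writing \(t_{\mathrm{resp}}(d)\) for the time of the client’s response to the request and \(t_{\mathrm{last}}(d)\) for the arrival time of the last piece at leecher \(d\), the quantity we compute below is
\[ T_{\mathrm{download}}(d) = t_{\mathrm{last}}(d) - t_{\mathrm{resp}}(d). \]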
add_torrent_requests <- read_csv(
  experiment_file('request_event.csv'), show_col_types = FALSE) |>
  # Strip stray double quotes from the destination node names.
  mutate(destination = gsub("\"", "", destination))
download_start <- add_torrent_requests |>
  select(-request_id) |>
  filter(name == 'leech', type == 'RequestEventType.end') |>
  mutate(
    # We didn't log run and seed set on the runner side, so we reconstruct them
    # here, relying on the events appearing ordered by seed set, then run, then leecher.
    run = rep(rep(
      seq(0, experiment_meta$repetitions - 1),
      each = n_leechers), times = experiment_meta$seeder_sets),
    seed_set = rep(
      seq(0, experiment_meta$seeder_sets - 1),
      each = n_leechers * experiment_meta$repetitions)
  ) |>
  transmute(node = destination, run, seed_set, seed_request_time = timestamp)
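To see what this reconstruction produces, here is the same rep() pattern on a small made-up configuration (2 seed sets, 2 repetitions, 3 leechers; not the actual experiment size):
# Illustrative only: 2 seed sets x 2 repetitions x 3 leechers.
reps <- 2; seeder_sets <- 2; leechers <- 3
rep(rep(seq(0, reps - 1), each = leechers), times = seeder_sets)
# 0 0 0 1 1 1 0 0 0 1 1 1
rep(seq(0, seeder_sets - 1), each = leechers * reps)
# 0 0 0 0 0 0 1 1 1 1 1 1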
download_times <- downloads |>
  left_join(download_start, by = c('node', 'run', 'seed_set')) |>
  mutate(
    elapsed_download_time = as.numeric(timestamp - seed_request_time)
  ) |>
  group_by(node, run, seed_set) |>
  mutate(lookup_time = as.numeric(min(timestamp) - seed_request_time)) |>
  ungroup()
If we did this right, the elapsed download time can never be negative, and neither can the lookup time.
download_times |> filter(elapsed_download_time < 0 | lookup_time < 0)
We can now compute statistics on the download times.
download_time_stats <- download_times |>
  filter(!is.na(elapsed_download_time)) |>
  group_by(piece_count, completed) |>
  summarise(
    mean = mean(elapsed_download_time),
    median = median(elapsed_download_time),
    max = max(elapsed_download_time),
    min = min(elapsed_download_time),
    p90 = quantile(elapsed_download_time, probs = 0.9),
    p10 = quantile(elapsed_download_time, probs = 0.1),
    .groups = 'drop'
  )
ggplot(download_time_stats) +
  geom_ribbon(aes(xmin = p10, xmax = p90, y = completed),
              fill = scales::alpha('blue', 0.5), col = 'lightgray') +
  geom_line(aes(x = median, y = completed)) +
  theme_minimal() +
  ylab("completion") +
  xlab("time (seconds)") +
  ggtitle(paste0('download time (Codex, ',
                 rlang::as_bytes(experiment_meta$file_size), ' file)'))