Central Valley Enhanced Acoustic Tagging Project






Mokelumne Fall-run hatchery Chinook

2022-2023 Season (PROVISIONAL DATA)


The telemetry study template for this study can be found here



1. Project Status


The study is complete; all tags have been inactive since 2023-06-16. All times are in Pacific Standard Time.
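
Timestamps in the code below are handled with the fixed-offset zone "Etc/GMT+8" (the POSIX sign convention is reversed, so this is UTC-8), which matches Pacific Standard Time year-round with no daylight-saving shift. A minimal illustration:

# "Etc/GMT+8" is fixed UTC-8, i.e. PST with no DST adjustment
format(as.POSIXct("2023-06-16 12:00:00", tz = "UTC"), tz = "Etc/GMT+8")
# [1] "2023-06-16 04:00:00"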

The study began on 2023-04-19 10:00:00; see tagging details below:
| Release | First_release_time | Last_release_time | Number_fish_released | Release_location | Release_rkm | Mean_length (mm) | Mean_weight (g) |
|---------|---------------------|---------------------|----------------------|------------------|-------------|------------------|-----------------|
| VINO    | 2023-04-19 10:00:00 | 2023-05-10 09:40:00 | 348                  | VINO             | 194.1       | 87.2             | 7.8             |
| WBW     | 2023-04-19 12:00:00 | 2023-05-10 10:30:00 | 345                  | WBW              | 168.4       | 87.1             | 7.8             |
| CRC     | 2023-04-21 10:00:00 | 2023-05-05 10:00:00 | 410                  | CRC              | 147.0       | 87.4             | 7.8             |
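
The summary above can be assembled from the tagging metadata; a minimal sketch, assuming a study_tagcodes data frame with Release, release_time, release_rkm, length, and weight columns (these column names are assumptions, not confirmed by this page):

library(dplyr)

# Hypothetical release summary: one row per release group
release_summary <- study_tagcodes %>%
  group_by(Release) %>%
  summarise(First_release_time   = min(release_time),
            Last_release_time    = max(release_time),
            Number_fish_released = n(),
            Release_rkm          = first(release_rkm),
            Mean_length          = round(mean(length, na.rm = TRUE), 1),
            Mean_weight          = round(mean(weight, na.rm = TRUE), 1))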



2. Real-time Fish Detections


library(leaflet)
library(maps)
library(htmlwidgets)
library(leaflet.extras)
## Also used by the chunks below:
library(dplyr); library(data.table); library(reshape2)  # wrangling: pipes, fread/shift, melt/dcast
library(plotly); library(knitr); library(kableExtra)    # interactive plots and styled tables
library(RMark)                                          # CJS survival models (process.data, mark)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

## THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
   cat("No detections yet")

   gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F) %>% filter(is.na(stop))

   leaflet(data = gen_locs) %>%
       # setView(-72.14600, 43.82977, zoom = 8) %>%
       addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
       addProviderTiles("Esri.WorldImagery", group = "Satellite") %>% 
       addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
       # Marker data are from the sites data frame. We need the ~ symbols
       # to indicate the columns of the data frame.
       addMarkers(~longitude, ~latitude, label = ~general_location, group = "Receiver Sites", popup = ~location) %>% 
       # addAwesomeMarkers(~lon_dd, ~lat_dd, label = ~locality, group = "Sites", icon=icons) %>%
       addScaleBar(position = "bottomleft") %>%
          addLayersControl(
          baseGroups = c("Street Map", "Satellite", "Relief"),
          overlayGroups = c("Receiver Sites"),
          options = layersControlOptions(collapsed = FALSE)) %>%
          addSearchFeatures(targetGroups = c("Receiver Sites"))
} else {

   gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)

   ## End the window at today or 1.5x the rated tag life after release, whichever comes first
   endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
                  max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))

   beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F) %>%
      mutate(day = as.Date(day)) %>%
      # Subset to only look at data for the correct beacon for that day
      filter(TagCode == beacon)  %>% 
      # Only keep beacon by day for days since fish were released
      filter(day >= as.Date(min(study_tagcodes$release_time)) & day <= endtime) %>%
      dplyr::left_join(., gen_locs[,c("location", "general_location","rkm")], by = "location")

   arrivals_per_day <- detects_study %>%
      group_by(general_location, TagCode) %>%
      summarise(DateTime_PST = min(DateTime_PST, na.rm = T)) %>%
      arrange(TagCode, general_location) %>%
      mutate(day = as.Date(DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8")) %>%
      group_by(day, general_location) %>%
      summarise(New_arrivals = length(TagCode)) %>%
      na.omit() %>%
      mutate(day = as.Date(day)) %>%
      dplyr::left_join(unique(beacon_by_day[,c("general_location", "day", "rkm")]), ., 
                       by = c("general_location", "day")) %>%
      arrange(general_location, day) %>%
      mutate(day = as.factor(day)) %>%
      filter(general_location != "Bench_test") %>% # Remove bench test
      filter(!(is.na(general_location))) # Remove NA locations

   ## Remove sites that were not operational the whole time
   #### FOR THE SEASONAL SURVIVAL PAGE, KEEP ALL SITES SINCE PEOPLE WANT TO SEE DETECTIONS OF LATER FISH AT NEWLY 
   #### DEPLOYED SPOTS
   gen_locs_days_in_oper <- arrivals_per_day %>%
      group_by(general_location) %>%
      summarise(days_in_oper = length(day))
   #gen_locs_days_in_oper <- gen_locs_days_in_oper[gen_locs_days_in_oper$days_in_oper ==
   #                                               max(gen_locs_days_in_oper$days_in_oper),]
   arrivals_per_day_in_oper <- arrivals_per_day %>%
      filter(general_location %in% gen_locs_days_in_oper$general_location)

   fish_per_site <- arrivals_per_day_in_oper %>%
      group_by(general_location) %>%
      summarise(fish_count = sum(New_arrivals, na.rm=T))

   gen_locs_mean_coords <- gen_locs %>%
      filter(is.na(stop) & general_location %in% fish_per_site$general_location) %>%
      group_by(general_location) %>%
      summarise(latitude = mean(latitude), # estimate mean lat and lons for each genloc
                longitude = mean(longitude))

   fish_per_site <- merge(fish_per_site, gen_locs_mean_coords)

   if(!is.na(release_stats$Release_lat[1])){
     leaflet(data = fish_per_site) %>%
       addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
       addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
       addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
       # Marker data are from the sites data frame. We need the ~ symbols
       # to indicate the columns of the data frame.
       addPulseMarkers(lng = fish_per_site$longitude, lat = fish_per_site$latitude, label = ~fish_count, 
                       labelOptions = labelOptions(noHide = T, textsize = "15px"), group = "Receiver Sites",
                       popup = ~general_location, icon = makePulseIcon(heartbeat = 1.3)) %>%
       addCircleMarkers(data = release_stats, ~Release_lon, ~Release_lat, label = ~Number_fish_released, stroke = F, color = "blue", fillOpacity = 1, 
                        group = "Release Sites", popup = ~Release_location, labelOptions = labelOptions(noHide = T, textsize = "15px")) %>%
       addScaleBar(position = "bottomleft") %>%
       addLegend("bottomright", labels = c("Receivers", "Release locations"), colors = c("red","blue")) %>%
       addLayersControl(baseGroups = c("Street Map", "Satellite", "Relief"), options = layersControlOptions(collapsed = FALSE))
   } else {
     leaflet(data = fish_per_site) %>%
       addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
       addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
       addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
       # Marker data are from the sites data frame. We need the ~ symbols
       # to indicate the columns of the data frame.
       addPulseMarkers(lng = fish_per_site$longitude, lat = fish_per_site$latitude, label = ~fish_count, 
                       labelOptions = labelOptions(noHide = T, textsize = "15px"), group = "Receiver Sites",
                       popup = ~general_location, icon = makePulseIcon(heartbeat = 1.3)) %>%
       addScaleBar(position = "bottomleft") %>%
       addLayersControl(baseGroups = c("Street Map", "Satellite", "Relief"),
                        options = layersControlOptions(collapsed = FALSE))
   }
}

2.1 Map of unique fish detections at operational realtime detection locations


try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) > 0){

   detects_study <- detects_study[order(detects_study$TagCode, detects_study$DateTime_PST),]
   ## Now estimate the time in hours between the previous and next detection, for each detection. 
   detects_study$prev_genloc <- shift(detects_study$general_location, fill = NA, type = "lag")
   #detects_study$prev_genloc <- shift(detects_study$General_Location, fill = NA, type = "lag")
   ## Now make NA the time diff values when it's between 2 different tagcodes or genlocs
   detects_study[which(detects_study$TagCode != shift(detects_study$TagCode, fill = NA, type = "lag")), "prev_genloc"] <- NA
   detects_study[which(detects_study$general_location != detects_study$prev_genloc), "prev_genloc"] <- NA
   detects_study$mov_score <- 0
   detects_study[is.na(detects_study$prev_genloc), "mov_score"] <- 1
   detects_study$mov_counter <- cumsum(detects_study$mov_score)
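   ## mov_counter now uniquely numbers each continuous stay: it increments
   ## whenever a detection starts a new TagCode or a new general_location,
   ## so every tag-by-location visit gets its own ID for the summary below.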

   detects_summary <- aggregate(list(first_detect = detects_study$DateTime_PST),
                                by = list(TagCode = detects_study$TagCode,
                                          length = detects_study$length,
                                          release_time = detects_study$release_time,
                                          mov_counter = detects_study$mov_counter,
                                          general_location = detects_study$general_location,
                                          river_km = detects_study$river_km,
                                          release_rkm = detects_study$release_rkm),
                                min)

   detects_summary <- detects_summary[is.na(detects_summary$first_detect) == F,]
   releases <- aggregate(list(first_detect = detects_summary$release_time),
                         by = list(TagCode = detects_summary$TagCode,
                                   length = detects_summary$length,
                                   release_time = detects_summary$release_time,
                                   release_rkm = detects_summary$release_rkm),
                         min)
   releases$river_km <- releases$release_rkm
   releases$mov_counter <- NA
   releases$general_location <- NA

   detects_summary <- rbindlist(list(detects_summary, releases), use.names = T)
   detects_summary <- detects_summary[order(detects_summary$TagCode, detects_summary$first_detect),]

   starttime <- as.Date(min(detects_study$release_time), tz = "Etc/GMT+8")
   ## Endtime should be either now, or end of predicted tag life, whichever comes first
   endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d"))+1, max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
   #par(mar=c(6, 5, 2, 5) + 0.1)

   plot_ly(detects_summary, width = 900, height = 600, dynamicTicks = TRUE) %>%
      add_lines(x = ~first_detect, y = ~river_km, color = ~TagCode) %>%
      add_markers(x = ~first_detect, y = ~river_km, color = ~TagCode, showlegend = F) %>%
      layout(showlegend = T, 
         xaxis = list(title = "<b> Date <b>", mirror=T,ticks="outside",showline=T, range=c(starttime,endtime)),
         yaxis = list(title = "<b> Kilometers upstream of the Golden Gate <b>", mirror=T,ticks="outside",showline=T, range=c(max(detects_study$release_rkm)+10, min(gen_locs[is.na(gen_locs$stop),"rkm"])-10)),
         legend = list(title=list(text='<b> Tag Code </b>')),
         margin=list(l = 50,r = 100,b = 50,t = 50)
   )

}else{
   plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Kilometers upstream of the Golden Gate")
   text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
}

2.2 Waterfall Detection Plot



library(tidyr)

try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

detects_3 <- detects_study %>% filter(general_location == "Benicia_west" | general_location == "Benicia_east")

if(nrow(detects_3) == 0){
   plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
   text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
  detects_3 <- detects_3 %>%
    dplyr::left_join(., detects_3 %>%
                          group_by(TagCode) %>% 
                          summarise(first_detect = min(DateTime_PST))) %>%
    mutate(Day = as.Date(first_detect, tz = "Etc/GMT+8"))

  starttime <- as.Date(min(detects_3$release_time), tz = "Etc/GMT+8")

  # Endtime should be either now, or end of predicted tag life, whichever comes first
  endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
                 max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))

  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))

  rels            <- unique(study_tagcodes$Release)
  rel_num         <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_3$Release))])

  tagcount1 <- detects_3 %>%
               group_by(Day, Release) %>%
               summarise(unique_tags = length(unique(TagCode))) %>%
               spread(Release, unique_tags)
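  # Note: tidyr::spread() is superseded by tidyr::pivot_wider(); kept as-is here.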

  daterange1 <- merge(daterange, tagcount1, all.x=T)
  daterange1[is.na(daterange1)] <- 0

  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == "x"] <- paste(i)
    }
  }

  ## Reorder columns alphabetically so release-group coloring in barplots is consistent
  daterange1 <- daterange1[,order(colnames(daterange1))]
  daterange2 <- daterange1
  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL

  par(mar=c(6, 5, 2, 5) + 0.1)

  daterange2$Date <- as.Date(row.names(daterange2))
  daterange3      <- reshape2::melt(daterange2, id.vars = "Date", variable.name = ".")
  daterange3$.    <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))

  plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
    add_bars(x = ~Date, y = ~value, color = ~.) %>%
    add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
                     x=0.01, xanchor="left",
                     y=1.056, yanchor="top",    # Same y as legend below
                     legendtitle=TRUE, showarrow=FALSE ) %>%
    layout(showlegend = T, 
           barmode = "stack",
           xaxis = list(title = "Date", mirror=T,ticks="outside",showline=T), 
           yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks="outside",showline=T),
           legend = list(orientation = "h",x = 0.34, y = 1.066),
           margin=list(l = 50,r = 100,b = 50,t = 50))
}

2.3 Detections at Benicia Bridge for duration of tag life



3. Survival and Routing Probability


try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

try(benicia <- read.csv("benicia_surv.csv", stringsAsFactors = F))

detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]
endtime         <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))

if(nrow(detects_benicia) == 0){
  if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
    WR.surv <- data.frame("Release"="ALL", "estimate"=0, "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)

  } else {
    WR.surv <- data.frame("Release"=NA, "estimate"="NO DETECTIONS YET", "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
  }

  WR.surv1 <- WR.surv
  colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
  print(kable(WR.surv1, row.names = F, "html", caption = "3.1 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))

} else if(length(table(detects_benicia$general_location)) == 1){
  if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
    WR.surv <- data.frame("Release"="ALL", "estimate"=round(length(unique(detects_benicia$TagCode))/length(unique(detects_study$TagCode))*100,1),
                          "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)

  } else {
    WR.surv <- data.frame("Release" = NA, "estimate" = "NOT ENOUGH DETECTIONS", "se" = NA, "lcl" = NA, "ucl" = NA, "Detection_efficiency" = NA)
  }

  WR.surv1 <- WR.surv
  colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
  print(kable(WR.surv1, row.names = F, "html", caption = "3.1 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
         kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))

} else {
  # Only do survival to Benicia here
  test3 <- detects_study[which(detects_study$river_km < 53),]

  # calculate mean and SD travel time
  travel <- aggregate(list(first_detect = test3$DateTime_PST),
                      by = list(Release = test3$Release, TagCode = test3$TagCode, RelDT = test3$RelDT),
                      min)
  travel$days <- as.numeric(difftime(travel$first_detect, travel$RelDT, units = "days"))

  travel_final <- aggregate(list(mean_travel_time = travel$days), by = list(Release = travel$Release), mean)
  travel_final <- merge(travel_final, aggregate(list(sd_travel_time = travel$days), by = list(Release = travel$Release), sd))
  travel_final <- merge(travel_final, aggregate(list(n = travel$days), by = list(Release = travel$Release), length))
  travel_final <- rbind(travel_final, data.frame(Release = "ALL", mean_travel_time = mean(travel$days), sd_travel_time = sd(travel$days), n = nrow(travel)))

  # Create inp for survival estimation
  inp <- as.data.frame(reshape2::dcast(test3, TagCode ~ river_km, fun.aggregate = length))

  # Sort columns by river km in descending order
  # Count number of genlocs
  gen_loc_sites <- ncol(inp)-1

  inp  <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
  inp  <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
  inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]

  inp2[is.na(inp2)] <- 0
  inp2[inp2 > 0]    <- 1

  inp    <- cbind(inp, inp2)
  groups <- as.character(sort(unique(inp$Release)))
  groups_w_detects <- names(table(test3$Release))

  inp[,groups] <- 0

  for(i in groups){
    inp[as.character(inp$Release) == i, i] <- 1
  }

  inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
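  ## inp_final is a MARK-style capture history: a leading "1" for the release
  ## occasion, then one digit per receiver group ordered upstream to downstream
  ## (1 = detected there, 0 = not detected).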

  if(length(groups) > 1){
    # Make sure the first factor level is a release with detections; if the first release in factor order has zero detections, the model goes haywire
    inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = inp$Release, stringsAsFactors = F)

    WR.process <- process.data(inp.df, model="CJS", begin.time=1)

    WR.ddl <- make.design.data(WR.process)

    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)

    inp.df <- inp.df[inp.df$rel %in% groups_w_detects,]
    inp.df$rel <- factor(inp.df$rel, levels = groups_w_detects)

    if(length(groups_w_detects) > 1){
      WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
      WR.ddl <- make.design.data(WR.process)
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)

    } else {
      WR.process <- process.data(inp.df, model="CJS", begin.time=1)
      WR.ddl <- make.design.data(WR.process)
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    }

    WR.surv <- cbind(Release = "ALL",round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
    WR.surv.rel <- cbind(Release = groups_w_detects, round(WR.mark.rel$results$real[seq(from=1,to=length(groups_w_detects)*2,by = 2),
                                                                                    c("estimate", "se", "lcl", "ucl")] * 100,1))
    WR.surv.rel <- merge(WR.surv.rel, data.frame(Release = groups), all.y = T)
    WR.surv.rel[is.na(WR.surv.rel$estimate),"estimate"] <- 0
    WR.surv <- rbind(WR.surv, WR.surv.rel)

  } else {
    inp.df      <- data.frame(ch = as.character(inp$inp_final), freq = 1, stringsAsFactors = F)
    WR.process  <- process.data(inp.df, model="CJS", begin.time=1) 
    WR.ddl      <- make.design.data(WR.process)
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    WR.surv     <- cbind(Release = c("ALL", groups),round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
  }

  WR.surv$Detection_efficiency <- NA
  WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
  WR.surv1 <- WR.surv

  colnames(WR.surv1)[1] <- "Release"
  WR.surv1 <- merge(WR.surv1, travel_final, by = "Release", all.x = T)
  WR.surv1$mean_travel_time <- round(WR.surv1$mean_travel_time,1)
  WR.surv1$sd_travel_time <- round(WR.surv1$sd_travel_time,1)
  colnames(WR.surv1) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", 
                          "95% upper C.I.", "Detection efficiency (%)", "Mean time to Benicia (days)", "SD of time to Benicia (days)", "Count")
  #colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")

  print(kable(WR.surv1, row.names = F, "html", caption = "3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model), and travel time") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}
3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model), and travel time

| Release | Survival (%) | SE  | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) | Mean time to Benicia (days) | SD of time to Benicia (days) | Count |
|---------|--------------|-----|----------------|----------------|--------------------------|-----------------------------|------------------------------|-------|
| ALL     | 14.8         | 1.1 | 12.8           | 17.0           | 98.1                     | 20.1                        | 7.3                          | 163   |
| CRC     | 16.4         | 1.8 | 13.1           | 20.3           | NA                       | 18.1                        | 7.8                          | 67    |
| VINO    | 14.6         | 1.9 | 11.3           | 18.7           | NA                       | 21.8                        | 6.2                          | 51    |
| WBW     | 13.0         | 1.8 | 9.9            | 17.0           | NA                       | 21.3                        | 7.3                          | 45    |
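
As a cross-check, the pooled estimate is essentially the raw detection fraction: 163 of the 1,103 fish released (348 + 345 + 410) were detected at Benicia, or 163/1103 ≈ 14.8%, and the CJS adjustment is small because detection efficiency at the east span is estimated near 100% (98.1%).
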
if(exists("benicia")==T & is.numeric(WR.surv1[1,2])){
  # Find mean release time per release group, and ALL
  reltimes <- aggregate(list(RelDT = study_tagcodes$release_time), by = list(Release = study_tagcodes$Release), FUN = mean)
  reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$release_time)))

  # Assign whether the results are tentative or final
  quality <- "tentative"
  if(endtime < as.Date(format(Sys.time(), "%Y-%m-%d"))){
    quality <- "final"
  }

  WR.surv       <- merge(WR.surv, reltimes, by = "Release", all.x = T)
  WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = "1970-01-01")
  benicia$RelDT <- as.POSIXct(benicia$RelDT)

  # remove old benicia record for this studyID
  benicia <- benicia[!benicia$StudyID == unique(detects_study$Study_ID),]
  benicia <- rbind(benicia, data.frame(WR.surv, StudyID = unique(detects_study$Study_ID), data_quality = quality))

  write.csv(benicia, "benicia_surv.csv", row.names = F, quote = F) 
}



4. Detection statistics at all realtime receivers


try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

if(nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  "No detections yet"

} else {
  arrivals <- detects_study %>%
              group_by(general_location, TagCode) %>%
              summarise(DateTime_PST = min(DateTime_PST)) %>%
              arrange(TagCode)

  tag_stats <- arrivals %>%
               group_by(general_location) %>%
               summarise(First_arrival = min(DateTime_PST),
                         Mean_arrival = mean(DateTime_PST),
                         Last_arrival = max(DateTime_PST),
                         Fish_count = length(unique(TagCode))) %>%
               mutate(Percent_arrived = round(Fish_count/nrow(study_tagcodes) * 100,2)) %>%
               dplyr::left_join(., unique(detects_study[,c("general_location", "river_km")])) %>%
               arrange(desc(river_km)) %>%
               mutate(First_arrival = format(First_arrival, tz = "Etc/GMT+8"),
                      Mean_arrival = format(Mean_arrival, tz = "Etc/GMT+8"),
                      Last_arrival = format(Last_arrival, tz = "Etc/GMT+8")) %>%
               na.omit()

  print(kable(tag_stats, row.names = F,
              caption = "4.1 Detections for all releases combined",
              "html") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))

  count <- 0

  for(j in sort(unique(study_tagcodes$Release))){

    if(nrow(detects_study[detects_study$Release == j,]) > 0){
      count <- count + 1
      arrivals1 <- detects_study %>%
                   filter(Release == j) %>%
                   group_by(general_location, TagCode) %>%
                   summarise(DateTime_PST = min(DateTime_PST)) %>%
                   arrange(TagCode)

      rel_count <- nrow(study_tagcodes[study_tagcodes$Release == j,])

      tag_stats1 <- arrivals1 %>%
                    group_by(general_location) %>%
                    summarise(First_arrival = min(DateTime_PST),
                              Mean_arrival = mean(DateTime_PST),
                              Last_arrival = max(DateTime_PST),
                              Fish_count = length(unique(TagCode))) %>%
                    mutate(Percent_arrived = round(Fish_count/rel_count * 100,2)) %>%
                    dplyr::left_join(., unique(detects_study[,c("general_location", "river_km")])) %>%
                    arrange(desc(river_km)) %>%
                    mutate(First_arrival = format(First_arrival, tz = "Etc/GMT+8"),
                           Mean_arrival = format(Mean_arrival, tz = "Etc/GMT+8"),
                           Last_arrival = format(Last_arrival, tz = "Etc/GMT+8")) %>%
                    na.omit()

      final_stats <- kable(tag_stats1, row.names = F,
            caption = paste("4.2.", count, " Detections for ", j, " release groups", sep = ""),
            "html")
      print(kable_styling(final_stats, bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))

    } else {
      cat("\n\n\\pagebreak\n")
      print(paste("No detections for",j,"release group yet", sep=" "), quote = F)
      cat("\n\n\\pagebreak\n")
    }
  }
}
4.1 Detections for all releases combined

| general_location   | First_arrival       | Mean_arrival        | Last_arrival        | Fish_count | Percent_arrived | river_km |
|--------------------|---------------------|---------------------|---------------------|------------|-----------------|----------|
| MeridianBr         | 2023-04-27 17:05:47 | 2023-04-27 17:05:47 | 2023-04-27 17:05:47 | 1          | 0.09            | 290.848  |
| TowerBridge        | 2023-04-24 12:04:05 | 2023-05-08 02:39:46 | 2023-06-06 02:35:59 | 5          | 0.45            | 172.000  |
| I80-50_Br          | 2023-04-24 11:31:01 | 2023-05-11 03:45:08 | 2023-06-06 01:40:13 | 7          | 0.63            | 170.748  |
| Holland_Cut_Quimby | 2023-05-14 10:33:00 | 2023-05-14 10:33:00 | 2023-05-14 10:33:00 | 1          | 0.09            | 145.000  |
| Old_River_Quimby   | 2023-04-28 23:41:50 | 2023-05-04 18:42:01 | 2023-05-10 13:42:13 | 2          | 0.18            | 141.000  |
| Sac_BlwGeorgiana   | 2023-04-29 20:45:00 | 2023-04-29 20:45:00 | 2023-04-29 20:45:00 | 1          | 0.09            | 119.058  |
| Sac_BlwGeorgiana2  | 2023-04-29 20:52:05 | 2023-04-29 20:52:05 | 2023-04-29 20:52:05 | 1          | 0.09            | 118.398  |
| Benicia_east       | 2023-04-27 12:54:33 | 2023-05-18 06:46:14 | 2023-06-11 03:52:49 | 160        | 14.49           | 52.240   |
| Benicia_west       | 2023-04-27 12:58:13 | 2023-05-18 08:29:34 | 2023-06-11 04:03:55 | 157        | 14.22           | 52.040   |

4.2.1 Detections for CRC release group

| general_location   | First_arrival       | Mean_arrival        | Last_arrival        | Fish_count | Percent_arrived | river_km |
|--------------------|---------------------|---------------------|---------------------|------------|-----------------|----------|
| MeridianBr         | 2023-04-27 17:05:47 | 2023-04-27 17:05:47 | 2023-04-27 17:05:47 | 1          | 0.24            | 290.848  |
| TowerBridge        | 2023-04-24 12:04:05 | 2023-04-30 20:40:43 | 2023-05-13 13:02:22 | 4          | 0.98            | 172.000  |
| I80-50_Br          | 2023-04-24 11:31:01 | 2023-04-30 19:47:58 | 2023-05-13 12:06:57 | 4          | 0.98            | 170.748  |
| Holland_Cut_Quimby | 2023-05-14 10:33:00 | 2023-05-14 10:33:00 | 2023-05-14 10:33:00 | 1          | 0.24            | 145.000  |
| Old_River_Quimby   | 2023-04-28 23:41:50 | 2023-04-28 23:41:50 | 2023-04-28 23:41:50 | 1          | 0.24            | 141.000  |
| Benicia_east       | 2023-04-28 14:36:08 | 2023-05-15 18:20:17 | 2023-06-06 10:24:45 | 65         | 15.85           | 52.240   |
| Benicia_west       | 2023-04-28 14:43:37 | 2023-05-16 03:48:26 | 2023-06-06 10:25:31 | 63         | 15.37           | 52.040   |

4.2.2 Detections for VINO release group

| general_location   | First_arrival       | Mean_arrival        | Last_arrival        | Fish_count | Percent_arrived | river_km |
|--------------------|---------------------|---------------------|---------------------|------------|-----------------|----------|
| TowerBridge        | 2023-06-06 02:35:59 | 2023-06-06 02:35:59 | 2023-06-06 02:35:59 | 1          | 0.29            | 172.000  |
| I80-50_Br          | 2023-06-06 01:40:13 | 2023-06-06 01:40:13 | 2023-06-06 01:40:13 | 1          | 0.29            | 170.748  |
| Benicia_east       | 2023-04-27 12:54:33 | 2023-05-20 12:39:34 | 2023-06-06 06:54:59 | 51         | 14.61           | 52.240   |
| Benicia_west       | 2023-04-27 12:58:13 | 2023-05-20 09:34:11 | 2023-06-06 06:56:20 | 49         | 14.04           | 52.040   |

4.2.3 Detections for WBW release group

| general_location   | First_arrival       | Mean_arrival        | Last_arrival        | Fish_count | Percent_arrived | river_km |
|--------------------|---------------------|---------------------|---------------------|------------|-----------------|----------|
| I80-50_Br          | 2023-05-14 12:20:45 | 2023-05-18 20:41:57 | 2023-05-23 05:03:10 | 2          | 0.58            | 170.748  |
| Old_River_Quimby   | 2023-05-10 13:42:13 | 2023-05-10 13:42:13 | 2023-05-10 13:42:13 | 1          | 0.29            | 141.000  |
| Sac_BlwGeorgiana   | 2023-04-29 20:45:00 | 2023-04-29 20:45:00 | 2023-04-29 20:45:00 | 1          | 0.29            | 119.058  |
| Sac_BlwGeorgiana2  | 2023-04-29 20:52:05 | 2023-04-29 20:52:05 | 2023-04-29 20:52:05 | 1          | 0.29            | 118.398  |
| Benicia_east       | 2023-04-28 12:29:55 | 2023-05-19 09:35:00 | 2023-06-11 03:52:49 | 44         | 12.75           | 52.240   |
| Benicia_west       | 2023-04-28 12:34:27 | 2023-05-19 04:48:47 | 2023-06-11 04:03:55 | 45         | 13.04           | 52.040   |


try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

# THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if(nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  "No detections yet"

} else {
  arrivals <- detects_study %>%
              group_by(general_location, TagCode) %>%
              summarise(DateTime_PST = min(DateTime_PST)) %>%
              mutate(day = as.Date(DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8"))

  gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)

  beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F) %>%
                   mutate(day = as.Date(day)) %>%
                   filter(TagCode == beacon) %>% # Now subset to only look at data for the correct beacon for that day
                   filter(day >= as.Date(min(study_tagcodes$release_time)) & 
                          day <= endtime) %>% # Now only keep beacon by day for days since fish were released
                   dplyr::left_join(., gen_locs[,c("location", "general_location","rkm")], by = "location")

  arrivals_per_day <- arrivals %>%
                      group_by(day, general_location) %>%
                      summarise(New_arrivals = length(TagCode)) %>%
                      arrange(general_location) %>% na.omit() %>%
                      mutate(day = as.Date(day)) %>%
                      dplyr::left_join(unique(beacon_by_day[,c("general_location", "day", "rkm")]),
                                       ., by = c("general_location", "day")) %>%
                      arrange(general_location, day) %>%
                      mutate(day = factor(day)) %>%
                      filter(general_location != "Bench_test") %>% # Remove bench test and other NA locations
                      filter(!(is.na(general_location))) %>%
                      arrange(desc(rkm)) %>% # Change order of data to plot decreasing river_km
                      mutate(general_location = factor(general_location, unique(general_location)))

  endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
                 max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))

  crosstab <- xtabs(formula = arrivals_per_day$New_arrivals ~ arrivals_per_day$day + arrivals_per_day$general_location,
                    addNA =T)
  crosstab[is.na(crosstab)] <- ""
  crosstab[crosstab==0] <- NA
  crosstab <- as.data.frame.matrix(crosstab)

  kable(crosstab, align = "c", caption = "4.3 Fish arrivals per day (\"NA\" means receivers were non-operational)") %>%
    kable_styling(c("striped", "condensed"), font_size = 11, full_width = F, position = "left", fixed_thead = TRUE) %>%
    column_spec(column = 1:ncol(crosstab),width_min = "50px",border_left = T, border_right = T) %>%
    column_spec(1, bold = T, width_min = "75px")%>%
    scroll_box(height = "700px")
}
4.3 Fish arrivals per day ("NA" means receivers were non-operational)

[Table: daily counts of new tag arrivals per receiver site, one row per day from 2023-04-19 onward; columns: Blw_Salt_RT, MeridianBr, TowerBridge, I80-50_Br, MiddleRiver, Clifton_Court_US_Radial_Gates, Holland_Cut_Quimby, CVP_Tank, CVP_Trash_Rack_1, Clifton_Court_Intake_Canal, Old_River_Quimby, Sac_BlwGeorgiana, Sac_BlwGeorgiana2, Benicia_east, Benicia_west. Nearly all arrivals occurred at Benicia_east/Benicia_west between 2023-04-27 and 2023-06-04.]

# Clear the workspace; RMark's cleanup() deletes the temporary MARK files created by mark()
rm(list = ls())
cleanup(ask = F)



For questions or comments, please contact