try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
library(knitr)
library(kableExtra)
library(lubridate)
library(data.table)
library(ggplot2)
library(RMark)
library(scales)
library(viridis)
library(forcats)
library(reshape2)
library(png)
library(dataRetrieval)
library(rerddap)
library(plotly)
library(dplyr)
study <- "Georg_Sl_Barrier_2025_Feb"
detects_study <- fread("study_detections.csv", stringsAsFactors = F,
                       colClasses = c(DateTime_PST = "character", RelDT = "character")) %>%
   filter(Study_ID == study) %>%
   mutate(DateTime_PST = as.POSIXct(DateTime_PST, format = "%Y-%m-%d %H:%M:%S", tz="Etc/GMT+8"),
          release_time = as.POSIXct(RelDT, format = "%Y-%m-%d %H:%M:%S", tz="Etc/GMT+8")) %>%
   rename(., weight=Weight, length=Length, release_rkm=Rel_rkm, release_location=Rel_loc, river_km=rkm)
latest <- read.csv("latest_download.csv", stringsAsFactors = F)$x
##################################################################################################################
#### TO RUN THE FOLLOWING CODE CHUNKS FROM HERE ON DOWN USING R ERDDAP, UN-COMMENT THESE NEXT 9 LINES OF CODE ####
##################################################################################################################
# cache_delete_all()
# query = paste('&','Study_ID','="', study, '"', sep = '')
# datafile=URLencode(paste("https://oceanview.pfeg.noaa.gov/erddap/tabledap/","FEDcalFishTrack",".csv?",query,sep = ""))
# options(url.method = "libcurl", download.file.method = "libcurl", timeout = 180)
# detects_study <- data.frame(read.csv(datafile,row.names = NULL, stringsAsFactors = F))
# detects_study <- detects_study[-1,]
# detects_study$DateTime_PST <- as.POSIXct(detects_study$local_time, format = "%Y-%m-%d %H:%M:%S", "Etc/GMT+8")
# detects_study$release_time <- as.POSIXct(detects_study$release_time, format = "%Y-%m-%d %H:%M:%S", "Etc/GMT+8")
# detects_study$river_km <- as.numeric(detects_study$river_km)
##################################################################################################################
Study is complete, all tags are no longer active as of 2025-05-31. All times in Pacific Standard Time.
if (nrow(detects_study) == 0){
   cat("Study has not yet begun  ")
} else {
   if (min(detects_study$release_time) > Sys.time()){
      cat("Study has not yet begun, below data is a placeholder:  ")
   }
   if (min(detects_study$release_time) < Sys.time()){
      cat(paste("Study began on ", min(detects_study$release_time), ", see tagging details below:", sep = ""))
   }
   ######################################
   #### RELEASE GROUPS ASSIGNED HERE ####
   ######################################
   detects_study$Release <- detects_study$release_location
   study_tagcodes <- as.data.frame(unique(detects_study[,c("TagCode", "release_time", "weight", "length", "release_rkm",
                                                        "release_location", "Release","Rel_latitude","Rel_longitude")]))
   release_stats <- study_tagcodes %>%
      group_by(Release) %>%
      summarise(First_release_time = min(release_time),
                Last_release_time = max(release_time),
                Number_fish_released = length(unique(TagCode)),
                Release_location = head(release_location, 1),
                Release_rkm = head(release_rkm,1),
                Mean_length = mean(length, na.rm=T),
                Mean_weight = mean(weight, na.rm=T),
                Release_lat = head(Rel_latitude,1),
                Release_lon = head(Rel_longitude,1)) %>%
      mutate(Mean_length = round(Mean_length, 1),
             Mean_weight = round(Mean_weight, 1),
             First_release_time = format(First_release_time, tz = "Etc/GMT+8"),
             Last_release_time = format(Last_release_time, tz = "Etc/GMT+8")) %>%
      arrange(First_release_time)
   release_stats_all <- study_tagcodes %>%
     summarise(First_release_time = min(release_time),
               Last_release_time = max(release_time),
               Number_fish_released = length(unique(TagCode)),
               Release_location = NA,
               Release_rkm = mean(release_rkm,na.rm = T),
               Mean_length = mean(length, na.rm=T),
               Mean_weight = mean(weight, na.rm=T),
               Release_lat = head(Rel_latitude,1),
               Release_lon = head(Rel_longitude,1)) %>%
     mutate(Mean_length = round(Mean_length, 1),
            Mean_weight = round(Mean_weight, 1),
            Release_rkm = round(Release_rkm,1),
            First_release_time = format(First_release_time, tz = "Etc/GMT+8"),
            Last_release_time = format(Last_release_time, tz = "Etc/GMT+8"))
   release_stats <- rbind(release_stats, data.frame(Release = "ALL", release_stats_all))
kable(release_stats[,!names(release_stats)%in% c("Release_lon","Release_lat","release_location")], format = "html", row.names = F) %>%
  kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left")
}
| Release | First_release_time | Last_release_time | Number_fish_released | Release_location | Release_rkm | Mean_length | Mean_weight |
|---|---|---|---|---|---|---|---|
| Sacramento_Miller_Park | 2025-02-12 17:04:00 | 2025-02-14 15:40:00 | 240 | Sacramento_Miller_Park | 162.6 | 168.4 | 56.4 | 
| ALL | 2025-02-12 17:04:00 | 2025-02-14 15:40:00 | 240 | NA | 162.6 | 168.4 | 56.4 | 
library(leaflet)
library(maps)
library(htmlwidgets)
library(leaflet.extras)
library(dplyr)
library(dbplyr)
library(DBI)
library(odbc)
library(data.table)
# Create connection with cloud database
con <- dbConnect(odbc(),
                Driver = "SQL Server",
                Server = "calfishtrack-server.database.windows.net",
                Database = "realtime_detections",
                UID = "realtime_user",
                PWD = "Pass@123",
                Port = 1433)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
## THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
   cat("No detections yet")
   # Use dbplyr to load realtime_locs and qryHexCodes sql table
   gen_locs <- tbl(con, "realtime_locs") %>% collect()
   # gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F) %>% filter(is.na(stop))
   leaflet(data = gen_locs[is.na(gen_locs$stop),]) %>%
       # setView(-72.14600, 43.82977, zoom = 8) %>%
       addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
       addProviderTiles("Esri.WorldImagery", group = "Satellite") %>% 
       addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
       # Marker data are from the sites data frame. We need the ~ symbols
       # to indicate the columns of the data frame.
       addMarkers(~longitude, ~latitude, label = ~general_location, group = "Receiver Sites", popup = ~location) %>% 
       # addAwesomeMarkers(~lon_dd, ~lat_dd, label = ~locality, group = "Sites", icon=icons) %>%
       addScaleBar(position = "bottomleft") %>%
          addLayersControl(
          baseGroups = c("Street Map", "Satellite", "Relief"),
          overlayGroups = c("Receiver Sites"),
          options = layersControlOptions(collapsed = FALSE)) %>%
          addSearchFeatures(targetGroups = c("Receiver Sites"))
} else {
   # Use dbplyr to load realtime_locs and qryHexCodes sql table
   gen_locs <- tbl(con, "realtime_locs") %>% collect()
   # gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
   endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
                  max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
   beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F) %>%
      mutate(day = as.Date(day)) %>%
      # Subset to only look at data for the correct beacon for that day
      filter(TagCode == beacon)  %>% 
      # Only keep beacon by day for days since fish were released
      filter(day >= as.Date(min(study_tagcodes$release_time)) & day <= endtime) %>%
      dplyr::left_join(., gen_locs[,c("location", "general_location","rkm")], by = "location")
   arrivals_per_day <- detects_study %>%
      group_by(general_location, TagCode) %>%
      summarise(DateTime_PST = min(DateTime_PST, na.rm = T)) %>%
      arrange(TagCode, general_location) %>%
      mutate(day = as.Date(DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8")) %>%
      group_by(day, general_location) %>%
      summarise(New_arrivals = length(TagCode)) %>%
      na.omit() %>%
      mutate(day = as.Date(day)) %>%
      dplyr::left_join(unique(beacon_by_day[,c("general_location", "day", "rkm")]), ., 
                       by = c("general_location", "day")) %>%
      arrange(general_location, day) %>%
      mutate(day = as.factor(day)) %>%
      filter(general_location != "Bench_test") %>% # Remove bench test
      filter(!(is.na(general_location))) # Remove NA locations
   ## Remove sites that were not operational the whole time
   #### FOR THE SEASONAL SURVIVAL PAGE, KEEP ALL SITES SINCE PEOPLE WANT TO SEE DETECTIONS OF LATER FISH AT NEWLY 
   #### DEPLOYED SPOTS
   gen_locs_days_in_oper <- arrivals_per_day %>%
      group_by(general_location) %>%
      summarise(days_in_oper = length(day))
   #gen_locs_days_in_oper <- gen_locs_days_in_oper[gen_locs_days_in_oper$days_in_oper ==
   #                                               max(gen_locs_days_in_oper$days_in_oper),]
   arrivals_per_day_in_oper <- arrivals_per_day %>%
      filter(general_location %in% gen_locs_days_in_oper$general_location)
   fish_per_site <- arrivals_per_day_in_oper %>%
      group_by(general_location) %>%
      summarise(fish_count = sum(New_arrivals, na.rm=T))
   gen_locs_mean_coords <- gen_locs %>%
      filter(is.na(stop) & general_location %in% fish_per_site$general_location) %>%
      group_by(general_location) %>%
      summarise(latitude = mean(latitude), # estimate mean lat and lons for each genloc
                longitude = mean(longitude))
   fish_per_site <- merge(fish_per_site, gen_locs_mean_coords)
   release_stats_agg <- aggregate(cbind(Release_lon, Release_lat) ~ Release_location, data = release_stats[release_stats$Release != "ALL",], FUN = mean)
   release_stats_agg <- merge(release_stats_agg, aggregate(Number_fish_released ~ Release_location, data = release_stats[release_stats$Release != "ALL",], FUN = sum))
   if(!is.na(release_stats$Release_lat[1])){
     leaflet(data = fish_per_site) %>%
       addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
       addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
       addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
       # Marker data are from the sites data frame. We need the ~ symbols
       # to indicate the columns of the data frame.
       addPulseMarkers(data = fish_per_site[seq(from = 1, to = nrow(fish_per_site), by = 2),], lng = ~longitude, lat = ~latitude, label = ~fish_count, 
                       labelOptions = labelOptions(noHide = T, direction = "left", textsize = "15px"), group = "Receiver Sites",
                       popup = ~general_location, icon = makePulseIcon(heartbeat = 1.3)) %>%
       addPulseMarkers(data = fish_per_site[seq(from = 2, to = nrow(fish_per_site), by = 2),], lng = ~longitude, lat = ~latitude, label = ~fish_count, 
                       labelOptions = labelOptions(noHide = T, direction = "right", textsize = "15px"), group = "Receiver Sites",
                       popup = ~general_location, icon = makePulseIcon(heartbeat = 1.3)) %>%
       addCircleMarkers(data = release_stats_agg, ~Release_lon, ~Release_lat, label = ~Number_fish_released, stroke = F, color = "blue", fillOpacity = 1, 
                        group = "Release Sites", popup = ~Release_location, labelOptions = labelOptions(noHide = T, textsize = "15px")) %>%
       addScaleBar(position = "bottomleft") %>%
       addLegend("bottomright", labels = c("Receivers", "Release locations"), colors = c("red","blue")) %>%
       addLayersControl(baseGroups = c("Street Map", "Satellite", "Relief"), options = layersControlOptions(collapsed = FALSE))
   } else {
     leaflet(data = fish_per_site) %>%
       addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
       addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
       addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
       # Marker data are from the sites data frame. We need the ~ symbols
       # to indicate the columns of the data frame.
       addPulseMarkers(lng = fish_per_site$longitude, lat = fish_per_site$latitude, label = ~fish_count, 
                       labelOptions = labelOptions(noHide = T, textsize = "15px"), group = "Receiver Sites",
                       popup = ~general_location, icon = makePulseIcon(heartbeat = 1.3)) %>%
       addScaleBar(position = "bottomleft") %>%
       addLayersControl(baseGroups = c("Street Map", "Satellite", "Relief"),
                        options = layersControlOptions(collapsed = FALSE))
   }
}
2.1 Map of unique fish detections at operational realtime detection locations
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) > 0){
   detects_study <- detects_study[order(detects_study$TagCode, detects_study$DateTime_PST),]
   ## For each detection, record the general_location of the previous detection (by tag).
   detects_study$prev_genloc <- shift(detects_study$general_location, fill = NA, type = "lag")
   #detects_study$prev_genloc <- shift(detects_study$General_Location, fill = NA, type = "lag")
   ## Set prev_genloc to NA when the previous detection belongs to a different tagcode or a different genloc
   detects_study[which(detects_study$TagCode != shift(detects_study$TagCode, fill = NA, type = "lag")), "prev_genloc"] <- NA
   detects_study[which(detects_study$general_location != detects_study$prev_genloc), "prev_genloc"] <- NA
   detects_study$mov_score <- 0
   detects_study[is.na(detects_study$prev_genloc), "mov_score"] <- 1
   detects_study$mov_counter <- cumsum(detects_study$mov_score)
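   ## Illustrative sketch (hypothetical tags/sites) of how mov_score/mov_counter group
   ## consecutive detections at the same genloc into one "visit":
   ##   TagCode  general_location  prev_genloc  mov_score  mov_counter
   ##   T1       SiteA             NA           1          1
   ##   T1       SiteA             SiteA        0          1
   ##   T1       SiteB             NA           1          2
   ##   T2       SiteB             NA           1          3
   ## Each distinct mov_counter value then collapses to a single first_detect row below.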
   detects_summary <- aggregate(list(first_detect = detects_study$DateTime_PST), by = list(TagCode = detects_study$TagCode, release_time = detects_study$release_time, mov_counter = detects_study$mov_counter ,general_location = detects_study$general_location, river_km = detects_study$river_km, release_rkm = detects_study$release_rkm), min)
   detects_summary <- detects_summary[is.na(detects_summary$first_detect) == F,]
   releases <- aggregate(list(first_detect = detects_study$release_time), by = list(TagCode = detects_study$TagCode, release_time = detects_study$release_time, release_location=detects_study$release_location, release_rkm = detects_study$release_rkm), min)
   releases$river_km <- releases$release_rkm
   releases$mov_counter <- NA
   releases$general_location <- paste(releases$release_location,"_RELEASE",sep = "")
   releases$release_location <- NULL
   detects_summary <- rbindlist(list(detects_summary, releases), use.names = T)
   detects_summary <- detects_summary[order(detects_summary$TagCode, detects_summary$first_detect),]
   starttime <- as.Date(min(detects_study$release_time), "Etc/GMT+8")
   ## Endtime should be either now, or end of predicted tag life, whichever comes first
   endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d"))+1, max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
   #par(mar=c(6, 5, 2, 5) + 0.1)
   plot_ly(detects_summary, x = ~first_detect, y = ~river_km, color = ~TagCode, width = 900, height = 600, dynamicTicks = TRUE, connectgaps = TRUE, mode = "lines+markers", type = "scatter",hoverinfo = "text",
      text = ~paste("</br> TagCode: ", TagCode,
                    "</br> Arrival: ", first_detect,
                    "</br> Location: ", general_location)) %>%
      layout(showlegend = T, 
         xaxis = list(title = "<b> Date <b>", mirror=T,ticks="outside",showline=T, range=c(starttime,endtime)),
          yaxis = list(title = "<b> Kilometers upstream of the Golden Gate <b>", mirror=T,ticks="outside",showline=T, range=c(max(detects_study$release_rkm)+10, min(gen_locs[is.na(gen_locs$stop),"rkm"])-10)),
         legend = list(title=list(text='<b> Tag Code </b>')),
         margin=list(l = 50,r = 100,b = 50,t = 50)
   )
}else{
   plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Kilometers upstream of the Golden Gate")
   text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
}
2.2 Waterfall Detection Plot
 _______________________________________________________________________________________________________ 
library(tidyr)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_4 <- detects_study %>% filter(general_location == "Benicia_west" | general_location == "Benicia_east")
if(nrow(detects_4) == 0){
   plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
   text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
  detects_4 <- detects_4 %>%
    dplyr::left_join(., detects_4 %>%
                        group_by(TagCode) %>% 
                        summarise(first_detect = min(DateTime_PST))) %>%
                        mutate(Day = as.Date(as.Date(first_detect, "Etc/GMT+8")))
  starttime <- as.Date(min(detects_4$release_time), "Etc/GMT+8")
  # Endtime should be either now, or end of predicted tag life, whichever comes first
  endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
                 max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
  rels            <- unique(study_tagcodes$Release)
  rel_num         <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_4$Release))])
  tagcount1 <- detects_4 %>%
               group_by(Day, Release) %>%
               summarise(unique_tags = length(unique(TagCode))) %>%
               spread(Release, unique_tags)
  daterange1 <- merge(daterange, tagcount1, all.x=T)
  daterange1[is.na(daterange1)] <- 0
  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == "x"] <- paste(i)
    }
  }
  ## reorder columns in alphabetical order so the coloring in barplots is consistent
  daterange1 <- daterange1[,order(colnames(daterange1))]
  daterange2 <- daterange1
  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL
  par(mar=c(6, 5, 2, 5) + 0.1)
  daterange2$Date <- as.Date(row.names(daterange2))
  daterange3      <- melt(daterange2, id.vars = "Date", variable.name = ".")
  daterange3$.    <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
  ay <- list(
    overlaying = "y",
    nticks = 5,
    color = "#947FFF",
    side = "right",
    title = "Flow (cfs) at Verona",
    automargin = TRUE
  )
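  # OPTIONAL SKETCH (not run): the secondary y-axis above is set up for flow at Verona, but no
  # flow trace is added in this chunk. One way to add it, assuming USGS gauge 11425500
  # (Sacramento R at Verona) and the dataRetrieval package loaded above, would be roughly:
  # flow <- readNWISuv(siteNumbers = "11425500", parameterCd = "00060",
  #                    startDate = as.character(starttime), endDate = as.character(endtime))
  # flow$Date <- as.Date(flow$dateTime)
  # flow_daily <- aggregate(X_00060_00000 ~ Date, data = flow, FUN = mean) # column name may differ
  # # ...then pipe an extra trace into the plot_ly call below:
  # # add_lines(data = flow_daily, x = ~Date, y = ~X_00060_00000, yaxis = "y2", name = "Flow (cfs) at Verona")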
  plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
          add_bars(x = ~Date, y = ~value, color = ~.) %>%
          add_annotations(text="Release (click on legend<br> items to isolate)", xref="paper", yref="paper",
                          x=0.01, xanchor="left",
                          y=1.02, yanchor="bottom",    # Same y as legend below
                          legendtitle=TRUE, showarrow=FALSE ) %>%
          layout(yaxis2 = ay,showlegend = T, 
          barmode = "stack",
          xaxis = list(title = "Date", mirror=T,ticks="outside",showline=T), 
          yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks="outside",showline=T),
          legend = list(orientation = "h",x = 0.34, y = 1.01, yanchor="bottom", xanchor="left"),
          margin=list(l = 50,r = 100,b = 50,t = 50))
}
2.3 Detections at Benicia Bridge for duration of tag life
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
route_results_possible <- FALSE
if(nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  results_short <- data.frame("Measure"=NA, "Estimate"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA,
                              "95% upper C.I."=NA)
  colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
  print(kable(results_short, row.names = F, "html",
              caption = "3.1 Reach-specific survival and probability of entering Georgiana Slough") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"),
                        full_width = F, position = "left"))
} else {
  # Only do survival to Georg split for now
  test2 <- detects_study %>%
           filter(general_location %in% c("SacRiverWalnutGrove_2", "Sac_BlwGeorgiana",
                                          "Sac_BlwGeorgiana2", "Georg_Sl_1", "Georgiana_Slough2"))
  # We can only do a multi-state model if there is at least one detection in each route
  if(nrow(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2"),]) == 0 |
     nrow(test2[test2$general_location %in% c("Georg_Sl_1", "Georgiana_Slough2"),]) == 0){
    results_short <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA,
                                "95% upper C.I."=NA)
    colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
    print(kable(results_short, row.names = F, "html",
                caption = "3.1 Reach-specific survival and probability of entering Georgiana Slough") %>%
            kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"),
                          full_width = F, position = "left"))
  } else {
    # Make tagcode character
    study_tagcodes$TagCode <- as.character(study_tagcodes$TagCode)
    # Make a crosstab query with frequencies for all tag/location combination
    test2$general_location <- factor(test2$general_location,
                                     levels = c("SacRiverWalnutGrove_2", "Sac_BlwGeorgiana",
                                                "Sac_BlwGeorgiana2", "Georg_Sl_1", "Georgiana_Slough2"))
    test2$TagCode <- factor(test2$TagCode, levels = study_tagcodes$TagCode)
    mytable <- table(test2$TagCode, test2$general_location) # A will be rows, B will be columns
    # Change all frequencies greater than 0 to "A". Here you could raise the minimum cutoff to 2 detections
    # by first adding a command that changes all cells equal to 1 back to 0
    mytable[mytable>0] <- "A"
    # Order in order of rkm
    mytable2 <- mytable[, c("SacRiverWalnutGrove_2", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2",
                            "Georg_Sl_1", "Georgiana_Slough2")]
    # Now sort the crosstab rows alphabetically
    mytable2 <- mytable2[order(row.names(mytable2)),]
    mytable2[which(mytable2[, "Sac_BlwGeorgiana"]=="A"), "Sac_BlwGeorgiana"]   <- "A"
    mytable2[which(mytable2[, "Sac_BlwGeorgiana2"]=="A"), "Sac_BlwGeorgiana2"] <- "A"
    mytable2[which(mytable2[, "Georg_Sl_1"]=="A"), "Georg_Sl_1"] <- "B"
    mytable2[which(mytable2[, "Georgiana_Slough2"]=="A"), "Georgiana_Slough2"] <- "B"
    # Now order the study_tagcodes table the same way
    study_tagcodes <- study_tagcodes[order(study_tagcodes$TagCode),]
    # Paste together (concatenate) the data from each column of the crosstab into one string per row, add to tagging_meta.
    # For this step, make sure both are sorted by FishID
    study_tagcodes$inp_part1 <- mytable2[,1]
    study_tagcodes$inp_partA <- apply(mytable2[,2:3],1,paste,collapse="")
    study_tagcodes$inp_partB <- apply(mytable2[,4:5],1,paste,collapse="")
    # find last detection at each genloc
    departure <- aggregate(list(depart = test2$DateTime_PST), by = list(TagCode = test2$TagCode, last_location = test2$general_location), FUN = max)
    # subset for just juncture locations
    departure <- departure[departure$last_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georg_Sl_1", "Georgiana_Slough2"),]
    # Find genloc of last known detection per tag
    last_depart    <- aggregate(list(depart = departure$depart), by = list(TagCode = departure$TagCode), FUN = max)
    last_depart1   <- merge(last_depart, departure)
    study_tagcodes <- merge(study_tagcodes, last_depart1[,c("TagCode", "last_location")], by = "TagCode", all.x = T)
    # Assume that the Sac is default pathway, and for fish that were detected in neither route, it would get a "00" in inp so does not matter anyway
    study_tagcodes$inp_final <- paste("A",study_tagcodes$inp_part1, study_tagcodes$inp_partA," 1 ;", sep = "")
    # now put in exceptions...fish that were seen in georgiana last
    study_tagcodes[study_tagcodes$last_location %in% c("Georg_Sl_1", "Georgiana_Slough2"), "inp_final"] <-
       paste("A",
             study_tagcodes[study_tagcodes$last_location %in% c("Georg_Sl_1", "Georgiana_Slough2"), "inp_part1"],
             study_tagcodes[study_tagcodes$last_location %in% c("Georg_Sl_1", "Georgiana_Slough2"), "inp_partB"],
             " 1 ;", 
             sep = "")
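     # For illustration (assuming the construction above): a fish detected at Walnut Grove and
     # both Sac_BlwGeorgiana lines gets inp_final "AAAA 1 ;"; a fish last seen in Georgiana
     # Slough gets "AABB 1 ;"; a fish never detected after release gets "A000 1 ;"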
    # At this point, some fish might not have been deemed to ever take a route based on last visit analysis. If so, model cannot be run
    if(any(grepl(pattern = "A", study_tagcodes$inp_final)==T) & any(grepl(pattern = "B", study_tagcodes$inp_final)==T)){
      write.table(study_tagcodes$inp_final,"WRinp_multistate.inp",row.names = F, col.names = F, quote = F)
      WRinp <- convert.inp("WRinp_multistate.inp")
      dp <- process.data(WRinp, model="Multistrata") 
      ddl <- make.design.data(dp)
      #### p ####
      # Detections cannot occur in stratum B at occasion 2 (Walnut Grove); the second Georgiana line (occasion 4) is currently missing, so fix both to 0, and fix the first Georgiana line (occasion 3) to 1
      ddl$p$fix = NA
      ddl$p$fix[ddl$p$stratum == "B" & ddl$p$time %in% c(2,4)] = 0
      ddl$p$fix[ddl$p$stratum == "B" & ddl$p$time %in% c(3)] = 1
      #### Psi ####
      # Only one transition is allowed: from A (Sacramento) into B (Georgiana Slough)
      ddl$Psi$fix = 0
      # The A-to-B transition can only happen in the interval leaving Walnut Grove (time 2)
      ddl$Psi$fix[ddl$Psi$stratum == "A"&
                  ddl$Psi$tostratum == "B" & 
                  ddl$Psi$time == 2] = NA
      #### Phi a.k.a. S ####
      ddl$S$fix = NA
      # No fish are in stratum B for reaches 1-2, and survival between the two Georgiana lines (reach 3) is assumed perfect, so fix all three to 1
      ddl$S$fix[ddl$S$stratum == "B" & ddl$S$time %in% c(1,2,3)] = 1
      # For route A, fix survival between the two Blw_Georgiana lines (reach 3) to 1
      ddl$S$fix[ddl$S$stratum == "A" & ddl$S$time == 3] = 1
      # We use -1 at beginning of formula to remove intercept. This is because different routes probably should not share the same intercept
      p.timexstratum   = list(formula=~-1+stratum:time)
      Psi.stratumxtime = list(formula=~-1+stratum:time)
      S.stratumxtime   = list(formula=~-1+stratum:time)
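      # With ~-1+stratum:time, each stratum-by-occasion cell gets its own real parameter
      # (e.g., rows like "S sA g1 c1 a0 o1 t1" and the corresponding "S sB ..." rows in results$real),
      # rather than routes A and B sharing a common intercept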
      # Run model a first time
      S.timexstratum.p.timexstratum.Psi.timexstratum = mark(dp, ddl,
                                                            model.parameters = list(S = S.stratumxtime,p = p.timexstratum,Psi = Psi.stratumxtime),
                                                            realvcv = T, silent = T, output = F)
      # Identify any non-fixed parameter estimates at the 0 or 1 boundary, which likely have unreliable SE estimates
      profile.intervals <- which(S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$estimate %in% c(0,1) &
                                 !S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$fixed == "Fixed")
      # Rerun model using profile interval estimation for the tricky parameters
      S.timexstratum.p.timexstratum.Psi.timexstratum = mark(dp, ddl,
                                                            model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime),
                                                            realvcv = T, profile.int = profile.intervals, silent = T, output = F)
      results <- S.timexstratum.p.timexstratum.Psi.timexstratum$results$real
      results_short <- results[rownames(results) %in% c("S sA g1 c1 a0 o1 t1",
                                                        "S sA g1 c1 a1 o2 t2",
                                                        "p sA g1 c1 a1 o1 t2",
                                                        "p sA g1 c1 a2 o2 t3",
                                                        "p sB g1 c1 a4 o4 t3",
                                                        "Psi sA toB g1 c1 a1 o2 t2"),]
      results_short <- round(results_short[,c("estimate", "se", "lcl", "ucl")] * 100,1)
      # Now find estimate and CIs for AtoA route at junction
      Psilist   = get.real(S.timexstratum.p.timexstratum.Psi.timexstratum,"Psi",vcv=TRUE)
      Psivalues = Psilist$estimates
      routes <- TransitionMatrix(Psivalues[Psivalues$time==2 & Psivalues$cohort==1,],vcv.real=Psilist$vcv.real)
      results_short$Measure <- c("% arrived from Release to Walnut Grove (not survival because
                                 fish may have taken Sutter/Steam)","Survival from Walnut Grove to Georgiana Slough confluence",
                                 "Detection probability at Walnut Grove", "Detection probability at Blw_Georgiana", 
                                 #"Detection probability at Georgiana Slough (fixed at 1)",
                                 "Routing probability into Georgiana Slough (Conditional on fish arriving to junction)")
      results_short <- results_short[,c("Measure", "estimate", "se", "lcl", "ucl")]
      colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
      print(kable(results_short, row.names = F, "html", caption = "3.1 Reach-specific survival and probability of entering Georgiana Slough") %>%
              kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
      route_results_possible <- TRUE
    } else {
      results_short <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
      colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
      print(kable(results_short, row.names = F, "html", caption = "3.1 Reach-specific survival and probability of entering Georgiana Slough") %>%
              kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
    }
  }
}
3.1 Reach-specific survival and probability of entering Georgiana Slough
| Measure | Estimate | SE | 95% lower C.I. | 95% upper C.I. |
|---|---|---|---|---|
| % arrived from Release to Walnut Grove (not survival because fish may have taken Sutter/Steam) | 60.0 | 3.2 | 53.7 | 66.0 | 
| Survival from Walnut Grove to Georgiana Slough confluence | 100.0 | 0.0 | 98.7 | 100.0 | 
| Detection probability at Walnut Grove | 100.0 | 0.0 | 98.7 | 100.0 | 
| Detection probability at Blw_Georgiana | 97.2 | 1.6 | 91.7 | 99.1 | 
| Routing probability into Georgiana Slough (Conditional on fish arriving to junction) | 25.7 | 3.6 | 19.2 | 33.4 | 
# If you do not have access to local files, uncomment and run next lines of code
#download.file("https://raw.githubusercontent.com/CalFishTrack/real-time/master/data/georg.png",destfile = "georg.png", quiet = T, mode = "wb")
georg <- readPNG("georg.png")
par(mar = c(2,0,0,0))
# Set up the plot area
plot(1:2, type="n", xlab="", ylab="", xaxt = "n", yaxt = "n")
# Get the plot information so the image will fill the plot box, and draw it
lim <- par()
rasterImage(georg, lim$usr[1], lim$usr[3], lim$usr[2], lim$usr[4])
if(nrow(detects_study[is.na(detects_study$DateTime_PST) == F,]) == 0){
    legend(x = 1.55,y = 1.6,legend =  "No detections yet",col = "white", box.col = "light gray", bg = "light gray") 
    legend(x = 1.55,y = 1.45,legend =  "No detections yet",col = "white", box.col = "light gray", bg = "light gray")
} else if (route_results_possible == F){
    legend(x = 1.55,y = 1.6,legend =  "Too few detections",col = "white", box.col = "light gray", bg = "light gray") 
    legend(x = 1.55,y = 1.45,legend =  "Too few detections",col = "white", box.col = "light gray", bg = "light gray")
} else {
  legend(x = 1.55,y = 1.6,legend =  paste(round(routes$TransitionMat["A","A"],3)*100,
                                          "% (", round(routes$lcl.TransitionMat["A","A"],3)*100, "-",
                                          round(routes$ucl.TransitionMat["A","A"],3)*100,")", sep =""),
         col = "white", box.col = "light gray", bg = "light gray")
  legend(1.55,1.45, legend =  paste(round(routes$TransitionMat["A","B"],3)*100, 
                                    "% (", round(routes$lcl.TransitionMat["A","B"],3)*100, "-",
                                    round(routes$ucl.TransitionMat["A","B"],3)*100,")", sep =""),
         box.col = "light gray", bg = "light gray")
}
mtext(text = "3.3 Routing Probabilities at Georgiana Slough Junction (with 95% C.I.s)", cex = 1.3, side = 1, line = 0.2, adj = 0)
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  plot(1:2, type = "n",xaxt = "n", yaxt = "n",
       xlab = "Range of days study fish were present at Georgiana Sl Junction",
       ylab = "Routing probability into Georgiana Slough at the junction")
  text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else if(route_results_possible == F){
  plot(1:2, type = "n",xaxt = "n", yaxt = "n",
       xlab = "Range of days study fish were present at Georgiana Sl Junction",
       ylab = "Routing probability into Georgiana Slough at the junction")
  text(1.5,1.5, labels = "TOO FEW DETECTIONS", cex = 2)
} else {
  library(repmis)
  trytest <- try(source_data("https://code.usgs.gov/crrl_qfes/Enhanced_Acoustic_Telemetry_Project/raw/master/EAT_data_2024.Rdata?raw=True"))
  if (inherits(trytest, "try-error")){
    plot(1:2, type = "n",xaxt = "n", yaxt = "n",
         xlab = "Range of days study fish were present at Georgiana Sl Junction",
         ylab = "Routing probability into Georgiana Slough at the junction")
    text(1.5,1.5, labels = "ERROR DOWNLOADING STARS", cex = 2)
  } else {
    detects_5 <- detects_study %>% filter(general_location == "SacRiverWalnutGrove_2")
    detects_5 <- detects_5 %>%
      dplyr::left_join(., detects_5 %>%
                         group_by(TagCode) %>%
                         summarise(first_detect = min(DateTime_PST))) %>%
      mutate(Day = as.Date(as.Date(first_detect, "Etc/GMT+8")))
    tagcount5 <- detects_5 %>%
      group_by(Day) %>%
      summarise(unique_tags = length(unique(TagCode)))
    tagcount5$density <- tagcount5$unique_tags / sum(tagcount5$unique_tags)
    test2 <- as.data.frame(test2)
    # first, find min and max arrivals at georg for a study
    min_georg <- as.Date(format(min(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2","Georg_Sl_1",
                                                                        "Georgiana_Slough2"),"DateTime_PST"]), "%Y-%m-%d"))
    max_georg <- as.Date(format(max(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2","Georg_Sl_1",
                                                                        "Georgiana_Slough2"),"DateTime_PST"]), "%Y-%m-%d"))
    psi_study <- psi_GeoCond[psi_GeoCond$Date <= max_georg & psi_GeoCond$Date >=min_georg-1,]
    ggplot()+
      geom_bar(data = tagcount5, aes(x = Day, y = density), alpha = 0.4, stat = "identity", fill = "red") +
      geom_ribbon(data = psi_study, aes(x=Date, ymin = psi_geo.10, ymax = psi_geo.90), alpha = 0.3) +
      geom_line(data = psi_study, aes(x=Date, y=psi_geo.50), size = 1.2) +
      geom_point(aes(x = median(rep(tagcount5$Day, tagcount5$unique_tags)), y = tail(results_short$Estimate,1)/100), size = 4)+
      geom_errorbar(width = 0.2, size = 1.2, aes(ymin = tail(results_short$`95% lower C.I.`,1)/100, ymax = tail(results_short$`95% upper C.I.`,1)/100, x = median(rep(tagcount5$Day, tagcount5$unique_tags))))+
      xlim(c(min_georg, max_georg))+
      xlab("Range of days study fish were present at Georgiana Sl Junction") +
      geom_hline(yintercept=0, linetype="dashed") +
      geom_segment(aes(x=median(rep(tagcount5$Day, tagcount5$unique_tags)),y =-0.01,xend=median(rep(tagcount5$Day, tagcount5$unique_tags)),yend =0.01)) +
      geom_text(aes(x=median(rep(tagcount5$Day, tagcount5$unique_tags)),y =-0.02, label = "median"), color = "red")+
      scale_y_continuous(name = "Routing probability into Georgiana Slough at the junction",limits = c(-0.02,1), sec.axis = dup_axis(name="Histogram of arrivals to junction")) +
      theme_bw()+
      theme(axis.text.y.right = element_text(color = "red"), axis.title.y.right = element_text(color = "red"))
  }
}
3.4 STARS prediction (ribbon) vs. empirical (point) estimate of Routing Probability at Georgiana Slough Junction
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
try(benicia <- read.csv("benicia_surv.csv", stringsAsFactors = F))
detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]
endtime         <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
if(nrow(detects_benicia) == 0){
  if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
    WR.surv <- data.frame("Release"="ALL", "estimate"=0, "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
  } else {
    WR.surv <- data.frame("Release"=NA, "estimate"="NO DETECTIONS YET", "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
  }
  WR.surv1 <- WR.surv
  colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
  print(kable(WR.surv1, row.names = F, "html", caption = "3.2 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else if(length(table(detects_benicia$general_location)) == 1){
  if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
    WR.surv <- data.frame("Release"="ALL", "estimate"=round(length(unique(detects_benicia$TagCode))/length(unique(detects_study$TagCode))*100,1),
                          "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
  } else {
    WR.surv <- data.frame("Release" = NA, "estimate" = "NOT ENOUGH DETECTIONS", "se" = NA, "lcl" = NA, "ucl" = NA, "Detection_efficiency" = NA)
  }
  WR.surv1 <- WR.surv
  colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
  print(kable(WR.surv1, row.names = F, "html", caption = "3.2 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
         kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
  # Only do survival to Benicia here
  test3 <- detects_study[which(detects_study$river_km < 53),]
  # calculate mean and SD travel time
  travel <- aggregate(list(first_detect = test3$DateTime_PST), by = list(Release = test3$Release, TagCode = test3$TagCode, RelDT = test3$RelDT), min)
  travel$days <- as.numeric(difftime(travel$first_detect, travel$RelDT, units = "days"))
  travel_final <- aggregate(list(mean_travel_time = travel$days), by = list(Release = travel$Release), mean)
  travel_final <- merge(travel_final, aggregate(list(sd_travel_time = travel$days), by = list(Release = travel$Release), sd))
  travel_final <- merge(travel_final, aggregate(list(n = travel$days), by = list(Release = travel$Release), length))
  travel_final <- rbind(travel_final, data.frame(Release = "ALL", mean_travel_time = mean(travel$days), sd_travel_time = sd(travel$days), n = nrow(travel)))
  # Create inp for survival estimation
  inp <- as.data.frame(reshape2::dcast(test3, TagCode ~ river_km, fun.aggregate = length))
  # Sort columns by river km in descending order
  # Count number of genlocs
  gen_loc_sites <- ncol(inp)-1
  inp  <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
  inp  <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
  inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
  inp2[is.na(inp2)] <- 0
  inp2[inp2 > 0]    <- 1
  inp    <- cbind(inp, inp2)
  groups <- as.character(sort(unique(inp$Release)))
  groups_w_detects <- names(table(test3$Release))
  inp[,groups] <- 0
  for(i in groups){
    inp[as.character(inp$Release) == i, i] <- 1
  }
  inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
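  # For illustration: with the two Benicia columns in this study, a fish detected at both lines
  # gets capture history "111" (release occasion + one column per site, ordered by decreasing rkm),
  # while a fish never detected below rkm 53 gets "100"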
  if(length(groups) > 1){
    # Make sure the factor levels put a release that has detections first; if the first release in factor order has zero detections, the model goes haywire
    inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = inp$Release, stringsAsFactors = F)
    WR.process <- process.data(inp.df, model="CJS", begin.time=1)
    WR.ddl <- make.design.data(WR.process)
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    inp.df <- inp.df[inp.df$rel %in% groups_w_detects,]
    inp.df$rel <- factor(inp.df$rel, levels = groups_w_detects)
    if(length(groups_w_detects) > 1){
      WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
      WR.ddl <- make.design.data(WR.process)
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
    } else {
      WR.process <- process.data(inp.df, model="CJS", begin.time=1)
      WR.ddl <- make.design.data(WR.process)
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    }
    WR.surv <- cbind(Release = "ALL",round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
    WR.surv.rel <- cbind(Release = groups_w_detects, round(WR.mark.rel$results$real[seq(from=1,to=length(groups_w_detects)*2,by = 2),
                                                                                    c("estimate", "se", "lcl", "ucl")] * 100,1))
    WR.surv.rel <- merge(WR.surv.rel, data.frame(Release = groups), all.y = T)
    WR.surv.rel[is.na(WR.surv.rel$estimate),"estimate"] <- 0
    WR.surv <- rbind(WR.surv, WR.surv.rel)
  } else {
    inp.df      <- data.frame(ch = as.character(inp$inp_final), freq = 1, stringsAsFactors = F)
    WR.process  <- process.data(inp.df, model="CJS", begin.time=1) 
    WR.ddl      <- make.design.data(WR.process)
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    WR.surv     <- cbind(Release = c("ALL", groups),round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
  }
  WR.surv$Detection_efficiency <- NA
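  # In the CJS real-parameter table, the first gen_loc_sites rows are Phi by occasion and the
  # next rows are p; row gen_loc_sites+1 is therefore the detection probability at the first
  # (upstream-most) detection site, in this study Benicia East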
  WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
  WR.surv1 <- WR.surv
  colnames(WR.surv1)[1] <- "Release"
  WR.surv1 <- merge(WR.surv1, travel_final, by = "Release", all.x = T)
  WR.surv1$mean_travel_time <- round(WR.surv1$mean_travel_time,1)
  WR.surv1$sd_travel_time <- round(WR.surv1$sd_travel_time,1)
  colnames(WR.surv1) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", 
                          "95% upper C.I.", "Detection efficiency (%)", "Mean time to Benicia (days)", "SD of time to Benicia (days)", "Count")
  #colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
  print(kable(WR.surv1, row.names = F, "html", caption = "3.2 Minimum survival to Benicia Bridge East Span (using CJS survival model), and travel time") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}
3.2 Minimum survival to Benicia Bridge East Span (using CJS survival model), and travel time
| Release | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) | Mean time to Benicia (days) | SD of time to Benicia (days) | Count |
|---|---|---|---|---|---|---|---|---|
| ALL | 66.8 | 3.1 | 60.6 | 72.5 | 95.4 | 3.9 | 3.8 | 160 | 
| Sacramento_Miller_Park | 66.8 | 3.1 | 60.6 | 72.5 | NA | 3.9 | 3.8 | 160 | 
if(exists("benicia")==T & is.numeric(WR.surv1[1,2])){
  # Find mean release time per release group, and ALL
  reltimes <- aggregate(list(RelDT = study_tagcodes$release_time), by = list(Release = study_tagcodes$Release), FUN = mean)
  reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$release_time)))
  # Assign whether the results are tentative or final
  quality <- "tentative"
  if(endtime < as.Date(format(Sys.time(), "%Y-%m-%d"))){
    quality <- "final"
  }
  WR.surv       <- merge(WR.surv, reltimes, by = "Release", all.x = T)
  WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = "1970-01-01")
  benicia$RelDT <- as.POSIXct(benicia$RelDT)
  # remove old benicia record for this studyID
  benicia <- benicia[!benicia$StudyID == unique(detects_study$Study_ID),]
  benicia <- rbind(benicia, data.frame(WR.surv, StudyID = unique(detects_study$Study_ID), data_quality = quality))
  write.csv(benicia, "benicia_surv.csv", row.names = F, quote = F) 
}
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
if(nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  "No detections yet"
} else {
  arrivals <- detects_study %>%
              group_by(general_location, TagCode) %>%
              summarise(DateTime_PST = min(DateTime_PST)) %>%
              arrange(TagCode)
  tag_stats <- arrivals %>%
               group_by(general_location) %>%
               summarise(First_arrival = min(DateTime_PST),
                         Mean_arrival = mean(DateTime_PST),
                         Last_arrival = max(DateTime_PST),
                         Fish_count = length(unique(TagCode))) %>%
               mutate(Percent_arrived = round(Fish_count/nrow(study_tagcodes) * 100,2)) %>%
               dplyr::left_join(., unique(detects_study[,c("general_location", "river_km")])) %>%
               arrange(desc(river_km)) %>%
               mutate(First_arrival = format(First_arrival, tz = "Etc/GMT+8"),
                      Mean_arrival = format(Mean_arrival, tz = "Etc/GMT+8"),
                      Last_arrival = format(Last_arrival, tz = "Etc/GMT+8")) %>%
               na.omit()
  print(kable(tag_stats, row.names = F,
              caption = "4.1 Detections for all releases combined",
              "html") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
  count <- 0
  for(j in sort(unique(study_tagcodes$Release))){
    if(nrow(detects_study[detects_study$Release == j,]) > 0){
      count <- count + 1
      arrivals1 <- detects_study %>%
                   filter(Release == j) %>%
                   group_by(general_location, TagCode) %>%
                   summarise(DateTime_PST = min(DateTime_PST)) %>%
                   arrange(TagCode)
      rel_count <- nrow(study_tagcodes[study_tagcodes$Release == j,])
      tag_stats1 <- arrivals1 %>%
                    group_by(general_location) %>%
                    summarise(First_arrival = min(DateTime_PST),
                              Mean_arrival = mean(DateTime_PST),
                              Last_arrival = max(DateTime_PST),
                              Fish_count = length(unique(TagCode))) %>%
                    mutate(Percent_arrived = round(Fish_count/rel_count * 100,2)) %>%
                    dplyr::left_join(., unique(detects_study[,c("general_location", "river_km")])) %>%
                    arrange(desc(river_km)) %>%
                    mutate(First_arrival = format(First_arrival, tz = "Etc/GMT+8"),
                           Mean_arrival = format(Mean_arrival, tz = "Etc/GMT+8"),
                           Last_arrival = format(Last_arrival, tz = "Etc/GMT+8")) %>%
                    na.omit()
      final_stats <- kable(tag_stats1, row.names = F,
            caption = paste("4.2.", count, " Detections for ", j, " release groups", sep = ""),
            "html")
      print(kable_styling(final_stats, bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
    } else {
      cat("\n\n\\pagebreak\n")
      print(paste("No detections for",j,"release group yet", sep=" "), quote = F)
      cat("\n\n\\pagebreak\n")
    }
  }
}
4.1 Detections for all releases combined
| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
|---|---|---|---|---|---|---|
| TowerBridge | 2025-02-14 21:01:10 | 2025-02-14 21:01:10 | 2025-02-14 21:01:10 | 1 | 0.42 | 172.000 | 
| Holland_Cut_Quimby | 2025-02-14 22:05:38 | 2025-02-14 22:05:38 | 2025-02-14 22:05:38 | 1 | 0.42 | 145.000 | 
| SacRiverWalnutGrove_2 | 2025-02-13 03:38:01 | 2025-02-14 07:55:11 | 2025-02-15 19:17:15 | 144 | 60.00 | 120.300 | 
| Georg_Sl_1 | 2025-02-13 02:41:16 | 2025-02-14 05:55:58 | 2025-02-15 08:56:24 | 37 | 15.42 | 119.600 | 
| Sac_BlwGeorgiana | 2025-02-13 04:09:57 | 2025-02-14 08:19:20 | 2025-02-15 19:34:53 | 104 | 43.33 | 119.058 | 
| Sac_BlwGeorgiana2 | 2025-02-13 04:19:07 | 2025-02-14 09:06:03 | 2025-02-15 19:43:37 | 107 | 44.58 | 118.398 | 
| Benicia_east | 2025-02-14 06:29:58 | 2025-02-17 15:29:34 | 2025-03-12 11:32:21 | 153 | 63.75 | 52.240 | 
| Benicia_west | 2025-02-14 06:34:12 | 2025-02-17 13:21:14 | 2025-03-12 07:09:40 | 152 | 63.33 | 52.040 | 
4.2.1 Detections for Sacramento_Miller_Park release groups
| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
|---|---|---|---|---|---|---|
| TowerBridge | 2025-02-14 21:01:10 | 2025-02-14 21:01:10 | 2025-02-14 21:01:10 | 1 | 0.42 | 172.000 | 
| Holland_Cut_Quimby | 2025-02-14 22:05:38 | 2025-02-14 22:05:38 | 2025-02-14 22:05:38 | 1 | 0.42 | 145.000 | 
| SacRiverWalnutGrove_2 | 2025-02-13 03:38:01 | 2025-02-14 07:55:11 | 2025-02-15 19:17:15 | 144 | 60.00 | 120.300 | 
| Georg_Sl_1 | 2025-02-13 02:41:16 | 2025-02-14 05:55:58 | 2025-02-15 08:56:24 | 37 | 15.42 | 119.600 | 
| Sac_BlwGeorgiana | 2025-02-13 04:09:57 | 2025-02-14 08:19:20 | 2025-02-15 19:34:53 | 104 | 43.33 | 119.058 | 
| Sac_BlwGeorgiana2 | 2025-02-13 04:19:07 | 2025-02-14 09:06:03 | 2025-02-15 19:43:37 | 107 | 44.58 | 118.398 | 
| Benicia_east | 2025-02-14 06:29:58 | 2025-02-17 15:29:34 | 2025-03-12 11:32:21 | 153 | 63.75 | 52.240 | 
| Benicia_west | 2025-02-14 06:34:12 | 2025-02-17 13:21:14 | 2025-03-12 07:09:40 | 152 | 63.33 | 52.040 | 
library(dplyr)
library(dbplyr)
library(DBI)
library(odbc)
library(data.table)
# Create connection with cloud database
con <- dbConnect(odbc(),
                Driver = "SQL Server",
                Server = "calfishtrack-server.database.windows.net",
                Database = "realtime_detections",
                UID = "realtime_user",
                PWD = "Pass@123",
                Port = 1433)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
# THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if(nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  "No detections yet"
} else {
  arrivals <- detects_study %>%
              group_by(general_location, TagCode) %>%
              summarise(DateTime_PST = min(DateTime_PST)) %>%
              mutate(day = as.Date(DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8"))
  # Use dbplyr to load realtime_locs and qryHexCodes sql table
  gen_locs <- tbl(con, "realtime_locs") %>% collect()
  # gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
  beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F) %>%
                   mutate(day = as.Date(day)) %>%
                   filter(TagCode == beacon) %>% # Now subset to only look at data for the correct beacon for that day
                   filter(day >= as.Date(min(study_tagcodes$release_time)) & 
                          day <= endtime) %>% # Now only keep beacon by day for days since fish were released
                   dplyr::left_join(., gen_locs[,c("location", "general_location","rkm")], by = "location")
  arrivals_per_day <- arrivals %>%
                      group_by(day, general_location) %>%
                      summarise(New_arrivals = length(TagCode)) %>%
                      arrange(general_location) %>% na.omit() %>%
                      mutate(day = as.Date(day)) %>%
                      dplyr::left_join(unique(beacon_by_day[,c("general_location", "day", "rkm")]),
                                       ., by = c("general_location", "day")) %>%
                      arrange(general_location, day) %>%
                      mutate(day = factor(day)) %>%
                      filter(general_location != "Bench_test") %>% # Remove bench test and other NA locations
                      filter(!(is.na(general_location))) %>%
                      arrange(desc(rkm)) %>% # Change order of data to plot decreasing river_km
                      mutate(general_location = factor(general_location, unique(general_location)))
  endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
                 max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
  crosstab <- xtabs(formula = arrivals_per_day$New_arrivals ~ arrivals_per_day$day + arrivals_per_day$general_location,
                    addNA =T)
  crosstab[is.na(crosstab)] <- ""
  crosstab[crosstab==0] <- NA
  crosstab <- as.data.frame.matrix(crosstab)
  kable(crosstab, align = "c", caption = "4.3 Fish arrivals per day (\"NA\" means receivers were non-operational)") %>%
    kable_styling(c("striped", "condensed"), font_size = 11, full_width = F, position = "left", fixed_thead = TRUE) %>%
    column_spec(column = 1:ncol(crosstab),width_min = "50px",border_left = T, border_right = T) %>%
    column_spec(1, bold = T, width_min = "75px")%>%
    scroll_box(height = "700px")
4.3 Fish arrivals per day ("NA" means receivers were non-operational)
| | Blw_Salt_RT | MeridianBr | Stan_Valley_Oak | Stan_Caswell | SJ_Vernalis | TowerBridge | I80-50_Br | MiddleRiver | Clifton_Court_US_Radial_Gates | Holland_Cut_Quimby | CVP_Tank | CVP_Trash_Rack_1 | Clifton_Court_Intake_Canal | Old_River_Quimby | SanJoaquinMcDonald | SanJoaquinMcDonald_2 | SacRiverWalnutGrove_2 | Georg_Sl_1 | Sac_BlwGeorgiana | Sac_BlwGeorgiana2 | Benicia_east | Benicia_west |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2025-02-13 | NA | 39 | 13 | 26 | 26 | |||||||||||||||||
| 2025-02-14 | NA | 1 | 1 | 51 | 10 | 40 | 41 | 22 | 18 | |||||||||||||
| 2025-02-15 | NA | 54 | 14 | 38 | 40 | 36 | 39 | |||||||||||||||
| 2025-02-16 | NA | 41 | 39 | |||||||||||||||||||
| 2025-02-17 | NA | 27 | 29 | |||||||||||||||||||
| 2025-02-18 | NA | 5 | 6 | |||||||||||||||||||
| 2025-02-19 | NA | 4 | 6 | |||||||||||||||||||
| 2025-02-20 | NA | 2 | 1 | |||||||||||||||||||
| 2025-02-21 | NA | 2 | 2 | |||||||||||||||||||
| 2025-02-22 | NA | 4 | 4 | |||||||||||||||||||
| 2025-02-23 | NA | |||||||||||||||||||||
| 2025-02-24 | NA | 2 | 2 | |||||||||||||||||||
| 2025-02-25 | NA | 1 | ||||||||||||||||||||
| 2025-02-26 | NA | 1 | ||||||||||||||||||||
| 2025-02-27 | NA | 2 | ||||||||||||||||||||
| 2025-02-28 | NA | |||||||||||||||||||||
| 2025-03-01 | NA | 1 | 1 | |||||||||||||||||||
| 2025-03-02 | NA | |||||||||||||||||||||
| 2025-03-03 | NA | 1 | 1 | |||||||||||||||||||
| 2025-03-04 | NA | |||||||||||||||||||||
| 2025-03-05 | NA | |||||||||||||||||||||
| 2025-03-06 | NA | 1 | 1 | |||||||||||||||||||
| 2025-03-07 | NA | 1 | ||||||||||||||||||||
| 2025-03-08 | NA | 1 | ||||||||||||||||||||
| 2025-03-09 | NA | |||||||||||||||||||||
| 2025-03-10 | NA | |||||||||||||||||||||
| 2025-03-11 | NA | |||||||||||||||||||||
| 2025-03-12 | NA | 1 | 1 | |||||||||||||||||||
| 2025-03-13 | NA | |||||||||||||||||||||
| 2025-03-14 | NA | |||||||||||||||||||||
| 2025-03-15 | NA | |||||||||||||||||||||
| 2025-03-16 | NA | |||||||||||||||||||||
| 2025-03-17 | NA | |||||||||||||||||||||
| 2025-03-18 | NA | |||||||||||||||||||||
| 2025-03-19 | NA | |||||||||||||||||||||
| 2025-03-20 | NA | |||||||||||||||||||||
| 2025-03-21 | NA | |||||||||||||||||||||
| 2025-03-22 | NA | |||||||||||||||||||||
| 2025-03-23 | NA | |||||||||||||||||||||
| 2025-03-24 | NA | |||||||||||||||||||||
| 2025-03-25 | NA | |||||||||||||||||||||
| 2025-03-26 | NA | |||||||||||||||||||||
| 2025-03-27 | NA | |||||||||||||||||||||
| 2025-03-28 | NA | |||||||||||||||||||||
| 2025-03-29 | NA | |||||||||||||||||||||
| 2025-03-30 | NA | |||||||||||||||||||||
| 2025-03-31 | NA | |||||||||||||||||||||
| 2025-04-01 | NA | |||||||||||||||||||||
| 2025-04-02 | NA | |||||||||||||||||||||
| 2025-04-03 | NA | |||||||||||||||||||||
| 2025-04-04 | NA | |||||||||||||||||||||
| 2025-04-05 | NA | |||||||||||||||||||||
| 2025-04-06 | NA | |||||||||||||||||||||
| 2025-04-07 | NA | |||||||||||||||||||||
| 2025-04-08 | NA | NA | ||||||||||||||||||||
| 2025-04-09 | NA | NA | ||||||||||||||||||||
| 2025-04-10 | NA | NA | ||||||||||||||||||||
| 2025-04-11 | NA | NA | ||||||||||||||||||||
| 2025-04-12 | NA | NA | ||||||||||||||||||||
| 2025-04-13 | NA | NA | ||||||||||||||||||||
| 2025-04-14 | NA | NA | ||||||||||||||||||||
| 2025-04-15 | NA | NA | ||||||||||||||||||||
| 2025-04-16 | NA | NA | ||||||||||||||||||||
| 2025-04-17 | NA | |||||||||||||||||||||
| 2025-04-18 | NA | |||||||||||||||||||||
| 2025-04-19 | NA | |||||||||||||||||||||
| 2025-04-20 | NA | |||||||||||||||||||||
| 2025-04-21 | NA | |||||||||||||||||||||
| 2025-04-22 | ||||||||||||||||||||||
| 2025-04-23 | ||||||||||||||||||||||
| 2025-04-24 | ||||||||||||||||||||||
| 2025-04-25 | ||||||||||||||||||||||
| 2025-04-26 | 
rm(list = ls())
cleanup(ask = F)
 
For questions or comments, please contact cyril.michel@noaa.gov