Central Valley Enhanced Acoustic Tagging Project






Hatchery-origin Chinook salmon Seasonal Survival Study

2021-2022 Season (PROVISIONAL DATA)



1. Project Status


The Telemetry Study Template for this study can be found here

try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

library(knitr)
library(kableExtra)
library(lubridate)
library(data.table)
library(dplyr)     ## for filter() and arrange() used in Section 3
library(ggplot2)
library(RMark)
library(scales)
library(viridis)
library(forcats)
library(reshape2)
library(png)
library(dataRetrieval)
library(rerddap)
library(plotly)

##################################################################################################################
#### ASSIGN STUDY ID IN THIS NEXT LINE OF CODE ####
study <- "Seasonal_Survival_2022"
##################################################################################################################

detects_study <- fread("study_detections_archive.csv", stringsAsFactors = F, colClasses = c(DateTime_PST = "character", RelDT = "character"))
detects_study <- as.data.frame(detects_study[detects_study$Study_ID == study,])
detects_study$DateTime_PST <- as.POSIXct(detects_study$DateTime_PST, format = "%Y-%m-%d %H:%M:%S", tz="Etc/GMT+8")
detects_study$release_time <- as.POSIXct(detects_study$RelDT, format = "%Y-%m-%d %H:%M:%S", tz="Etc/GMT+8")

colnames(detects_study)[which(colnames(detects_study) == "Weight")] <- "weight"
colnames(detects_study)[which(colnames(detects_study) == "Length")] <- "length"
colnames(detects_study)[which(colnames(detects_study) == "Rel_rkm")] <- "release_rkm"
colnames(detects_study)[which(colnames(detects_study) == "Rel_loc")] <- "release_location"
colnames(detects_study)[which(colnames(detects_study) == "rkm")] <- "river_km"

latest <- read.csv("latest_download.csv", stringsAsFactors = F)$x

##################################################################################################################
#### TO RUN THE FOLLOWING CODE CHUNKS FROM HERE ON DOWN USING R ERDDAP, UN-COMMENT THESE NEXT 9 LINES OF CODE ####
##################################################################################################################
# cache_delete_all()
# query=paste('&',"Study_ID",'="',study,'"',sep = '')
# datafile=URLencode(paste("https://oceanview.pfeg.noaa.gov/erddap/tabledap/","FEDcalFishTrack",".csv?",query,sep = ''))
# options(url.method = "libcurl", download.file.method = "libcurl", timeout = 180)
# detects_study <- data.frame(read.csv(datafile,row.names = NULL, stringsAsFactors = F))
# detects_study <- detects_study[-1,]
# detects_study$DateTime_PST <- as.POSIXct(detects_study$local_time, format = "%Y-%m-%d %H:%M:%S", "Etc/GMT+8")
# detects_study$release_time <- as.POSIXct(detects_study$release_time, format = "%Y-%m-%d %H:%M:%S", "Etc/GMT+8")
# detects_study$river_km <- as.numeric(detects_study$river_km)
##################################################################################################################
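## A minimal sketch (not part of the original pipeline) wrapping the commented ERDDAP query
## above into a reusable helper; the function name fetch_erddap_detects is hypothetical, while
## the dataset ID and URL are taken verbatim from the commented lines. Un-comment to use.
# fetch_erddap_detects <- function(study_id){
#   query    <- paste('&', "Study_ID", '="', study_id, '"', sep = '')
#   datafile <- URLencode(paste("https://oceanview.pfeg.noaa.gov/erddap/tabledap/",
#                               "FEDcalFishTrack", ".csv?", query, sep = ''))
#   options(url.method = "libcurl", download.file.method = "libcurl", timeout = 180)
#   out <- data.frame(read.csv(datafile, row.names = NULL, stringsAsFactors = F))
#   out <- out[-1,]  ## drop the units row ERDDAP returns beneath the header
#   out$DateTime_PST <- as.POSIXct(out$local_time,   format = "%Y-%m-%d %H:%M:%S", tz = "Etc/GMT+8")
#   out$release_time <- as.POSIXct(out$release_time, format = "%Y-%m-%d %H:%M:%S", tz = "Etc/GMT+8")
#   out$river_km     <- as.numeric(out$river_km)
#   out
# }
# detects_study <- fetch_erddap_detects(study)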


if (nrow(detects_study) == 0){
  cat("Study has not yet begun\n  ")
}else{
  
  if (min(detects_study$release_time) > Sys.time()){
    cat("Study has not yet begun, below data is a placeholder:\n  ")
  }
  if (min(detects_study$release_time) < Sys.time()){
    cat(paste("Study began on ", min(detects_study$release_time), ", see tagging details below:\n ", sep = ""))
  }
  
  ########################################################################
  #### ASSIGN RELEASE GROUPS HERE ####
  #######################################################################
  detects_study$Release <- "Week 1"
  ## NB: windows are applied in order, so where they overlap (Week 4 and Week 5 share
  ## 2022-03-19 12:00 to 2022-03-20 12:00) the later assignment wins; no 2022 releases
  ## actually fell inside the overlap.
  detects_study[detects_study$release_time > as.POSIXct("2022-01-31 12:00:00") & detects_study$release_time < as.POSIXct("2022-02-06 12:00:00"), "Release"] <- "Week 2"
  detects_study[detects_study$release_time > as.POSIXct("2022-02-07 12:00:00") & detects_study$release_time < as.POSIXct("2022-03-15 12:00:00"), "Release"] <- "Week 3"
  detects_study[detects_study$release_time > as.POSIXct("2022-03-15 12:00:00") & detects_study$release_time < as.POSIXct("2022-03-20 12:00:00"), "Release"] <- "Week 4"
  detects_study[detects_study$release_time > as.POSIXct("2022-03-19 12:00:00") & detects_study$release_time < as.POSIXct("2022-04-09 12:00:00"), "Release"] <- "Week 5"
  detects_study[detects_study$release_time > as.POSIXct("2022-04-23 12:00:00") & detects_study$release_time < as.POSIXct("2022-04-29 00:00:00"), "Release"] <- "Week 6"
  detects_study[detects_study$release_time > as.POSIXct("2022-05-01 12:00:00") & detects_study$release_time < as.POSIXct("2022-05-13 00:00:00"), "Release"] <- "Week 7"
  
  #detects_study[detects_study$release_time > as.POSIXct("2021-01-30 14:31:00"), "Release"] <- "Release 3"
  #######################################################################
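  ## Equivalent one-shot assignment via cut() (a sketch, not the original method): supply the
  ## window boundaries as breaks and the week labels. This assumes contiguous, non-overlapping
  ## breaks, so the default "Week 1" catch-all would need its own boundary as the first break.
  # wk_breaks <- as.POSIXct(c("2021-12-01 00:00:00", "2022-01-31 12:00:00", "2022-02-07 12:00:00",
  #                           "2022-03-15 12:00:00", "2022-03-20 12:00:00", "2022-04-23 12:00:00",
  #                           "2022-05-01 12:00:00", "2022-05-13 00:00:00"))
  # detects_study$Release <- as.character(cut(detects_study$release_time, breaks = wk_breaks,
  #                                           labels = paste("Week", 1:7)))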
  
  study_tagcodes <- unique(detects_study[,c("TagCode", "release_time", "weight", "length", "release_rkm", "release_location", "Release")])

  
  release_stats <- aggregate(list(First_release_time = study_tagcodes$release_time),
                             by= list(Release = study_tagcodes$Release),
                             FUN = min)
  release_stats <- merge(release_stats,
                         aggregate(list(Last_release_time = study_tagcodes$release_time),
                                   by= list(Release = study_tagcodes$Release),
                                   FUN = max),
                         by = c("Release"))
  
  
  release_stats <- merge(release_stats, aggregate(list(Number_fish_released =
                                                         study_tagcodes$TagCode),
                                                  by= list(Release = study_tagcodes$Release),
                                                  FUN = function(x) {length(unique(x))}),
                         by = c("Release"))
  
  release_stats <- merge(release_stats,
                         aggregate(list(Release_location = study_tagcodes$release_location),
                                   by= list(Release = study_tagcodes$Release),
                                   FUN = function(x) {head(x,1)}),
                         by = c("Release"))
  release_stats <- merge(release_stats,
                         aggregate(list(Release_rkm = study_tagcodes$release_rkm),
                                   by= list(Release = study_tagcodes$Release),
                                   FUN = function(x) {head(x,1)}),
                         by = c("Release"))
  release_stats <- merge(release_stats,
                         aggregate(list(Mean_length = as.numeric(study_tagcodes$length)),
                                   by= list(Release = study_tagcodes$Release),
                                   FUN = mean, na.rm = T),
                         by = c("Release"))
  release_stats <- merge(release_stats,
                         aggregate(list(Mean_weight = as.numeric(study_tagcodes$weight)),
                                   by= list(Release = study_tagcodes$Release),
                                   FUN = mean, na.rm = T),
                         by = c("Release"))
  

  release_stats[,c("Mean_length", "Mean_weight")] <- round(release_stats[,c("Mean_length", "Mean_weight")],1)
  
  release_stats$First_release_time <- format(release_stats$First_release_time, tz = "Etc/GMT+8")
  
  release_stats$Last_release_time <- format(release_stats$Last_release_time, tz = "Etc/GMT+8")
  
  release_stats <- release_stats[order(release_stats$First_release_time),]
  
  kable(release_stats, format = "html", row.names = F) %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left")

}                       
Study began on 2021-12-15 10:15:00, see tagging details below:
Release  First_release_time   Last_release_time    Number_fish_released  Release_location  Release_rkm  Mean_length  Mean_weight
Week 1   2021-12-15 10:15:00  2021-12-16 10:15:00  200                   RBDD_Rel          461.579      147.0        35.7
Week 2   2022-02-02 10:35:00  2022-02-03 10:20:00  200                   RBDD_Rel          461.579       89.1         7.6
Week 3   2022-02-23 10:30:00  2022-02-24 09:15:00  200                   RBDD_Rel          461.579       94.1         9.2
Week 4   2022-03-17 09:07:00  2022-03-18 07:43:00  200                   RBDD_Rel          461.579       99.7        10.6
Week 5   2022-04-07 08:15:00  2022-04-08 08:20:00  367                   RBDD_Rel          461.579       88.4         7.9
Week 6   2022-04-27 10:10:00  2022-04-28 09:25:00  361                   RBDD_Rel          461.579       84.5         7.1
Week 7   2022-05-11 09:05:00  2022-05-12 07:52:00  375                   RBDD_Rel          461.579       90.2         8.4



2. Real-time Fish Detections


The study is complete; all tags were no longer active as of 2022-08-26. All times are in Pacific Standard Time.

try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

## THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  "No detections yet"
} else {
  arrivals <- aggregate(list(DateTime_PST = detects_study$DateTime_PST), by = list(general_location = detects_study$general_location, TagCode = detects_study$TagCode), FUN = min)
    
  beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F)
  beacon_by_day$day <- as.Date(beacon_by_day$day)
  
  gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
  
  arrivals$day <- as.Date(format(arrivals$DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8"))
  
  arrivals_per_day <- aggregate(list(New_arrivals = arrivals$TagCode), by = list(day = arrivals$day, general_location = arrivals$general_location), length)
  arrivals_per_day$day <- as.Date(arrivals_per_day$day)

  ## Now subset to only look at data for the correct beacon for that day
  beacon_by_day <- as.data.frame(beacon_by_day[which(beacon_by_day$TagCode == beacon_by_day$beacon),])
  
  ## End the window at the earlier of today and 1.5x the predicted tag life after release
  endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
  ## Now only keep beacon by day for days since fish were released
  beacon_by_day <- beacon_by_day[beacon_by_day$day >= as.Date(min(study_tagcodes$release_time)) & beacon_by_day$day <= endtime,]  
  
  beacon_by_day <- merge(beacon_by_day, gen_locs[,c("location", "general_location","rkm")], by = "location", all.x = T)

  arrivals_per_day <- merge(unique(beacon_by_day[,c("general_location", "day", "rkm")]), arrivals_per_day, all.x = T, by = c("general_location", "day"))
  
  arrivals_per_day$day <- factor(arrivals_per_day$day)
  
  ## Remove bench test and other NA locations
  arrivals_per_day <- arrivals_per_day[!arrivals_per_day$general_location == "Bench_test",]
  arrivals_per_day <- arrivals_per_day[is.na(arrivals_per_day$general_location) == F,]

  ## Remove sites that were not operational the whole time
  #### FOR THE SEASONAL SURVIVAL PAGE, KEEP ALL SITES SINCE PEOPLE WANT TO SEE DETECTIONS OF LATER FISH AT NEWLY DEPLOYED SPOTS##
  gen_locs_days_in_oper <- aggregate(list(days_in_oper = arrivals_per_day$day), by = list(general_location = arrivals_per_day$general_location), FUN = length)
  #gen_locs_days_in_oper <- gen_locs_days_in_oper[gen_locs_days_in_oper$days_in_oper == max(gen_locs_days_in_oper$days_in_oper),]
  
  arrivals_per_day_in_oper <- arrivals_per_day[arrivals_per_day$general_location %in% gen_locs_days_in_oper$general_location,]
  fish_per_site <- aggregate(list(fish_count = arrivals_per_day_in_oper$New_arrivals), by = list(general_location = arrivals_per_day_in_oper$general_location), FUN = sum, na.rm = T)
  
  active_gen_locs <- gen_locs[is.na(gen_locs$stop),]
  active_gen_locs <- active_gen_locs[active_gen_locs$general_location %in% fish_per_site$general_location,]
  ## estimate mean lat and lons for each genloc
  gen_locs_mean_coords <- aggregate(list(latitude = active_gen_locs$latitude), by = list(general_location = active_gen_locs$general_location), FUN = mean)
  gen_locs_mean_coords <- merge(gen_locs_mean_coords, aggregate(list(longitude = active_gen_locs$longitude), by = list(general_location = active_gen_locs$general_location), FUN = mean))
  
  fish_per_site <- merge(fish_per_site, gen_locs_mean_coords)
  
  library(leaflet)
  library(maps)
  library(htmlwidgets)
  library(leaflet.extras)

  ## Currently unused; kept for the commented addAwesomeMarkers() call below
  icons <- awesomeIcons(iconColor = "lightblue",
                      #library = "ion",
                      text = fish_per_site$fish_count)
  
  leaflet(data = fish_per_site) %>%
      # setView(-72.14600, 43.82977, zoom = 8) %>% 
      addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
      addProviderTiles("Esri.WorldImagery", group = "Satellite") %>% 
      addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
      # Marker data are from the sites data frame. We need the ~ symbols
      # to indicate the columns of the data frame.
      addMarkers(~longitude, ~latitude, label = ~fish_count, group = "Receiver Sites", popup = ~general_location, labelOptions = labelOptions(noHide = T, textsize = "15px")) %>% 
      #addAwesomeMarkers(~longitude, ~latitude, icon = icons, labelOptions(textsize = "15px")) %>%
      addScaleBar(position = "bottomleft") %>%
      addLayersControl(
          baseGroups = c("Street Map", "Satellite", "Relief"),
          options = layersControlOptions(collapsed = FALSE))
}

2.1 Map of unique fish detections at operational realtime detection locations
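
A note on the layers control in the map above: the group labels passed to addProviderTiles() must exactly match the baseGroups strings in addLayersControl(), otherwise the control's radio buttons cannot toggle the intended tile layers. A minimal standalone sketch of the pattern, using the same provider names as above:

leaflet() %>%
    addProviderTiles("Esri.WorldStreetMap", group = "Street Map") %>%
    addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
    addLayersControl(baseGroups = c("Street Map", "Satellite"),
                     options = layersControlOptions(collapsed = FALSE))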


try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

detects_saltcrk <- detects_study[detects_study$general_location == "Blw_Salt_RT",]

if (nrow(detects_saltcrk) == 0){
  plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
  text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
  
  detects_saltcrk <- merge(detects_saltcrk,aggregate(list(first_detect = detects_saltcrk$DateTime_PST), by = list(TagCode= detects_saltcrk$TagCode), FUN = min))
  
  detects_saltcrk$Day <- as.Date(detects_saltcrk$first_detect, "Etc/GMT+8")
  
  starttime <- as.Date(min(detects_saltcrk$release_time), "Etc/GMT+8")
  ## Endtime should be either now, or end of predicted tag life, whichever comes first
  endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))

  
  #BTC_flow <- cdec_query("BTC", "20", "H", starttime, endtime+1)
  ## Download Sacramento River flow at Bend Bridge (USGS gauge 11377100; parameter 00060 = discharge, cfs)

  BTC_flow <- readNWISuv(siteNumbers = "11377100", parameterCd="00060", startDate = starttime, endDate = endtime+1)
  
  BTC_flow$datetime <- as.Date(format(BTC_flow$dateTime, "%Y-%m-%d"))
  BTC_flow_day <- aggregate(list(parameter_value = BTC_flow$X_00060_00000),
                            by = list(Day = BTC_flow$datetime),
                            FUN = mean, na.rm = T)
  
  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
  
  #rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_butte$StudyID), "Release"])
  rels <- unique(study_tagcodes$Release)
  rel_num <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_saltcrk$Release))])
  
  tagcount <- aggregate(list(unique_tags = detects_saltcrk$TagCode), by = list(Day = detects_saltcrk$Day, Release = detects_saltcrk$Release), FUN = function(x){length(unique(x))})
  tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
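  ## Illustration of the reshape (values hypothetical): tagcount is long, one row per Day x Release,
  ##   Day         Release  unique_tags
  ##   2021-12-20  Week 1   3
  ##   2021-12-21  Week 1   5
  ## and dcast(tagcount, Day ~ Release) spreads Release into columns, one row per Day:
  ##   Day         Week 1
  ##   2021-12-20  3
  ##   2021-12-21  5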
  
  daterange1 <- merge(daterange, tagcount1, all.x=T)
  daterange1[is.na(daterange1)] <- 0
  
  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == 'x'] <- paste(i)
    }
  }
  
  ## Reorder columns alphabetically so release-group coloring is consistent across barplots
  daterange1 <- daterange1[,order(colnames(daterange1))]
    
  daterange2 <- merge(daterange1, BTC_flow_day, by = "Day", all.x = T)
  
  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL
  
  par(mar=c(6, 5, 2, 5) + 0.1)
  # barp <- barplot(t(daterange2[,1:ncol(daterange2)-1]), plot = FALSE, beside = T)
  # barplot(t(daterange2[,1:ncol(daterange2)-1]), beside = T, col=brewer.pal(n = rel_num, name = "Set1"),
  #         xlab = "", ylab = "Number of fish arrivals per day",
  #         ylim = c(0,max(daterange2[,1:ncol(daterange2)-1], na.rm = T)*1.2),
  #         las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt ="n", border = NA)#,
  # #border=NA
  # #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
  # #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
  # legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)-1], fill= brewer.pal(n = rel_num, name = "Set1"), horiz = T, title = "Release")
  # ybreaks <- if(max(daterange2[,1:ncol(daterange2)-1], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)-1], na.rm = T)} else {5}
  # xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
  # barpmeans <- colMeans(barp)
  # axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2[xbreaks,]), las = 2)
  # axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)-1], na.rm = T), ybreaks))
  # 
  # par(new=T)
  # 
  # plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "lightslateblue", type = "l", lwd=1.5, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
  # axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
  # mtext("Flow (cfs) at Bend Bridge", side=4, line=3, cex=1.5, col="lightslateblue")
  daterange2$Date <- as.Date(row.names(daterange2))
  daterange2_flow <- daterange2[,c("Date", "parameter_value")]
  daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))], id.vars = "Date", variable.name = ".")
  
  ay <- list(
    overlaying = "y",
    nticks = 5,
    color = "#947FFF",
    side = "right",
    title = "Flow (cfs) at Bend Bridge",
    automargin = TRUE
  )
  
  # p <- ggplot(data = daterange3, aes(x = Date, y = value, color = ., fill = .)) +
  #   geom_bar(stat='identity') +
  #   ylab("Number of fish arrivals per day") +
  #   #xlim(c(as.Date("2021-02-01"), as.Date("2021-02-05"))) +
  #   #geom_line(data= daterange2_flow, aes(x = Date, y = parameter_value/500), color = alpha("#947FFF", alpha = 0.5))+
  #   #scale_x_date(date_breaks = "5 days") +
  #   #scale_y_continuous(name = "Number of fish arrivals per day",
  #     # Add a second axis and specify its features
  #   #  sec.axis = sec_axis(~.*500, name="Second Axis")) +
  #   theme_bw() +
  #   theme(panel.border = element_rect(colour = "black", fill=NA))

  
  daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
  plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
    add_bars(x = ~Date, y = ~value, color = ~.) %>%
    add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
                     x=0.01, xanchor="left",
                     y=1.056, yanchor="top",    # Same y as legend below
                     legendtitle=TRUE, showarrow=FALSE ) %>%
    add_lines(x=~daterange2_flow$Date, y=~daterange2_flow$parameter_value, line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE, inherit=FALSE) %>%
    layout(yaxis2 = ay,showlegend = T, 
           barmode = "stack",
           xaxis = list(title = "Date", mirror=T,ticks='outside',showline=T), yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks='outside',showline=T),
          legend = list(orientation = "h",x = 0.34, y = 1.066),
          margin=list(l = 50,r = 100,b = 50,t = 50)
           )

}

2.2 Detections at Salt Creek versus Sacramento River flows at Bend Bridge for the duration of tag life
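
The chunks in this section repeat the same first-arrival aggregation for each receiver site. A minimal sketch of how that shared step could be factored into a helper; the function name daily_arrivals is hypothetical, and it assumes the detects_study columns used above:

daily_arrivals <- function(detects, site) {
  d <- detects[detects$general_location == site, ]
  if (nrow(d) == 0) return(NULL)
  ## first detection per tag, then count unique tags per day and release group
  first <- aggregate(list(first_detect = d$DateTime_PST),
                     by = list(TagCode = d$TagCode, Release = d$Release), FUN = min)
  first$Day <- as.Date(first$first_detect, tz = "Etc/GMT+8")
  aggregate(list(unique_tags = first$TagCode),
            by = list(Day = first$Day, Release = first$Release),
            FUN = function(x) length(unique(x)))
}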


try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

detects_butte <- detects_study[detects_study$general_location == "ButteBrRT",]

if (nrow(detects_butte) == 0){
  plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
  text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
  
  detects_butte <- merge(detects_butte,aggregate(list(first_detect = detects_butte$DateTime_PST), by = list(TagCode= detects_butte$TagCode), FUN = min))
  
  detects_butte$Day <- as.Date(detects_butte$first_detect, "Etc/GMT+8")
  
  starttime <- as.Date(min(detects_butte$release_time), "Etc/GMT+8")
  ## Endtime should be either now, or end of predicted tag life, whichever comes first
  #endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_butte$release_time)+60), max(as.Date(detects_butte$release_time)+(as.numeric(detects_butte$tag_life)*1.5)))
  endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))

  
  #BTC_flow <- cdec_query("BTC", "20", "H", starttime, endtime+1)
  ## Download Sacramento River flow at Bend Bridge (USGS gauge 11377100; parameter 00060 = discharge, cfs)

  BTC_flow <- readNWISuv(siteNumbers = "11377100", parameterCd="00060", startDate = starttime, endDate = endtime+1)
  
  BTC_flow$datetime <- as.Date(format(BTC_flow$dateTime, "%Y-%m-%d"))
  BTC_flow_day <- aggregate(list(parameter_value = BTC_flow$X_00060_00000),
                            by = list(Day = BTC_flow$datetime),
                            FUN = mean, na.rm = T)
  
  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
  
  #rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_butte$StudyID), "Release"])
  rels <- unique(study_tagcodes$Release)
  rel_num <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_butte$Release))])
  
  tagcount <- aggregate(list(unique_tags = detects_butte$TagCode), by = list(Day = detects_butte$Day, Release = detects_butte$Release), FUN = function(x){length(unique(x))})
  tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
  
  daterange1 <- merge(daterange, tagcount1, all.x=T)
  daterange1[is.na(daterange1)] <- 0
  
  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == 'x'] <- paste(i)
    }
  }
  
  ## Reorder columns alphabetically so release-group coloring is consistent across barplots
  daterange1 <- daterange1[,order(colnames(daterange1))]
    
  daterange2 <- merge(daterange1, BTC_flow_day, by = "Day", all.x = T)
  
  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL
  
  par(mar=c(6, 5, 2, 5) + 0.1)
  # barp <- barplot(t(daterange2[,1:ncol(daterange2)-1]), plot = FALSE, beside = T)
  # barplot(t(daterange2[,1:ncol(daterange2)-1]), beside = T, col=brewer.pal(n = rel_num, name = "Set1"),
  #         xlab = "", ylab = "Number of fish arrivals per day",
  #         ylim = c(0,max(daterange2[,1:ncol(daterange2)-1], na.rm = T)*1.2),
  #         las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt ="n", border = NA)#,
  # #border=NA
  # #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
  # #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
  # legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)-1], fill= brewer.pal(n = rel_num, name = "Set1"), horiz = T, title = "Release")
  # ybreaks <- if(max(daterange2[,1:ncol(daterange2)-1], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)-1], na.rm = T)} else {5}
  # xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
  # barpmeans <- colMeans(barp)
  # axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2[xbreaks,]), las = 2)
  # axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)-1], na.rm = T), ybreaks))
  # 
  # par(new=T)
  # 
  # plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "lightslateblue", type = "l", lwd=1.5, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
  # axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
  # mtext("Flow (cfs) at Bend Bridge", side=4, line=3, cex=1.5, col="lightslateblue")
  daterange2$Date <- as.Date(row.names(daterange2))
  daterange2_flow <- daterange2[,c("Date", "parameter_value")]
  daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))], id.vars = "Date", variable.name = ".")
  
  ay <- list(
    overlaying = "y",
    nticks = 5,
    color = "#947FFF",
    side = "right",
    title = "Flow (cfs) at Bend Bridge",
    automargin = TRUE
  )
  
  # p <- ggplot(data = daterange3, aes(x = Date, y = value, color = ., fill = .)) +
  #   geom_bar(stat='identity') +
  #   ylab("Number of fish arrivals per day") +
  #   #xlim(c(as.Date("2021-02-01"), as.Date("2021-02-05"))) +
  #   #geom_line(data= daterange2_flow, aes(x = Date, y = parameter_value/500), color = alpha("#947FFF", alpha = 0.5))+
  #   #scale_x_date(date_breaks = "5 days") +
  #   #scale_y_continuous(name = "Number of fish arrivals per day",
  #     # Add a second axis and specify its features
  #   #  sec.axis = sec_axis(~.*500, name="Second Axis")) +
  #   theme_bw() +
  #   theme(panel.border = element_rect(colour = "black", fill=NA))

  
  daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
  plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
    add_bars(x = ~Date, y = ~value, color = ~.) %>%
    add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
                     x=0.01, xanchor="left",
                     y=1.056, yanchor="top",    # Same y as legend below
                     legendtitle=TRUE, showarrow=FALSE ) %>%
    add_lines(x=~daterange2_flow$Date, y=~daterange2_flow$parameter_value, line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE, inherit=FALSE) %>%
    layout(yaxis2 = ay,showlegend = T, 
           barmode = "stack",
           xaxis = list(title = "Date", mirror=T,ticks='outside',showline=T), yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks='outside',showline=T),
          legend = list(orientation = "h",x = 0.34, y = 1.066),
          margin=list(l = 50,r = 100,b = 50,t = 50)
           )

}

2.3 Detections at Butte City Bridge versus Sacramento River flows at Bend Bridge for the duration of tag life


try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

detects_tower <- detects_study[detects_study$general_location == "TowerBridge",]

#wlk_flow <- read.csv("wlk.csv")

if (nrow(detects_tower) == 0){
  plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
  text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
  
  detects_tower <- merge(detects_tower,aggregate(list(first_detect = detects_tower$DateTime_PST), by = list(TagCode= detects_tower$TagCode), FUN = min))
  
  detects_tower$Day <- as.Date(detects_tower$first_detect, "Etc/GMT+8")
  
  starttime <- as.Date(min(detects_tower$release_time), "Etc/GMT+8")
  ## Endtime should be either now, or end of predicted tag life, whichever comes first
  endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))

  ## Download Sacramento River flow at Wilkins Slough (USGS gauge 11390500; parameter 00060 = discharge, cfs)

  wlk_flow <- readNWISuv(siteNumbers = "11390500", parameterCd="00060", startDate = starttime, endDate = endtime+1)
  
  wlk_flow$datetime <- as.Date(format(wlk_flow$dateTime, "%Y-%m-%d"))
  wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$X_00060_00000),
                            by = list(Day = wlk_flow$datetime),
                            FUN = mean, na.rm = T)


  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))

  #rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_tower$StudyID), "Release"])
  rels <- unique(study_tagcodes$Release)
  rel_num <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_tower$Release))])

  tagcount <- aggregate(list(unique_tags = detects_tower$TagCode), by = list(Day = detects_tower$Day, Release = detects_tower$Release ), FUN = function(x){length(unique(x))})
  tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)

  daterange1 <- merge(daterange, tagcount1, all.x=T)
  daterange1[is.na(daterange1)] <- 0

  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == 'x'] <- paste(i)
    }
  }
  
  ## Reorder columns alphabetically so release-group coloring is consistent across barplots
  daterange1 <- daterange1[,order(colnames(daterange1))]
  
  daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)

  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL

  par(mar=c(6, 5, 2, 5) + 0.1)
  # barp <- barplot(t(daterange2[,1:ncol(daterange2)-1]), plot = FALSE, beside = T)
  # barplot(t(daterange2[,1:ncol(daterange2)-1]), beside = T, col=brewer.pal(n = rel_num, name = "Set1"),
  #         xlab = "", ylab = "Number of fish arrivals per day",
  #         ylim = c(0,max(daterange2[,1:ncol(daterange2)-1], na.rm = T)*1.2),
  #         las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt ="n", border = NA)#,
  # #border=NA
  # #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
  # #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
  # legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)-1], fill= brewer.pal(n = rel_num, name = "Set1"), horiz = T, title = "Release")
  # ybreaks <- if(max(daterange2[,1:ncol(daterange2)-1], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)-1], na.rm = T)} else {5}
  # xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
  # barpmeans <- colMeans(barp)
  # axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2[xbreaks,]), las = 2)
  # axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)-1], na.rm = T), ybreaks))
  # 
  # par(new=T)
  # 
  # plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "lightslateblue", type = "l", lwd=1.5, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
  # axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
  # mtext("Flow (cfs) at Wilkins Slough", side=4, line=3, cex=1.5, col="lightslateblue")
  daterange2$Date <- as.Date(row.names(daterange2))
  daterange2_flow <- daterange2[,c("Date", "parameter_value")]
  daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))], id.vars = "Date", variable.name = ".")
  
  ay <- list(
    overlaying = "y",
    nticks = 5,
    color = "#947FFF",
    side = "right",
    title = "Flow (cfs) at Wilkins Slough",
    automargin = TRUE
  )
  
  # p <- ggplot(data = daterange3, aes(x = Date, y = value, color = ., fill = .)) +
  #   geom_bar(stat='identity') +
  #   ylab("Number of fish arrivals per day") +
  #   #xlim(c(as.Date("2021-02-01"), as.Date("2021-02-05"))) +
  #   #geom_line(data= daterange2_flow, aes(x = Date, y = parameter_value/500), color = alpha("#947FFF", alpha = 0.5))+
  #   #scale_x_date(date_breaks = "5 days") +
  #   #scale_y_continuous(name = "Number of fish arrivals per day",
  #     # Add a second axis and specify its features
  #   #  sec.axis = sec_axis(~.*500, name="Second Axis")) +
  #   theme_bw() +
  #   theme(panel.border = element_rect(colour = "black", fill=NA))

  
  daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
  plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
    add_bars(x = ~Date, y = ~value, color = ~.) %>%
    add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
                     x=0.01, xanchor="left",
                     y=1.056, yanchor="top",    # Same y as legend below
                     legendtitle=TRUE, showarrow=FALSE ) %>%
    add_lines(x=~daterange2_flow$Date, y=~daterange2_flow$parameter_value, line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE, inherit=FALSE) %>%
    layout(yaxis2 = ay,showlegend = T, 
           barmode = "stack",
           xaxis = list(title = "Date", mirror=T,ticks='outside',showline=T), yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks='outside',showline=T),
          legend = list(orientation = "h",x = 0.34, y = 1.066),
          margin=list(l = 50,r = 100,b = 50,t = 50)
           )

}

2.4 Detections at Tower Bridge (downtown Sacramento) versus Sacramento River flows at Wilkins Slough for the duration of tag life


try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]

if (nrow(detects_benicia)>0) {
  detects_benicia <- merge(detects_benicia,aggregate(list(first_detect = detects_benicia$DateTime_PST), by = list(TagCode= detects_benicia$TagCode), FUN = min))
  
  detects_benicia$Day <- as.Date(detects_benicia$first_detect, "Etc/GMT+8")
  
  starttime <- as.Date(min(detects_benicia$release_time), "Etc/GMT+8")
  ## Endtime should be either now or end of predicted tag life, whichever comes first
  endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
  #wlk_flow <- cdec_query("COL", "20", "H", starttime, endtime+1)
  #wlk_flow$datetime <- as.Date(wlk_flow$datetime)
  #wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$parameter_value), by = list(Day = wlk_flow$datetime), FUN = mean, na.rm = T)
  
  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
  
  rels <- unique(study_tagcodes$Release)
  rel_num <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_benicia$Release))])
  
  tagcount <- aggregate(list(unique_tags = detects_benicia$TagCode), by = list(Day = detects_benicia$Day, Release = detects_benicia$Release ), FUN = function(x){length(unique(x))})
  tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
                    
  daterange1 <- merge(daterange, tagcount1, all.x=T)
  daterange1[is.na(daterange1)] <- 0
  
  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == 'x'] <- paste(i)
    }
  }
  
  ## Reorder columns alphabetically so release-group coloring is consistent across barplots
  daterange1 <- daterange1[,order(colnames(daterange1))]
    
  #daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)
  daterange2 <- daterange1
  
  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL
  
  par(mar=c(6, 5, 2, 5) + 0.1)
  # barp <- barplot(t(daterange2[,1:ncol(daterange2)]), plot = FALSE, beside = T)
  # barplot(t(daterange2[,1:ncol(daterange2)]), beside = T, col=brewer.pal(n = rel_num, name = "Dark2"), 
  #         xlab = "", ylab = "Number of fish arrivals per day", 
  #         ylim = c(0,max(daterange2[,1:ncol(daterange2)], na.rm = T)*1.2), 
  #         las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n", border = NA)#, 
  #         #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
  #         #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
  # legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)], fill= brewer.pal(n = rel_num, name = "Set1"), horiz = T, title = "Release")
  # ybreaks <- if(max(daterange2[,1:ncol(daterange2)], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)], na.rm = T)} else {5}
  # xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
  # barpmeans <- colMeans(barp)
  # axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
  # axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)], na.rm = T), ybreaks))
  # box()
  daterange2$Date <- as.Date(row.names(daterange2))
  daterange3 <- melt(daterange2, id.vars = "Date", variable.name = ".")
  
  # p <- ggplot(data = daterange3, aes(x = Date, y = value, color = ., fill = .)) +
  #   geom_bar(stat='identity') +
  #   ylab("Number of fish arrivals per day") +
  #   #xlim(range(daterange$Day)) +
  #   #geom_line(data= daterange2_flow, aes(x = Date, y = parameter_value/500), color = alpha("#947FFF", alpha = 0.5))+
  #   #scale_x_date(date_breaks = "5 days") +
  #   #scale_y_continuous(name = "Number of fish arrivals per day",
  #     # Add a second axis and specify its features
  #   #  sec.axis = sec_axis(~.*500, name="Second Axis")) +
  #   theme_bw() +
  #   theme(panel.border = element_rect(colour = "black", fill=NA))

  
  daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
  plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
    add_bars(x = ~Date, y = ~value, color = ~.) %>%
    add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
                     x=0.01, xanchor="left",
                     y=1.056, yanchor="top",    # Same y as legend below
                     legendtitle=TRUE, showarrow=FALSE ) %>%
    #add_lines(x=~daterange2_flow$Date, y=~daterange2_flow$parameter_value, line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE, inherit=FALSE) %>%
    layout(showlegend = T, 
           barmode = "stack",
           xaxis = list(title = "Date", mirror=T,ticks='outside',showline=T), yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks='outside',showline=T),
          legend = list(orientation = "h",x = 0.34, y = 1.066),
          margin=list(l = 50,r = 100,b = 50,t = 50)
           )



}else{
  plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
  text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
}

2.5 Detections at Benicia Bridge for the duration of tag life



3. Survival and Routing Probability


try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

detects_tower <- detects_study %>% filter(general_location == "TowerBridge")

if(nrow(detects_tower) == 0){
  WR.surv <- data.frame("Release"=NA, "Survival (%)"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA,
                        "95% upper C.I."=NA, "Detection efficiency (%)"=NA)
  colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.",
                         "95% upper C.I.", "Detection efficiency (%)")
  print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS
              survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken
              that route, and therefore this is a minimum estimate of survival") %>%
    kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"),
                  full_width = F, position = "left"))

} else {

  study_count <- nrow(study_tagcodes)

  # Only do survival to Sac for now
  surv <- detects_study %>% filter(river_km > 168 & river_km < 175)

  # calculate mean and SD travel time
  travel <- aggregate(list(first_detect = surv$DateTime_PST), by = list(Release = surv$Release, TagCode = surv$TagCode, RelDT = surv$RelDT), min)
  travel$days <- as.numeric(difftime(travel$first_detect, travel$RelDT, units = "days"))
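  ## Note: difftime(..., units = "days") returns fractional days (e.g. a 36-hour passage is 1.5)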

  travel_final <- aggregate(list(mean_travel_time = travel$days), by = list(Release = travel$Release), mean)
  travel_final <- merge(travel_final, aggregate(list(sd_travel_time = travel$days), by = list(Release = travel$Release), sd))
  travel_final <- merge(travel_final, aggregate(list(n = travel$days), by = list(Release = travel$Release), length))
  travel_final <- rbind(travel_final, data.frame(Release = "ALL", mean_travel_time = mean(travel$days), sd_travel_time = sd(travel$days),n = nrow(travel)))

  # Create inp for survival estimation
  inp <- as.data.frame(reshape2::dcast(surv, TagCode ~ river_km, fun.aggregate = length))

  # Sort columns by river km in descending order
  gen_loc_sites <- ncol(inp)-1 # Count number of genlocs
  if(gen_loc_sites < 2){
    WR.surv <- data.frame("Release"=NA, "Survival (%)"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA,
                          "95% upper C.I."=NA, "Detection efficiency (%)"=NA)
    colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.",
                           "Detection efficiency (%)")
    print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS
                survival model). If Yolo Bypass Weirs are overtopping during migration, fish may
                have taken that route, and therefore this is a minimum estimate of survival") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), 
                        full_width = F,position = "left"))
  } else {
    inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)] %>%
            dplyr::left_join(study_tagcodes, ., by = "TagCode")

    inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)] %>%
            replace(is.na(.), 0) %>%
            replace(., . > 0, 1)

    inp          <- cbind(inp, inp2)
    groups       <- as.character(sort(unique(inp$Release)))
    surv$Release <- factor(surv$Release, levels = groups)
    inp[,groups] <- 0

    for (i in groups) {
      inp[as.character(inp$Release) == i, i] <- 1
    }

    inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
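    ## Worked example (hypothetical fish): if a tag's inp2 row reads 0 1 1 across the
    ## rkm-ordered sites, its capture history becomes "1011" -- the leading "1" is the
    ## release occasion, followed by one digit per downstream detection site.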

    if(length(groups) > 1){
      # Make sure factor levels put a release that has detections first: if the first release
      # in factor order has zero detections, the model goes haywire
      inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1,
                           rel = factor(inp$Release, levels = names(sort(table(surv$Release), decreasing = T))),
                           stringsAsFactors = F)

      WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")

      WR.ddl <- make.design.data(WR.process)

      WR.mark.all <- mark(WR.process, WR.ddl,
                          model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
                          silent = T, output = F)

      WR.mark.rel <- mark(WR.process, WR.ddl,
                          model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)),
                          silent = T, output = F)

      WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
      WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),
                       c("estimate", "se", "lcl", "ucl")] * 100,1))
      WR.surv$Detection_efficiency <- NA
      WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
      WR.surv <- cbind(c("ALL", names(sort(table(surv$Release),decreasing = T))), WR.surv)
    }
    if(length(intersect(colnames(inp),groups)) < 2){
      inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ", 1,sep = "")
      write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
      WRinp <- convert.inp("WRinp.inp")
      WR.process <- process.data(WRinp, model="CJS", begin.time=1)

      WR.ddl <- make.design.data(WR.process)

      WR.mark.all <- mark(WR.process, WR.ddl,
                          model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
                          silent = T, output = F)

      WR.mark.rel <- mark(WR.process, WR.ddl,
                          model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
                          silent = T, output = F)

      WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
      WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),
                                                               c("estimate", "se", "lcl", "ucl")] * 100,1))
      WR.surv$Detection_efficiency <- NA
      WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
      WR.surv <- cbind(c("ALL", groups), WR.surv)
    }

    colnames(WR.surv)[1] <- "Release"
    WR.surv <- merge(WR.surv, travel_final, by = "Release", all.x = T)
    WR.surv$mean_travel_time <- round(WR.surv$mean_travel_time,1)
    WR.surv$sd_travel_time <- round(WR.surv$sd_travel_time,1)
    colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", 
                           "95% upper C.I.", "Detection efficiency (%)", "Mean time to Tower (days)", "SD of time to Tower (days)","Count")


  WR.surv <- WR.surv %>% arrange(., Release)
  print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS
        survival model), and travel time. If Yolo Bypass Weirs are overtopping during migration, fish may have taken 
        that route, and therefore this is a minimum estimate of survival") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), 
                        full_width = F, position = "left"))
  }
}
3.1 Minimum survival to Tower Bridge (using CJS survival model), and travel time. If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival
Release  Survival (%)  SE   95% lower C.I.  95% upper C.I.  Detection efficiency (%)  Mean time to Tower (days)  SD of time to Tower (days)  Count
ALL      10.9          0.7   9.6            12.4            100                        8.6                       11.5                        207
Week 1   75.5          3.0  69.1            81.0            NA                         4.3                        2.0                        151
Week 2    6.0          1.7   3.4            10.3            NA                        47.7                       11.3                         12
Week 3    3.5          1.3   1.7             7.2            NA                        29.6                        9.6                          7
Week 4    4.0          1.4   2.0             7.8            NA                        11.5                        7.4                          8
Week 5    6.0          1.2   4.0             8.9            NA                         8.4                        1.1                         22
Week 6    1.7          0.7   0.7             3.6            NA                        10.0                        1.8                          6
Week 7    0.3          0.3   0.0             1.9            NA                         5.3                        NA                           1
## Once Georgiana Sl receivers are back online, remove "eval = F" from header

try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

route_results_possible <- F
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  results_short <- data.frame("Measure"=NA, "Estimate"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
  colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
  print(kable(results_short, row.names = F, "html", caption = "3.2 Reach-specific survival and probability of entering Georgiana Slough") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
  
  ## Only do survival to Georg split for now
  test2 <- detects_study[detects_study$general_location %in% c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
  
  ## We can only do multistate model if there is at least one detection in each route
  
  if(nrow(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2"),]) == 0 |
     nrow(test2[test2$general_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"),]) == 0){
    results_short <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
    colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
    print(kable(results_short, row.names = F, "html", caption = "3.2 Reach-specific survival and probability of entering Georgiana Slough") %>%
            kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
  }else{
    
    ## Make tagcode character
    study_tagcodes$TagCode <- as.character(study_tagcodes$TagCode)
    ## Make a crosstab query with frequencies for all tag/location combination
    test2$general_location <- factor(test2$general_location, levels = c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"))
    test2$TagCode <- factor(test2$TagCode, levels = study_tagcodes$TagCode)
    mytable <- table(test2$TagCode, test2$general_location) # A will be rows, B will be columns
    
    ## Change all frequencies bigger than 1 to 1. Here you could change your minimum cutoff to 2 detections, and then make another command that changes all detections=1 to 0
    mytable[mytable>0] <- "A"
    
    ## Order in order of rkm
    mytable2 <- mytable[, c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2")]
    
    ## Now sort the crosstab rows alphabetically
    mytable2 <- mytable2[order(row.names(mytable2)),]
    
    mytable2[which(mytable2[, "Sac_BlwGeorgiana"]=="A"), "Sac_BlwGeorgiana"] <- "A"
    mytable2[which(mytable2[, "Sac_BlwGeorgiana2"]=="A"), "Sac_BlwGeorgiana2"] <- "A"
    mytable2[which(mytable2[, "Georgiana_Slough1"]=="A"), "Georgiana_Slough1"] <- "B"
    mytable2[which(mytable2[, "Georgiana_Slough2"]=="A"), "Georgiana_Slough2"] <- "B"
    
    ## Make a crosstab query with frequencies for all weekly Release groups
    #test2$Release <- factor(test2$Release)
    #mytable3 <- table(test2$TagCode, test2$Release) # A will be rows, B will be columns
    
    ## Change all frequencies bigger than 1 to 1. Here you could change your minimum cutoff to 2 detections, and then make another command that changes all detections=1 to 0
    #mytable3[mytable3>0] <- 1
    
    ## Order in order of rkm
    #mytable4 <- mytable3[, order(colnames(mytable3))]
    
    ## Now sort the crosstab rows alphabetically
    #mytable4 <- mytable4[order(row.names(mytable4)),]
    
    ## Now order the study_tagcodes table the same way
    study_tagcodes <- study_tagcodes[order(study_tagcodes$TagCode),]
    
    ## Paste together (concatenate) the data from each column of the crosstab into one string per row, add to tagging_meta.
    ## For this step, make sure both are sorted by FishID
    study_tagcodes$inp_part1 <- apply(mytable2[,1:3],1,paste,collapse="")
    study_tagcodes$inp_partA <- apply(mytable2[,4:5],1,paste,collapse="")
    study_tagcodes$inp_partB <- apply(mytable2[,6:7],1,paste,collapse="")
    #study_tagcodes$inp_group <- apply(mytable4,1,paste,collapse=" ")
    
    ## We need to have a way of picking which route to assign to a fish if it was detected by both georg and blw-georg recvs
    ## We will say that the last detection at that junction is what determines the route it took
    
    ## find last detection at each genloc
    departure <- aggregate(list(depart = test2$DateTime_PST), by = list(TagCode = test2$TagCode, last_location = test2$general_location), FUN = max)
    ## subset for just juncture locations
    departure <- departure[departure$last_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
    ## Find genloc of last known detection per tag
    last_depart <- aggregate(list(depart = departure$depart), by = list(TagCode = departure$TagCode), FUN = max)
    
    last_depart1 <- merge(last_depart, departure)
    study_tagcodes <- merge(study_tagcodes, last_depart1[,c("TagCode", "last_location")], by = "TagCode", all.x = T)
    
    ## Assume that the Sac is default pathway, and for fish that were detected in neither route, it would get a "00" in inp so doesn't matter anyway
    study_tagcodes$inp_final <- paste("A",study_tagcodes$inp_part1, study_tagcodes$inp_partA," 1 ;", sep = "")
    
    ## now put in exceptions...fish that were seen in georgiana last
    study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_final"] <- paste("A",study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_part1"], study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_partB"]," 1 ;", sep = "")
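    ## Worked example (hypothetical tag): with columns ordered (ButteBrRT, TowerBridge, I80-50_Br)
    ## followed by the chosen route pair, a fish detected at TowerBridge and last seen on both
    ## Georgiana lines gets "A" + "0A0" + "BB" + " 1 ;"  ->  "A0A0BB 1 ;"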
    
    ## At this point, some fish might not have been deemed to ever take a route based on last visit analysis. If so, model can't be run
    
    if(any(grepl(pattern = "A", study_tagcodes$inp_final)==T) & any(grepl(pattern = "B", study_tagcodes$inp_final)==T)){
      
      write.table(study_tagcodes$inp_final,"WRinp_multistate.inp",row.names = F, col.names = F, quote = F)
      
      WRinp <- convert.inp("WRinp_multistate.inp")
      
      dp <- process.data(WRinp, model="Multistrata") 
      
      ddl <- make.design.data(dp)
      
      #### p ####
      # Can't be seen at 2B or 3B or 4B (butte, tower or I80)
      ddl$p$fix=NA
      ddl$p$fix[ddl$p$stratum == "B" & ddl$p$time %in% c(2,3,4)]=0
      
      #### Psi ####
      # Only 1 transition allowed:
      # from A (Sacramento) to B (Georgiana) over interval 4 to 5, i.e. at the junction
      
      ddl$Psi$fix=0
      # A to B can only happen at Psi time index 4 (the 4-to-5 interval)
      ddl$Psi$fix[ddl$Psi$stratum=="A"&
                    ddl$Psi$tostratum=="B" & ddl$Psi$time==4]=NA
      
      #### Phi a.k.a. S ####
      ddl$S$fix=NA
      # Route B survival is not separately estimable for reaches 1-4 (fish are still in the
      # mainstem), and reach 5 lies between the two Georgiana lines; fix all five to 1
      ddl$S$fix[ddl$S$stratum=="B" & ddl$S$time %in% c(1,2,3,4,5)]=1
      
      # For route A, fix reach 5 (between the two Blw_Georgiana lines) to 1
      ddl$S$fix[ddl$S$stratum=="A" & ddl$S$time==5]=1
      ## We use -1 at beginning of formula to remove intercept. This is because different routes probably shouldn't share the same intercept
      
      p.timexstratum=list(formula=~-1+stratum:time)
      Psi.stratumxtime=list(formula=~-1+stratum:time)
      S.stratumxtime=list(formula=~-1+stratum:time)
      
      ## Run model a first time
      S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, silent = T, output = F)
      
      ## Identify any non-fixed parameter estimates at the 0/1 boundary, which would likely have bad SE estimates.
      profile.intervals <- which(S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$estimate %in% c(0,1) & !S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$fixed == "Fixed")
      
      ## Rerun model using profile interval estimation for the tricky parameters
      S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, profile.int = profile.intervals, silent = T, output = F)
      
      results <- S.timexstratum.p.timexstratum.Psi.timexstratum$results$real
      
      results_short <- results[rownames(results) %in% c("S sA g1 c1 a0 o1 t1",
                                                        "S sA g1 c1 a1 o2 t2",
                                                        "S sA g1 c1 a2 o3 t3",
                                                        "S sA g1 c1 a3 o4 t4",
                                                        "p sA g1 c1 a1 o1 t2",
                                                        "p sA g1 c1 a2 o2 t3",
                                                        "p sA g1 c1 a3 o3 t4",
                                                        "p sA g1 c1 a4 o4 t5",
                                                        "p sB g1 c1 a4 o4 t5",
                                                        "Psi sA toB g1 c1 a3 o4 t4"
      ),]
      
      
      results_short <- round(results_short[,c("estimate", "se", "lcl", "ucl")] * 100,1)
      
      ## Now find estimate and CIs for AtoA route at junction
      Psilist=get.real(S.timexstratum.p.timexstratum.Psi.timexstratum,"Psi",vcv=TRUE)  
      Psivalues=Psilist$estimates
      
      routes <- TransitionMatrix(Psivalues[Psivalues$time==4 & Psivalues$cohort==1,],vcv.real=Psilist$vcv.real)
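      ## routes$TransitionMat["A","B"] is the estimated probability of routing into Georgiana
      ## Slough (A to B), with CIs in lcl.TransitionMat/ucl.TransitionMat; these feed the
      ## junction figure legends in Section 3.3 below.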
      
      
      
      
      
      results_short$Measure <- c("Survival from release to Butte City","Survival from Butte City to TowerBridge (minimum estimate since fish may have taken Yolo Bypass)", "Survival from TowerBridge to I80-50_Br", "% arrived from I80-50_Br to Georgiana Slough confluence (not survival because fish may have taken Sutter/Steamboat sloughs)","Detection probability at Butte City",
                                 "Detection probability at TowerBridge", "Detection probability at I80-50_Br", "Detection probability at Blw_Georgiana", "Detection probability at Georgiana Slough",
                                 "Routing probability into Georgiana Slough (conditional on fish arriving at the junction)")
      
      results_short <- results_short[,c("Measure", "estimate", "se", "lcl", "ucl")]
      colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
      
      print(kable(results_short, row.names = F, "html", caption = "3.2 Reach-specific survival and probability of entering Georgiana Slough") %>%
              kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
      
      route_results_possible <- T
      
    } else {
      results_short <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
      colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
      print(kable(results_short, row.names = F, "html", caption = "3.2 Reach-specific survival and probability of entering Georgiana Slough") %>%
              kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
    }
  }
}
## Once Georgiana Sl receivers are back online, remove "eval = F" from header
##____________________________________________________________________________
## If you don't have access to local files, uncomment and run next lines of code

#download.file("https://raw.githubusercontent.com/CalFishTrack/real-time/master/data/georg.png",destfile = "georg.png", quiet = T, mode = "wb")

##________________________________________________________________________________


georg <- readPNG("georg.png")
par(mar=c(2,0,0,0))
#Set up the plot area
plot(1:2, type='n', xlab="", ylab="", xaxt = "n", yaxt = "n")

#Get the plot information so the image will fill the plot box, and draw it
lim <- par()
rasterImage(georg, lim$usr[1], lim$usr[3], lim$usr[2], lim$usr[4])
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
    legend(x = 1.55,y = 1.6,legend =  "No detections yet",col = "white", box.col = "light gray", bg = "light gray") 
    legend(x = 1.55,y = 1.45,legend =  "No detections yet",col = "white", box.col = "light gray", bg = "light gray")
}else if (route_results_possible == F){
    legend(x = 1.55,y = 1.6,legend =  "Too few detections",col = "white", box.col = "light gray", bg = "light gray") 
    legend(x = 1.55,y = 1.45,legend =  "Too few detections",col = "white", box.col = "light gray", bg = "light gray") 
}else{  
  legend(x = 1.55,y = 1.6,legend =  paste(round(routes$TransitionMat["A","A"],3)*100,"% (",round(routes$lcl.TransitionMat["A","A"],3)*100,"-",round(routes$ucl.TransitionMat["A","A"],3)*100,")", sep =""),col = "white", box.col = "light gray", bg = "light gray")
  legend(1.55,1.45, legend =  paste(round(routes$TransitionMat["A","B"],3)*100,"% (",round(routes$lcl.TransitionMat["A","B"],3)*100,"-",round(routes$ucl.TransitionMat["A","B"],3)*100,")", sep =""), box.col = "light gray", bg = "light gray")
}  
  mtext(text = "3.3 Routing Probabilities at Georgiana Slough Junction (with 95% C.I.s)", cex = 1.3, side = 1, line = 0.2, adj = 0)
## Once Georgiana Sl receivers are back online, remove "eval = F" from header

if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  plot(1:2, type = "n",xaxt = "n", yaxt = "n", xlab = "Range of days study fish were present at Georgiana Sl Junction", ylab = "Routing probability into Georgiana Slough at the junction")
  text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
}else if (route_results_possible == F){
  plot(1:2, type = "n",xaxt = "n", yaxt = "n", xlab = "Range of days study fish were present at Georgiana Sl Junction", ylab = "Routing probability into Georgiana Slough at the junction")
  text(1.5,1.5, labels = "TOO FEW DETECTIONS", cex = 2)
}else{  
  
  library(repmis)
  
  trytest <- try(source_data("https://code.usgs.gov/crrl_qfes/Enhanced_Acoustic_Telemetry_Project/raw/master/EAT_data_2021.Rdata?raw=True"))
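  ## If the download succeeds, the .Rdata file is expected to load the STARS
  ## model output used below, including the psi_GeoCond data frame of daily
  ## routing predictions (10th/50th/90th percentile columns)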
  
  if (inherits(trytest, "try-error")){
    plot(1:2, type = "n",xaxt = "n", yaxt = "n", xlab = "Range of days study fish were present at Georgiana Sl Junction", ylab = "Routing probability into Georgiana Slough at the junction")
    text(1.5,1.5, labels = "ERROR DOWNLOADING STARS", cex = 2)
  }else{
  
    ## first, find min and max arrivals at georg for a study
    min_georg <- as.Date(format(min(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2","Georgiana_Slough1", "Georgiana_Slough2"),"DateTime_PST"]), "%Y-%m-%d"))
    max_georg <- as.Date(format(max(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2","Georgiana_Slough1", "Georgiana_Slough2"),"DateTime_PST"]), "%Y-%m-%d"))
    
    psi_study <- psi_GeoCond[psi_GeoCond$Date <= max_georg & psi_GeoCond$Date >=min_georg-1,]
    
    plot(psi_study$Date, psi_study$psi_geo.50, ylim = c(0,1), xlim = c(min_georg, max_georg), type = "n", xaxt = "n", xlab = "Range of days study fish were present at Georgiana Sl Junction", ylab = "Routing probability into Georgiana Slough at the junction")
    polygon(c(psi_study$Date, rev(psi_study$Date)), 
            c(psi_study$psi_geo.10,rev(psi_study$psi_geo.90)), density = 200, col ='grey90')
    lines(psi_study$Date, psi_study$psi_geo.50, lty = 3)
    points(mean(psi_study$Date), tail(results_short$Estimate,1)/100, pch = 16, cex = 1.3)
    arrows(mean(psi_study$Date), tail(results_short$`95% lower C.I.`,1)/100, mean(psi_study$Date), tail(results_short$`95% upper C.I.`,1)/100, length=0.05, angle=90, code=3)
    axis(side=1, at=psi_study$Date, labels=format(psi_study$Date, '%b-%d'))
    legend("topright", legend = c('STARS daily predictions during study (w/ 90% CI)', 'Empirical estimate over study period (w/ 95% CI)'), 
           bty = "n",
           col = c("black","black"),
           lty = c(3,1),
           fill = c("grey90", NA),
           border = c(NA,NA),
           pch = c(NA,16),
           seg.len =0.8,
           cex= 1.2
    )
  }
}  
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
  
try(benicia <- read.csv("benicia_surv.csv", stringsAsFactors = F))

detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
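## The study window ends at the earlier of today and 1.5x the advertised tag
## life after release, after which any remaining tags are assumed expired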

if (nrow(detects_benicia) == 0){
  if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
    WR.surv <- data.frame("Release"="ALL", "estimate"=0, "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
  }else{
    WR.surv <- data.frame("Release"=NA, "estimate"="NO DETECTIONS YET", "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
  }
  WR.surv1 <- WR.surv
  colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
  print(kable(WR.surv1, row.names = F, "html", caption = "3.5 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))  
}else if (length(table(detects_benicia$general_location)) == 1){
  if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
    WR.surv <- data.frame("Release"="ALL", "estimate"=round(length(unique(detects_benicia$TagCode))/length(unique(detects_study$TagCode))*100,1), "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
  }else{
    WR.surv <- data.frame("Release"=NA, "estimate"="NOT ENOUGH DETECTIONS", "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
  }
    WR.surv1 <- WR.surv
    colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
    print(kable(WR.surv1, row.names = F, "html", caption = "3.5 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {  
  ## Only do survival to Benicia here
  test3 <- detects_study[which(detects_study$river_km < 53),]
  
  ## Create inp for survival estimation
  
  inp <- as.data.frame(reshape2::dcast(test3, TagCode ~ river_km, fun.aggregate = length))
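  ## dcast makes one row per detected TagCode and one column per river_km,
  ## with each cell counting that fish's detections at that receiver site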
  
  ## Sort columns by river km in descending order
  # Count number of genlocs
  gen_loc_sites <- ncol(inp)-1
  
  inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]

  inp <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
  
  inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
  inp2[is.na(inp2)] <- 0
  inp2[inp2 > 0] <- 1
  
  inp <- cbind(inp, inp2)
  groups <- as.character(sort(unique(inp$Release)))
  groups_w_detects <- names(table(test3$Release))

  inp[,groups] <- 0
  for (i in groups) {
    inp[as.character(inp$Release) == i, i] <- 1
  }

  inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
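  ## inp_final is a standard capture history: a leading 1 for release followed
  ## by one 0/1 digit per receiver site in downstream order, e.g. "110" for a
  ## fish detected at the first Benicia line but not the second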
  
  
  if(length(groups) > 1){
  ## Make sure the first factor level is a release group with detections: if the first release in factor order has zero detections, the model goes haywire
    inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = inp$Release, stringsAsFactors = F)

    WR.process <- process.data(inp.df, model="CJS", begin.time=1) 
    
    WR.ddl <- make.design.data(WR.process)
    
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    
    inp.df <- inp.df[inp.df$rel %in% groups_w_detects,]
    inp.df$rel <- factor(inp.df$rel, levels = groups_w_detects)
    if(length(groups_w_detects) > 1){
      WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel") 
    
      WR.ddl <- make.design.data(WR.process)
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
    }else{  
      WR.process <- process.data(inp.df, model="CJS", begin.time=1) 
    
      WR.ddl <- make.design.data(WR.process)
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    }
    WR.surv <- cbind(Release = "ALL",round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
    WR.surv.rel <- cbind(Release = groups_w_detects, round(WR.mark.rel$results$real[seq(from=1,to=length(groups_w_detects)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
    WR.surv.rel <- merge(WR.surv.rel, data.frame(Release = groups), all.y = T)
    WR.surv.rel[is.na(WR.surv.rel$estimate),"estimate"] <- 0
    WR.surv <- rbind(WR.surv, WR.surv.rel)
    
  }else{
    inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, stringsAsFactors = F)

    WR.process <- process.data(inp.df, model="CJS", begin.time=1) 
    
      
    WR.ddl <- make.design.data(WR.process)
    
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)

    WR.surv <- cbind(Release = c("ALL", groups),round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
    
  }
  WR.surv$Detection_efficiency <- NA
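  ## In the real-estimate table, the gen_loc_sites Phi rows come first, so row
  ## gen_loc_sites + 1 is the first detection probability, i.e. detection
  ## efficiency at the first Benicia line (East Span)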
  WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)

  WR.surv1 <- WR.surv
  colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")

  print(kable(WR.surv1, row.names = F, "html", caption = "3.5 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))    
  
}
3.5 Minimum survival to Benicia Bridge East Span (using CJS survival model)

Release Group | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%)
ALL | 2.5 | 0.4 | 1.9 | 3.3 | 88.6
Week 1 | 17.1 | 2.7 | 12.5 | 23.0 | NA
Week 2 | 0.5 | 0.5 | 0.1 | 3.5 | NA
Week 3 | 1.6 | 0.9 | 0.5 | 4.7 | NA
Week 4 | 0.5 | 0.5 | 0.1 | 3.5 | NA
Week 5 | 1.9 | 0.7 | 0.9 | 3.9 | NA
Week 6 | 0.3 | 0.3 | 0.0 | 1.9 | NA
Week 7 | 0.0 | NA | NA | NA | NA
if(exists("benicia")==T & is.numeric(WR.surv1[1,2])){
  ## Find mean release time per release group, and ALL
  reltimes <- aggregate(list(RelDT = study_tagcodes$release_time), by = list(Release = study_tagcodes$Release), FUN = mean)
  reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$release_time)))

  ## Assign whether the results are tentative or final
  quality <- "tentative"
  if(endtime < as.Date(format(Sys.time(), "%Y-%m-%d"))) { quality <- "final"}

  WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
  
  WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = '1970-01-01')
  benicia$RelDT <- as.POSIXct(benicia$RelDT)
  ## remove old benicia record for this studyID
  benicia <- benicia[!benicia$StudyID == unique(detects_study$Study_ID),]
  
  benicia <- rbind(benicia, data.frame(WR.surv, StudyID = unique(detects_study$Study_ID), data_quality = quality))
  
  write.csv(benicia, "benicia_surv.csv", row.names = F, quote = F) 
}
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

try(Delta <- read.csv("Delta_surv.csv", stringsAsFactors = F))

if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
    WR.surv1 <- data.frame("Measure"=NA, "Estimate"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
    colnames(WR.surv1) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
    print(kable(WR.surv1, row.names = F, "html", caption = "3.6 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
            kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}else{
  
  test4 <- detects_study[detects_study$general_location %in% c("TowerBridge", "I80-50_Br", "Benicia_west", "Benicia_east"),]
 
  if(nrow(test4[test4$general_location =="Benicia_west",]) == 0 |
     nrow(test4[test4$general_location =="Benicia_east",]) == 0){
    WR.surv1 <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
    colnames(WR.surv1) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
    print(kable(WR.surv1, row.names = F, "html", caption = "3.6 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
            kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))

  }else{
  
  inp <- as.data.frame(reshape2::dcast(test4, TagCode ~ general_location, fun.aggregate = length))
  
  ## add together detections at Tower and I80 to ensure good detection entering Delta
  if("I80-50_Br" %in% colnames(inp) & "TowerBridge" %in% colnames(inp)){
    inp$`I80-50_Br` <- inp$`I80-50_Br` + inp$TowerBridge
  }else if("TowerBridge" %in% colnames(inp)){
    inp$`I80-50_Br` <- inp$TowerBridge
  }
  ## Keep columns in descending river km order (I80-50_Br, then Benicia East
  ## before Benicia West); this also drops TowerBridge, which is no longer
  ## needed now that it has been pooled into I80-50_Br
  inp <- inp[,c("TagCode","I80-50_Br", "Benicia_east", "Benicia_west")]
  
  # Count number of genlocs
  gen_loc_sites <- ncol(inp)-1

  inp <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
  
  inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
  inp2[is.na(inp2)] <- 0
  inp2[inp2 > 0] <- 1
  
  inp <- cbind(inp, inp2)
  groups <- as.character(sort(unique(inp$Release)))
  groups_w_detects <- names(table(detects_study[which(detects_study$river_km < 53),"Release"]))

  inp[,groups] <- 0
  for (i in groups) {
    inp[as.character(inp$Release) == i, i] <- 1
  }

  inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
  
  
  
  if(length(groups) > 1){
  ## Make sure the first factor level is a release group with detections: if the first release in factor order has zero detections, the model goes haywire
    inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = inp$Release, stringsAsFactors = F)

    WR.process <- process.data(inp.df, model="CJS", begin.time=1) 
    
    WR.ddl <- make.design.data(WR.process)
    
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    
    inp.df <- inp.df[inp.df$rel %in% groups_w_detects,]
    inp.df$rel <- factor(inp.df$rel, levels = groups_w_detects)
    if(length(groups_w_detects) > 1){
      WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel") 
    
      WR.ddl <- make.design.data(WR.process)
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
    }else{  
      WR.process <- process.data(inp.df, model="CJS", begin.time=1) 
    
      WR.ddl <- make.design.data(WR.process)
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    }
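    ## With detection sites ordered Sacramento entry (TowerBridge pooled into
    ## I80-50_Br) followed by the two Benicia lines, the 2nd real Phi estimate
    ## is survival over the Sacramento-to-Benicia interval, i.e. the
    ## through-Delta survival reported here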
    WR.surv <- cbind(Release = "ALL",round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1))
    WR.surv.rel <- cbind(Release = groups_w_detects, round(WR.mark.rel$results$real[seq(from=2,to=length(groups_w_detects)*3,by = 3),c("estimate", "se", "lcl", "ucl")] * 100,1))
    WR.surv.rel <- merge(WR.surv.rel, data.frame(Release = groups), all.y = T)
    WR.surv.rel[is.na(WR.surv.rel$estimate),"estimate"] <- 0
    WR.surv <- rbind(WR.surv, WR.surv.rel)
    
  }else{
    inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, stringsAsFactors = F)

    WR.process <- process.data(inp.df, model="CJS", begin.time=1) 
    
      
    WR.ddl <- make.design.data(WR.process)
    
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)

    WR.surv <- cbind(Release = c("ALL", groups),round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1))
    
  }


  WR.surv1 <- WR.surv
  colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.")

  print(kable(WR.surv1, row.names = F, "html", caption = "3.6 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))  
  
  if(exists("Delta")==T & is.numeric(WR.surv1[1,2])){
    reltimes <- aggregate(list(RelDT = study_tagcodes$release_time), by = list(Release = study_tagcodes$Release), FUN = mean)
    reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$release_time)))
  
    WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
  
    WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = '1970-01-01')
    
    Delta$RelDT <- as.POSIXct(Delta$RelDT)
    ## remove old Delta record for this studyID
    Delta <- Delta[!Delta$StudyID %in% unique(detects_study$Study_ID),]
    
    Delta <- rbind(Delta, data.frame(WR.surv, StudyID = unique(detects_study$Study_ID), data_quality = quality))
    
    write.csv(Delta, "Delta_surv.csv", row.names = F, quote = F) 
  }
  }
}
3.6 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)

Release Group | Survival (%) | SE | 95% lower C.I. | 95% upper C.I.
ALL | 22.9 | 2.9 | 17.6 | 29.1
Week 1 | 22.7 | 3.4 | 16.7 | 30.1
Week 2 | 8.3 | 8.0 | 1.2 | 41.3
Week 3 | 42.9 | 18.7 | 14.4 | 77.0
Week 4 | 12.5 | 11.7 | 1.7 | 53.7
Week 5 | 32.2 | 10.0 | 16.1 | 53.9
Week 6 | 16.7 | 15.2 | 2.3 | 63.1
Week 7 | 0.0 | NA | NA | NA



4. Detection statistics at all real-time receivers


try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  "No detections yet"
} else {

  arrivals <- aggregate(list(DateTime_PST = detects_study$DateTime_PST), by = list(general_location = detects_study$general_location, TagCode = detects_study$TagCode), FUN = min)
  
  tag_stats <- aggregate(list(First_arrival = arrivals$DateTime_PST), 
                         by= list(general_location = arrivals$general_location),
                         FUN = min)
  tag_stats <- merge(tag_stats, 
                     aggregate(list(Mean_arrival = arrivals$DateTime_PST), 
                         by= list(general_location = arrivals$general_location),
                         FUN = mean), 
                     by = c("general_location"))
  tag_stats <- merge(tag_stats, 
                     aggregate(list(Last_arrival = arrivals$DateTime_PST), 
                         by= list(general_location = arrivals$general_location),
                         FUN = max), 
                     by = c("general_location"))
  tag_stats <- merge(tag_stats, 
                     aggregate(list(Fish_count = arrivals$TagCode), 
                         by= list(general_location = arrivals$general_location), 
                         FUN = function(x) {length(unique(x))}), 
                     by = c("general_location"))
  tag_stats$Percent_arrived <- round(tag_stats$Fish_count/nrow(study_tagcodes) * 100,2)
      
  tag_stats <- merge(tag_stats, unique(detects_study[,c("general_location", "river_km")]))
  
  tag_stats <- tag_stats[order(tag_stats$river_km, decreasing = T),]
  
  tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")
  
  tag_stats <- tag_stats[is.na(tag_stats$First_arrival)==F,]

  print(kable(tag_stats, row.names = F, 
              caption = "4.1 Detections for all releases combined",
              "html") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
  
  for (j in sort(unique(study_tagcodes$Release))) {
    
    if(nrow(detects_study[detects_study$Release == j,]) > 0 ) {
    
      temp <- detects_study[detects_study$Release == j,]
      
        arrivals1 <- aggregate(list(DateTime_PST = temp$DateTime_PST), by = list(general_location = temp$general_location, TagCode = temp$TagCode), FUN = min)
  
      rel_count <- nrow(study_tagcodes[study_tagcodes$Release == j,])
  
      tag_stats1 <- aggregate(list(First_arrival = arrivals1$DateTime_PST), 
                             by= list(general_location = arrivals1$general_location), 
                             FUN = min)
      tag_stats1 <- merge(tag_stats1, 
                         aggregate(list(Mean_arrival = arrivals1$DateTime_PST), 
                             by= list(general_location = arrivals1$general_location), 
                             FUN = mean), 
                         by = c("general_location"))
      tag_stats1 <- merge(tag_stats1, 
                   aggregate(list(Last_arrival = arrivals1$DateTime_PST), 
                       by= list(general_location = arrivals1$general_location), 
                       FUN = max), 
                   by = c("general_location"))
      tag_stats1 <- merge(tag_stats1, 
                         aggregate(list(Fish_count = arrivals1$TagCode), 
                                   by= list(general_location = arrivals1$general_location), 
                                   FUN = function(x) {length(unique(x))}), 
                         by = c("general_location"))
      
      tag_stats1$Percent_arrived <- round(tag_stats1$Fish_count/rel_count * 100,2)
    
      tag_stats1 <- merge(tag_stats1, unique(detects_study[,c("general_location", "river_km")]))
    
      tag_stats1 <- tag_stats1[order(tag_stats1$river_km, decreasing = T),]
      
      tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")
      
      tag_stats1 <- tag_stats1[is.na(tag_stats1$First_arrival)==F,]
      
      final_stats <- kable(tag_stats1, row.names = F, 
            caption = paste("4.2 Detections for the",j,"release group", sep = " "),
            "html")
      
      print(kable_styling(final_stats, bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
      
    } else {
      cat("\n\n\\pagebreak\n")
      print(paste("No detections for",j,"release group yet", sep=" "), quote = F)
      cat("\n\n\\pagebreak\n")
    }
  }
}
4.1 Detections for all releases combined

general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km
Blw_Salt_RT | 2022-02-02 11:47:03 | 2022-04-05 15:25:09 | 2022-05-13 02:33:23 | 1560 | 81.98 | 457.000
Abv_Otter_Island | 2022-03-05 06:28:51 | 2022-04-22 00:09:20 | 2022-05-16 12:15:34 | 706 | 37.10 | 419.600
ButteBrRT | 2021-12-16 15:25:37 | 2022-02-22 07:54:48 | 2022-05-16 02:38:11 | 239 | 12.56 | 344.108
MeridianBr | 2022-04-14 18:50:37 | 2022-05-03 14:07:42 | 2022-05-16 19:41:27 | 47 | 2.47 | 290.848
TowerBridge | 2021-12-18 09:02:31 | 2022-01-18 19:39:18 | 2022-05-16 16:30:17 | 207 | 10.88 | 172.000
I80-50_Br | 2021-12-18 09:28:42 | 2022-01-17 03:36:37 | 2022-05-16 16:58:42 | 202 | 10.61 | 170.748
Old River | 2021-12-25 08:31:55 | 2021-12-25 19:37:45 | 2021-12-26 06:43:36 | 2 | 0.11 | 153.001
Holland_Cut_Quimby | 2021-12-23 05:27:56 | 2021-12-23 07:11:02 | 2021-12-23 08:54:08 | 2 | 0.11 | 145.000
Sac_BlwGeorgiana | 2021-12-19 10:21:42 | 2022-01-12 02:49:39 | 2022-05-14 01:18:38 | 80 | 4.20 | 119.058
Sac_BlwGeorgiana2 | 2021-12-19 10:43:09 | 2022-01-11 06:07:43 | 2022-05-14 02:09:44 | 77 | 4.05 | 118.398
Benicia_east | 2021-12-25 12:16:42 | 2022-01-29 22:31:57 | 2022-05-15 18:56:21 | 42 | 2.21 | 52.240
Benicia_west | 2021-12-25 12:22:13 | 2022-01-28 14:40:07 | 2022-05-15 19:03:17 | 44 | 2.31 | 52.040

4.2 Detections for the Week 1 release group

general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km
ButteBrRT | 2021-12-16 15:25:37 | 2021-12-17 13:25:07 | 2021-12-28 10:26:04 | 89 | 44.5 | 344.108
TowerBridge | 2021-12-18 09:02:31 | 2021-12-20 05:48:20 | 2021-12-31 04:26:59 | 151 | 75.5 | 172.000
I80-50_Br | 2021-12-18 09:28:42 | 2021-12-20 05:35:05 | 2021-12-31 05:10:22 | 150 | 75.0 | 170.748
Old River | 2021-12-25 08:31:55 | 2021-12-25 19:37:45 | 2021-12-26 06:43:36 | 2 | 1.0 | 153.001
Holland_Cut_Quimby | 2021-12-23 05:27:56 | 2021-12-23 07:11:02 | 2021-12-23 08:54:08 | 2 | 1.0 | 145.000
Sac_BlwGeorgiana | 2021-12-19 10:21:42 | 2021-12-22 03:06:20 | 2022-01-07 06:35:35 | 65 | 32.5 | 119.058
Sac_BlwGeorgiana2 | 2021-12-19 10:43:09 | 2021-12-21 22:31:22 | 2022-01-07 08:06:29 | 63 | 31.5 | 118.398
Benicia_east | 2021-12-25 12:16:42 | 2021-12-30 21:59:28 | 2022-02-18 18:10:51 | 30 | 15.0 | 52.240
Benicia_west | 2021-12-25 12:22:13 | 2021-12-30 20:27:42 | 2022-02-18 18:13:36 | 32 | 16.0 | 52.040

4.2 Detections for the Week 2 release group

general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km
Blw_Salt_RT | 2022-02-02 11:47:03 | 2022-02-03 03:28:22 | 2022-02-13 13:13:08 | 188 | 94.0 | 457.000
Abv_Otter_Island | 2022-03-06 06:46:59 | 2022-03-06 06:46:59 | 2022-03-06 06:46:59 | 1 | 0.5 | 419.600
ButteBrRT | 2022-02-04 03:02:00 | 2022-02-25 04:04:43 | 2022-04-05 10:44:30 | 34 | 17.0 | 344.108
TowerBridge | 2022-03-11 15:25:49 | 2022-03-22 16:24:12 | 2022-04-16 15:12:20 | 12 | 6.0 | 172.000
I80-50_Br | 2022-03-11 16:15:53 | 2022-03-20 11:46:27 | 2022-04-09 18:28:52 | 11 | 5.5 | 170.748
Sac_BlwGeorgiana | 2022-03-13 19:16:56 | 2022-03-30 11:51:45 | 2022-05-03 16:29:34 | 4 | 2.0 | 119.058
Sac_BlwGeorgiana2 | 2022-03-13 19:40:15 | 2022-03-30 11:21:07 | 2022-05-03 09:18:13 | 4 | 2.0 | 118.398
Benicia_east | 2022-03-18 18:16:25 | 2022-03-18 18:16:25 | 2022-03-18 18:16:25 | 1 | 0.5 | 52.240
Benicia_west | 2022-03-18 18:18:36 | 2022-03-18 18:18:36 | 2022-03-18 18:18:36 | 1 | 0.5 | 52.040

4.2 Detections for the Week 3 release group

general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km
Blw_Salt_RT | 2022-02-23 11:58:17 | 2022-02-25 02:51:17 | 2022-04-15 16:33:09 | 170 | 85.0 | 457.000
Abv_Otter_Island | 2022-03-05 06:28:51 | 2022-03-06 07:46:23 | 2022-03-10 11:30:23 | 19 | 9.5 | 419.600
ButteBrRT | 2022-02-26 20:13:28 | 2022-03-09 13:44:10 | 2022-03-20 09:28:08 | 13 | 6.5 | 344.108
MeridianBr | 2022-04-15 00:12:56 | 2022-04-15 00:12:56 | 2022-04-15 00:12:56 | 1 | 0.5 | 290.848
TowerBridge | 2022-03-14 08:16:17 | 2022-03-25 10:55:08 | 2022-04-14 18:06:27 | 7 | 3.5 | 172.000
I80-50_Br | 2022-03-14 08:53:23 | 2022-03-25 11:23:32 | 2022-04-14 18:38:57 | 6 | 3.0 | 170.748
Sac_BlwGeorgiana | 2022-03-16 06:36:08 | 2022-03-30 04:26:34 | 2022-04-16 09:44:02 | 3 | 1.5 | 119.058
Sac_BlwGeorgiana2 | 2022-03-16 06:52:33 | 2022-03-30 04:49:06 | 2022-04-16 09:57:08 | 3 | 1.5 | 118.398
Benicia_east | 2022-03-27 14:52:51 | 2022-04-05 05:14:20 | 2022-04-19 07:27:38 | 3 | 1.5 | 52.240
Benicia_west | 2022-03-27 23:45:05 | 2022-03-29 08:35:04 | 2022-03-30 17:25:03 | 2 | 1.0 | 52.040

4.2 Detections for the Week 4 release group

general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km
Blw_Salt_RT | 2022-03-17 10:24:26 | 2022-03-19 06:26:23 | 2022-05-07 19:50:05 | 178 | 89.0 | 457.000
Abv_Otter_Island | 2022-03-17 21:45:24 | 2022-03-19 12:26:03 | 2022-04-24 07:49:18 | 113 | 56.5 | 419.600
ButteBrRT | 2022-03-20 05:00:21 | 2022-03-23 16:45:28 | 2022-04-07 09:56:14 | 33 | 16.5 | 344.108
MeridianBr | 2022-04-16 18:39:27 | 2022-04-16 18:39:27 | 2022-04-16 18:39:27 | 1 | 0.5 | 290.848
TowerBridge | 2022-03-23 23:19:29 | 2022-03-29 05:59:06 | 2022-04-15 21:32:40 | 8 | 4.0 | 172.000
I80-50_Br | 2022-03-24 00:05:48 | 2022-03-29 07:34:59 | 2022-04-15 22:05:45 | 8 | 4.0 | 170.748
Sac_BlwGeorgiana | 2022-03-31 06:33:22 | 2022-03-31 06:33:22 | 2022-03-31 06:33:22 | 1 | 0.5 | 119.058
Sac_BlwGeorgiana2 | 2022-03-31 06:48:48 | 2022-03-31 06:48:48 | 2022-03-31 06:48:48 | 1 | 0.5 | 118.398
Benicia_east | 2022-04-03 20:51:01 | 2022-04-03 20:51:01 | 2022-04-03 20:51:01 | 1 | 0.5 | 52.240
Benicia_west | 2022-04-03 21:00:47 | 2022-04-03 21:00:47 | 2022-04-03 21:00:47 | 1 | 0.5 | 52.040

4.2 Detections for the Week 5 release group

general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km
Blw_Salt_RT | 2022-04-07 09:55:15 | 2022-04-08 00:54:53 | 2022-04-09 20:46:53 | 336 | 91.55 | 457.000
Abv_Otter_Island | 2022-04-08 04:01:36 | 2022-04-09 06:56:37 | 2022-04-11 16:32:10 | 149 | 40.60 | 419.600
ButteBrRT | 2022-04-10 08:32:26 | 2022-04-13 06:06:48 | 2022-04-23 08:55:19 | 22 | 5.99 | 344.108
MeridianBr | 2022-04-14 18:50:37 | 2022-04-17 00:08:13 | 2022-04-24 11:44:48 | 8 | 2.18 | 290.848
TowerBridge | 2022-04-14 17:36:53 | 2022-04-16 03:42:39 | 2022-04-18 06:04:54 | 22 | 5.99 | 172.000
I80-50_Br | 2022-04-14 18:16:32 | 2022-04-16 04:50:43 | 2022-04-18 07:02:00 | 22 | 5.99 | 170.748
Sac_BlwGeorgiana | 2022-04-17 10:27:44 | 2022-04-19 06:08:22 | 2022-04-22 17:00:44 | 4 | 1.09 | 119.058
Sac_BlwGeorgiana2 | 2022-04-17 10:44:40 | 2022-04-19 11:56:30 | 2022-04-22 17:14:50 | 3 | 0.82 | 118.398
Benicia_east | 2022-04-19 10:12:21 | 2022-04-21 03:28:35 | 2022-04-22 10:32:31 | 6 | 1.63 | 52.240
Benicia_west | 2022-04-19 10:16:13 | 2022-04-21 08:26:33 | 2022-04-22 11:08:59 | 7 | 1.91 | 52.040

4.2 Detections for the Week 6 release group

general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km
Blw_Salt_RT | 2022-04-27 11:09:49 | 2022-04-28 05:19:02 | 2022-04-30 19:06:06 | 322 | 89.20 | 457.000
Abv_Otter_Island | 2022-04-28 03:15:26 | 2022-04-30 07:37:45 | 2022-05-06 06:06:18 | 178 | 49.31 | 419.600
ButteBrRT | 2022-05-01 05:19:26 | 2022-05-03 09:03:45 | 2022-05-09 20:38:58 | 29 | 8.03 | 344.108
MeridianBr | 2022-05-02 10:45:57 | 2022-05-04 21:04:35 | 2022-05-14 17:44:21 | 26 | 7.20 | 290.848
TowerBridge | 2022-05-05 13:41:22 | 2022-05-08 10:06:34 | 2022-05-09 15:13:45 | 6 | 1.66 | 172.000
I80-50_Br | 2022-05-09 12:50:17 | 2022-05-09 15:21:43 | 2022-05-09 19:00:04 | 4 | 1.11 | 170.748
Sac_BlwGeorgiana | 2022-05-06 13:38:21 | 2022-05-11 01:29:12 | 2022-05-14 01:18:38 | 3 | 0.83 | 119.058
Sac_BlwGeorgiana2 | 2022-05-06 13:48:18 | 2022-05-11 02:09:33 | 2022-05-14 02:09:44 | 3 | 0.83 | 118.398
Benicia_east | 2022-05-15 18:56:21 | 2022-05-15 18:56:21 | 2022-05-15 18:56:21 | 1 | 0.28 | 52.240
Benicia_west | 2022-05-15 19:03:17 | 2022-05-15 19:03:17 | 2022-05-15 19:03:17 | 1 | 0.28 | 52.040

4.2 Detections for the Week 7 release group

general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km
Blw_Salt_RT | 2022-05-11 10:16:49 | 2022-05-11 23:24:17 | 2022-05-13 02:33:23 | 366 | 97.60 | 457.000
Abv_Otter_Island | 2022-05-11 21:56:24 | 2022-05-12 20:57:58 | 2022-05-16 12:15:34 | 246 | 65.60 | 419.600
ButteBrRT | 2022-05-13 20:17:22 | 2022-05-14 20:29:49 | 2022-05-16 02:38:11 | 19 | 5.07 | 344.108
MeridianBr | 2022-05-14 19:17:13 | 2022-05-15 19:38:35 | 2022-05-16 19:41:27 | 11 | 2.93 | 290.848
TowerBridge | 2022-05-16 16:30:17 | 2022-05-16 16:30:17 | 2022-05-16 16:30:17 | 1 | 0.27 | 172.000
I80-50_Br | 2022-05-16 16:58:42 | 2022-05-16 16:58:42 | 2022-05-16 16:58:42 | 1 | 0.27 | 170.748


4.3 Fish arrivals per day (“NA” means receivers were non-operational)

try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))

## THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
  "No detections yet"
} else {
  arrivals <- aggregate(list(DateTime_PST = detects_study$DateTime_PST), by = list(general_location = detects_study$general_location, TagCode = detects_study$TagCode), FUN = min)
    
  beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F)
  beacon_by_day$day <- as.Date(beacon_by_day$day)
  
  gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
  
  arrivals$day <- as.Date(format(arrivals$DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8"))
  
  arrivals_per_day <- aggregate(list(New_arrivals = arrivals$TagCode), by = list(day = arrivals$day, general_location = arrivals$general_location), length)
  arrivals_per_day$day <- as.Date(arrivals_per_day$day)

  ## Now subset to only look at data for the correct beacon for that day
  beacon_by_day <- as.data.frame(beacon_by_day[which(beacon_by_day$TagCode == beacon_by_day$beacon),])
  
  endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
  ## Now only keep beacon by day for days since fish were released
  beacon_by_day <- beacon_by_day[beacon_by_day$day >= as.Date(min(study_tagcodes$release_time)) & beacon_by_day$day <= endtime,]  
  
  beacon_by_day <- merge(beacon_by_day, gen_locs[,c("location", "general_location","rkm")], by = "location", all.x = T)

  arrivals_per_day <- merge(unique(beacon_by_day[,c("general_location", "day", "rkm")]), arrivals_per_day, all.x = T, by = c("general_location", "day"))
  
  arrivals_per_day$day <- factor(arrivals_per_day$day)
  
  ## Remove bench test and other NA locations
  arrivals_per_day <- arrivals_per_day[!arrivals_per_day$general_location == "Bench_test",]
  arrivals_per_day <- arrivals_per_day[is.na(arrivals_per_day$general_location) == F,]
  
  ## Change order of data to plot decreasing river_km
  arrivals_per_day <- arrivals_per_day[order(arrivals_per_day$rkm, decreasing = T),]
  arrivals_per_day$general_location <- factor(arrivals_per_day$general_location, unique(arrivals_per_day$general_location))
  
  # 
  # ggplot(data=arrivals_per_day, aes(x=general_location, y=fct_rev(as_factor(day)))) +
  # geom_tile(fill = "lightgray", color = "black") + 
  # geom_text(aes(label=New_arrivals)) +
  # labs(x="General Location", y = "Date") +
  # theme(panel.background = element_blank(), axis.text.x = element_text(angle = 90, hjust = 1))
  ## Cross-tabulate new arrivals by day and location: cells are NA where a
  ## receiver was operational but logged no new arrivals, and 0 where no
  ## receiver was operational that day
  crosstab <- xtabs(formula = arrivals_per_day$New_arrivals ~ arrivals_per_day$day + arrivals_per_day$general_location, addNA = T)
  ## Display operational zero-arrival cells as blanks and non-operational
  ## cells as NA, to match the table caption
  crosstab[is.na(crosstab)] <- ""
  crosstab[crosstab==0] <- NA
  crosstab <- as.data.frame.matrix(crosstab)

 kable(crosstab, align = "c") %>%
  kable_styling(c("striped", "condensed"), font_size = 11, full_width = F, position = "left") %>%
  #row_spec(0, angle = -45) %>%
  column_spec(column = 1:ncol(crosstab),width_min = "50px",border_left = T, border_right = T) %>%
  column_spec(1, bold = T, width_min = "75px")%>%
  scroll_box(height = "700px")
}
[Table 4.3: counts of newly arriving study fish per day, for each day from
2021-12-15 through 2022-08-26, at each real-time receiver: Blw_Salt_RT,
Abv_Otter_Island, ButteBrRT, MeridianBr, TowerBridge, I80-50_Br, Old River,
MiddleRiver, Clifton_Court_US_Radial_Gates, Holland_Cut_Quimby, CVP_Tank,
CVP_Trash_Rack_1, Clifton_Court_Intake_Canal, Old_River_Quimby,
Sac_BlwGeorgiana, Sac_BlwGeorgiana2, Benicia_east, Benicia_west. Blank cells
indicate an operational receiver with no new arrivals; "NA" indicates a
non-operational receiver.]
## Clear the workspace and delete the temporary files created by RMark/MARK
rm(list = ls())
cleanup(ask = F)



For questions or comments, please contact