Central Valley Enhanced Acoustic Tagging Project






Hatchery-origin winter-run Chinook salmon

2018-2019 Season (PROVISIONAL DATA)



1. Project Status


The study is complete and all tags are no longer active. All times are in Pacific Standard Time.
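
Note that the code below parses timestamps with tz = "Etc/GMT+8". In POSIX zone names the sign is inverted, so "Etc/GMT+8" is fixed UTC-8, i.e. Pacific Standard Time year-round with no daylight-saving jumps. A quick check (the example timestamp is arbitrary):

## "Etc/GMT+8" = fixed UTC-8 (PST), even in summer
x <- as.POSIXct("2019-07-01 12:00:00", tz = "Etc/GMT+8")
format(x, tz = "UTC")  # "2019-07-01 20:00:00": the offset stays 8 h even in July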


The Telemetry Study Template for this study can be found here

setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

library(knitr)       ## for kable()
library(kableExtra)  ## for kable_styling() and %>%

tagcodes <- read.csv("qry_HexCodes.txt", stringsAsFactors = F, colClasses=c("TagID_Hex"="character"))

tagcodes$RelDT <- as.POSIXct(tagcodes$RelDT, format = "%m/%d/%Y %I:%M:%S %p", tz = "Etc/GMT+8")
latest <- read.csv("latest_download.csv", stringsAsFactors = F)

study_tagcodes <- tagcodes[tagcodes$StudyID == "Winter_H_2019",]
 

if (nrow(study_tagcodes) == 0){
  cat("Project has not yet begun")
}else{
  cat(paste("Project began on ", min(study_tagcodes$RelDT), ", see tagging details below:", sep = ""))

  study_tagcodes$Release <- "Week 1"
  study_tagcodes[study_tagcodes$RelDT < as.POSIXct("2018-05-12"), "Release"] <- "Week 1"
  study_tagcodes[study_tagcodes$RelDT > as.POSIXct("2018-05-12") & study_tagcodes$RelDT < as.POSIXct("2018-05-20"), "Release"] <- "Week 2"
  study_tagcodes[study_tagcodes$RelDT > as.POSIXct("2018-05-20") & study_tagcodes$RelDT < as.POSIXct("2018-05-24"), "Release"] <- "Week 3"
  study_tagcodes[study_tagcodes$RelDT > as.POSIXct("2018-05-24") & study_tagcodes$RelDT < as.POSIXct("2018-06-01"), "Release"] <- "Week 4"
  study_tagcodes[study_tagcodes$RelDT > as.POSIXct("2018-06-01") & study_tagcodes$RelDT < as.POSIXct("2018-06-07"), "Release"] <- "Week 5"
  
  
  release_stats <- aggregate(list(First_release_time = study_tagcodes$RelDT),
                             by= list(Release = study_tagcodes$Release),
                             FUN = min)
  release_stats <- merge(release_stats,
                         aggregate(list(Last_release_time = study_tagcodes$RelDT),
                             by= list(Release = study_tagcodes$Release),
                             FUN = max),
                         by = c("Release"))
  
                             
  release_stats <- merge(release_stats, aggregate(list(Number_fish_released =
                                                         study_tagcodes$TagID_Hex),
                             by= list(Release = study_tagcodes$Release),
                             FUN = function(x) {length(unique(x))}),
                         by = c("Release"))
  
  release_stats <- merge(release_stats,
                         aggregate(list(Release_location = study_tagcodes$Rel_loc),
                             by= list(Release = study_tagcodes$Release),
                             FUN = function(x) {head(x,1)}),
                         by = c("Release"))
  release_stats <- merge(release_stats,
                         aggregate(list(Release_rkm = study_tagcodes$Rel_rkm),
                             by= list(Release = study_tagcodes$Release),
                             FUN = function(x) {head(x,1)}),
                         by = c("Release"))
  release_stats <- merge(release_stats,
                         aggregate(list(Mean_length = study_tagcodes$Length),
                             by= list(Release = study_tagcodes$Release),
                             FUN = mean),
                         by = c("Release"))
  release_stats <- merge(release_stats,
                         aggregate(list(Mean_weight = study_tagcodes$Weight),
                             by= list(Release = study_tagcodes$Release),
                             FUN = mean),
                         by = c("Release"))
  
  release_stats[,c("Mean_length", "Mean_weight")] <- round(release_stats[,c("Mean_length", "Mean_weight")],1)
  
  release_stats$First_release_time <- format(release_stats$First_release_time, tz = "Etc/GMT+8")
  
  release_stats$Last_release_time <- format(release_stats$Last_release_time, tz = "Etc/GMT+8")
  
  kable(release_stats, format = "html") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left")
}                       
Project began on 2019-02-14 18:30:00, see tagging details below:
| Release | First_release_time | Last_release_time | Number_fish_released | Release_location | Release_rkm | Mean_length | Mean_weight |
|---|---|---|---|---|---|---|---|
| Week 1 | 2019-02-14 18:30:00 | 2019-02-14 18:30:00 | 650 | Caldwell Park_Rel | 551.288 | 93.9 | 9 |



2. Real-time Fish Detections


Data current as of 2025-04-22 08:00:00. All times in Pacific Standard Time.

setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

library(cder)
library(reshape2)

detects_study <- read.csv("C:/Users/field/Desktop/Real-time data massaging/products/Study_detection_files/detects_Winter_H_2019.csv", stringsAsFactors = F)
detects_study$DateTime_PST <- as.POSIXct(detects_study$DateTime_PST, format = "%Y-%m-%d %H:%M:%S", tz = "Etc/GMT+8")

if(nrow(detects_study)>0){
  detects_study <- merge(detects_study, study_tagcodes[,c("TagID_Hex", "RelDT", "StudyID", "Release", "tag_life")], by.x = "TagCode", by.y = "TagID_Hex")
}
  
detects_butte <- detects_study[detects_study$general_location == "ButteBrRT",]

#wlk_flow <- read.csv("wlk.csv")

if (nrow(detects_butte) == 0){
  "No detections yet"
} else {
  
  detects_butte <- merge(detects_butte,aggregate(list(first_detect = detects_butte$DateTime_PST), by = list(TagCode= detects_butte$TagCode), FUN = min))
  
  detects_butte$Day <- as.Date(detects_butte$first_detect, "Etc/GMT+8")
  
  starttime <- as.Date(min(detects_butte$RelDT), "Etc/GMT+8")
  ## Endtime should be either now, or end of predicted tag life, whichever comes first
  endtime <- min(as.Date(c(Sys.time())), max(as.Date(detects_butte$RelDT)+(detects_butte$tag_life*1.5)))
  

  BTC_flow <- cdec_query("BTC", "20", "H", starttime, endtime+1)
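  ## (CDEC station BTC = Sacramento River at Butte City; sensor 20 = flow in cfs, duration "H" = hourly)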

  BTC_flow$datetime <- as.Date(BTC_flow$DateTime)
  BTC_flow_day <- aggregate(list(parameter_value = BTC_flow$Value),
                            by = list(Day = BTC_flow$datetime),
                            FUN = mean, na.rm = T)


  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))

  rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_butte$StudyID), "Release"])
  rel_num <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_butte$Release))])

  tagcount <- aggregate(list(unique_tags = detects_butte$TagCode), by = list(Day = detects_butte$Day, Release = detects_butte$Release ), FUN = function(x){length(unique(x))})
  tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)

  daterange1 <- merge(daterange, tagcount1, all.x=T)

  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == 'x'] <- paste(i)
    }
  }

  daterange2 <- merge(daterange1, BTC_flow_day, by = "Day", all.x = T)

  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL

  par(mar=c(6, 5, 2, 5) + 0.1)
  ## Release-group columns; the last column of daterange2 holds the flow data
  rel_cols <- 1:(ncol(daterange2)-1)
  barp <- barplot(t(daterange2[,rel_cols]), plot = FALSE, beside = T)
  barplot(t(daterange2[,rel_cols]), beside = T, col=rainbow(rel_num),
          xlab = "", ylab = "Number of fish arrivals per day",
          ylim = c(0,max(daterange2[,rel_cols], na.rm = T)*1.2),
          las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n", border=NA)
  legend(x ='topleft', legend = colnames(daterange2)[rel_cols], fill= rainbow(rel_num), horiz = T, title = "Release Week")
  ybreaks <- if(max(daterange2[,rel_cols], na.rm = T) < 4) {max(daterange2[,rel_cols], na.rm = T)} else {5}
  xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
  barpmeans <- colMeans(barp)
  axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
  axis(2, at = pretty(0:max(daterange2[,rel_cols], na.rm = T), ybreaks))

  par(new=T)

  ## Overlay mean daily flow on a second (right-hand) axis
  plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))
  axis(side = 4)
  mtext("Flow (cfs) at Butte City", side=4, line=3, cex=1.5, col="blue")
}
2.1 Detections at Butte City Bridge versus Sacramento River flows at Butte City
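
The bar-plus-line figure above relies on base R's par(new = TRUE) overlay idiom: the bars are drawn first, then a second plot is laid on top of the same region with its own right-hand axis. A minimal, self-contained sketch, with entirely hypothetical counts and flows:

## Hypothetical daily fish counts and flows, just to illustrate the overlay
counts <- c(2, 5, 3, 7)
flow   <- c(5000, 5200, 4800, 5100)

par(mar = c(5, 4, 2, 5) + 0.1)                 # leave room for the right axis
bp <- barplot(counts, ylab = "Number of fish arrivals per day")
par(new = TRUE)                                 # draw the next plot over the bars
plot(bp, flow, type = "l", col = "blue", lwd = 2,
     axes = FALSE, xlab = "", ylab = "",
     xlim = range(bp) + c(-0.5, 0.5))           # align line x-positions with bars
axis(side = 4)                                  # right-hand axis for flow
mtext("Flow (cfs)", side = 4, line = 3, col = "blue")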


setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

library(cder)
library(reshape2)

detects_study <- read.csv("C:/Users/field/Desktop/Real-time data massaging/products/Study_detection_files/detects_Winter_H_2019.csv", stringsAsFactors = F)
detects_study$DateTime_PST <- as.POSIXct(detects_study$DateTime_PST, format = "%Y-%m-%d %H:%M:%S", tz = "Etc/GMT+8")

if(nrow(detects_study)>0){
  detects_study <- merge(detects_study, study_tagcodes[,c("TagID_Hex", "RelDT", "StudyID", "Release", "tag_life")], by.x = "TagCode", by.y = "TagID_Hex")
}
  
detects_tower <- detects_study[detects_study$general_location == "TowerBridge",]

#wlk_flow <- read.csv("wlk.csv")

if (nrow(detects_tower) == 0){
  "No detections yet"
} else {
  
  detects_tower <- merge(detects_tower,aggregate(list(first_detect = detects_tower$DateTime_PST), by = list(TagCode= detects_tower$TagCode), FUN = min))
  
  detects_tower$Day <- as.Date(detects_tower$first_detect, "Etc/GMT+8")
  
  starttime <- as.Date(min(detects_tower$RelDT), "Etc/GMT+8")
  ## Endtime should be either now, or end of predicted tag life, whichever comes first
  endtime <- min(as.Date(c(Sys.time())), max(as.Date(detects_tower$RelDT)+(detects_tower$tag_life*1.5)))
  

  wlk_flow <- cdec_query("WLK", "20", "H", starttime, endtime+1)
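  ## (CDEC station WLK = Sacramento River below Wilkins Slough; sensor 20 = flow in cfs, duration "H" = hourly)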

  wlk_flow$datetime <- as.Date(wlk_flow$DateTime)
  wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$Value),
                            by = list(Day = wlk_flow$datetime),
                            FUN = mean, na.rm = T)


  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))

  rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_tower$StudyID), "Release"])
  rel_num <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_tower$Release))])

  tagcount <- aggregate(list(unique_tags = detects_tower$TagCode), by = list(Day = detects_tower$Day, Release = detects_tower$Release ), FUN = function(x){length(unique(x))})
  tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)

  daterange1 <- merge(daterange, tagcount1, all.x=T)

  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == 'x'] <- paste(i)
    }
  }

  daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)

  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL

  par(mar=c(6, 5, 2, 5) + 0.1)
  ## Release-group columns; the last column of daterange2 holds the flow data
  rel_cols <- 1:(ncol(daterange2)-1)
  barp <- barplot(t(daterange2[,rel_cols]), plot = FALSE, beside = T)
  barplot(t(daterange2[,rel_cols]), beside = T, col=rainbow(rel_num),
          xlab = "", ylab = "Number of fish arrivals per day",
          ylim = c(0,max(daterange2[,rel_cols], na.rm = T)*1.2),
          las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n", border=NA)
  legend(x ='topleft', legend = colnames(daterange2)[rel_cols], fill= rainbow(rel_num), horiz = T, title = "Release Week")
  ybreaks <- if(max(daterange2[,rel_cols], na.rm = T) < 4) {max(daterange2[,rel_cols], na.rm = T)} else {5}
  xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
  barpmeans <- colMeans(barp)
  axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
  axis(2, at = pretty(0:max(daterange2[,rel_cols], na.rm = T), ybreaks))

  par(new=T)

  ## Overlay mean daily flow on a second (right-hand) axis
  plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))
  axis(side = 4)
  mtext("Flow (cfs) at Wilkins Slough", side=4, line=3, cex=1.5, col="blue")
}
2.2 Detections at Tower Bridge (downtown Sacramento) versus Sacramento River flows at Wilkins Slough


setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))


detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]

if (nrow(detects_benicia)>0) {
  detects_benicia <- merge(detects_benicia,aggregate(list(first_detect = detects_benicia$DateTime_PST), by = list(TagCode= detects_benicia$TagCode), FUN = min))
  
  detects_benicia$Day <- as.Date(detects_benicia$first_detect, "Etc/GMT+8")
  
  starttime <- as.Date(min(detects_benicia$RelDT), "Etc/GMT+8")

  #endtime <- as.Date(c(Sys.time()))#, max(detects_benicia$first_detect)+60*60*24)))
  #wlk_flow <- cdec_query("COL", "20", "H", starttime, endtime+1)
  #wlk_flow$datetime <- as.Date(wlk_flow$datetime)
  #wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$parameter_value), by = list(Day = wlk_flow$datetime), FUN = mean, na.rm = T)
  
  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
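  ## (endtime is carried over from the Tower Bridge chunk above)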
  
  rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_benicia$StudyID), "Release"])
  rel_num <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_benicia$Release))])
  
  tagcount <- aggregate(list(unique_tags = detects_benicia$TagCode), by = list(Day = detects_benicia$Day, Release = detects_benicia$Release ), FUN = function(x){length(unique(x))})
  tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
                    
  daterange1 <- merge(daterange, tagcount1, all.x=T)
  
  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == 'x'] <- paste(i)
    }
  }
  
  #daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)
  daterange2 <- daterange1
  
  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL
  
  par(mar=c(6, 5, 2, 5) + 0.1)
  barp <- barplot(t(daterange2[,1:ncol(daterange2)]), plot = FALSE, beside = T)
  barplot(t(daterange2[,1:ncol(daterange2)]), beside = T, col=rainbow(rel_num), 
          xlab = "", ylab = "Number of fish arrivals per day", 
          ylim = c(0,max(daterange2[,1:ncol(daterange2)], na.rm = T)*1.2), 
          las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n")#, 
          #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
          #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
  legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)], fill= rainbow(rel_num), horiz = T, title = "Release Group")
  ybreaks <- if(max(daterange2[,1:ncol(daterange2)], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)], na.rm = T)} else {5}
  xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
  barpmeans <- colMeans(barp)
  axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
  axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)], na.rm = T), ybreaks))
  box()

#par(new=T)

#plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
#axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
#mtext("Flow (cfs) at Colusa Bridge", side=4, line=3, cex=1.5, col="blue")

}else{
  print("No detections at Benicia yet")
}
2.3 Detections at Benicia Bridge




3. Survival and Routing Probability


setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

library(data.table)
library(RMark)

gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)

study_count <- nrow(study_tagcodes)

if (nrow(detects_tower) == 0){
  "No detections yet"
} else {
  
  ## Only do survival to Sac for now
  test <- detects_study[detects_study$rkm > 168 & detects_study$rkm < 175,]
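  ## (rkm 168-175 brackets TowerBridge, rkm 172.0, and I80-50_Br, rkm 170.748; see section 4)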
  
  ## Create inp for survival estimation
  
  inp <- as.data.frame(reshape2::dcast(test, TagCode ~ rkm, fun.aggregate = length))
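  ## inp now has one row per tag and one column per detection site (named by
  ## rkm); each cell is that tag's detection count at that site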
  
  ## Sort columns by river km in descending order
  # Count number of genlocs
  gen_loc_sites <- ncol(inp)-1
  
  if(gen_loc_sites <2){
    "Detections at only one location so far, survival cannot yet be estimated"
  }else{
  
    inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
  
    inp <- merge(study_tagcodes, inp, by.x = "TagID_Hex", by.y = "TagCode", all.x = T)
    
    inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
    inp2[is.na(inp2)] <- 0
    inp2[inp2 > 0] <- 1
    
    inp <- cbind(inp, inp2)
    groups <- as.character(sort(unique(inp$Release)))
  
    inp[,groups] <- 0
    for (i in groups) {
      inp[as.character(inp$Release) == i, i] <- 1
    }
    
    if(length(unique(inp[,groups])) > 1){
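      ## Each line of the .inp ends up like "110 1 0 ;": the capture history
      ## (release occasion plus one digit per site), then one dummy column per
      ## release group, then a terminating semicolon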
      inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",apply(inp[,groups], 1, paste, collapse=" ")," ;",sep = "")
      write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
      WRinp <- convert.inp("WRinp.inp", group.df=data.frame(rel=groups))
      WR.process <- process.data(WRinp, model="CJS", begin.time=1, groups = "rel") 
      
      WR.ddl <- make.design.data(WR.process)
    
      WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
    
      WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
      WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
      WR.surv$Detection_efficiency <- NA
      WR.surv[1,"Detection_efficiency"] <-   round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
    
      WR.surv <- cbind(c("ALL", groups), WR.surv)
    }
    if(length(unique(inp[,groups])) < 2){
      inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ", 1,sep = "")
      write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
      WRinp <- convert.inp("WRinp.inp")
      WR.process <- process.data(WRinp, model="CJS", begin.time=1) 
      
      WR.ddl <- make.design.data(WR.process)
    
      WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
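      ## (with a single release group, this model is identical to WR.mark.all above)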
    
      WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
      WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
      WR.surv$Detection_efficiency <- NA
      WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
    
      WR.surv <- cbind(c("ALL", groups), WR.surv)
    }

    
    colnames(WR.surv) <- c("Release Week", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
    
    print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival") %>%
            kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
  }
}
3.1 Minimum survival to Tower Bridge (using CJS survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival
| Release Week | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) |
|---|---|---|---|---|---|
| ALL | 23.3 | 2.4 | 19 | 28.2 | 43.6 |
| Week 1 | 23.3 | 2.4 | 19 | 28.2 | NA |


setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))


if (nrow(detects_study) == 0){
  "No detections yet"
} else {
  
  ## Only do survival to Georg split for now
  test2 <- detects_study[detects_study$general_location %in% c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
  
  ## We can only do multistate model if there is at least one detection in each route
  
  if(nrow(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2"),]) == 0 |
     nrow(test2[test2$general_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"),]) == 0){
    "Too few detections: routing probability cannot be estimated"
  }else{
    
    ## Make tagcode character
    study_tagcodes$TagID_Hex <- as.character(study_tagcodes$TagID_Hex)
    ## Make a crosstab query with frequencies for all tag/location combination
    test2$general_location <- factor(test2$general_location, levels = c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"))
    test2$TagCode <- factor(test2$TagCode, levels = study_tagcodes$TagID_Hex)
    mytable <- table(test2$TagCode, test2$general_location) # tags will be rows, locations will be columns
    
    ## Change all frequencies bigger than 1 to 1. (A stricter minimum cutoff of
    ## 2 detections could be used instead; see the commented sketch below.)
    mytable[mytable>0] <- "A"
    
    ## Order columns from upstream to downstream (by rkm)
    mytable2 <- mytable[, c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2")]
    
    ## Now sort the crosstab rows alphabetically
    mytable2 <- mytable2[order(row.names(mytable2)),]
    
    mytable2[which(mytable2[, "Sac_BlwGeorgiana"]=="A"), "Sac_BlwGeorgiana"] <- "A"
    mytable2[which(mytable2[, "Sac_BlwGeorgiana2"]=="A"), "Sac_BlwGeorgiana2"] <- "A"
    mytable2[which(mytable2[, "Georgiana_Slough1"]=="A"), "Georgiana_Slough1"] <- "B"
    mytable2[which(mytable2[, "Georgiana_Slough2"]=="A"), "Georgiana_Slough2"] <- "B"
    
    ## Make a crosstab query with frequencies for all weekly Release groups
    #test2$Release <- factor(test2$Release)
    #mytable3 <- table(test2$TagCode, test2$Release) # A will be rows, B will be columns
    
    ## Change all frequencies bigger than 1 to 1. Here you could change your minimum cutoff to 2 detections, and then make another command that changes all detections=1 to 0
    #mytable3[mytable3>0] <- 1
    
    ## Order in order of rkm
    #mytable4 <- mytable3[, order(colnames(mytable3))]
    
    ## Now sort the crosstab rows alphabetically
    #mytable4 <- mytable4[order(row.names(mytable4)),]
    
    ## Now order the study_tagcodes table the same way
    study_tagcodes <- study_tagcodes[order(study_tagcodes$TagID_Hex),]
    
    ## Paste together (concatenate) the data from each column of the crosstab into one string per row, add to tagging_meta.
    ## For this step, make sure both are sorted by FishID
    study_tagcodes$inp_part1 <- apply(mytable2[,1:3],1,paste,collapse="")
    study_tagcodes$inp_partA <- apply(mytable2[,4:5],1,paste,collapse="")
    study_tagcodes$inp_partB <- apply(mytable2[,6:7],1,paste,collapse="")
    #study_tagcodes$inp_group <- apply(mytable4,1,paste,collapse=" ")
    
    ## We need to have a way of picking which route to assign to a fish if it was detected by both georg and blw-georg recvs
    ## We will say that the last detection at that junction is what determines the route it took
    
    ## find last detection at each genloc
    departure <- aggregate(list(depart = test2$DateTime_PST), by = list(TagID_Hex = test2$TagCode, last_location = test2$general_location), FUN = max)
    ## subset for just juncture locations
    departure <- departure[departure$last_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
    ## Find genloc of last known detection per tag
    last_depart <- aggregate(list(depart = departure$depart), by = list(TagID_Hex = departure$TagID_Hex), FUN = max)
    
    last_depart1 <- merge(last_depart, departure)
    study_tagcodes <- merge(study_tagcodes, last_depart1[,c("TagID_Hex", "last_location")], by = "TagID_Hex", all.x = T)
    
    ## Assume that the Sac is default pathway, and for fish that were detected in neither route, it would get a "00" in inp so doesn't matter anyway
    study_tagcodes$inp_final <- paste("A",study_tagcodes$inp_part1, study_tagcodes$inp_partA," 1 ;", sep = "")
    
    ## now put in exceptions...fish that were seen in georgiana last
    study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_final"] <- paste("A",study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_part1"], study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_partB"]," 1 ;", sep = "")
    
    write.table(study_tagcodes$inp_final,"WRinp_multistate.inp",row.names = F, col.names = F, quote = F)
    
    WRinp <- convert.inp("WRinp_multistate.inp")
    
    dp <- process.data(WRinp, model="Multistrata") 
    
    ddl <- make.design.data(dp)
    
    #### p ####
    # Can't be seen at 2B or 3B or 4B (butte, tower or I80)
    ddl$p$fix=NA
    ddl$p$fix[ddl$p$stratum == "B" & ddl$p$time %in% c(2,3,4)]=0
    
    #### Psi ####
    # Only one transition is allowed: from A (Sacramento) to B (Georgiana
    # Slough), at the junction
    
    ddl$Psi$fix=0
    # A to B can only happen at occasion 4
    ddl$Psi$fix[ddl$Psi$stratum=="A"&
                  ddl$Psi$tostratum=="B" & ddl$Psi$time==4]=NA
    
    #### Phi a.k.a. S ####
    ddl$S$fix=NA
    # Stratum B does not exist for reaches 1-4, and reach 5 in B spans only the
    # two Georgiana lines, so fix all B survivals to 1
    ddl$S$fix[ddl$S$stratum=="B" & ddl$S$time %in% c(1,2,3,4,5)]=1
    
    # For route A, fixing it to 1 for 5 (between two blw_georg lines)
    ddl$S$fix[ddl$S$stratum=="A" & ddl$S$time==5]=1
    ## We use -1 at beginning of formula to remove intercept. This is because different routes probably shouldn't share the same intercept
    
    p.timexstratum=list(formula=~-1+stratum:time)
    Psi.stratumxtime=list(formula=~-1+stratum:time)
    S.stratumxtime=list(formula=~-1+stratum:time)
    
    ## Run model a first time
    S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, silent = T, output = F)
    
    ## Identify any parameter estimates at 1, which would likely have bad SE estimates.
    profile.intervals <- which(S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$estimate %in% c(0,1) & !S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$fixed == "Fixed")
    
    ## Rerun model using profile interval estimation for the tricky parameters
    S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, profile.int = profile.intervals, silent = T, output = F)
    
    results <- S.timexstratum.p.timexstratum.Psi.timexstratum$results$real
    
    results_short <- results[rownames(results) %in% c("S sA g1 c1 a0 o1 t1",
                                                      "S sA g1 c1 a1 o2 t2",
                                                      "S sA g1 c1 a2 o3 t3",
                                                      "S sA g1 c1 a3 o4 t4",
                                                      "p sA g1 c1 a1 o1 t2",
                                                      "p sA g1 c1 a2 o2 t3",
                                                      "p sA g1 c1 a3 o3 t4",
                                                      "p sA g1 c1 a4 o4 t5",
                                                      "p sB g1 c1 a4 o4 t5",
                                                      "Psi sA toB g1 c1 a3 o4 t4"
                                                      ),]
    
    
    results_short <- round(results_short[,c("estimate", "se", "lcl", "ucl")] * 100,1)
    
    results_short$Measure <- c("Survival from release to Butte City","Survival from Butte City to TowerBridge (minimum estimate since fish may have taken Yolo Bypass)", "Survival from TowerBridge to I80-50_Br", "% arrived from I80-50_Br to Georgiana Slough confluence (not survival because fish may have taken Sutter/Steam)","Detection probability at Butte City",
                               "Detection probability at TowerBridge", "Detection probability at I80-50_Br", "Detection probability at Blw_Georgiana", "Detection probability at Georgiana Slough",
                               "Routing probability into Georgiana Slough (Conditional on fish arriving to junction)")
    
    results_short <- results_short[,c("Measure", "estimate", "se", "lcl", "ucl")]
    colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")

    print(kable(results_short, row.names = F, "html", caption = "3.2 Reach-specific survival and probability of entering Georgiana Slough") %>%
            kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
  }
}
3.2 Reach-specific survival and probability of entering Georgiana Slough
| Measure | Estimate | SE | 95% lower C.I. | 95% upper C.I. |
|---|---|---|---|---|
| Survival from release to Butte City | 64.5 | 6.4 | 51.2 | 75.9 |
| Survival from Butte City to TowerBridge (minimum estimate since fish may have taken Yolo Bypass) | 40.1 | 4.9 | 31.0 | 50.0 |
| Survival from TowerBridge to I80-50_Br | 97.6 | 7.3 | 8.2 | 100.0 |
| % arrived from I80-50_Br to Georgiana Slough confluence (not survival because fish may have taken Sutter/Steamboat) | 66.1 | 6.3 | 52.9 | 77.2 |
| Detection probability at Butte City | 32.4 | 3.8 | 25.4 | 40.4 |
| Detection probability at TowerBridge | 39.3 | 4.2 | 31.4 | 47.7 |
| Detection probability at I80-50_Br | 57.3 | 5.0 | 47.2 | 66.8 |
| Detection probability at Blw_Georgiana | 57.6 | 6.4 | 44.8 | 69.5 |
| Detection probability at Georgiana Slough | 100.0 | 0.0 | 90.8 | 100.0 |
| Routing probability into Georgiana Slough (Conditional on fish arriving to junction) | 18.4 | 3.8 | 12.0 | 27.2 |


setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

if (nrow(detects_benicia) == 0){
  "No detections yet"
} else {
  
  benicia <- read.csv("benicia_surv.csv", stringsAsFactors = F)
  benicia$RelDT <- as.POSIXct(benicia$RelDT)

  ## Only do survival to Benicia here
  test3 <- detects_study[detects_study$rkm < 53,]
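  ## (rkm < 53 keeps only the Benicia receivers: Benicia_east at rkm 52.240
  ## and Benicia_west at rkm 52.040; see section 4)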
  
  ## Create inp for survival estimation
  
  inp <- as.data.frame(reshape2::dcast(test3, TagCode ~ rkm, fun.aggregate = length))
  
  ## Sort columns by river km in descending order
  # Count number of genlocs
  gen_loc_sites <- ncol(inp)-1
  
  inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]

  inp <- merge(study_tagcodes, inp, by.x = "TagID_Hex", by.y = "TagCode", all.x = T)
  
  inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
  inp2[is.na(inp2)] <- 0
  inp2[inp2 > 0] <- 1
  
  inp <- cbind(inp, inp2)
  groups <- as.character(sort(unique(inp$Release)))

  inp[,groups] <- 0
  for (i in groups) {
    inp[as.character(inp$Release) == i, i] <- 1
  }
  
  if(length(groups) > 1){
    inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",apply(inp[,groups], 1, paste, collapse=" ")," ;",sep = "")
  }else{
    inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",inp[,groups]," ;",sep = "")
  }
  
  
  write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
  
  if(length(groups) > 1){
  
    WRinp <- convert.inp("WRinp.inp", group.df=data.frame(rel=groups))
    WR.process <- process.data(WRinp, model="CJS", begin.time=1, groups = "rel") 
    
    WR.ddl <- make.design.data(WR.process)
    
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    
    WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
    
    WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
    WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
    
  }else{
    
    WRinp <- convert.inp("WRinp.inp")
    WR.process <- process.data(WRinp, model="CJS", begin.time=1) 
    
      
    WR.ddl <- make.design.data(WR.process)
    
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)

    WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
    
  }
  
  WR.surv$Detection_efficiency <- NA
  WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
    
  WR.surv <- cbind(Release = c("ALL", groups), WR.surv)

  WR.surv1 <- WR.surv
  colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")

  print(kable(WR.surv1, row.names = F, "html", caption = "3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))    
  
  ## Find mean release time per release group, and ALL
  reltimes <- aggregate(list(RelDT = study_tagcodes$RelDT), by = list(Release = study_tagcodes$Release), FUN = mean)
  reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$RelDT)))

  ## Assign whether the results are tentative or final
  quality <- "tentative"
  if(endtime < as.Date(c(Sys.time()))) { quality <- "final"}
  WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
  
  WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = '1970-01-01')
  
  ## remove old benicia record for this studyID
  benicia <- benicia[!benicia$StudyID == unique(study_tagcodes$StudyID),]
  
  benicia <- rbind(benicia, data.frame(WR.surv, StudyID = unique(study_tagcodes$StudyID), data_quality = quality))
  
  write.csv(benicia, "benicia_surv.csv", row.names = F, quote = F) 
  
}
3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model)
| Release Group | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) |
|---|---|---|---|---|---|
| ALL | 25.6 | 1.7 | 22.4 | 29.1 | 90.2 |
| Week 1 | 25.6 | 1.7 | 22.4 | 29.1 | 90.2 |



4. Detection Statistics


setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

if (nrow(detects_study) == 0){
  "No detections yet"
} else {
  
  arrivals <- aggregate(list(DateTime_PST = detects_study$DateTime_PST), by = list(general_location = detects_study$general_location, TagCode = detects_study$TagCode), FUN = min)
  
  tag_stats <- aggregate(list(First_arrival = arrivals$DateTime_PST), 
                         by= list(general_location = arrivals$general_location),
                         FUN = min)
  tag_stats <- merge(tag_stats, 
                     aggregate(list(Mean_arrival = arrivals$DateTime_PST), 
                         by= list(general_location = arrivals$general_location),
                         FUN = mean), 
                     by = c("general_location"))
  tag_stats <- merge(tag_stats, 
                     aggregate(list(Last_arrival = arrivals$DateTime_PST), 
                         by= list(general_location = arrivals$general_location),
                         FUN = max), 
                     by = c("general_location"))
  tag_stats <- merge(tag_stats, 
                     aggregate(list(Fish_count = arrivals$TagCode), 
                         by= list(general_location = arrivals$general_location), 
                         FUN = function(x) {length(unique(x))}), 
                     by = c("general_location"))
  tag_stats$Percent_arrived <- round(tag_stats$Fish_count/study_count * 100,2)
      
  tag_stats <- merge(tag_stats, unique(gen_locs[,c("general_location", "rkm")]))
  
  tag_stats <- tag_stats[order(tag_stats$rkm, decreasing = T),]
  
  tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")

  print(kable(tag_stats, row.names = F, 
              caption = "4.1 Detections for all releases combined",
              "html") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
  count=1
  for (j in sort(unique(study_tagcodes$Release))) {
    
    if(nrow(detects_study[detects_study$Release == j,]) > 0 ) {
      count=count+1
      temp <- detects_study[detects_study$Release == j,]
      
        arrivals1 <- aggregate(list(DateTime_PST = temp$DateTime_PST), by = list(general_location = temp$general_location, TagCode = temp$TagCode), FUN = min)
  
      rel_count <- nrow(study_tagcodes[study_tagcodes$Release == j,])
  
      tag_stats1 <- aggregate(list(First_arrival = arrivals1$DateTime_PST), 
                             by= list(general_location = arrivals1$general_location), 
                             FUN = min)
      tag_stats1 <- merge(tag_stats1, 
                         aggregate(list(Mean_arrival = arrivals1$DateTime_PST), 
                             by= list(general_location = arrivals1$general_location), 
                             FUN = mean), 
                         by = c("general_location"))
      tag_stats1 <- merge(tag_stats1, 
                   aggregate(list(Last_arrival = arrivals1$DateTime_PST), 
                       by= list(general_location = arrivals1$general_location), 
                       FUN = max), 
                   by = c("general_location"))
      tag_stats1 <- merge(tag_stats1, 
                         aggregate(list(Fish_count = arrivals1$TagCode), 
                                   by= list(general_location = arrivals1$general_location), 
                                   FUN = function(x) {length(unique(x))}), 
                         by = c("general_location"))
      
      tag_stats1$Percent_arrived <- round(tag_stats1$Fish_count/rel_count * 100,2)
    
      tag_stats1 <- merge(tag_stats1, unique(gen_locs[,c("general_location", "rkm")]))
    
      tag_stats1 <- tag_stats1[order(tag_stats1$rkm, decreasing = T),]
      
      tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")
      
      final_stats <- kable(tag_stats1, row.names = F, 
            caption = paste("4.",count," Detections for ",j," release groups", sep = ""),
            "html")
      
      print(kable_styling(final_stats, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
      
    } else {
      cat("\n\n\\pagebreak\n")
      print(paste("No detections for",j,"release group yet", sep=" "), quote = F)
      cat("\n\n\\pagebreak\n")
    }
  }
}
4.1 Detections for all releases combined
| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | rkm |
|---|---|---|---|---|---|---|
| ButteBrRT | 2019-02-17 01:32:13 | 2019-02-22 20:53:57 | 2019-04-09 20:18:05 | 136 | 20.92 | 344.108 |
| TowerBridge | 2019-02-21 02:43:04 | 2019-03-12 01:40:37 | 2019-04-17 07:26:51 | 66 | 10.15 | 172.000 |
| I80-50_Br | 2019-02-20 03:41:36 | 2019-03-16 19:53:32 | 2019-04-21 05:19:25 | 94 | 14.46 | 170.748 |
| Georgiana_Slough1 | 2019-02-20 22:25:02 | 2019-03-14 21:36:01 | 2019-04-03 23:48:25 | 20 | 3.08 | 119.208 |
| Sac_BlwGeorgiana | 2019-02-21 11:47:02 | 2019-03-18 14:45:41 | 2019-04-17 19:07:40 | 51 | 7.85 | 119.058 |
| Georgiana_Slough2 | 2019-02-20 22:33:47 | 2019-03-14 21:45:54 | 2019-04-03 23:57:27 | 20 | 3.08 | 118.758 |
| Sac_BlwGeorgiana2 | 2019-02-20 15:48:15 | 2019-03-17 05:38:40 | 2019-04-17 19:18:54 | 59 | 9.08 | 118.398 |
| Benicia_east | 2019-02-24 11:27:51 | 2019-03-24 17:03:44 | 2019-04-22 21:43:15 | 150 | 23.08 | 52.240 |
| Benicia_west | 2019-02-24 12:38:28 | 2019-03-24 13:00:10 | 2019-04-23 08:52:46 | 153 | 23.54 | 52.040 |
4.2 Detections for Week 1 release group

| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | rkm |
|---|---|---|---|---|---|---|
| ButteBrRT | 2019-02-17 01:32:13 | 2019-02-22 20:53:57 | 2019-04-09 20:18:05 | 136 | 20.92 | 344.108 |
| TowerBridge | 2019-02-21 02:43:04 | 2019-03-12 01:40:37 | 2019-04-17 07:26:51 | 66 | 10.15 | 172.000 |
| I80-50_Br | 2019-02-20 03:41:36 | 2019-03-16 19:53:32 | 2019-04-21 05:19:25 | 94 | 14.46 | 170.748 |
| Georgiana_Slough1 | 2019-02-20 22:25:02 | 2019-03-14 21:36:01 | 2019-04-03 23:48:25 | 20 | 3.08 | 119.208 |
| Sac_BlwGeorgiana | 2019-02-21 11:47:02 | 2019-03-18 14:45:41 | 2019-04-17 19:07:40 | 51 | 7.85 | 119.058 |
| Georgiana_Slough2 | 2019-02-20 22:33:47 | 2019-03-14 21:45:54 | 2019-04-03 23:57:27 | 20 | 3.08 | 118.758 |
| Sac_BlwGeorgiana2 | 2019-02-20 15:48:15 | 2019-03-17 05:38:40 | 2019-04-17 19:18:54 | 59 | 9.08 | 118.398 |
| Benicia_east | 2019-02-24 11:27:51 | 2019-03-24 17:03:44 | 2019-04-22 21:43:15 | 150 | 23.08 | 52.240 |
| Benicia_west | 2019-02-24 12:38:28 | 2019-03-24 13:00:10 | 2019-04-23 08:52:46 | 153 | 23.54 | 52.040 |
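
For reference, the aggregate()/merge() chains in this section can be written more compactly if the dplyr package is available. A sketch of the section 4.1 summary (not the report's method; tag_stats_alt is a hypothetical name, and detects_study and study_count come from the chunks above):

library(dplyr)

tag_stats_alt <- detects_study %>%
  ## first arrival of each tag at each location
  group_by(general_location, TagCode) %>%
  summarise(first_detect = min(DateTime_PST), .groups = "drop") %>%
  ## then summarise arrivals across tags per location
  group_by(general_location) %>%
  summarise(First_arrival = min(first_detect),
            Mean_arrival  = mean(first_detect),
            Last_arrival  = max(first_detect),
            Fish_count    = n_distinct(TagCode)) %>%
  mutate(Percent_arrived = round(Fish_count / study_count * 100, 2))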