Central Valley Enhanced Acoustic Tagging Project






Deer Creek wild spring-run Chinook salmon

2018-2019 Season (PROVISIONAL DATA)



1. Project Status


The study is complete and all tags are no longer active. All times are reported in Pacific Standard Time.
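
The code below parses timestamps with tz = "Etc/GMT+8", a fixed UTC-8 offset equivalent to Pacific Standard Time year-round (it never shifts for daylight saving). A minimal sketch of the convention, using a hypothetical timestamp:

## "Etc/GMT+8" means 8 hours *behind* UTC (POSIX inverts the sign),
## so values stay in PST even during daylight-saving months
as.POSIXct("2018-11-26 12:32:31", tz = "Etc/GMT+8")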


The Telemetry Study Template for this study can be found here

library(knitr)       # kable()
library(kableExtra)  # kable_styling(), %>%

setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

tagcodes <- read.csv("qry_HexCodes.txt", stringsAsFactors = F, colClasses=c("TagID_Hex"="character"))

tagcodes$RelDT <- as.POSIXct(tagcodes$RelDT, format = "%m/%d/%Y %I:%M:%S %p", tz = "Etc/GMT+8")
latest <- read.csv("latest_download.csv", stringsAsFactors = F)

study_tagcodes <- tagcodes[tagcodes$StudyID == "DeerCk-Wild-2019",]
 

if (nrow(study_tagcodes) == 0){
  cat("Project has not yet begun")
}else{
  cat("Project has begun, see tagging details below:")
  
  study_tagcodes$Release <- "Week 1"
  study_tagcodes[study_tagcodes$RelDT < as.POSIXct("2018-05-12"), "Release"] <- "Week 1"
  study_tagcodes[study_tagcodes$RelDT > as.POSIXct("2018-05-12") & study_tagcodes$RelDT < as.POSIXct("2018-05-20"), "Release"] <- "Week 2"
  study_tagcodes[study_tagcodes$RelDT > as.POSIXct("2018-05-20") & study_tagcodes$RelDT < as.POSIXct("2018-05-24"), "Release"] <- "Week 3"
  study_tagcodes[study_tagcodes$RelDT > as.POSIXct("2018-05-24") & study_tagcodes$RelDT < as.POSIXct("2018-06-01"), "Release"] <- "Week 4"
  study_tagcodes[study_tagcodes$RelDT > as.POSIXct("2018-06-01") & study_tagcodes$RelDT < as.POSIXct("2018-06-07"), "Release"] <- "Week 5"
  
  
  release_stats <- aggregate(list(First_release_time = study_tagcodes$RelDT),
                             by= list(Release = study_tagcodes$Release),
                             FUN = min)
  release_stats <- merge(release_stats,
                         aggregate(list(Last_release_time = study_tagcodes$RelDT),
                             by= list(Release = study_tagcodes$Release),
                             FUN = max),
                         by = c("Release"))
  
                             
  release_stats <- merge(release_stats, aggregate(list(Number_fish_released =
                                                         study_tagcodes$TagID_Hex),
                             by= list(Release = study_tagcodes$Release),
                             FUN = function(x) {length(unique(x))}),
                         by = c("Release"))
  
  release_stats <- merge(release_stats,
                         aggregate(list(Release_location = study_tagcodes$Rel_loc),
                             by= list(Release = study_tagcodes$Release),
                             FUN = function(x) {head(x,1)}),
                         by = c("Release"))
  release_stats <- merge(release_stats,
                         aggregate(list(Release_rkm = study_tagcodes$Rel_rkm),
                             by= list(Release = study_tagcodes$Release),
                             FUN = function(x) {head(x,1)}),
                         by = c("Release"))
  release_stats <- merge(release_stats,
                         aggregate(list(Mean_length = study_tagcodes$Length),
                             by= list(Release = study_tagcodes$Release),
                             FUN = mean),
                         by = c("Release"))
  release_stats <- merge(release_stats,
                         aggregate(list(Mean_weight = study_tagcodes$Weight),
                             by= list(Release = study_tagcodes$Release),
                             FUN = mean),
                         by = c("Release"))
  
  release_stats[,c("Mean_length", "Mean_weight")] <- round(release_stats[,c("Mean_length", "Mean_weight")],1)
  
  kable(release_stats, format = "html") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left")
}                       
Project has begun; see tagging details below:
Release | First_release_time  | Last_release_time   | Number_fish_released | Release_location | Release_rkm | Mean_length | Mean_weight
--------|---------------------|---------------------|----------------------|------------------|-------------|-------------|------------
Week 1  | 2018-11-26 12:32:31 | 2018-12-12 11:32:00 | 8                    | DeerCkRST        | 441.728     | 108.9       | 14.1



2. Real-time Fish Detections


Data current as of 2025-04-22 09:00:00. All times are reported in Pacific Standard Time.

setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

library(cder)
library(reshape2)

detects_study <- read.csv("Study_detection_files/detects_DeerCk-Wild-2019.csv", stringsAsFactors = F)
detects_study$DateTime_PST <- as.POSIXct(detects_study$DateTime_PST, format = "%Y-%m-%d %H:%M:%S", tz = "Etc/GMT+8")

if(nrow(detects_study)>0){
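  ## attach release metadata (release time, study, release group, tag life) to each detection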
  detects_study <- merge(detects_study, study_tagcodes[,c("TagID_Hex", "RelDT", "StudyID", "Release", "tag_life")], by.x = "TagCode", by.y = "TagID_Hex")
}
  
detects_tower <- detects_study[detects_study$general_location == "TowerBridge",]

#wlk_flow <- read.csv("wlk.csv")

if (nrow(detects_tower) == 0){
  "No detections yet"
} else {
  
  detects_tower <- merge(detects_tower,aggregate(list(first_detect = detects_tower$DateTime_PST), by = list(TagCode= detects_tower$TagCode), FUN = min))
  
  detects_tower$Day <- as.Date(detects_tower$first_detect, "Etc/GMT+8")
  
  starttime <- as.Date(min(detects_tower$RelDT), "Etc/GMT+8")
  ## Endtime should be either now, or end of predicted tag life, whichever comes first
  endtime <- min(as.Date(c(Sys.time())), max(as.Date(detects_tower$RelDT)+detects_tower$tag_life))
  

  wlk_flow <- cdec_query("WLK", "20", "H", starttime, endtime+1)
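  ## station WLK = Wilkins Slough, sensor 20 = flow (cfs), "H" = hourly duration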

  wlk_flow$datetime <- as.Date(wlk_flow$DateTime)
  wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$Value),
                            by = list(Day = wlk_flow$datetime),
                            FUN = mean, na.rm = T)


  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))

  rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_tower$StudyID), "Release"])
  rel_num <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_tower$Release))])

  tagcount <- aggregate(list(unique_tags = detects_tower$TagCode), by = list(Day = detects_tower$Day, Release = detects_tower$Release ), FUN = function(x){length(unique(x))})
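  ## dcast (reshape2) pivots the long per-day, per-release counts into one column per release group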
  tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)

  daterange1 <- merge(daterange, tagcount1, all.x=T)

  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == 'x'] <- paste(i)
    }
  }

  daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)

  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL

  par(mar=c(6, 5, 2, 5) + 0.1)
  barp <- barplot(t(daterange2[,1:(ncol(daterange2)-1)]), plot = FALSE, beside = T)
  barplot(t(daterange2[,1:(ncol(daterange2)-1)]), beside = T, col=rainbow(rel_num),
          xlab = "", ylab = "Number of fish arrivals per day",
          ylim = c(0,max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T)*1.2),
          las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n", border=NA)#,
  #legend.text = colnames(daterange2[,1:(ncol(daterange2)-1)]),
  #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
  legend(x ='topleft', legend = colnames(daterange2)[1:(ncol(daterange2)-1)], fill= rainbow(rel_num), horiz = T, title = "Release Week")
  ybreaks <- if(max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T) < 4) {max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T)} else {5}
  xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
  barpmeans <- colMeans(barp)
  axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
  axis(2, at = pretty(0:max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T), ybreaks))

  par(new=T)

  plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
  axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
  mtext("Flow (cfs) at Wilkins Slough", side=4, line=3, cex=1.5, col="blue")
}
2.1 Detections at Tower Bridge (downtown Sacramento) versus Sacramento River flows at Wilkins Slough


setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))


detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]

if (nrow(detects_benicia)>0) {
  detects_benicia <- merge(detects_benicia,aggregate(list(first_detect = detects_benicia$DateTime_PST), by = list(TagCode= detects_benicia$TagCode), FUN = min))
  
  detects_benicia$Day <- as.Date(detects_benicia$first_detect, "Etc/GMT+8")
  
  starttime <- as.Date(min(detects_benicia$RelDT), "Etc/GMT+8")
  #endtime <- as.Date(c(Sys.time()))#, max(detects_benicia$first_detect)+60*60*24)))
  #wlk_flow <- cdec_query("COL", "20", "H", starttime, endtime+1)
  #wlk_flow$datetime <- as.Date(wlk_flow$datetime)
  #wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$parameter_value), by = list(Day = wlk_flow$datetime), FUN = mean, na.rm = T)
  
  ## NOTE: endtime carries over from the Tower Bridge chunk above
  ## (today or end of predicted tag life, whichever comes first)
  daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
  
  rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_benicia$StudyID), "Release"])
  rel_num <- length(rels)
  rels_no_detects <- as.character(rels[!(rels %in% unique(detects_benicia$Release))])
  
  tagcount <- aggregate(list(unique_tags = detects_benicia$TagCode), by = list(Day = detects_benicia$Day, Release = detects_benicia$Release ), FUN = function(x){length(unique(x))})
  tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
                    
  daterange1 <- merge(daterange, tagcount1, all.x=T)
  
  if(length(rels_no_detects)>0){
    for(i in rels_no_detects){
      daterange1 <- cbind(daterange1, x=NA)
      names(daterange1)[names(daterange1) == 'x'] <- paste(i)
    }
  }
  
  #daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)
  daterange2 <- daterange1
  
  rownames(daterange2) <- daterange2$Day
  daterange2$Day <- NULL
  
  par(mar=c(6, 5, 2, 5) + 0.1)
  barp <- barplot(t(daterange2[,1:ncol(daterange2)]), plot = FALSE, beside = T)
  barplot(t(daterange2[,1:ncol(daterange2)]), beside = T, col=rainbow(rel_num), 
          xlab = "", ylab = "Number of fish arrivals per day", 
          ylim = c(0,max(daterange2[,1:ncol(daterange2)], na.rm = T)*1.2), 
          las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n")#, 
          #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
          #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
  legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)], fill= rainbow(rel_num), horiz = T, title = "Release Group")
  ybreaks <- if(max(daterange2[,1:ncol(daterange2)], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)], na.rm = T)} else {5}
  xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
  barpmeans <- colMeans(barp)
  axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
  axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)], na.rm = T), ybreaks))
  box()

#par(new=T)

#plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
#axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
#mtext("Flow (cfs) at Colusa Bridge", side=4, line=3, cex=1.5, col="blue")

}else{
  print("No detections at Benicia yet")
}
## [1] "No detections at Benicia yet"



3. Survival and Routing Probability


setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

library(data.table)
library(RMark)

gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)

study_count <- nrow(study_tagcodes)

if (nrow(detects_tower) == 0){
  "No detections yet"
} else {
  
  ## Only do survival to Sac for now
  test <- detects_study[detects_study$rkm > 168 & detects_study$rkm < 175,]
  
  ## Create inp for survival estimation
  
  inp <- as.data.frame(reshape2::dcast(test, TagCode ~ rkm, fun.aggregate = length))
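  ## inp now holds one row per tag and one column per receiver rkm; cells count detections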
  
  ## Sort columns by river km in descending order
  # Count number of genlocs
  gen_loc_sites <- ncol(inp)-1
  
  if(gen_loc_sites <2){
    "Detections at only one location so far, survival cannot yet be estimated"
  }else{
  
    inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
  
    inp <- merge(study_tagcodes, inp, by.x = "TagID_Hex", by.y = "TagCode", all.x = T)
    
    inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
    inp2[is.na(inp2)] <- 0
    inp2[inp2 > 0] <- 1
    
    inp <- cbind(inp, inp2)
    groups <- as.character(sort(unique(inp$Release)))
  
    inp[,groups] <- 0
    for (i in groups) {
      inp[as.character(inp$Release) == i, i] <- 1
    }
    
    if(length(unique(inp[,groups])) > 1){
      inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",apply(inp[,groups], 1, paste, collapse=" ")," ;",sep = "")
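      ## e.g., with two release groups, a group-1 fish detected at the first
      ## site but not the second yields the record: 110 1 0 ;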
      write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
      WRinp <- convert.inp("WRinp.inp", group.df=data.frame(rel=groups))
      WR.process <- process.data(WRinp, model="CJS", begin.time=1, groups = "rel") 
      
      WR.ddl <- make.design.data(WR.process)
    
      WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
    
      WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
      WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
      WR.surv$Detection_efficiency <- NA
      WR.surv[1,"Detection_efficiency"] <-   round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
    
      WR.surv <- cbind(c("ALL", groups), WR.surv)
    }
    if(length(unique(inp[,groups])) < 2){
      inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",1," ;",sep = "")
      write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
      WRinp <- convert.inp("WRinp.inp")
      WR.process <- process.data(WRinp, model="CJS", begin.time=1) 
      
      WR.ddl <- make.design.data(WR.process)
    
      WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    
      WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    
      WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
      WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
      WR.surv$Detection_efficiency <- NA
      WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
    
      WR.surv <- cbind(c("ALL", groups), WR.surv)
    }

    
    colnames(WR.surv) <- c("Release Week", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
    
    print(kable(WR.surv, row.names = F, "html", caption = '3.1 Minimum survival to Tower Bridge (using CJS survival model)') %>%
            kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
  }
}
3.1 Minimum survival to Tower Bridge (using CJS survival model)
Release Week | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%)
-------------|--------------|----|----------------|----------------|--------------------------
ALL          | 12.5         | 0  | 12.5           | 12.5           | 100
Week 1       | 12.5         | 0  | 12.5           | 12.5           | NA


setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))


if (nrow(detects_study) == 0){
  "No detections yet"
} else {
  
  ## Only do survival to Georg split for now
  test2 <- detects_study[detects_study$general_location %in% c("TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
  
  ## We can only do multistate model if there is at least one detection in each route
  
  if(nrow(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2"),]) == 0 |
     nrow(test2[test2$general_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"),]) == 0){
    "Too few detections: routing probability cannot be estimated"
  }else{
    
    ## Make tagcode character
    study_tagcodes$TagID_Hex <- as.character(study_tagcodes$TagID_Hex)
    ## Make a crosstab query with frequencies for all tag/location combination
    test2$general_location <- factor(test2$general_location, levels = c("TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"))
    test2$TagCode <- factor(test2$TagCode, levels = study_tagcodes$TagID_Hex)
    mytable <- table(test2$TagCode, test2$general_location) # A will be rows, B will be columns
    
    ## Change all frequencies bigger than 1 to 1. Here you could change your minimum cutoff to 2 detections, and then make another command that changes all detections=1 to 0
    mytable[mytable>0] <- "A"
    
    ## Order in order of rkm
    mytable2 <- mytable[, c("TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2")]
    
    ## Now sort the crosstab rows alphabetically
    mytable2 <- mytable2[order(row.names(mytable2)),]
    
    mytable2[which(mytable2[, "Sac_BlwGeorgiana"]=="A"), "Sac_BlwGeorgiana"] <- "A"
    mytable2[which(mytable2[, "Sac_BlwGeorgiana2"]=="A"), "Sac_BlwGeorgiana2"] <- "A"
    mytable2[which(mytable2[, "Georgiana_Slough1"]=="A"), "Georgiana_Slough1"] <- "B"
    mytable2[which(mytable2[, "Georgiana_Slough2"]=="A"), "Georgiana_Slough2"] <- "B"
    
    ## Make a crosstab query with frequencies for all weekly release groups
    #test2$Release <- factor(test2$Release)
    #mytable3 <- table(test2$TagCode, test2$Release) # A will be rows, B will be columns
    
    ## Change all frequencies bigger than 1 to 1. Here you could change your minimum cutoff to 2 detections, and then make another command that changes all detections=1 to 0
    #mytable3[mytable3>0] <- 1
    
    ## Order in order of rkm
    #mytable4 <- mytable3[, order(colnames(mytable3))]
    
    ## Now sort the crosstab rows alphabetically
    #mytable4 <- mytable4[order(row.names(mytable4)),]
    
    ## Now order the study_tagcodes table the same way
    study_tagcodes <- study_tagcodes[order(study_tagcodes$TagID_Hex),]
    
    ## Paste together (concatenate) the data from each column of the crosstab into one string per row, add to tagging_meta.
    ## For this step, make sure both are sorted by FishID
    study_tagcodes$inp_part1 <- apply(mytable2[,1:2],1,paste,collapse="")
    study_tagcodes$inp_partA <- apply(mytable2[,3:4],1,paste,collapse="")
    study_tagcodes$inp_partB <- apply(mytable2[,5:6],1,paste,collapse="")
    #study_tagcodes$inp_group <- apply(mytable4,1,paste,collapse=" ")
    
    ## We need to have a way of picking which route to assign to a fish if it was detected by both georg and blw-georg recvs
    ## We will say that the last detection at that junction is what determines the route it took
    
    ## find last detection at each genloc
    departure <- aggregate(list(depart = test2$DateTime_PST), by = list(TagID_Hex = test2$TagCode, last_location = test2$general_location), FUN = max)
    ## subset for just juncture locations
    departure <- departure[departure$last_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
    ## Find genloc of last known detection per tag
    last_depart <- aggregate(list(depart = departure$depart), by = list(TagID_Hex = departure$TagID_Hex), FUN = max)
    
    last_depart1 <- merge(last_depart, departure)
    study_tagcodes <- merge(study_tagcodes, last_depart1[,c("TagID_Hex", "last_location")], by = "TagID_Hex", all.x = T)
    
    ## Assume that the Sac is default pathway, and for fish that were detected in neither route, it would get a "00" in inp so doesn't matter anyway
    study_tagcodes$inp_final <- paste("A",study_tagcodes$inp_part1, study_tagcodes$inp_partA," 1 ;", sep = "")
    
    ## now put in exceptions...fish that were seen in georgiana last
    study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_final"] <- paste("A",study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_part1"], study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_partB"]," 1 ;", sep = "")
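    
    ## e.g., a fish detected at TowerBridge but not I80-50_Br, and detected on both
    ## Georgiana Slough lines (its last junction detections), gets the history: AA0BB 1 ;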
    
    write.table(study_tagcodes$inp_final,"WRinp_multistate.inp",row.names = F, col.names = F, quote = F)
    
    WRinp <- convert.inp("WRinp_multistate.inp")
    
    dp <- process.data(WRinp, model="Multistrata") 
    
    ddl <- make.design.data(dp)
    
    #### p ####
    # Can't be seen at 2B or 3B (tower or I80)
    ddl$p$fix=NA
    ddl$p$fix[ddl$p$stratum == "B" & ddl$p$time %in% c(2,3)]=0
    
    #### Psi ####
    # Only 1 transition allowed:
    # from A to B at time interval 3 to 4
    
    ddl$Psi$fix=0
    # A to B can only happen over interval 3 to 4 (Psi time index 3)
    ddl$Psi$fix[ddl$Psi$stratum=="A"&
                  ddl$Psi$tostratum=="B" & ddl$Psi$time==3]=NA
    
    #### Phi a.k.a. S ####
    ddl$S$fix=NA
    # Stratum B does not exist for reaches 1, 2, 3, and reach 4 (between the two Georgiana lines) has no mortality; all are fixed to 1
    ddl$S$fix[ddl$S$stratum=="B" & ddl$S$time %in% c(1,2,3,4)]=1
    
    # For route A, fixing it to 1 for 4 (between two blw_georg lines)
    ddl$S$fix[ddl$S$stratum=="A" & ddl$S$time==4]=1
    ## We use -1 at beginning of formula to remove intercept. This is because different routes probably shouldn't share the same intercept
    
    p.timexstratum=list(formula=~-1+stratum:time)
    Psi.stratumxtime=list(formula=~-1+stratum:time)
    S.stratumxtime=list(formula=~-1+stratum:time)
    
    ## Run model a first time
    S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, silent = T, output = F)
    
    ## Identify any parameter estimates at 1, which would likely have bad SE estimates.
    profile.intervals <- which(S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$estimate %in% c(0,1) & !S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$fixed == "Fixed")
    
    ## Rerun model using profile interval estimation for the tricky parameters
    S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, profile.int = profile.intervals, silent = T, output = F)
    
    results <- S.timexstratum.p.timexstratum.Psi.timexstratum$results$real
    
    results_short <- results[rownames(results) %in% c("S sA g1 c1 a0 o1 t1",
                                                      "S sA g1 c1 a1 o2 t2",
                                                      "S sA g1 c1 a2 o3 t3",
                                                      "p sA g1 c1 a1 o1 t2",
                                                      "p sA g1 c1 a2 o2 t3",
                                                      "p sA g1 c1 a3 o3 t4",
                                                      "p sB g1 c1 a3 o3 t4",
                                                      "Psi sA toB g1 c1 a2 o3 t3"
                                                      ),]
    
    
    results_short <- round(results_short[,c("estimate", "se", "lcl", "ucl")] * 100,1)
    
    results_short$Measure <- c("Survival from release to TowerBridge", "Survival from TowerBridge to I80-50_Br", "% arrived from I80-50_Br to Georgiana Slough confluence (not survival, because fish may have taken the Sutter/Steamboat route)",
                               "Detection probability at TowerBridge", "Detection probability at I80-50_Br", "Detection probability at Blw_Georgiana", "Detection probability at Georgiana Slough",
                               "Routing probability into Georgiana Slough (conditional on fish arriving at the junction)")
    
    results_short <- results_short[,c("Measure", "estimate", "se", "lcl", "ucl")]
    colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")

    print(kable(results_short, row.names = F, "html", caption = '3.2 Reach-specific survival and probability of entering Georgiana Slough') %>%
            kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
  }
}

## [1] "Too few detections: routing probability cannot be estimated"


setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

if (nrow(detects_benicia) == 0){
  "No detections yet"
} else {
  
  benicia <- read.csv("benicia_surv.csv", stringsAsFactors = F)
  benicia$RelDT <- as.POSIXct(benicia$RelDT)

  ## Only do survival to Benicia here
  test3 <- detects_study[detects_study$rkm < 53,]
  
  ## Create inp for survival estimation
  
  inp <- as.data.frame(reshape2::dcast(test3, TagCode ~ rkm, fun.aggregate = length))
  
  ## Sort columns by river km in descending order
  # Count number of genlocs
  gen_loc_sites <- ncol(inp)-1
  
  inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]

  inp <- merge(study_tagcodes, inp, by.x = "TagID_Hex", by.y = "TagCode", all.x = T)
  
  inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
  inp2[is.na(inp2)] <- 0
  inp2[inp2 > 0] <- 1
  
  inp <- cbind(inp, inp2)
  groups <- as.character(sort(unique(inp$Release)))

  inp[,groups] <- 0
  for (i in groups) {
    inp[as.character(inp$Release) == i, i] <- 1
  }
  
  if(length(groups) > 1){
    inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",apply(inp[,groups], 1, paste, collapse=" ")," ;",sep = "")
  }else{
    inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",inp[,groups]," ;",sep = "")
  }
  
  
  write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
  
  if(length(groups) > 1){
  
    WRinp <- convert.inp("WRinp.inp", group.df=data.frame(rel=groups))
    WR.process <- process.data(WRinp, model="CJS", begin.time=1, groups = "rel") 
    
    WR.ddl <- make.design.data(WR.process)
    
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
    
    WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
    
    WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
    WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
    
  }else{
    
    WRinp <- convert.inp("WRinp.inp")
    WR.process <- process.data(WRinp, model="CJS", begin.time=1) 
    
      
    WR.ddl <- make.design.data(WR.process)
    
    WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)

    WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
    
  }
  
  WR.surv$Detection_efficiency <- NA
  WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
    
  WR.surv <- cbind(Release = c("ALL", groups), WR.surv)

  WR.surv1 <- WR.surv
  colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")

  print(kable(WR.surv1, row.names = F, "html", caption = '3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model)') %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))    
  
  ## Find mean release time per release group, and ALL
  reltimes <- aggregate(list(RelDT = study_tagcodes$RelDT), by = list(Release = study_tagcodes$Release), FUN = mean)
  reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$RelDT)))

  ## Assign whether the results are tentative or final
  quality <- "tentative"
  if(endtime < as.Date(c(Sys.time()))) { quality <- "final"}
  WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
  
  WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = '1970-01-01')
  
  ## remove old benicia record for this studyID
  benicia <- benicia[!benicia$StudyID == unique(study_tagcodes$StudyID),]
  
  benicia <- rbind(benicia, data.frame(WR.surv, StudyID = unique(study_tagcodes$StudyID), data_quality = quality))
  
  write.csv(benicia, "benicia_surv.csv", row.names = F, quote = F) 
  
}

## [1] "No detections yet"



4. Detection Statistics

setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))

if (nrow(detects_study) == 0){
  "No detections yet"
} else {
  
  arrivals <- aggregate(list(DateTime_PST = detects_study$DateTime_PST), by = list(general_location = detects_study$general_location, TagCode = detects_study$TagCode), FUN = min)
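  ## arrivals = first detection time of each tag at each general location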
  
  tag_stats <- aggregate(list(First_arrival = arrivals$DateTime_PST), 
                         by= list(general_location = arrivals$general_location),
                         FUN = min)
  tag_stats <- merge(tag_stats, 
                     aggregate(list(Mean_arrival = arrivals$DateTime_PST), 
                         by= list(general_location = arrivals$general_location),
                         FUN = mean), 
                     by = c("general_location"))
  tag_stats <- merge(tag_stats, 
                     aggregate(list(Last_arrival = arrivals$DateTime_PST), 
                         by= list(general_location = arrivals$general_location),
                         FUN = max), 
                     by = c("general_location"))
  tag_stats <- merge(tag_stats, 
                     aggregate(list(Fish_count = arrivals$TagCode), 
                         by= list(general_location = arrivals$general_location), 
                         FUN = function(x) {length(unique(x))}), 
                     by = c("general_location"))
  tag_stats$Percent_arrived <- round(tag_stats$Fish_count/study_count * 100,2)
      
  tag_stats <- merge(tag_stats, unique(gen_locs[,c("general_location", "rkm")]))
  
  tag_stats <- tag_stats[order(tag_stats$rkm, decreasing = T),]
  
  tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")

  print(kable(tag_stats, row.names = F, 
              caption = "4.1 Detections for all releases combined",
              "html") %>%
          kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
  count=1
  for (j in sort(unique(study_tagcodes$Release))) {
    
    if(nrow(detects_study[detects_study$Release == j,]) > 0 ) {
      count=count+1  
      temp <- detects_study[detects_study$Release == j,]
      
        arrivals1 <- aggregate(list(DateTime_PST = temp$DateTime_PST), by = list(general_location = temp$general_location, TagCode = temp$TagCode), FUN = min)
  
      rel_count <- nrow(study_tagcodes[study_tagcodes$Release == j,])
  
      tag_stats1 <- aggregate(list(First_arrival = arrivals1$DateTime_PST), 
                             by= list(general_location = arrivals1$general_location), 
                             FUN = min)
      tag_stats1 <- merge(tag_stats1, 
                         aggregate(list(Mean_arrival = arrivals1$DateTime_PST), 
                             by= list(general_location = arrivals1$general_location), 
                             FUN = mean), 
                         by = c("general_location"))
      tag_stats1 <- merge(tag_stats1, 
                   aggregate(list(Last_arrival = arrivals1$DateTime_PST), 
                       by= list(general_location = arrivals1$general_location), 
                       FUN = max), 
                   by = c("general_location"))
      tag_stats1 <- merge(tag_stats1, 
                         aggregate(list(Fish_count = arrivals1$TagCode), 
                                   by= list(general_location = arrivals1$general_location), 
                                   FUN = function(x) {length(unique(x))}), 
                         by = c("general_location"))
      
      tag_stats1$Percent_arrived <- round(tag_stats1$Fish_count/rel_count * 100,2)
    
      tag_stats1 <- merge(tag_stats1, unique(gen_locs[,c("general_location", "rkm")]))
    
      tag_stats1 <- tag_stats1[order(tag_stats1$rkm, decreasing = T),]
      
      tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")
      
      final_stats <- kable(tag_stats1, row.names = F, 
            caption = paste("4.",count," Detections for ",j," release groups", sep = ""),
            "html")
      
      print(kable_styling(final_stats, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
      
    } else {
      cat("\n\n\\pagebreak\n")
      print(paste("No detections for",j,"release group yet", sep=" "), quote = F)
      cat("\n\n\\pagebreak\n")
    }
  }
}
4.1 Detections for all releases combined
general_location | First_arrival       | Mean_arrival        | Last_arrival        | Fish_count | Percent_arrived | rkm
-----------------|---------------------|---------------------|---------------------|------------|-----------------|--------
TowerBridge      | 2018-12-21 12:49:33 | 2018-12-21 12:49:33 | 2018-12-21 12:49:33 | 1          | 12.5            | 172.000
I80-50_Br        | 2018-12-21 08:58:30 | 2018-12-21 08:58:30 | 2018-12-21 08:58:30 | 1          | 12.5            | 170.748
## Clear the workspace and delete temporary MARK input/output files
rm(list = ls())
cleanup(ask = F)