Hatchery-origin acclimation paired-release fall-run Chinook salmon
2018-2019 Season (PROVISIONAL DATA)
1. Project Status
The study is complete and all tags are no longer active. All times are in Pacific Standard Time.
The Telemetry Study Template for this study can be found here.
library(knitr)       ## kable()
library(kableExtra)  ## kable_styling(); also provides the %>% pipe
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
tagcodes <- read.csv("qry_HexCodes.txt", stringsAsFactors = F, colClasses=c("TagID_Hex"="character"))
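## Note: "Etc/GMT+8" is UTC-8 year-round (PST, no daylight saving); the sign in
## POSIX "Etc/GMT*" zone names is intentionally reversed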
tagcodes$RelDT <- as.POSIXct(tagcodes$RelDT, format = "%m/%d/%Y %I:%M:%S %p", tz = "Etc/GMT+8")
latest <- read.csv("latest_download.csv", stringsAsFactors = F)
study_tagcodes <- tagcodes[tagcodes$StudyID == "ColemanAltRelStrat2019",]
if (nrow(study_tagcodes) == 0){
cat("Project has not yet begun")
}else{
cat(paste("Project began on ", min(study_tagcodes$RelDT), "; see tagging details below:", sep = ""))
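## Two release strategies: fish released on or before 2019-04-12 went upstream
## (BattleCkColeman); fish released after that date went downstream (ScottysLanding)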
study_tagcodes$Release <- "Upstream"
study_tagcodes[study_tagcodes$RelDT > as.POSIXct("2019-04-12"), "Release"] <- "Downstream"
release_stats <- aggregate(list(First_release_time = study_tagcodes$RelDT),
by= list(Release = study_tagcodes$Release),
FUN = min)
release_stats <- merge(release_stats,
aggregate(list(Last_release_time = study_tagcodes$RelDT),
by= list(Release = study_tagcodes$Release),
FUN = max),
by = c("Release"))
release_stats <- merge(release_stats, aggregate(list(Number_fish_released =
study_tagcodes$TagID_Hex),
by= list(Release = study_tagcodes$Release),
FUN = function(x) {length(unique(x))}),
by = c("Release"))
release_stats <- merge(release_stats,
aggregate(list(Release_location = study_tagcodes$Rel_loc),
by= list(Release = study_tagcodes$Release),
FUN = function(x) {head(x,1)}),
by = c("Release"))
release_stats <- merge(release_stats,
aggregate(list(Release_rkm = study_tagcodes$Rel_rkm),
by= list(Release = study_tagcodes$Release),
FUN = function(x) {head(x,1)}),
by = c("Release"))
release_stats <- merge(release_stats,
aggregate(list(Mean_length = study_tagcodes$Length),
by= list(Release = study_tagcodes$Release),
FUN = mean, na.rm = T),
by = c("Release"))
release_stats <- merge(release_stats,
aggregate(list(Mean_weight = study_tagcodes$Weight),
by= list(Release = study_tagcodes$Release),
FUN = mean, na.rm = T),
by = c("Release"))
release_stats[,c("Mean_length", "Mean_weight")] <- round(release_stats[,c("Mean_length", "Mean_weight")],1)
release_stats$First_release_time <- format(release_stats$First_release_time, tz = "Etc/GMT+8")
release_stats$Last_release_time <- format(release_stats$Last_release_time, tz = "Etc/GMT+8")
kable(release_stats, format = "html") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left")
}
Project began on 2019-04-11 15:00:00; see tagging details below:
| Release | First_release_time | Last_release_time | Number_fish_released | Release_location | Release_rkm | Mean_length | Mean_weight |
|---|---|---|---|---|---|---|---|
| Downstream | 2019-04-13 17:00:00 | 2019-04-13 17:00:00 | 301 | ScottysLanding | 410.512 | 84.2 | 6.5 |
| Upstream | 2019-04-11 15:00:00 | 2019-04-11 15:00:00 | 300 | BattleCkColeman | 534.366 | 83.9 | 6.6 |
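For reference, the six aggregate()/merge() passes above can be collapsed into one split/apply pass. A minimal base-R sketch, assuming the same column names as qry_HexCodes.txt:

## One summary row per release group (equivalent to release_stats above)
release_summary <- function(df) data.frame(
  First_release_time   = min(df$RelDT),
  Last_release_time    = max(df$RelDT),
  Number_fish_released = length(unique(df$TagID_Hex)),
  Release_location     = df$Rel_loc[1],
  Release_rkm          = df$Rel_rkm[1],
  Mean_length          = round(mean(df$Length, na.rm = TRUE), 1),
  Mean_weight          = round(mean(df$Weight, na.rm = TRUE), 1))
do.call(rbind, lapply(split(study_tagcodes, study_tagcodes$Release), release_summary))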
2. Real-time Fish Detections
Data current as of 2025-04-22 09:00:00. All times are in Pacific Standard Time.
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
library(cder)
library(reshape2)
detects_study <- read.csv("C:/Users/field/Desktop/Real-time data massaging/products/Study_detection_files/detects_ColemanAltRelStrat2019.csv", stringsAsFactors = F)
detects_study$DateTime_PST <- as.POSIXct(detects_study$DateTime_PST, format = "%Y-%m-%d %H:%M:%S", "Etc/GMT+8")
if(nrow(detects_study)>0){
detects_study <- merge(detects_study, study_tagcodes[,c("TagID_Hex", "RelDT", "StudyID", "Release", "tag_life")], by.x = "TagCode", by.y = "TagID_Hex")
}
#detects_study <- detects_study[detects_study$recv != 17135,]
detects_butte <- detects_study[detects_study$general_location == "ButteBrRT",]
#wlk_flow <- read.csv("wlk.csv")
if (nrow(detects_butte) == 0){
"No detections yet"
} else {
detects_butte <- merge(detects_butte,aggregate(list(first_detect = detects_butte$DateTime_PST), by = list(TagCode= detects_butte$TagCode), FUN = min))
detects_butte$Day <- as.Date(detects_butte$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_butte$RelDT), "Etc/GMT+8")
## Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(c(Sys.time())), max(as.Date(detects_butte$RelDT)+(detects_butte$tag_life*1.5)))
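## (1.5 x the advertised tag life is used as a conservative cutoff for the plot window)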
# library(dataRetrieval)
#
# BTC_flow_day <- readNWISdv(siteNumbers = "11389500", parameterCd = "00060", startDate = starttime, endDate = endtime+1)
# colnames(BTC_flow_day)[4] <- "parameter_value"
# colnames(BTC_flow_day)[3] <- "Day"
BTC_flow <- cdec_query("BTC", "20", "H", starttime, endtime+1)
BTC_flow$datetime <- as.Date(BTC_flow$DateTime)
BTC_flow_day <- aggregate(list(parameter_value = BTC_flow$Value),
by = list(Day = BTC_flow$datetime),
FUN = mean, na.rm = T)
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_butte$StudyID), "Release"])
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_butte$Release))])
tagcount <- aggregate(list(unique_tags = detects_butte$TagCode), by = list(Day = detects_butte$Day, Release = detects_butte$Release ), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
daterange1 <- merge(daterange, tagcount1, all.x=T)
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == 'x'] <- paste(i)
}
}
daterange2 <- merge(daterange1, BTC_flow_day, by = "Day", all.x = T)
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
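## Note: "1:ncol(daterange2)-1" parses as (1:ncol(daterange2))-1, i.e. 0:(ncol-1);
## the 0 index is dropped, so this selects every column except the last (flow)
## column. The same pattern recurs in the chunks below.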
barp <- barplot(t(daterange2[,1:ncol(daterange2)-1]), plot = FALSE, beside = T)
barplot(t(daterange2[,1:ncol(daterange2)-1]), beside = T, col=rainbow(rel_num),
xlab = "", ylab = "Number of fish arrivals per day",
ylim = c(0,max(daterange2[,1:ncol(daterange2)-1], na.rm = T)*1.2),
las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n", border=NA)#,
#legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
#args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)-1], fill= rainbow(rel_num), horiz = T, title = "Release")
ybreaks <- if(max(daterange2[,1:ncol(daterange2)-1], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)-1], na.rm = T)} else {5}
xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
barpmeans <- colMeans(barp)
axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2[xbreaks,]), las = 2)
axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)-1], na.rm = T), ybreaks))
par(new=T)
plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
mtext("Flow (cfs) at Butte City", side=4, line=3, cex=1.5, col="blue")
}
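The chunks above and below overlay a flow line on the arrival barplot by drawing two plots into one panel with independent y-axes. A minimal, self-contained sketch of that pattern, using toy data (not study data):

## Dual-axis overlay: bars on the left axis, a line series on the right axis
counts <- c(2, 5, 3, 7)               # toy values: fish arrivals per day
flow   <- c(8000, 8400, 9100, 8800)   # toy values: daily mean flow (cfs)
par(mar = c(5, 4, 2, 5) + 0.1)        # widen the right margin for the flow axis
bp <- barplot(counts, plot = FALSE)   # bar midpoints, used to align both layers
barplot(counts, ylab = "Arrivals per day", xlim = c(0, max(bp) + 1))
par(new = TRUE)                       # draw the next plot over the bars
plot(bp, flow, type = "l", lwd = 2, col = "blue",
     axes = FALSE, xlab = "", ylab = "", xlim = c(0, max(bp) + 1))
axis(side = 4)                        # right-hand axis for flow
mtext("Flow (cfs)", side = 4, line = 3, col = "blue")

Passing the same xlim to both calls keeps the flow line aligned with the bar midpoints that barplot() returns.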
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
library(reshape2)
detects_tower <- detects_study[detects_study$general_location == "TowerBridge",]
#wlk_flow <- read.csv("wlk.csv")
if (nrow(detects_tower) == 0){
"No detections yet"
} else {
detects_tower <- merge(detects_tower,aggregate(list(first_detect = detects_tower$DateTime_PST), by = list(TagCode= detects_tower$TagCode), FUN = min))
detects_tower$Day <- as.Date(detects_tower$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_tower$RelDT), "Etc/GMT+8")
## Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(c(Sys.time())), max(as.Date(detects_tower$RelDT)+(detects_tower$tag_life*1.5)))
library(dataRetrieval)
wlk_flow_day <- readNWISdv(siteNumbers = "11390500", parameterCd = "00060", startDate = starttime, endDate = endtime+1)
colnames(wlk_flow_day)[4] <- "parameter_value"
colnames(wlk_flow_day)[3] <- "Day"
# wlk_flow <- cdec_query("WLK", "20", "H", starttime, endtime+1)
#
# wlk_flow$datetime <- as.Date(wlk_flow$datetime)
# wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$parameter_value),
# by = list(Day = wlk_flow$datetime),
# FUN = mean, na.rm = T)
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_tower$StudyID), "Release"])
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_tower$Release))])
tagcount <- aggregate(list(unique_tags = detects_tower$TagCode), by = list(Day = detects_tower$Day, Release = detects_tower$Release ), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
daterange1 <- merge(daterange, tagcount1, all.x=T)
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == 'x'] <- paste(i)
}
}
daterange2 <- merge(daterange1, wlk_flow_day[,3:4], by = "Day", all.x = T)
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
barp <- barplot(t(daterange2[,1:ncol(daterange2)-1]), plot = FALSE, beside = T)
barplot(t(daterange2[,1:ncol(daterange2)-1]), beside = T, col=rainbow(rel_num),
xlab = "", ylab = "Number of fish arrivals per day",
ylim = c(0,max(daterange2[,1:ncol(daterange2)-1], na.rm = T)*1.2),
las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n", border=NA)#,
#legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
#args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)-1], fill= rainbow(rel_num), horiz = T, title = "Release")
ybreaks <- if(max(daterange2[,1:ncol(daterange2)-1], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)-1], na.rm = T)} else {5}
xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
barpmeans <- colMeans(barp)
axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2[xbreaks,]), las = 2)
axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)-1], na.rm = T), ybreaks))
par(new=T)
plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
mtext("Flow (cfs) at Wilkins Slough", side=4, line=3, cex=1.5, col="blue")
}
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]
if (nrow(detects_benicia)>0) {
detects_benicia <- merge(detects_benicia,aggregate(list(first_detect = detects_benicia$DateTime_PST), by = list(TagCode= detects_benicia$TagCode), FUN = min))
detects_benicia$Day <- as.Date(detects_benicia$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_benicia$RelDT), "Etc/GMT+8")
## Define endtime locally (rather than relying on the value left over from an
## earlier chunk): either now or 1.5 x tag life past release, whichever comes first
endtime <- min(as.Date(c(Sys.time())), max(as.Date(detects_benicia$RelDT)+(detects_benicia$tag_life*1.5)))
#wlk_flow <- cdec_query("COL", "20", "H", starttime, endtime+1)
#wlk_flow$datetime <- as.Date(wlk_flow$datetime)
#wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$parameter_value), by = list(Day = wlk_flow$datetime), FUN = mean, na.rm = T)
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_benicia$StudyID), "Release"])
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_benicia$Release))])
tagcount <- aggregate(list(unique_tags = detects_benicia$TagCode), by = list(Day = detects_benicia$Day, Release = detects_benicia$Release ), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
daterange1 <- merge(daterange, tagcount1, all.x=T)
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == 'x'] <- paste(i)
}
}
#daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)
daterange2 <- daterange1
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
barp <- barplot(t(daterange2[,1:ncol(daterange2)]), plot = FALSE, beside = T)
barplot(t(daterange2[,1:ncol(daterange2)]), beside = T, col=rainbow(rel_num),
xlab = "", ylab = "Number of fish arrivals per day",
ylim = c(0,max(daterange2[,1:ncol(daterange2)], na.rm = T)*1.2),
las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n")#,
#legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
#args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)], fill= rainbow(rel_num), horiz = T, title = "Release")
ybreaks <- if(max(daterange2[,1:ncol(daterange2)], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)], na.rm = T)} else {5}
xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
barpmeans <- colMeans(barp)
axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)], na.rm = T), ybreaks))
box()
#par(new=T)
#plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
#axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
#mtext("Flow (cfs) at Colusa Bridge", side=4, line=3, cex=1.5, col="blue")
}else{
print("No detections at Benicia yet")
}
3. Survival and Routing Probability
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
library(data.table)
library(RMark)
gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
study_count <- nrow(study_tagcodes)
if (nrow(detects_tower) == 0){
"No detections yet"
} else {
## Only do survival to Sac for now
test <- detects_study[detects_study$rkm > 168 & detects_study$rkm < 175,]
## Create inp for survival estimation
inp <- as.data.frame(reshape2::dcast(test, TagCode ~ rkm, fun.aggregate = length))
## Sort columns by river km in descending order
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
if(gen_loc_sites <2){
"Detections at only one location so far, survival cannot yet be estimated"
}else{
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by.x = "TagID_Hex", by.y = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
inp[,groups] <- 0
for (i in groups) {
inp[as.character(inp$Release) == i, i] <- 1
}
if(length(unique(inp[,groups])) > 1){
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",apply(inp[,groups], 1, paste, collapse=" ")," ;",sep = "")
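## e.g., a Downstream-group fish detected at both receiver lines gets the
## encounter history "111 1 0 ;" (release + two sites, then the group dummies)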
write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
WRinp <- convert.inp("WRinp.inp", group.df=data.frame(rel=groups))
WR.process <- process.data(WRinp, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(c("ALL", groups), WR.surv)
}
if(length(unique(inp[,groups])) < 2){
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ", 1,sep = "")
write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
WRinp <- convert.inp("WRinp.inp")
WR.process <- process.data(WRinp, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(c("ALL", groups), WR.surv)
}
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv, row.names = F, "html", caption = '3.1 Minimum survival to Tower Bridge (using CJS survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival') %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
}
}
3.1 Minimum survival to Tower Bridge (using CJS survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival
| Release | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) |
|---|---|---|---|---|---|
| ALL | 26.8 | 1.9 | 23.3 | 30.6 | 73.2 |
| Downstream | 24.3 | 2.6 | 19.7 | 29.8 | NA |
| Upstream | 29.3 | 2.7 | 24.3 | 34.8 | NA |
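As a back-of-envelope check on the CJS output: 118 of the 601 tagged fish were detected at TowerBridge (Table 4.1), i.e. 19.6%; dividing by the estimated detection efficiency gives 0.196 / 0.732 ≈ 26.8%, reproducing the overall survival estimate above.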
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
if (nrow(detects_study) == 0){
"No detections yet"
} else {
## Only do survival to Georg split for now
test2 <- detects_study[detects_study$general_location %in% c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
## We can only do multistate model if there is at least one detection in each route
if(nrow(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2"),]) == 0 |
nrow(test2[test2$general_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"),]) == 0){
"Too few detections: routing probability cannot be estimated"
}else{
## Make tagcode character
study_tagcodes$TagID_Hex <- as.character(study_tagcodes$TagID_Hex)
## Make a crosstab query with frequencies for all tag/location combination
test2$general_location <- factor(test2$general_location, levels = c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"))
test2$TagCode <- factor(test2$TagCode, levels = study_tagcodes$TagID_Hex)
mytable <- table(test2$TagCode, test2$general_location) # A will be rows, B will be columns
## Flag every tag/location combination with at least one detection. A minimum
## cutoff of 2 detections could be applied here instead, treating single detections as 0
mytable[mytable>0] <- "A"
## Order in order of rkm
mytable2 <- mytable[, c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2")]
## Now sort the crosstab rows alphabetically
mytable2 <- mytable2[order(row.names(mytable2)),]
## Recode the junction columns into route states: the Sacramento mainstem
## (Sac_BlwGeorgiana lines) stays state "A"; Georgiana Slough detections become state "B"
mytable2[which(mytable2[, "Georgiana_Slough1"]=="A"), "Georgiana_Slough1"] <- "B"
mytable2[which(mytable2[, "Georgiana_Slough2"]=="A"), "Georgiana_Slough2"] <- "B"
## Make a crosstab query with frequencies for all weekly Release groups
#test2$Release <- factor(test2$Release)
#mytable3 <- table(test2$TagCode, test2$Release) # A will be rows, B will be columns
## Change all frequencies bigger than 1 to 1. Here you could change your minimum cutoff to 2 detections, and then make another command that changes all detections=1 to 0
#mytable3[mytable3>0] <- 1
## Order in order of rkm
#mytable4 <- mytable3[, order(colnames(mytable3))]
## Now sort the crosstab rows alphabetically
#mytable4 <- mytable4[order(row.names(mytable4)),]
## Now order the study_tagcodes table the same way
study_tagcodes <- study_tagcodes[order(study_tagcodes$TagID_Hex),]
## Paste together (concatenate) the data from each column of the crosstab into one string per row, add to tagging_meta.
## For this step, make sure both are sorted by FishID
study_tagcodes$inp_part1 <- apply(mytable2[,1:3],1,paste,collapse="")
study_tagcodes$inp_partA <- apply(mytable2[,4:5],1,paste,collapse="")
study_tagcodes$inp_partB <- apply(mytable2[,6:7],1,paste,collapse="")
#study_tagcodes$inp_group <- apply(mytable4,1,paste,collapse=" ")
## We need to have a way of picking which route to assign to a fish if it was detected by both georg and blw-georg recvs
## We will say that the last detection at that junction is what determines the route it took
## find last detection at each genloc
departure <- aggregate(list(depart = test2$DateTime_PST), by = list(TagID_Hex = test2$TagCode, last_location = test2$general_location), FUN = max)
## subset for just juncture locations
departure <- departure[departure$last_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
## Find genloc of last known detection per tag
last_depart <- aggregate(list(depart = departure$depart), by = list(TagID_Hex = departure$TagID_Hex), FUN = max)
last_depart1 <- merge(last_depart, departure)
study_tagcodes <- merge(study_tagcodes, last_depart1[,c("TagID_Hex", "last_location")], by = "TagID_Hex", all.x = T)
## Assume the Sac is the default pathway; fish detected in neither route get "00" in the inp, so the assignment doesn't matter for them
study_tagcodes$inp_final <- paste("A",study_tagcodes$inp_part1, study_tagcodes$inp_partA," 1 ;", sep = "")
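## e.g., a fish detected at TowerBridge, I80-50_Br, and both Sac_BlwGeorgiana
## lines yields "A0AAAA 1 ;" (release, then Butte/Tower/I80, then the two junction lines)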
## now put in exceptions...fish that were seen in georgiana last
study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_final"] <- paste("A",study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_part1"], study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_partB"]," 1 ;", sep = "")
write.table(study_tagcodes$inp_final,"WRinp_multistate.inp",row.names = F, col.names = F, quote = F)
WRinp <- convert.inp("WRinp_multistate.inp")
dp <- process.data(WRinp, model="Multistrata")
ddl <- make.design.data(dp)
#### p ####
# Can't be seen at 2B or 3B or 4B (butte, tower or I80)
ddl$p$fix=NA
ddl$p$fix[ddl$p$stratum == "B" & ddl$p$time %in% c(2,3,4)]=0
#### Psi ####
# Only 1 transition allowed:
# from A to B at time interval 4 to 5
ddl$Psi$fix=0
# A to B can only happen at time 4, i.e. the I80-50_Br to junction interval
ddl$Psi$fix[ddl$Psi$stratum=="A"&
ddl$Psi$tostratum=="B" & ddl$Psi$time==4]=NA
#### Phi a.k.a. S ####
ddl$S$fix=NA
# Route B does not exist for reaches 1-4, and survival between the two Georgiana lines (reach 5) is fixed to 1
ddl$S$fix[ddl$S$stratum=="B" & ddl$S$time %in% c(1,2,3,4,5)]=1
# For route A, fixing it to 1 for 5 (between two blw_georg lines)
ddl$S$fix[ddl$S$stratum=="A" & ddl$S$time==5]=1
## We use -1 at beginning of formula to remove intercept. This is because different routes probably shouldn't share the same intercept
p.timexstratum=list(formula=~-1+stratum:time)
Psi.stratumxtime=list(formula=~-1+stratum:time)
S.stratumxtime=list(formula=~-1+stratum:time)
## Run model a first time
S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, silent = T, output = F)
## Identify any parameter estimates at the 0/1 boundary, which likely have unreliable SE estimates
profile.intervals <- which(S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$estimate %in% c(0,1) & !S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$fixed == "Fixed")
## Rerun model using profile interval estimation for the tricky parameters
S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, profile.int = profile.intervals, silent = T, output = F)
results <- S.timexstratum.p.timexstratum.Psi.timexstratum$results$real
results_short <- results[rownames(results) %in% c("S sA g1 c1 a0 o1 t1",
"S sA g1 c1 a1 o2 t2",
"S sA g1 c1 a2 o3 t3",
"S sA g1 c1 a3 o4 t4",
"p sA g1 c1 a1 o1 t2",
"p sA g1 c1 a2 o2 t3",
"p sA g1 c1 a3 o3 t4",
"p sA g1 c1 a4 o4 t5",
"p sB g1 c1 a4 o4 t5",
"Psi sA toB g1 c1 a3 o4 t4"
),]
results_short <- round(results_short[,c("estimate", "se", "lcl", "ucl")] * 100,1)
results_short$Measure <- c("Survival from release to Butte City","Survival from Butte City to TowerBridge (minimum estimate since fish may have taken Yolo Bypass)", "Survival from TowerBridge to I80-50_Br", "% arrived from I80-50_Br to Georgiana Slough confluence (not survival because fish may have taken Sutter/Steamboat sloughs)","Detection probability at Butte City",
"Detection probability at TowerBridge", "Detection probability at I80-50_Br", "Detection probability at Blw_Georgiana", "Detection probability at Georgiana Slough",
"Routing probability into Georgiana Slough (conditional on fish arriving at the junction)")
results_short <- results_short[,c("Measure", "estimate", "se", "lcl", "ucl")]
colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(results_short, row.names = F, "html", caption = '3.2 Reach-specific survival and probability of entering Georgiana Slough') %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
}
}
3.2 Reach-specific survival and probability of entering Georgiana Slough
| Measure | Estimate | SE | 95% lower C.I. | 95% upper C.I. |
|---|---|---|---|---|
| Survival from release to Butte City | 68.9 | 10.4 | 46.0 | 85.2 |
| Survival from Butte City to TowerBridge (minimum estimate since fish may have taken Yolo Bypass) | 41.7 | 6.7 | 29.5 | 55.1 |
| Survival from TowerBridge to I80-50_Br | 100.0 | 0.0 | 96.5 | 100.0 |
| % arrived from I80-50_Br to Georgiana Slough confluence (not survival because fish may have taken Sutter/Steamboat sloughs) | 72.9 | 3.5 | 65.5 | 79.3 |
| Detection probability at Butte City | 13.5 | 2.6 | 9.2 | 19.5 |
| Detection probability at TowerBridge | 68.3 | 3.6 | 60.9 | 75.0 |
| Detection probability at I80-50_Br | 82.2 | 3.0 | 75.5 | 87.4 |
| Detection probability at Blw_Georgiana | 84.4 | 3.7 | 75.7 | 90.4 |
| Detection probability at Georgiana Slough | 100.0 | 0.0 | 92.3 | 100.0 |
| Routing probability into Georgiana Slough (conditional on fish arriving at the junction) | 19.1 | 3.5 | 13.1 | 26.9 |
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
if (nrow(detects_benicia) == 0){
"No detections yet"
} else {
benicia <- read.csv("benicia_surv.csv", stringsAsFactors = F)
benicia$RelDT <- as.POSIXct(benicia$RelDT)
## Only do survival to Benicia here
test3 <- detects_study[detects_study$rkm < 53,]
## Create inp for survival estimation
inp <- as.data.frame(reshape2::dcast(test3, TagCode ~ rkm, fun.aggregate = length))
## Sort columns by river km in descending order
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by.x = "TagID_Hex", by.y = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
inp[,groups] <- 0
for (i in groups) {
inp[as.character(inp$Release) == i, i] <- 1
}
if(length(groups) > 1){
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",apply(inp[,groups], 1, paste, collapse=" ")," ;",sep = "")
}else{
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",inp[,groups]," ;",sep = "")
}
write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
if(length(groups) > 1){
WRinp <- convert.inp("WRinp.inp", group.df=data.frame(rel=groups))
WR.process <- process.data(WRinp, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
}else{
WRinp <- convert.inp("WRinp.inp")
WR.process <- process.data(WRinp, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
}
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(Release = c("ALL", groups), WR.surv)
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = '3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model)') %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
## Find mean release time per release group, and ALL
reltimes <- aggregate(list(RelDT = study_tagcodes$RelDT), by = list(Release = study_tagcodes$Release), FUN = mean)
reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$RelDT)))
## Assign whether the results are tentative or final
quality <- "tentative"
if(endtime < as.Date(c(Sys.time()))) { quality <- "final"}
WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = '1970-01-01')
## remove old benicia record for this studyID
benicia <- benicia[!benicia$StudyID == unique(study_tagcodes$StudyID),]
benicia <- rbind(benicia, data.frame(WR.surv, StudyID = unique(study_tagcodes$StudyID), data_quality = quality))
write.csv(benicia, "benicia_surv.csv", row.names = F, quote = F)
}
3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model)
| Release Group | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) |
|---|---|---|---|---|---|
| ALL | 18.6 | 1.6 | 15.7 | 22.0 | 99.1 |
| Downstream | 16.9 | 2.2 | 13.1 | 21.6 | NA |
| Upstream | 20.3 | 2.3 | 16.2 | 25.3 | NA |
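The same back-of-envelope check applies here: 111 of 601 fish (18.5%) were detected at Benicia East (Table 4.1), and 0.185 / 0.991 ≈ 18.6%, matching the overall estimate.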
4. Detection Statistics
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
if (nrow(detects_study) == 0){
"No detections yet"
} else {
arrivals <- aggregate(list(DateTime_PST = detects_study$DateTime_PST), by = list(general_location = detects_study$general_location, TagCode = detects_study$TagCode), FUN = min)
tag_stats <- aggregate(list(First_arrival = arrivals$DateTime_PST),
by= list(general_location = arrivals$general_location),
FUN = min)
tag_stats <- merge(tag_stats,
aggregate(list(Mean_arrival = arrivals$DateTime_PST),
by= list(general_location = arrivals$general_location),
FUN = mean),
by = c("general_location"))
tag_stats <- merge(tag_stats,
aggregate(list(Last_arrival = arrivals$DateTime_PST),
by= list(general_location = arrivals$general_location),
FUN = max),
by = c("general_location"))
tag_stats <- merge(tag_stats,
aggregate(list(Fish_count = arrivals$TagCode),
by= list(general_location = arrivals$general_location),
FUN = function(x) {length(unique(x))}),
by = c("general_location"))
tag_stats$Percent_arrived <- round(tag_stats$Fish_count/study_count * 100,2)
tag_stats <- merge(tag_stats, unique(gen_locs[,c("general_location", "rkm")]))
tag_stats <- tag_stats[order(tag_stats$rkm, decreasing = T),]
tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")
print(kable(tag_stats, row.names = F,
caption = "4.1 Detections for all releases combined",
"html") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
count=1
for (j in sort(unique(study_tagcodes$Release))) {
if(nrow(detects_study[detects_study$Release == j,]) > 0 ) {
count=count+1
temp <- detects_study[detects_study$Release == j,]
arrivals1 <- aggregate(list(DateTime_PST = temp$DateTime_PST), by = list(general_location = temp$general_location, TagCode = temp$TagCode), FUN = min)
rel_count <- nrow(study_tagcodes[study_tagcodes$Release == j,])
tag_stats1 <- aggregate(list(First_arrival = arrivals1$DateTime_PST),
by= list(general_location = arrivals1$general_location),
FUN = min)
tag_stats1 <- merge(tag_stats1,
aggregate(list(Mean_arrival = arrivals1$DateTime_PST),
by= list(general_location = arrivals1$general_location),
FUN = mean),
by = c("general_location"))
tag_stats1 <- merge(tag_stats1,
aggregate(list(Last_arrival = arrivals1$DateTime_PST),
by= list(general_location = arrivals1$general_location),
FUN = max),
by = c("general_location"))
tag_stats1 <- merge(tag_stats1,
aggregate(list(Fish_count = arrivals1$TagCode),
by= list(general_location = arrivals1$general_location),
FUN = function(x) {length(unique(x))}),
by = c("general_location"))
tag_stats1$Percent_arrived <- round(tag_stats1$Fish_count/rel_count * 100,2)
tag_stats1 <- merge(tag_stats1, unique(gen_locs[,c("general_location", "rkm")]))
tag_stats1 <- tag_stats1[order(tag_stats1$rkm, decreasing = T),]
tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")
final_stats <- kable(tag_stats1, row.names = F,
caption = paste("4.",count," Detections for the ",j," release group", sep = ""),
"html")
print(kable_styling(final_stats, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = F, position = "left"))
} else {
cat("\n\n\\pagebreak\n")
print(paste("No detections for",j,"release group yet", sep=" "), quote = F)
cat("\n\n\\pagebreak\n")
}
}
}
4.1 Detections for all releases combined
| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | rkm |
|---|---|---|---|---|---|---|
| ButteBrRT | 2019-04-12 21:41:34 | 2019-04-19 12:21:24 | 2019-05-24 22:09:04 | 56 | 9.32 | 344.108 |
| TowerBridge | 2019-04-15 03:00:15 | 2019-04-21 07:29:15 | 2019-05-26 21:43:56 | 118 | 19.63 | 172.000 |
| I80-50_Br | 2019-04-15 01:02:19 | 2019-04-21 01:36:32 | 2019-05-26 15:23:36 | 142 | 23.63 | 170.748 |
| Georgiana_Slough1 | 2019-04-15 22:11:06 | 2019-04-21 14:52:36 | 2019-04-28 02:22:02 | 24 | 3.99 | 119.208 |
| Sac_BlwGeorgiana | 2019-04-16 04:30:57 | 2019-04-22 06:43:33 | 2019-05-27 14:36:59 | 86 | 14.31 | 119.058 |
| Georgiana_Slough2 | 2019-04-15 22:17:42 | 2019-04-21 15:03:41 | 2019-04-28 02:36:04 | 24 | 3.99 | 118.758 |
| Sac_BlwGeorgiana2 | 2019-04-16 04:39:15 | 2019-04-22 05:40:03 | 2019-05-27 14:44:12 | 96 | 15.97 | 118.398 |
| Benicia_east | 2019-04-19 01:10:29 | 2019-04-25 03:06:26 | 2019-05-02 06:19:09 | 111 | 18.47 | 52.240 |
| Benicia_west | 2019-04-18 16:47:10 | 2019-04-25 04:17:50 | 2019-05-02 06:24:24 | 110 | 18.30 | 52.040 |
4.2 Detections for the Downstream release group

| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | rkm |
|---|---|---|---|---|---|---|
| ButteBrRT | 2019-04-14 01:07:10 | 2019-04-19 10:12:31 | 2019-05-24 22:09:04 | 25 | 8.31 | 344.108 |
| TowerBridge | 2019-04-15 18:44:38 | 2019-04-21 19:07:42 | 2019-05-26 21:43:56 | 59 | 19.60 | 172.000 |
| I80-50_Br | 2019-04-15 13:12:27 | 2019-04-21 13:39:17 | 2019-05-26 15:23:36 | 61 | 20.27 | 170.748 |
| Georgiana_Slough1 | 2019-04-16 12:07:25 | 2019-04-22 11:49:20 | 2019-04-27 09:10:35 | 7 | 2.33 | 119.208 |
| Sac_BlwGeorgiana | 2019-04-16 04:30:57 | 2019-04-22 15:13:02 | 2019-05-27 14:36:59 | 42 | 13.95 | 119.058 |
| Georgiana_Slough2 | 2019-04-16 12:16:55 | 2019-04-22 12:00:19 | 2019-04-27 09:22:01 | 7 | 2.33 | 118.758 |
| Sac_BlwGeorgiana2 | 2019-04-16 04:39:15 | 2019-04-22 06:29:57 | 2019-05-27 14:44:12 | 48 | 15.95 | 118.398 |
| Benicia_east | 2019-04-19 01:10:29 | 2019-04-25 06:10:37 | 2019-04-29 15:18:37 | 50 | 16.61 | 52.240 |
| Benicia_west | 2019-04-18 16:47:10 | 2019-04-25 07:02:55 | 2019-04-29 15:21:34 | 50 | 16.61 | 52.040 |
4.3 Detections for the Upstream release group

| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | rkm |
|---|---|---|---|---|---|---|
| ButteBrRT | 2019-04-12 21:41:34 | 2019-04-19 14:05:20 | 2019-04-26 14:26:42 | 31 | 10.33 | 344.108 |
| TowerBridge | 2019-04-15 03:00:15 | 2019-04-20 19:50:47 | 2019-04-27 08:57:03 | 59 | 19.67 | 172.000 |
| I80-50_Br | 2019-04-15 01:02:19 | 2019-04-20 16:32:13 | 2019-04-27 02:30:17 | 81 | 27.00 | 170.748 |
| Georgiana_Slough1 | 2019-04-15 22:11:06 | 2019-04-21 06:15:08 | 2019-04-28 02:22:02 | 17 | 5.67 | 119.208 |
| Sac_BlwGeorgiana | 2019-04-16 08:01:19 | 2019-04-21 22:37:14 | 2019-04-29 09:02:56 | 44 | 14.67 | 119.058 |
| Georgiana_Slough2 | 2019-04-15 22:17:42 | 2019-04-21 06:26:15 | 2019-04-28 02:36:04 | 17 | 5.67 | 118.758 |
| Sac_BlwGeorgiana2 | 2019-04-16 08:12:10 | 2019-04-22 04:50:08 | 2019-04-29 09:13:10 | 48 | 16.00 | 118.398 |
| Benicia_east | 2019-04-19 19:32:24 | 2019-04-25 00:35:28 | 2019-05-02 06:19:09 | 61 | 20.33 | 52.240 |
| Benicia_west | 2019-04-19 19:38:26 | 2019-04-25 02:00:15 | 2019-05-02 06:24:24 | 60 | 20.00 | 52.040 |
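## Housekeeping: clear the workspace and delete MARK's temporary files (RMark::cleanup)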
rm(list = ls())
cleanup(ask = F)