2. Real-time Fish Detections
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
library(cder)        # cdec_query for CDEC flow data
library(data.table)  # fread
library(reshape2)    # dcast
library(scales)      # viridis_pal
library(knitr)       # kable
library(kableExtra)  # kable_styling
detects_study <- fread(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products\\Study_detection_files\\detects_ColemanLateFall_2020.csv", sep = ""), colClasses = c(DateTime_PST = "character", RelDT = "character"))
## Parse detection times as Pacific Standard Time year-round (Etc/GMT+8 has no DST)
detects_study$DateTime_PST <- as.POSIXct(detects_study$DateTime_PST, format = "%Y-%m-%d %H:%M:%S", tz = "Etc/GMT+8")
if(nrow(detects_study)>0){
detects_study <- merge(detects_study, study_tagcodes[,c("TagID_Hex", "RelDT", "StudyID", "Release", "tag_life")], by.x = "TagCode", by.y = "TagID_Hex")
}
#detects_study <- detects_study[detects_study$recv != 17135,]
detects_butte <- detects_study[detects_study$general_location == "ButteBrRT",]
#wlk_flow <- read.csv("wlk.csv")
if (nrow(detects_butte) == 0){
"No detections yet"
} else {
detects_butte <- merge(detects_butte,aggregate(list(first_detect = detects_butte$DateTime_PST), by = list(TagCode= detects_butte$TagCode), FUN = min))
detects_butte$Day <- as.Date(detects_butte$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_butte$RelDT), "Etc/GMT+8")
## Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_butte$RelDT)+(detects_butte$tag_life*1.5)))
BTC_flow <- cdec_query("BTC", "20", "H", starttime, endtime+1)
BTC_flow$datetime <- as.Date(format(BTC_flow$DateTime, "%Y-%m-%d"))
BTC_flow_day <- aggregate(list(parameter_value = BTC_flow$Value),
by = list(Day = BTC_flow$datetime),
FUN = mean, na.rm = T)
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_butte$StudyID), "Release"])
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_butte$Release))])
tagcount <- aggregate(list(unique_tags = detects_butte$TagCode), by = list(Day = detects_butte$Day, Release = detects_butte$Release ), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
daterange1 <- merge(daterange, tagcount1, all.x=T)
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == 'x'] <- paste(i)
}
}
daterange2 <- merge(daterange1, BTC_flow_day, by = "Day", all.x = T)
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
## All columns except the last (flow) are daily fish counts. Note the parentheses:
## 1:(ncol(daterange2)-1) is intended; a bare 1:ncol(daterange2)-1 would be (1:n)-1
barp <- barplot(t(daterange2[,1:(ncol(daterange2)-1)]), plot = FALSE, beside = T)
barplot(t(daterange2[,1:(ncol(daterange2)-1)]), beside = T, col=viridis_pal()(rel_num),
xlab = "", ylab = "Number of fish arrivals per day",
ylim = c(0,max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T)*1.2),
las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n")
legend(x ='topleft', legend = colnames(daterange2)[1:(ncol(daterange2)-1)], fill= viridis_pal()(rel_num), horiz = T, title = "Release")
ybreaks <- if(max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T) < 4) {max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T)} else {5}
xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
barpmeans <- colMeans(barp)
axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
axis(2, at = pretty(0:max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T), ybreaks))
par(new=T)
plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))
axis(side = 4)
mtext("Flow (cfs) at Butte City", side=4, line=3, cex=1.5, col="blue")
}
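A note on the column subsetting in the plotting code above: R's ":" operator binds more tightly than "-", so a bare 1:ncol(daterange2)-1 means (1:ncol(daterange2)) - 1, which only selects the intended columns because R silently drops the 0 index. The parenthesized form 1:(ncol(daterange2)-1) says what is meant. A standalone sketch of the trap (toy data, nothing from this study):
df <- data.frame(a = 1:3, b = 4:6, c = 7:9)
1:ncol(df)-1                 # (1:3) - 1  ->  0 1 2
1:(ncol(df)-1)               # 1 2
names(df[, 1:ncol(df)-1])    # "a" "b" (index 0 silently dropped)
names(df[, 1:(ncol(df)-1)])  # "a" "b" (explicit and safe)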
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
library(reshape2)
detects_tower <- detects_study[detects_study$general_location == "TowerBridge",]
#wlk_flow <- read.csv("wlk.csv")
if (nrow(detects_tower) == 0){
"No detections yet"
} else {
detects_tower <- merge(detects_tower,aggregate(list(first_detect = detects_tower$DateTime_PST), by = list(TagCode= detects_tower$TagCode), FUN = min))
detects_tower$Day <- as.Date(detects_tower$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_tower$RelDT), "Etc/GMT+8")
## Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(c(Sys.time())), max(as.Date(detects_tower$RelDT)+(detects_tower$tag_life*1.5)))
wlk_flow <- cdec_query("WLK", "20", "H", starttime, endtime+1)
wlk_flow$datetime <- as.Date(wlk_flow$DateTime)
wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$Value),
by = list(Day = wlk_flow$datetime),
FUN = mean, na.rm = T)
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_tower$StudyID), "Release"])
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_tower$Release))])
tagcount <- aggregate(list(unique_tags = detects_tower$TagCode), by = list(Day = detects_tower$Day, Release = detects_tower$Release ), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
daterange1 <- merge(daterange, tagcount1, all.x=T)
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == 'x'] <- paste(i)
}
}
daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
barp <- barplot(t(daterange2[,1:(ncol(daterange2)-1)]), plot = FALSE, beside = T)
barplot(t(daterange2[,1:(ncol(daterange2)-1)]), beside = T, col=viridis_pal()(rel_num),
xlab = "", ylab = "Number of fish arrivals per day",
ylim = c(0,max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T)*1.2),
las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n")
legend(x ='topleft', legend = colnames(daterange2)[1:(ncol(daterange2)-1)], fill= viridis_pal()(rel_num), horiz = T, title = "Release")
ybreaks <- if(max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T) < 4) {max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T)} else {5}
xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
barpmeans <- colMeans(barp)
axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
axis(2, at = pretty(0:max(daterange2[,1:(ncol(daterange2)-1)], na.rm = T), ybreaks))
par(new=T)
plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))
axis(side = 4)
mtext("Flow (cfs) at Wilkins Slough", side=4, line=3, cex=1.5, col="blue")
}
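The flow overlay in the two plots above uses the base-R dual-axis idiom: draw the barplot, call par(new = TRUE) so the next plot is drawn on the same device without erasing it, suppress that plot's axes, then label the right-hand side with axis(4) and mtext. A minimal self-contained sketch of the technique (toy counts and flows, not study data):
counts <- c(2, 5, 3, 8, 4)
flow <- c(900, 1400, 1300, 2100, 1800)
par(mar = c(4, 4, 2, 4))
bp <- barplot(counts, ylab = "Count")
par(new = TRUE)                     # overlay on the existing barplot
plot(bp, flow, type = "l", col = "blue", lwd = 2,
     axes = FALSE, xlab = "", ylab = "")
axis(side = 4)                      # right-hand axis for the flow line
mtext("Flow (cfs)", side = 4, line = 2.5, col = "blue")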
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]
if (nrow(detects_benicia)>0) {
detects_benicia <- merge(detects_benicia,aggregate(list(first_detect = detects_benicia$DateTime_PST), by = list(TagCode= detects_benicia$TagCode), FUN = min))
detects_benicia$Day <- as.Date(detects_benicia$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_benicia$RelDT), "Etc/GMT+8")
#endtime <- as.Date(c(Sys.time()))#, max(detects_benicia$first_detect)+60*60*24)))
#wlk_flow <- cdec_query("COL", "20", "H", starttime, endtime+1)
#wlk_flow$datetime <- as.Date(wlk_flow$datetime)
#wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$parameter_value), by = list(Day = wlk_flow$datetime), FUN = mean, na.rm = T)
## Note: endtime here is carried over from the Wilkins Slough chunk above
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_benicia$StudyID), "Release"])
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_benicia$Release))])
tagcount <- aggregate(list(unique_tags = detects_benicia$TagCode), by = list(Day = detects_benicia$Day, Release = detects_benicia$Release ), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
daterange1 <- merge(daterange, tagcount1, all.x=T)
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == 'x'] <- paste(i)
}
}
#daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)
daterange2 <- daterange1
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
barp <- barplot(t(daterange2[,1:ncol(daterange2)]), plot = FALSE, beside = T)
barplot(t(daterange2[,1:ncol(daterange2)]), beside = T, col=viridis_pal()(rel_num),
xlab = "", ylab = "Number of fish arrivals per day",
ylim = c(0,max(daterange2[,1:ncol(daterange2)], na.rm = T)*1.2),
las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n")
legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)], fill= viridis_pal()(rel_num), horiz = T, title = "Release")
ybreaks <- if(max(daterange2[,1:ncol(daterange2)], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)], na.rm = T)} else {5}
xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
barpmeans <- colMeans(barp)
axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)], na.rm = T), ybreaks))
box()
#par(new=T)
#plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "blue", type = "l", lwd=2, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
#axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
#mtext("Flow (cfs) at Colusa Bridge", side=4, line=3, cex=1.5, col="blue")
}else{
print("No detections at Benicia yet")
}
3. Survival and Routing Probability
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
library(data.table)
library(RMark)
gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
study_count <- nrow(study_tagcodes)
if (nrow(detects_tower) == 0){
"No detections yet"
} else {
## Only do survival to Sac for now
test <- detects_study[detects_study$rkm > 168 & detects_study$rkm < 175,]
## Create inp for survival estimation
inp <- as.data.frame(reshape2::dcast(test, TagCode ~ rkm, fun.aggregate = length))
## Sort columns by river km in descending order
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
if(gen_loc_sites <2){
"Detections at only one location so far, survival cannot yet be estimated"
}else{
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by.x = "TagID_Hex", by.y = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
inp[,groups] <- 0
for (i in groups) {
inp[as.character(inp$Release) == i, i] <- 1
}
if(length(groups) > 1){
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",apply(inp[,groups], 1, paste, collapse=" ")," ;",sep = "")
write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
WRinp <- convert.inp("WRinp.inp", group.df=data.frame(rel=groups))
WR.process <- process.data(WRinp, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(c("ALL", groups), WR.surv)
}
if(length(groups) < 2){
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ", 1, " ;", sep = "")
write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
WRinp <- convert.inp("WRinp.inp")
WR.process <- process.data(WRinp, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
## With a single release group the release-specific model reduces to the overall model;
## it is fit anyway so the summary code below works for either branch
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(c("ALL", groups), WR.surv)
}
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}
}
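The inp_final strings written above follow the MARK capture-history format: a leading 1 for the release occasion, one digit per detection site in decreasing river-km order, then one frequency column per release group. A few hypothetical examples (two sites, two release groups; not actual study records):
example_inp <- c("111 1 0 ;",  # detected at both sites, member of release group 1
                 "110 0 1 ;",  # detected at the first site only, release group 2
                 "100 1 0 ;")  # released but never detected again, release group 1
writeLines(example_inp, "example.inp")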
3.1 Minimum survival to Tower Bridge (using CJS survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival

| Release | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) |
|---------|--------------|----|----------------|----------------|--------------------------|
| ALL     | 60.4         | 2  | 56.4           | 64.2           | 100                      |
| Week 1  | 60.4         | 2  | 56.4           | 64.2           | NA                       |
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
if (nrow(detects_study) == 0){
"No detections yet"
} else {
## Only do survival to Georg split for now
test2 <- detects_study[detects_study$general_location %in% c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
## We can only do multistate model if there is at least one detection in each route
if(nrow(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2"),]) == 0 |
nrow(test2[test2$general_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"),]) == 0){
"Too few detections: routing probability cannot be estimated"
}else{
## Make tagcode character
study_tagcodes$TagID_Hex <- as.character(study_tagcodes$TagID_Hex)
## Make a crosstab query with frequencies for all tag/location combination
test2$general_location <- factor(test2$general_location, levels = c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"))
test2$TagCode <- factor(test2$TagCode, levels = study_tagcodes$TagID_Hex)
mytable <- table(test2$TagCode, test2$general_location) # TagCode rows, general_location columns
## Change all frequencies greater than 0 to "A". Here you could raise the minimum cutoff to 2 detections, and then make another command that changes all detections=1 to 0
mytable[mytable>0] <- "A"
## Order in order of rkm
mytable2 <- mytable[, c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2")]
## Now sort the crosstab rows alphabetically
mytable2 <- mytable2[order(row.names(mytable2)),]
## Recode the Georgiana Slough columns as route "B" (the Sacramento columns stay "A")
mytable2[which(mytable2[, "Georgiana_Slough1"]=="A"), "Georgiana_Slough1"] <- "B"
mytable2[which(mytable2[, "Georgiana_Slough2"]=="A"), "Georgiana_Slough2"] <- "B"
## Make a crosstab query with frequencies for all weekly Release groups
#test2$Release <- factor(test2$Release)
#mytable3 <- table(test2$TagCode, test2$Release) # A will be rows, B will be columns
## Change all frequencies bigger than 1 to 1. Here you could change your minimum cutoff to 2 detections, and then make another command that changes all detections=1 to 0
#mytable3[mytable3>0] <- 1
## Order in order of rkm
#mytable4 <- mytable3[, order(colnames(mytable3))]
## Now sort the crosstab rows alphabetically
#mytable4 <- mytable4[order(row.names(mytable4)),]
## Now order the study_tagcodes table the same way
study_tagcodes <- study_tagcodes[order(study_tagcodes$TagID_Hex),]
## Paste together (concatenate) the data from each column of the crosstab into one string per row, add to tagging_meta.
## For this step, make sure both are sorted by FishID
study_tagcodes$inp_part1 <- apply(mytable2[,1:3],1,paste,collapse="")
study_tagcodes$inp_partA <- apply(mytable2[,4:5],1,paste,collapse="")
study_tagcodes$inp_partB <- apply(mytable2[,6:7],1,paste,collapse="")
#study_tagcodes$inp_group <- apply(mytable4,1,paste,collapse=" ")
## We need to have a way of picking which route to assign to a fish if it was detected by both georg and blw-georg recvs
## We will say that the last detection at that junction is what determines the route it took
## find last detection at each genloc
departure <- aggregate(list(depart = test2$DateTime_PST), by = list(TagID_Hex = test2$TagCode, last_location = test2$general_location), FUN = max)
## subset for just juncture locations
departure <- departure[departure$last_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
## Find genloc of last known detection per tag
last_depart <- aggregate(list(depart = departure$depart), by = list(TagID_Hex = departure$TagID_Hex), FUN = max)
last_depart1 <- merge(last_depart, departure)
study_tagcodes <- merge(study_tagcodes, last_depart1[,c("TagID_Hex", "last_location")], by = "TagID_Hex", all.x = T)
## Assume that the Sac is default pathway, and for fish that were detected in neither route, it would get a "00" in inp so doesn't matter anyway
study_tagcodes$inp_final <- paste("A",study_tagcodes$inp_part1, study_tagcodes$inp_partA," 1 ;", sep = "")
## now put in exceptions...fish that were seen in georgiana last
study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_final"] <- paste("A",study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_part1"], study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_partB"]," 1 ;", sep = "")
write.table(study_tagcodes$inp_final,"WRinp_multistate.inp",row.names = F, col.names = F, quote = F)
WRinp <- convert.inp("WRinp_multistate.inp")
dp <- process.data(WRinp, model="Multistrata")
ddl <- make.design.data(dp)
#### p ####
# Can't be seen at 2B or 3B or 4B (butte, tower or I80)
ddl$p$fix=NA
ddl$p$fix[ddl$p$stratum == "B" & ddl$p$time %in% c(2,3,4)]=0
#### Psi ####
# Only 1 transition is allowed: from route A to route B at the Georgiana Slough
# junction (the transition parameter with time==4); all others are fixed to 0
ddl$Psi$fix=0
ddl$Psi$fix[ddl$Psi$stratum=="A"&
ddl$Psi$tostratum=="B" & ddl$Psi$time==4]=NA
#### Phi a.k.a. S ####
ddl$S$fix=NA
# None in B for reaches 1,2,3,4 and fixing it to 1 for 5 (between two georg lines). All getting fixed to 1
ddl$S$fix[ddl$S$stratum=="B" & ddl$S$time %in% c(1,2,3,4,5)]=1
# For route A, fixing it to 1 for 5 (between two blw_georg lines)
ddl$S$fix[ddl$S$stratum=="A" & ddl$S$time==5]=1
## We use -1 at beginning of formula to remove intercept. This is because different routes probably shouldn't share the same intercept
p.timexstratum=list(formula=~-1+stratum:time)
Psi.stratumxtime=list(formula=~-1+stratum:time)
S.stratumxtime=list(formula=~-1+stratum:time)
## Run model a first time
S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, silent = T, output = F)
## Identify any parameter estimates at 1, which would likely have bad SE estimates.
profile.intervals <- which(S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$estimate %in% c(0,1) & !S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$fixed == "Fixed")
## Rerun model using profile interval estimation for the tricky parameters
S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, profile.int = profile.intervals, silent = T, output = F)
results <- S.timexstratum.p.timexstratum.Psi.timexstratum$results$real
results_short <- results[rownames(results) %in% c("S sA g1 c1 a0 o1 t1",
"S sA g1 c1 a1 o2 t2",
"S sA g1 c1 a2 o3 t3",
"S sA g1 c1 a3 o4 t4",
"p sA g1 c1 a1 o1 t2",
"p sA g1 c1 a2 o2 t3",
"p sA g1 c1 a3 o3 t4",
"p sA g1 c1 a4 o4 t5",
"p sB g1 c1 a4 o4 t5",
"Psi sA toB g1 c1 a3 o4 t4"
),]
results_short <- round(results_short[,c("estimate", "se", "lcl", "ucl")] * 100,1)
## Now find estimate and CIs for AtoA route at junction
Psilist=get.real(S.timexstratum.p.timexstratum.Psi.timexstratum,"Psi",vcv=TRUE)
Psivalues=Psilist$estimates
routes <- TransitionMatrix(Psivalues[Psivalues$time==4 & Psivalues$cohort==1,],vcv.real=Psilist$vcv.real)
results_short$Measure <- c("Survival from release to Butte City","Survival from Butte City to TowerBridge (minimum estimate since fish may have taken Yolo Bypass)", "Survival from TowerBridge to I80-50_Br", "% arrived from I80-50_Br to Georgiana Slough confluence (not survival because fish may have taken Sutter/Steam)","Detection probability at Butte City",
"Detection probability at TowerBridge", "Detection probability at I80-50_Br", "Detection probability at Blw_Georgiana", "Detection probability at Georgiana Slough",
"Routing probability into Georgiana Slough (Conditional on fish arriving to junction)")
results_short <- results_short[,c("Measure", "estimate", "se", "lcl", "ucl")]
colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(results_short, row.names = F, "html", caption = "3.2 Reach-specific survival and probability of entering Georgiana Slough") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}
}
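In the multistate histories written above, each occasion is coded with a stratum letter instead of a plain 1: "A" for the Sacramento route, "B" for Georgiana Slough, "0" for not detected. With the six-occasion structure used here (release, ButteBrRT, TowerBridge, I80-50_Br, then the two lines of the assigned route), hypothetical histories look like this (illustrative only, not actual study records):
example_ms <- c("AAAAAA 1 ;",  # stayed in the Sacramento (A) route, detected at every occasion
                "AAA0BB 1 ;",  # missed at I80-50_Br, then routed into Georgiana Slough
                "AA0000 1 ;")  # last seen at ButteBrRT; never reached the junction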
3.2 Reach-specific survival and probability of entering Georgiana Slough

| Measure | Estimate | SE | 95% lower C.I. | 95% upper C.I. |
|---------|----------|----|----------------|----------------|
| Survival from release to Butte City | 76.1 | 2.0 | 72.1 | 79.8 |
| Survival from Butte City to TowerBridge (minimum estimate since fish may have taken Yolo Bypass) | 79.7 | 2.1 | 75.3 | 83.5 |
| Survival from TowerBridge to I80-50_Br | 100.0 | 0.0 | 99.2 | 100.0 |
| % arrived from I80-50_Br to Georgiana Slough confluence (not survival because fish may have taken Sutter/Steam) | 60.7 | 2.6 | 55.6 | 65.5 |
| Detection probability at Butte City | 79.5 | 2.1 | 75.1 | 83.3 |
| Detection probability at TowerBridge | 99.5 | 0.4 | 97.8 | 99.9 |
| Detection probability at I80-50_Br | 98.9 | 0.5 | 97.1 | 99.6 |
| Detection probability at Blw_Georgiana | 100.0 | 0.0 | 100.0 | 100.0 |
| Detection probability at Georgiana Slough | 100.0 | 0.0 | 96.2 | 100.0 |
| Routing probability into Georgiana Slough (Conditional on fish arriving to junction) | 23.4 | 2.8 | 18.3 | 29.4 |
if(nrow(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2"),]) == 0 |
nrow(test2[test2$general_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"),]) == 0){
"Too few detections: routing probability cannot be estimated"
}else{
library(png)
#Replace the directory and file information with your info
georg <- readPNG("../data/georg.png")
par(mar=c(2,0,0,0))
#Set up the plot area
plot(1:2, type='n', xlab="", ylab="", xaxt = "n", yaxt = "n")
#Get the plot information so the image will fill the plot box, and draw it
lim <- par()
rasterImage(georg, lim$usr[1], lim$usr[3], lim$usr[2], lim$usr[4])
legend(x = 1.55,y = 1.6,legend = paste(round(routes$TransitionMat["A","A"],3)*100,"% (",round(routes$lcl.TransitionMat["A","A"],3)*100,"-",round(routes$ucl.TransitionMat["A","A"],3)*100,")", sep =""),col = "white", box.col = "light gray", bg = "light gray")
legend(1.55,1.45, legend = paste(round(routes$TransitionMat["A","B"],3)*100,"% (",round(routes$lcl.TransitionMat["A","B"],3)*100,"-",round(routes$ucl.TransitionMat["A","B"],3)*100,")", sep =""), box.col = "light gray", bg = "light gray")
mtext(text = "3.3 Routing Probabilities at Georgiana Slough Junction", cex = 1.3, side = 1, line = 0.2, adj = 0)
}

setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
if (nrow(detects_benicia) == 0){
"No detections yet"
} else {
benicia <- read.csv("benicia_surv.csv", stringsAsFactors = F)
benicia$RelDT <- as.POSIXct(benicia$RelDT)
## Only do survival to Benicia here
test3 <- detects_study[detects_study$rkm < 53,]
## Create inp for survival estimation
inp <- as.data.frame(reshape2::dcast(test3, TagCode ~ rkm, fun.aggregate = length))
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by.x = "TagID_Hex", by.y = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
inp[,groups] <- 0
for (i in groups) {
inp[as.character(inp$Release) == i, i] <- 1
}
if(length(groups) > 1){
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",apply(inp[,groups], 1, paste, collapse=" ")," ;",sep = "")
}else{
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",inp[,groups]," ;",sep = "")
}
write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
if(length(groups) > 1){
WRinp <- convert.inp("WRinp.inp", group.df=data.frame(rel=groups))
WR.process <- process.data(WRinp, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
}else{
WRinp <- convert.inp("WRinp.inp")
WR.process <- process.data(WRinp, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
}
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(Release = c("ALL", groups), WR.surv)
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = "3.4 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
## Find mean release time per release group, and ALL
reltimes <- aggregate(list(RelDT = study_tagcodes$RelDT), by = list(Release = study_tagcodes$Release), FUN = mean)
reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$RelDT)))
## Assign whether the results are tentative or final
quality <- "tentative"
if(endtime < as.Date(c(Sys.time()))) { quality <- "final"}
WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = '1970-01-01')
## remove old benicia record for this studyID
benicia <- benicia[!benicia$StudyID == unique(study_tagcodes$StudyID),]
benicia <- rbind(benicia, data.frame(WR.surv, StudyID = unique(study_tagcodes$StudyID), data_quality = quality))
write.csv(benicia, "benicia_surv.csv", row.names = F, quote = F)
}
3.4 Minimum survival to Benicia Bridge East Span (using CJS survival model)

| Release Group | Survival (%) | SE  | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) |
|---------------|--------------|-----|----------------|----------------|--------------------------|
| ALL           | 16.9         | 1.5 | 14.1           | 20.1           | 100                      |
| Week 1        | 16.9         | 1.5 | 14.1           | 20.1           | 100                      |
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
if (nrow(detects_study) == 0){
results_short <- data.frame("Measure"=NA, "Estimate"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(results_short, row.names = F, "html", caption = "3.5 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}else{
## Through-Delta survival: City of Sacramento (I80-50_Br) to Benicia
test4 <- detects_study[detects_study$general_location %in% c("I80-50_Br", "Benicia_west", "Benicia_east"),]
if(nrow(test4[test4$general_location =="Benicia_west",]) == 0 |
nrow(test4[test4$general_location =="Benicia_east",]) == 0){
results_short <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(results_short, row.names = F, "html", caption = "3.5 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}else{
Delta <- read.csv("Delta_surv.csv", stringsAsFactors = F)
Delta$RelDT <- as.POSIXct(Delta$RelDT)
inp <- as.data.frame(reshape2::dcast(test4, TagCode ~ general_location, fun.aggregate = length))
## Sort columns by river km in descending order
inp <- inp[,c("TagCode","I80-50_Br", "Benicia_east", "Benicia_west")]
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by.x = "TagID_Hex", by.y = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
inp[,groups] <- 0
for (i in groups) {
inp[as.character(inp$Release) == i, i] <- 1
}
if(length(groups) > 1){
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",apply(inp[,groups], 1, paste, collapse=" ")," ;",sep = "")
}else{
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",inp[,groups]," ;",sep = "")
}
write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
if(length(groups) > 1){
WRinp <- convert.inp("WRinp.inp", group.df=data.frame(rel=groups))
WR.process <- process.data(WRinp, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=2,to=length(groups)*3,by = 3),c("estimate", "se", "lcl", "ucl")] * 100,1))
}else{
WRinp <- convert.inp("WRinp.inp")
WR.process <- process.data(WRinp, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1)
}
WR.surv <- cbind(Release = c("ALL", groups), WR.surv)
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(WR.surv1, row.names = F, "html", caption = "3.5 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = '1970-01-01')
## remove old Delta record for this studyID
Delta <- Delta[!Delta$StudyID == unique(study_tagcodes$StudyID),]
Delta <- rbind(Delta, data.frame(WR.surv, StudyID = unique(study_tagcodes$StudyID), data_quality = quality))
write.csv(Delta, "Delta_surv.csv", row.names = F, quote = F)
}
}
3.5 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)

| Release Group | Survival (%) | SE  | 95% lower C.I. | 95% upper C.I. |
|---------------|--------------|-----|----------------|----------------|
| ALL           | 27.9         | 2.4 | 23.5           | 32.7           |
| Week 1        | 27.9         | 2.4 | 23.5           | 32.7           |
4. Detection statistics at all real-time receivers
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
if (nrow(detects_study) == 0){
"No detections yet"
} else {
arrivals <- aggregate(list(DateTime_PST = detects_study$DateTime_PST), by = list(general_location = detects_study$general_location, TagCode = detects_study$TagCode), FUN = min)
tag_stats <- aggregate(list(First_arrival = arrivals$DateTime_PST),
by= list(general_location = arrivals$general_location),
FUN = min)
tag_stats <- merge(tag_stats,
aggregate(list(Mean_arrival = arrivals$DateTime_PST),
by= list(general_location = arrivals$general_location),
FUN = mean),
by = c("general_location"))
tag_stats <- merge(tag_stats,
aggregate(list(Last_arrival = arrivals$DateTime_PST),
by= list(general_location = arrivals$general_location),
FUN = max),
by = c("general_location"))
tag_stats <- merge(tag_stats,
aggregate(list(Fish_count = arrivals$TagCode),
by= list(general_location = arrivals$general_location),
FUN = function(x) {length(unique(x))}),
by = c("general_location"))
tag_stats$Percent_arrived <- round(tag_stats$Fish_count/study_count * 100,2)
tag_stats <- merge(tag_stats, unique(gen_locs[,c("general_location", "rkm")]))
tag_stats <- tag_stats[order(tag_stats$rkm, decreasing = T),]
tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")
print(kable(tag_stats, row.names = F,
caption = "4.1 Detections for all releases combined",
"html") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
for (j in sort(unique(study_tagcodes$Release))) {
if(nrow(detects_study[detects_study$Release == j,]) > 0 ) {
temp <- detects_study[detects_study$Release == j,]
arrivals1 <- aggregate(list(DateTime_PST = temp$DateTime_PST), by = list(general_location = temp$general_location, TagCode = temp$TagCode), FUN = min)
rel_count <- nrow(study_tagcodes[study_tagcodes$Release == j,])
tag_stats1 <- aggregate(list(First_arrival = arrivals1$DateTime_PST),
by= list(general_location = arrivals1$general_location),
FUN = min)
tag_stats1 <- merge(tag_stats1,
aggregate(list(Mean_arrival = arrivals1$DateTime_PST),
by= list(general_location = arrivals1$general_location),
FUN = mean),
by = c("general_location"))
tag_stats1 <- merge(tag_stats1,
aggregate(list(Last_arrival = arrivals1$DateTime_PST),
by= list(general_location = arrivals1$general_location),
FUN = max),
by = c("general_location"))
tag_stats1 <- merge(tag_stats1,
aggregate(list(Fish_count = arrivals1$TagCode),
by= list(general_location = arrivals1$general_location),
FUN = function(x) {length(unique(x))}),
by = c("general_location"))
tag_stats1$Percent_arrived <- round(tag_stats1$Fish_count/rel_count * 100,2)
tag_stats1 <- merge(tag_stats1, unique(gen_locs[,c("general_location", "rkm")]))
tag_stats1 <- tag_stats1[order(tag_stats1$rkm, decreasing = T),]
tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")
final_stats <- kable(tag_stats1, row.names = F,
caption = paste("4.2 Detections for",j,"release groups", sep = " "),
"html")
print(kable_styling(final_stats, bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
cat("\n\n\\pagebreak\n")
print(paste("No detections for",j,"release group yet", sep=" "), quote = F)
cat("\n\n\\pagebreak\n")
}
}
}
4.1 Detections for all releases combined

| general_location  | First_arrival       | Mean_arrival        | Last_arrival        | Fish_count | Percent_arrived | rkm     |
|-------------------|---------------------|---------------------|---------------------|------------|-----------------|---------|
| ButteBrRT         | 2019-12-08 08:17:20 | 2019-12-09 19:08:40 | 2020-01-18 08:40:21 | 365        | 60.53           | 344.108 |
| TowerBridge       | 2019-12-10 05:54:03 | 2019-12-12 09:23:46 | 2020-02-16 08:52:01 | 364        | 60.36           | 172.000 |
| I80-50_Br         | 2019-12-09 22:42:35 | 2019-12-12 02:37:18 | 2020-02-16 04:39:57 | 362        | 60.03           | 170.748 |
| MiddleRiver       | 2020-01-01 20:32:34 | 2020-01-01 20:32:34 | 2020-01-01 20:32:34 | 1          | 0.17            | 150.000 |
| Georgiana_Slough1 | 2019-12-11 12:35:59 | 2019-12-14 12:14:43 | 2020-01-28 22:24:39 | 54         | 8.96            | 119.208 |
| Sac_BlwGeorgiana  | 2019-12-11 11:45:06 | 2019-12-13 23:46:32 | 2020-01-29 22:06:53 | 172        | 28.52           | 119.058 |
| Georgiana_Slough2 | 2019-12-11 12:43:08 | 2019-12-13 08:35:17 | 2019-12-29 02:27:41 | 51         | 8.46            | 118.758 |
| Sac_BlwGeorgiana2 | 2019-12-11 12:44:54 | 2019-12-14 00:34:26 | 2020-01-29 22:25:03 | 170        | 28.19           | 118.398 |
| Benicia_east      | 2019-12-14 19:39:48 | 2019-12-21 02:58:12 | 2020-02-04 14:09:57 | 102        | 16.92           | 52.240  |
| Benicia_west      | 2019-12-14 19:42:19 | 2019-12-21 03:19:47 | 2020-02-04 14:12:19 | 100        | 16.58           | 52.040  |
4.2 Detections for the Week 1 release group

| general_location  | First_arrival       | Mean_arrival        | Last_arrival        | Fish_count | Percent_arrived | rkm     |
|-------------------|---------------------|---------------------|---------------------|------------|-----------------|---------|
| ButteBrRT         | 2019-12-08 08:17:20 | 2019-12-09 19:08:40 | 2020-01-18 08:40:21 | 365        | 60.53           | 344.108 |
| TowerBridge       | 2019-12-10 05:54:03 | 2019-12-12 09:23:46 | 2020-02-16 08:52:01 | 364        | 60.36           | 172.000 |
| I80-50_Br         | 2019-12-09 22:42:35 | 2019-12-12 02:37:18 | 2020-02-16 04:39:57 | 362        | 60.03           | 170.748 |
| MiddleRiver       | 2020-01-01 20:32:34 | 2020-01-01 20:32:34 | 2020-01-01 20:32:34 | 1          | 0.17            | 150.000 |
| Georgiana_Slough1 | 2019-12-11 12:35:59 | 2019-12-14 12:14:43 | 2020-01-28 22:24:39 | 54         | 8.96            | 119.208 |
| Sac_BlwGeorgiana  | 2019-12-11 11:45:06 | 2019-12-13 23:46:32 | 2020-01-29 22:06:53 | 172        | 28.52           | 119.058 |
| Georgiana_Slough2 | 2019-12-11 12:43:08 | 2019-12-13 08:35:17 | 2019-12-29 02:27:41 | 51         | 8.46            | 118.758 |
| Sac_BlwGeorgiana2 | 2019-12-11 12:44:54 | 2019-12-14 00:34:26 | 2020-01-29 22:25:03 | 170        | 28.19           | 118.398 |
| Benicia_east      | 2019-12-14 19:39:48 | 2019-12-21 02:58:12 | 2020-02-04 14:09:57 | 102        | 16.92           | 52.240  |
| Benicia_west      | 2019-12-14 19:42:19 | 2019-12-21 03:19:47 | 2020-02-04 14:12:19 | 100        | 16.58           | 52.040  |
## Set fig height for next plot here, based on how long fish have been at large
figheight <- min(10,max(c(3,as.numeric(difftime(Sys.Date(), min(study_tagcodes$RelDT), units = "days")) / 4)))
4.3 Fish arrivals per day
setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = ""))
if (nrow(detects_study) == 0){
"No detections yet"
} else {
beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F)
beacon_by_day$day <- as.Date(beacon_by_day$day)
arrivals$day <- as.Date(format(arrivals$DateTime_PST, "%Y-%m-%d"))
arrivals_per_day <- aggregate(list(New_arrivals = arrivals$TagCode), by = list(day = arrivals$day, general_location = arrivals$general_location), length)
arrivals_per_day$day <- as.Date(arrivals_per_day$day)
## Now subset to only look at data for the correct beacon for that day
beacon_by_day <- as.data.frame(beacon_by_day[which(beacon_by_day$TagCode == beacon_by_day$beacon),])
## Now only keep beacon by day for days since fish were released, and up to today or taglife
beacon_by_day <- beacon_by_day[beacon_by_day$day >= as.Date(min(study_tagcodes$RelDT)) & beacon_by_day$day <= endtime,]
beacon_by_day <- merge(beacon_by_day, gen_locs[,c("location", "general_location","rkm")], by = "location", all.x = T)
arrivals_per_day <- merge(beacon_by_day, arrivals_per_day, all.x = T, by = c("general_location", "day"))
arrivals_per_day$day <- factor(arrivals_per_day$day)
## Remove bench test and other NA locations
arrivals_per_day <- arrivals_per_day[!arrivals_per_day$general_location == "Bench_test",]
arrivals_per_day <- arrivals_per_day[is.na(arrivals_per_day$general_location) == F,]
## Change order of data to plot decreasing rkm
arrivals_per_day <- arrivals_per_day[order(arrivals_per_day$rkm, decreasing = T),]
arrivals_per_day$general_location <- factor(arrivals_per_day$general_location, unique(arrivals_per_day$general_location))
# ggplot(data=arrivals_per_day, aes(x=general_location, y=fct_rev(as_factor(day)))) +
# geom_tile(fill = "lightgray", color = "black") +
# geom_text(aes(label=New_arrivals)) +
# labs(x="General Location", y = "Date") +
# theme(panel.background = element_blank(), axis.text.x = element_text(angle = 90, hjust = 1)) +
# theme(plot.margin = unit(c(0,0,0,0), "cm"))
crosstab <- xtabs(formula = arrivals_per_day$New_arrivals ~ arrivals_per_day$day + arrivals_per_day$general_location, addNA =T)
crosstab[is.na(crosstab)] <- ""
crosstab[crosstab==0] <- NA
crosstab <- as.data.frame.matrix(crosstab)
colnames(crosstab) <- c("Butte Br", "Tower Br", "I8050 Br", "Old River", "Middle River", "CVP Tanks", "Georg Slough1", "Sac_Blw Georg1", "Georg Slough2", "Sac_Blw Georg2", "Benicia East", "Benicia West")
kable(crosstab, align = "c", escape = F, booktabs = T) %>%
kable_styling() %>%
#row_spec(0, angle = -45) %>%
column_spec(column = 1:12,width_min = "50px",border_left = T, border_right = T) %>%
column_spec(1, bold = T, width_min = "70px")%>%
scroll_box(width = "100%", height = "700px") %>%
kable_styling(font_size = 10)
}
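The xtabs call above pivots the long arrivals-per-day records into the wide date-by-location matrix rendered below. A minimal sketch of that pivot with toy data (locations and dates chosen for illustration):
toy <- data.frame(day = c("2019-12-08", "2019-12-08", "2019-12-09"),
                  general_location = c("ButteBrRT", "TowerBridge", "ButteBrRT"),
                  New_arrivals = c(5, 2, 3))
xtabs(New_arrivals ~ day + general_location, data = toy)
##             general_location
## day          ButteBrRT TowerBridge
##   2019-12-08         5           2
##   2019-12-09         3           0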
| Date | Butte Br | Tower Br | I8050 Br | Old River | Middle River | CVP Tanks | Georg Slough1 | Sac_Blw Georg1 | Georg Slough2 | Sac_Blw Georg2 | Benicia East | Benicia West |
|------|----------|----------|----------|-----------|--------------|-----------|---------------|----------------|---------------|----------------|--------------|--------------|
| 2019-12-05 | | | | | | | | | | | | |
| 2019-12-06 | | | | | | | | | | | | |
| 2019-12-07 | | | | | | | | | | | | |
| 2019-12-08 | 570 | | | | | | | | | | | |
| 2019-12-09 | 366 | | 10 | | | | | | | | | |
| 2019-12-10 | 45 | 750 | 1480 | | | | | | | | | |
| 2019-12-11 | 15 | 1980 | 1520 | | | | 24 | 85 | 32 | 90 | | |
| 2019-12-12 | 36 | 500 | 240 | | | | 156 | 350 | 184 | 426 | | |
| 2019-12-13 | 9 | 70 | 60 | | | | 84 | 240 | 120 | 270 | | |
| 2019-12-14 | 15 | 70 | 60 | | | | 24 | 70 | 40 | 84 | 52 | 44 |
| 2019-12-15 | 9 | 120 | 110 | | | | 6 | 30 | 8 | 48 | 377 | 319 |
| 2019-12-16 | 6 | 30 | 20 | | | | 12 | 40 | 16 | 48 | 91 | 77 |
| 2019-12-17 | 3 | | | | | | | 20 | | 24 | 156 | 121 |
| 2019-12-18 | | | 20 | | | | | | | | 130 | 88 |
| 2019-12-19 | | 40 | 40 | | | | | 5 | | 6 | 65 | 66 |
| 2019-12-20 | | 20 | | | | | | | | | 91 | 77 |
| 2019-12-21 | | 10 | 10 | | | | | | | | 26 | 22 |
| 2019-12-22 | | | | | | | | 5 | | 6 | 39 | 33 |
| 2019-12-23 | 3 | | | | | | | | | | 26 | 33 |
| 2019-12-24 | 15 | | | | | | | | | | 65 | 44 |
| 2019-12-25 | | | | | | | | | | | 65 | 66 |
| 2019-12-26 | | 10 | 10 | | | | | | | | 13 | 11 |
| 2019-12-27 | | 10 | 10 | | | | | | | | 13 | 11 |
| 2019-12-28 | | | | | | | | | | | 13 | 11 |
| 2019-12-29 | | | | | | | 6 | | 8 | | | |
| 2019-12-30 | | | | | | | | | | | 26 | 22 |
| 2019-12-31 | | | | | | | 6 | | | | 13 | |
| 2020-01-01 | | | | | 2 | | | | | | | |
| 2020-01-02 | | | | | | | | 5 | | 3 | | |
| 2020-01-03 | | | | | | | | | | | | |
| 2020-01-04 | | | | | | | | | | | | |
| 2020-01-05 | | | | | | | | | | | 12 | 11 |
| 2020-01-06 | | | | | | | | | | | | |
| 2020-01-07 | | | | | | | | | | | | |
| 2020-01-08 | | | | | | | | | | | | |
| 2020-01-09 | | | | | | | | | | | | |
| 2020-01-10 | | | | | | | | | | | | |
| 2020-01-11 | | | | | | | | | | | | |
| 2020-01-12 | | | | | | | | | | | | |
| 2020-01-13 | | | | | | | | | | | | |
| 2020-01-14 | | | | | | | | | | | | |
| 2020-01-15 | | | | | | | | | | | | |
| 2020-01-16 | | | | | | | | | | | | |
| 2020-01-17 | | | | | | | | | | | | |
| 2020-01-18 | 3 | | | | | | | | | | | |
| 2020-01-19 | | | | | | | | | | | | |
| 2020-01-20 | | | | | NA | | | | | | | |
| 2020-01-21 | | 7 | 10 | | NA | | | | | | | |
| 2020-01-22 | | | | | | | | | | | | |
| 2020-01-23 | | | | | | | | 5 | | 6 | | |
| 2020-01-24 | | | | | | | | | | | | |
| 2020-01-25 | | | | | | | | | | | | |
| 2020-01-26 | | | | | | | | | | | | |
| 2020-01-27 | | | | | | | | | | | | |
| 2020-01-28 | | | | | | | 6 | | | | | |
| 2020-01-29 | | | | | | | | 5 | | 6 | | |
| 2020-01-30 | | | | | | | | | | | | |
| 2020-01-31 | | | | | | | | | | | | |
| 2020-02-01 | | | | | | | | | | | | |
| 2020-02-02 | | | | | | | | | | | 39 | 33 |
| 2020-02-03 | | | | | | | | | | | | |
| 2020-02-04 | | | | | | | | | | | 13 | 11 |
| 2020-02-05 | | | | | | | | | | | | |
| 2020-02-06 | | 7 | 10 | | | | | | | | | |
| 2020-02-07 | | | | | | | | | | | | |
| 2020-02-08 | | | | | | | | | | | | |
| 2020-02-09 | | | | | | | | | | | | |
| 2020-02-10 | | | | | | | | | | | | |
| 2020-02-11 | | | | | | | | | | | | |
| 2020-02-12 | | | | | | | | | | | | |
| 2020-02-13 | | | | | | | | | | | | |
| 2020-02-14 | | | | | | | | | | | | |
| 2020-02-15 | | | | | | | | | | | | |
| 2020-02-16 | | 10 | 10 | | | | | | | | | |
| 2020-02-17 | | | | | | | | | | | | |
| 2020-02-18 | | | | | | | | | | | | |
| 2020-02-19 | | | | | | | | | | | | |
| 2020-02-20 | | | | | | | | | | | | |
| 2020-02-21 | | | | | | | | | | | | |
| 2020-02-22 | | | | | | | | | | | | |
| 2020-02-23 | | | | | | | | | | | | |
| 2020-02-24 | | | | | | | | | | | | |
| 2020-02-25 | | | | | | | | | | | | |
| 2020-02-26 | | | | | | | | | | | | |
| 2020-02-27 | | | | | | | | | | | | |
| 2020-02-28 | | | | | | | | | | | | |
| 2020-02-29 | | | | | | | | | | | | |
| 2020-03-01 | | | | | | | | | | | | |
| 2020-03-02 | | | | | | | | | | | | |
| 2020-03-03 | | | | | | | | | | | | |
| 2020-03-04 | | | | | | | | | | | | |
| 2020-03-05 | | | | | | | | | | | | |
| 2020-03-06 | | | | | | | | | | | | |
| 2020-03-07 | | | | | | NA | | | | | | |
| 2020-03-08 | | | | | | | | | | | | |
| 2020-03-09 | | | | | | | | | | | | |
| 2020-03-10 | | | | | | | | | | | | |
| 2020-03-11 | | | | | | | | | | | | |
| 2020-03-12 | | | | | | | | | | | | |
| 2020-03-13 | | | | | | | | | | | | |
| 2020-03-14 | | | | | | | | | | | | |
| 2020-03-15 | | | | | | | | | | | | |
| 2020-03-16 | | | | | | | | | | | | |
| 2020-03-17 | | | | | | | | | | | | |
| 2020-03-18 | | | | | | | | | | | | |
| 2020-03-19 | | | | | | | | | | | | |
| 2020-03-20 | | | | | | | | | | | | |
| 2020-03-21 | | | | | | | | | | | | |
| 2020-03-22 | | | | | | | | | | | | |
| 2020-03-23 | | | | | | | | | | | | |
| 2020-03-24 | | | | | | | | | | | | |
| 2020-03-25 | | | | | | | | | | | | |
| 2020-03-26 | | | | | | | | | | | | |
rm(list = ls())
cleanup(ask = F)  # delete temporary MARK files created by RMark