try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
library(knitr)
library(kableExtra)
library(lubridate)
library(data.table)
library(ggplot2)
library(RMark)
library(scales)
library(viridis)
library(forcats)
library(reshape2)
library(png)
library(dataRetrieval)
library(rerddap)
library(plotly)
##################################################################################################################
#### ASSIGN STUDY ID IN THIS NEXT LINE OF CODE ####
study <- "BC_Jumpstart_2022"
##################################################################################################################
detects_study <- fread("study_detections_archive.csv", stringsAsFactors = F, colClasses = c(DateTime_PST = "character", RelDT = "character"))
detects_study <- as.data.frame(detects_study[detects_study$Study_ID == study,])
detects_study$DateTime_PST <- as.POSIXct(detects_study$DateTime_PST, format = "%Y-%m-%d %H:%M:%S", tz="Etc/GMT+8")
detects_study$release_time <- as.POSIXct(detects_study$RelDT, format = "%Y-%m-%d %H:%M:%S", tz="Etc/GMT+8")
colnames(detects_study)[which(colnames(detects_study) == "Weight")] <- "weight"
colnames(detects_study)[which(colnames(detects_study) == "Length")] <- "length"
colnames(detects_study)[which(colnames(detects_study) == "Rel_rkm")] <- "release_rkm"
colnames(detects_study)[which(colnames(detects_study) == "Rel_loc")] <- "release_location"
colnames(detects_study)[which(colnames(detects_study) == "rkm")] <- "river_km"
latest <- read.csv("latest_download.csv", stringsAsFactors = F)$x
##################################################################################################################
#### TO RUN THE FOLLOWING CODE CHUNKS FROM HERE ON DOWN USING R ERDDAP, UN-COMMENT THESE NEXT 9 LINES OF CODE ####
##################################################################################################################
# cache_delete_all()
# query=paste('&',"Study_ID",'="',study,'"',sep = '')
# datafile=URLencode(paste("https://oceanview.pfeg.noaa.gov/erddap/tabledap/","FEDcalFishTrack",".csv?",query,sep = ''))
# options(url.method = "libcurl", download.file.method = "libcurl", timeout = 180)
# detects_study <- data.frame(read.csv(datafile,row.names = NULL, stringsAsFactors = F))
# detects_study <- detects_study[-1,]
# detects_study$DateTime_PST <- as.POSIXct(detects_study$local_time, format = "%Y-%m-%d %H:%M:%S", "Etc/GMT+8")
# detects_study$release_time <- as.POSIXct(detects_study$release_time, format = "%Y-%m-%d %H:%M:%S", "Etc/GMT+8")
# detects_study$river_km <- as.numeric(detects_study$river_km)
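## ALTERNATIVE (untested sketch, an assumption about the rerddap interface rather than part of the
## original workflow): the rerddap package loaded above could pull the same table without hand-building
## the download URL. Dataset ID, constraint, and column conversions are taken from the lines above.
# erddap_info <- rerddap::info("FEDcalFishTrack", url = "https://oceanview.pfeg.noaa.gov/erddap/")
# detects_study <- as.data.frame(rerddap::tabledap(erddap_info, paste0('Study_ID="', study, '"')))
# detects_study$DateTime_PST <- as.POSIXct(detects_study$local_time, format = "%Y-%m-%d %H:%M:%S", tz = "Etc/GMT+8")
# detects_study$release_time <- as.POSIXct(detects_study$release_time, format = "%Y-%m-%d %H:%M:%S", tz = "Etc/GMT+8")
# detects_study$river_km <- as.numeric(detects_study$river_km)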
##################################################################################################################
if (nrow(detects_study) == 0){
cat("Study has not yet begun\n ")
}else{
if (min(detects_study$release_time) > Sys.time()){
cat("Study has not yet begun, below data is a placeholder:\n ")
}
if (min(detects_study$release_time) < Sys.time()){
cat(paste("Study began on ", min(detects_study$release_time), ", see tagging details below:", sep = ""))
}
########################################################################
#### ASSIGN RELEASE GROUPS HERE ####
#######################################################################
detects_study$Release <- "Release 1"
detects_study[detects_study$release_time > as.POSIXct("2022-03-16 12:00:00"), "Release"] <- "Release 2"
detects_study[detects_study$release_time > as.POSIXct("2022-03-16 13:30:00"), "Release"] <- "Release 3"
#######################################################################
study_tagcodes <- unique(detects_study[,c("TagCode", "release_time", "weight", "length", "release_rkm", "release_location", "Release")])
release_stats <- aggregate(list(First_release_time = study_tagcodes$release_time),
by= list(Release = study_tagcodes$Release),
FUN = min)
release_stats <- merge(release_stats,
aggregate(list(Last_release_time = study_tagcodes$release_time),
by= list(Release = study_tagcodes$Release),
FUN = max),
by = c("Release"))
release_stats <- merge(release_stats, aggregate(list(Number_fish_released =
study_tagcodes$TagCode),
by= list(Release = study_tagcodes$Release),
FUN = function(x) {length(unique(x))}),
by = c("Release"))
release_stats <- merge(release_stats,
aggregate(list(Release_location = study_tagcodes$release_location),
by= list(Release = study_tagcodes$Release),
FUN = function(x) {head(x,1)}),
by = c("Release"))
release_stats <- merge(release_stats,
aggregate(list(Release_rkm = study_tagcodes$release_rkm),
by= list(Release = study_tagcodes$Release),
FUN = function(x) {head(x,1)}),
by = c("Release"))
release_stats <- merge(release_stats,
aggregate(list(Mean_length = as.numeric(study_tagcodes$length)),
by= list(Release = study_tagcodes$Release),
FUN = mean, na.rm = T),
by = c("Release"))
release_stats <- merge(release_stats,
aggregate(list(Mean_weight = as.numeric(study_tagcodes$weight)),
by= list(Release = study_tagcodes$Release),
FUN = mean, na.rm = T),
by = c("Release"))
release_stats[,c("Mean_length", "Mean_weight")] <- round(release_stats[,c("Mean_length", "Mean_weight")],1)
release_stats$First_release_time <- format(release_stats$First_release_time, tz = "Etc/GMT+8")
release_stats$Last_release_time <- format(release_stats$Last_release_time, tz = "Etc/GMT+8")
release_stats <- release_stats[order(release_stats$First_release_time),]
kable(release_stats, format = "html", row.names = F) %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left")
}
Study began on 2022-03-16 09:15:00, see tagging details below:
| Release | First_release_time | Last_release_time | Number_fish_released | Release_location | Release_rkm | Mean_length | Mean_weight |
|---|---|---|---|---|---|---|---|
| Release 1 | 2022-03-16 09:15:00 | 2022-03-16 09:15:00 | 312 | NFBC_BelowECD_rel | 540.26 | 96.9 | 9.9 |
| Release 2 | 2022-03-16 12:08:00 | 2022-03-16 12:45:00 | 888 | NFBC_BelowECD_rel | 540.26 | 95.5 | 9.8 |
Study is complete; all tags were no longer active as of 2022-06-30. All times are in Pacific Standard Time.
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
## THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
"No detections yet"
} else {
arrivals <- aggregate(list(DateTime_PST = detects_study$DateTime_PST), by = list(general_location = detects_study$general_location, TagCode = detects_study$TagCode), FUN = min)
beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F)
beacon_by_day$day <- as.Date(beacon_by_day$day)
gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
arrivals$day <- as.Date(format(arrivals$DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8"))
arrivals_per_day <- aggregate(list(New_arrivals = arrivals$TagCode), by = list(day = arrivals$day, general_location = arrivals$general_location), length)
arrivals_per_day$day <- as.Date(arrivals_per_day$day)
## Now subset to only look at data for the correct beacon for that day
beacon_by_day <- as.data.frame(beacon_by_day[which(beacon_by_day$TagCode == beacon_by_day$beacon),])
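## Endtime is either today or 1.5 x the predicted tag life after the last release, whichever comes first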
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
## Now only keep beacon by day for days since fish were released
beacon_by_day <- beacon_by_day[beacon_by_day$day >= as.Date(min(study_tagcodes$release_time)) & beacon_by_day$day <= endtime,]
beacon_by_day <- merge(beacon_by_day, gen_locs[,c("location", "general_location","rkm")], by = "location", all.x = T)
arrivals_per_day <- merge(unique(beacon_by_day[,c("general_location", "day", "rkm")]), arrivals_per_day, all.x = T, by = c("general_location", "day"))
arrivals_per_day$day <- factor(arrivals_per_day$day)
## Remove bench test and other NA locations
arrivals_per_day <- arrivals_per_day[!arrivals_per_day$general_location == "Bench_test",]
arrivals_per_day <- arrivals_per_day[is.na(arrivals_per_day$general_location) == F,]
## Remove sites that were not operational the whole time
#### FOR THE SEASONAL SURVIVAL PAGE, KEEP ALL SITES SINCE PEOPLE WANT TO SEE DETECTIONS OF LATER FISH AT NEWLY DEPLOYED SPOTS##
gen_locs_days_in_oper <- aggregate(list(days_in_oper = arrivals_per_day$day), by = list(general_location = arrivals_per_day$general_location), FUN = length)
#gen_locs_days_in_oper <- gen_locs_days_in_oper[gen_locs_days_in_oper$days_in_oper == max(gen_locs_days_in_oper$days_in_oper),]
arrivals_per_day_in_oper <- arrivals_per_day[arrivals_per_day$general_location %in% gen_locs_days_in_oper$general_location,]
fish_per_site <- aggregate(list(fish_count = arrivals_per_day_in_oper$New_arrivals), by = list(general_location = arrivals_per_day_in_oper$general_location), FUN = sum, na.rm = T)
active_gen_locs <- gen_locs[is.na(gen_locs$stop),]
active_gen_locs <- active_gen_locs[active_gen_locs$general_location %in% fish_per_site$general_location,]
## Estimate mean lat and lon for each general location
gen_locs_mean_coords <- aggregate(list(latitude = active_gen_locs$latitude), by = list(general_location = active_gen_locs$general_location), FUN = mean)
gen_locs_mean_coords <- merge(gen_locs_mean_coords, aggregate(list(longitude = active_gen_locs$longitude), by = list(general_location = active_gen_locs$general_location), FUN = mean))
fish_per_site <- merge(fish_per_site, gen_locs_mean_coords)
library(leaflet)
library(maps)
library(htmlwidgets)
library(leaflet.extras)
icons <- awesomeIcons(iconColor = "lightblue",
#library = "ion",
text = fish_per_site$fish_count)
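## Note: "icons" is only used by the commented-out addAwesomeMarkers() call below; the active map uses addMarkers() with text labels instead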
leaflet(data = fish_per_site) %>%
# setView(-72.14600, 43.82977, zoom = 8) %>%
addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
# Marker data come from the fish_per_site data frame; the ~ symbols
# indicate columns of that data frame.
addMarkers(~longitude, ~latitude, label = ~fish_count, group = "Receiver Sites", popup = ~general_location, labelOptions = labelOptions(noHide = T, textsize = "15px")) %>%
#addAwesomeMarkers(~longitude, ~latitude, icon = icons, labelOptions(textsize = "15px")) %>%
addScaleBar(position = "bottomleft") %>%
addLayersControl(
baseGroups = c("Street Map", "Satellite", "Relief"),
options = layersControlOptions(collapsed = FALSE))
}
2.1 Map of unique fish detections at operational realtime detection locations
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_saltcrk <- detects_study[detects_study$general_location == "Blw_Salt_RT",]
if (nrow(detects_saltcrk) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_saltcrk <- merge(detects_saltcrk,aggregate(list(first_detect = detects_saltcrk$DateTime_PST), by = list(TagCode= detects_saltcrk$TagCode), FUN = min))
detects_saltcrk$Day <- as.Date(detects_saltcrk$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_saltcrk$release_time), "Etc/GMT+8")
## Endtime should be either now, or end of predicted tag life, whichever comes first
#endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_butte$release_time)+60), max(as.Date(detects_butte$release_time)+(as.numeric(detects_butte$tag_life)*1.5)))
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
#BTC_flow <- cdec_query("BTC", "20", "H", starttime, endtime+1)
## download bend bridge flow data
BTC_flow <- readNWISuv(siteNumbers = "11377100", parameterCd="00060", startDate = starttime, endDate = endtime+1)
BTC_flow$datetime <- as.Date(format(BTC_flow$dateTime, "%Y-%m-%d"))
BTC_flow_day <- aggregate(list(parameter_value = BTC_flow$X_00060_00000),
by = list(Day = BTC_flow$datetime),
FUN = mean, na.rm = T)
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
#rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_butte$StudyID), "Release"])
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_saltcrk$Release))])
tagcount <- aggregate(list(unique_tags = detects_saltcrk$TagCode), by = list(Day = detects_saltcrk$Day, Release = detects_saltcrk$Release), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == 'x'] <- paste(i)
}
}
## Reorder columns alphabetically so the coloring in barplots is consistent
daterange1 <- daterange1[,order(colnames(daterange1))]
daterange2 <- merge(daterange1, BTC_flow_day, by = "Day", all.x = T)
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
# barp <- barplot(t(daterange2[,1:ncol(daterange2)-1]), plot = FALSE, beside = T)
# barplot(t(daterange2[,1:ncol(daterange2)-1]), beside = T, col=brewer.pal(n = rel_num, name = "Set1"),
# xlab = "", ylab = "Number of fish arrivals per day",
# ylim = c(0,max(daterange2[,1:ncol(daterange2)-1], na.rm = T)*1.2),
# las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt ="n", border = NA)#,
# #border=NA
# #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
# #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
# legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)-1], fill= brewer.pal(n = rel_num, name = "Set1"), horiz = T, title = "Release")
# ybreaks <- if(max(daterange2[,1:ncol(daterange2)-1], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)-1], na.rm = T)} else {5}
# xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
# barpmeans <- colMeans(barp)
# axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2[xbreaks,]), las = 2)
# axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)-1], na.rm = T), ybreaks))
#
# par(new=T)
#
# plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "lightslateblue", type = "l", lwd=1.5, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
# axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
# mtext("Flow (cfs) at Bend Bridge", side=4, line=3, cex=1.5, col="lightslateblue")
daterange2$Date <- as.Date(row.names(daterange2))
daterange2_flow <- daterange2[,c("Date", "parameter_value")]
daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))], id.vars = "Date", variable.name = ".")
ay <- list(
overlaying = "y",
nticks = 5,
color = "#947FFF",
side = "right",
title = "Flow (cfs) at Bend Bridge",
automargin = TRUE
)
# p <- ggplot(data = daterange3, aes(x = Date, y = value, color = ., fill = .)) +
# geom_bar(stat='identity') +
# ylab("Number of fish arrivals per day") +
# #xlim(c(as.Date("2021-02-01"), as.Date("2021-02-05"))) +
# #geom_line(data= daterange2_flow, aes(x = Date, y = parameter_value/500), color = alpha("#947FFF", alpha = 0.5))+
# #scale_x_date(date_breaks = "5 days") +
# #scale_y_continuous(name = "Number of fish arrivals per day",
# # Add a second axis and specify its features
# # sec.axis = sec_axis(~.*500, name="Second Axis")) +
# theme_bw() +
# theme(panel.border = element_rect(colour = "black", fill=NA))
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
add_lines(x=~daterange2_flow$Date, y=~daterange2_flow$parameter_value, line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE, inherit=FALSE) %>%
layout(yaxis2 = ay,showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks='outside',showline=T), yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks='outside',showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50)
)
}
2.2 Detections at Salt Creek versus Sacramento River flows at Bend Bridge for the duration of tag life
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_butte <- detects_study[detects_study$general_location == "ButteBrRT",]
if (nrow(detects_butte) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_butte <- merge(detects_butte,aggregate(list(first_detect = detects_butte$DateTime_PST), by = list(TagCode= detects_butte$TagCode), FUN = min))
detects_butte$Day <- as.Date(detects_butte$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_butte$release_time), "Etc/GMT+8")
## Endtime should be either now, or end of predicted tag life, whichever comes first
#endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_butte$release_time)+60), max(as.Date(detects_butte$release_time)+(as.numeric(detects_butte$tag_life)*1.5)))
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
#BTC_flow <- cdec_query("BTC", "20", "H", starttime, endtime+1)
## download bend bridge flow data
BTC_flow <- readNWISuv(siteNumbers = "11377100", parameterCd="00060", startDate = starttime, endDate = endtime+1)
BTC_flow$datetime <- as.Date(format(BTC_flow$dateTime, "%Y-%m-%d"))
BTC_flow_day <- aggregate(list(parameter_value = BTC_flow$X_00060_00000),
by = list(Day = BTC_flow$datetime),
FUN = mean, na.rm = T)
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
#rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_butte$StudyID), "Release"])
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_butte$Release))])
tagcount <- aggregate(list(unique_tags = detects_butte$TagCode), by = list(Day = detects_butte$Day, Release = detects_butte$Release), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == 'x'] <- paste(i)
}
}
## Reorder columns alphabetically so the coloring in barplots is consistent
daterange1 <- daterange1[,order(colnames(daterange1))]
daterange2 <- merge(daterange1, BTC_flow_day, by = "Day", all.x = T)
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
# barp <- barplot(t(daterange2[,1:ncol(daterange2)-1]), plot = FALSE, beside = T)
# barplot(t(daterange2[,1:ncol(daterange2)-1]), beside = T, col=brewer.pal(n = rel_num, name = "Set1"),
# xlab = "", ylab = "Number of fish arrivals per day",
# ylim = c(0,max(daterange2[,1:ncol(daterange2)-1], na.rm = T)*1.2),
# las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt ="n", border = NA)#,
# #border=NA
# #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
# #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
# legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)-1], fill= brewer.pal(n = rel_num, name = "Set1"), horiz = T, title = "Release")
# ybreaks <- if(max(daterange2[,1:ncol(daterange2)-1], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)-1], na.rm = T)} else {5}
# xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
# barpmeans <- colMeans(barp)
# axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2[xbreaks,]), las = 2)
# axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)-1], na.rm = T), ybreaks))
#
# par(new=T)
#
# plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "lightslateblue", type = "l", lwd=1.5, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
# axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
# mtext("Flow (cfs) at Bend Bridge", side=4, line=3, cex=1.5, col="lightslateblue")
daterange2$Date <- as.Date(row.names(daterange2))
daterange2_flow <- daterange2[,c("Date", "parameter_value")]
daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))], id.vars = "Date", variable.name = ".")
ay <- list(
overlaying = "y",
nticks = 5,
color = "#947FFF",
side = "right",
title = "Flow (cfs) at Bend Bridge",
automargin = TRUE
)
# p <- ggplot(data = daterange3, aes(x = Date, y = value, color = ., fill = .)) +
# geom_bar(stat='identity') +
# ylab("Number of fish arrivals per day") +
# #xlim(c(as.Date("2021-02-01"), as.Date("2021-02-05"))) +
# #geom_line(data= daterange2_flow, aes(x = Date, y = parameter_value/500), color = alpha("#947FFF", alpha = 0.5))+
# #scale_x_date(date_breaks = "5 days") +
# #scale_y_continuous(name = "Number of fish arrivals per day",
# # Add a second axis and specify its features
# # sec.axis = sec_axis(~.*500, name="Second Axis")) +
# theme_bw() +
# theme(panel.border = element_rect(colour = "black", fill=NA))
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
add_lines(x=~daterange2_flow$Date, y=~daterange2_flow$parameter_value, line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE, inherit=FALSE) %>%
layout(yaxis2 = ay,showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks='outside',showline=T), yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks='outside',showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50)
)
}
2.3 Detections at Butte City Bridge versus Sacramento River flows at Bend Bridge for the duration of tag life
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_tower <- detects_study[detects_study$general_location == "TowerBridge",]
#wlk_flow <- read.csv("wlk.csv")
if (nrow(detects_tower) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_tower <- merge(detects_tower,aggregate(list(first_detect = detects_tower$DateTime_PST), by = list(TagCode= detects_tower$TagCode), FUN = min))
detects_tower$Day <- as.Date(detects_tower$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_tower$release_time), "Etc/GMT+8")
## Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
## download wilkins slough flow data
wlk_flow <- readNWISuv(siteNumbers = "11390500", parameterCd="00060", startDate = starttime, endDate = endtime+1)
wlk_flow$datetime <- as.Date(format(wlk_flow$dateTime, "%Y-%m-%d"))
wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$X_00060_00000),
by = list(Day = wlk_flow$datetime),
FUN = mean, na.rm = T)
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
#rels <- unique(study_tagcodes[study_tagcodes$StudyID == unique(detects_tower$StudyID), "Release"])
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_tower$Release))])
tagcount <- aggregate(list(unique_tags = detects_tower$TagCode), by = list(Day = detects_tower$Day, Release = detects_tower$Release ), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == 'x'] <- paste(i)
}
}
## Reorder columns alphabetically so the coloring in barplots is consistent
daterange1 <- daterange1[,order(colnames(daterange1))]
daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
# barp <- barplot(t(daterange2[,1:ncol(daterange2)-1]), plot = FALSE, beside = T)
# barplot(t(daterange2[,1:ncol(daterange2)-1]), beside = T, col=brewer.pal(n = rel_num, name = "Set1"),
# xlab = "", ylab = "Number of fish arrivals per day",
# ylim = c(0,max(daterange2[,1:ncol(daterange2)-1], na.rm = T)*1.2),
# las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt ="n", border = NA)#,
# #border=NA
# #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
# #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
# legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)-1], fill= brewer.pal(n = rel_num, name = "Set1"), horiz = T, title = "Release")
# ybreaks <- if(max(daterange2[,1:ncol(daterange2)-1], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)-1], na.rm = T)} else {5}
# xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
# barpmeans <- colMeans(barp)
# axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2[xbreaks,]), las = 2)
# axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)-1], na.rm = T), ybreaks))
#
# par(new=T)
#
# plot(x = barpmeans, daterange2$parameter_value, yaxt = "n", xaxt = "n", ylab = "", xlab = "", col = "lightslateblue", type = "l", lwd=1.5, xlim=c(0,max(barp)+1), ylim = c(min(daterange2$parameter_value, na.rm = T), max(daterange2$parameter_value, na.rm=T)*1.1))#, ylab = "Returning adults", xlab= "Outmigration year", yaxt="n", col="red", pch=20)
# axis(side = 4)#, labels = c(2000:2016), at = c(2000:2016))
# mtext("Flow (cfs) at Wilkins Slough", side=4, line=3, cex=1.5, col="lightslateblue")
daterange2$Date <- as.Date(row.names(daterange2))
daterange2_flow <- daterange2[,c("Date", "parameter_value")]
daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))], id.vars = "Date", variable.name = ".")
ay <- list(
overlaying = "y",
nticks = 5,
color = "#947FFF",
side = "right",
title = "Flow (cfs) at Wilkins Slough",
automargin = TRUE
)
# p <- ggplot(data = daterange3, aes(x = Date, y = value, color = ., fill = .)) +
# geom_bar(stat='identity') +
# ylab("Number of fish arrivals per day") +
# #xlim(c(as.Date("2021-02-01"), as.Date("2021-02-05"))) +
# #geom_line(data= daterange2_flow, aes(x = Date, y = parameter_value/500), color = alpha("#947FFF", alpha = 0.5))+
# #scale_x_date(date_breaks = "5 days") +
# #scale_y_continuous(name = "Number of fish arrivals per day",
# # Add a second axis and specify its features
# # sec.axis = sec_axis(~.*500, name="Second Axis")) +
# theme_bw() +
# theme(panel.border = element_rect(colour = "black", fill=NA))
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
add_lines(x=~daterange2_flow$Date, y=~daterange2_flow$parameter_value, line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE, inherit=FALSE) %>%
layout(yaxis2 = ay,showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks='outside',showline=T), yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks='outside',showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50)
)
}
2.4 Detections at Tower Bridge (downtown Sacramento) versus Sacramento River flows at Wilkins Slough for the duration of tag life
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]
if (nrow(detects_benicia)>0) {
detects_benicia <- merge(detects_benicia,aggregate(list(first_detect = detects_benicia$DateTime_PST), by = list(TagCode= detects_benicia$TagCode), FUN = min))
detects_benicia$Day <- as.Date(detects_benicia$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_benicia$release_time), "Etc/GMT+8")
## Endtime should be either now or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
#wlk_flow <- cdec_query("COL", "20", "H", starttime, endtime+1)
#wlk_flow$datetime <- as.Date(wlk_flow$datetime)
#wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$parameter_value), by = list(Day = wlk_flow$datetime), FUN = mean, na.rm = T)
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_benicia$Release))])
tagcount <- aggregate(list(unique_tags = detects_benicia$TagCode), by = list(Day = detects_benicia$Day, Release = detects_benicia$Release ), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Day ~ Release)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == 'x'] <- paste(i)
}
}
## Reorder columns alphabetically so the coloring in barplots is consistent
daterange1 <- daterange1[,order(colnames(daterange1))]
#daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)
daterange2 <- daterange1
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
# barp <- barplot(t(daterange2[,1:ncol(daterange2)]), plot = FALSE, beside = T)
# barplot(t(daterange2[,1:ncol(daterange2)]), beside = T, col=brewer.pal(n = rel_num, name = "Dark2"),
# xlab = "", ylab = "Number of fish arrivals per day",
# ylim = c(0,max(daterange2[,1:ncol(daterange2)], na.rm = T)*1.2),
# las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n", border = NA)#,
# #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
# #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
# legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)], fill= brewer.pal(n = rel_num, name = "Set1"), horiz = T, title = "Release")
# ybreaks <- if(max(daterange2[,1:ncol(daterange2)], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)], na.rm = T)} else {5}
# xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
# barpmeans <- colMeans(barp)
# axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
# axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)], na.rm = T), ybreaks))
# box()
daterange2$Date <- as.Date(row.names(daterange2))
daterange3 <- melt(daterange2, id.vars = "Date", variable.name = ".")
# p <- ggplot(data = daterange3, aes(x = Date, y = value, color = ., fill = .)) +
# geom_bar(stat='identity') +
# ylab("Number of fish arrivals per day") +
# #xlim(range(daterange$Day)) +
# #geom_line(data= daterange2_flow, aes(x = Date, y = parameter_value/500), color = alpha("#947FFF", alpha = 0.5))+
# #scale_x_date(date_breaks = "5 days") +
# #scale_y_continuous(name = "Number of fish arrivals per day",
# # Add a second axis and specify its features
# # sec.axis = sec_axis(~.*500, name="Second Axis")) +
# theme_bw() +
# theme(panel.border = element_rect(colour = "black", fill=NA))
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
#add_lines(x=~daterange2_flow$Date, y=~daterange2_flow$parameter_value, line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE, inherit=FALSE) %>%
layout(showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks='outside',showline=T), yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks='outside',showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50)
)
}else{
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
}
2.5 Detections at Benicia Bridge for the duration of tag life
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_omr_cor <- detects_study[detects_study$general_location %in% c("Old_River_Quimby", "Holland_Cut_Quimby", "Old River", "MiddleRiver", "Clifton_Court_US_Radial_Gates", "SWP_radial_gates_DS", "SWP_radial_gates_US", "CVP_Trash_Rack_1", "CVP_Tank", "SWP_intake", "Clifton_Court_Intake_Canal"),]
## DUMMY DATA TO TEST PLOT
# detects_omr_cor <- detects_study[1:3,] %>% mutate(general_location = "Old_River_Quimby")
# detects_omr_cor <- rbind(detects_omr_cor, detects_study[212:214,] %>% mutate(general_location = "Old_River_Quimby"))
# detects_omr_cor <- rbind(detects_omr_cor, detects_study[256:258,] %>% mutate(general_location = "Old River"))
# detects_omr_cor <- rbind(detects_omr_cor, detects_study[386:387,] %>% mutate(general_location = "SWP_radial_gates_DS"))
# detects_omr_cor <- rbind(detects_omr_cor, detects_study[941:943,] %>% mutate(general_location = "Old River"))
# detects_omr_cor <- rbind(detects_omr_cor, detects_study[36621:36623,] %>% mutate(general_location = "CVP_Tank"))
if (nrow(detects_omr_cor)>0) {
# Save the first detection
detects_omr_cor <- merge(detects_omr_cor,aggregate(list(first_detect = detects_omr_cor$DateTime_PST), by = list(TagCode= detects_omr_cor$TagCode), FUN = min))
# Convert first detection to day
detects_omr_cor$Day <- as.Date(detects_omr_cor$first_detect, "Etc/GMT+8")
starttime <- as.Date(min(detects_omr_cor$release_time), "Etc/GMT+8")
## Endtime should be either now or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
#wlk_flow <- cdec_query("COL", "20", "H", starttime, endtime+1)
#wlk_flow$datetime <- as.Date(wlk_flow$datetime)
#wlk_flow_day <- aggregate(list(parameter_value = wlk_flow$parameter_value), by = list(Day = wlk_flow$datetime), FUN = mean, na.rm = T)
daterange <- data.frame(Date = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_omr_cor$Release))])
tagcount <- aggregate(list(unique_tags = detects_omr_cor$TagCode), by = list(Date = detects_omr_cor$Day, Release = detects_omr_cor$Release, Location = detects_omr_cor$general_location), FUN = function(x){length(unique(x))})
tagcount1 <- reshape2::dcast(tagcount, Date ~ Location)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
# if(length(rels_no_detects)>0){
# for(i in rels_no_detects){
# daterange1 <- cbind(daterange1, x=NA)
# names(daterange1)[names(daterange1) == 'x'] <- paste(i)
# }
# }
## Reorder columns alphabetically so the coloring in barplots is consistent
# daterange1 <- daterange1[,order(colnames(daterange1))]
#daterange2 <- merge(daterange1, wlk_flow_day, by = "Day", all.x = T)
daterange2 <- daterange1
rownames(daterange2) <- daterange2$Date
par(mar=c(6, 5, 2, 5) + 0.1)
# barp <- barplot(t(daterange2[,1:ncol(daterange2)]), plot = FALSE, beside = T)
# barplot(t(daterange2[,1:ncol(daterange2)]), beside = T, col=brewer.pal(n = rel_num, name = "Dark2"),
# xlab = "", ylab = "Number of fish arrivals per day",
# ylim = c(0,max(daterange2[,1:ncol(daterange2)], na.rm = T)*1.2),
# las = 2, xlim=c(0,max(barp)+1), cex.lab = 1.5, yaxt = "n", xaxt = "n", border = NA)#,
# #legend.text = colnames(daterange2[,1:ncol(daterange2)-1]),
# #args.legend = list(x ='topright', bty='n', inset=c(-0.2,0)), title = "Release Group")
# legend(x ='topleft', legend = colnames(daterange2)[1:ncol(daterange2)], fill= brewer.pal(n = rel_num, name = "Set1"), horiz = T, title = "Release")
# ybreaks <- if(max(daterange2[,1:ncol(daterange2)], na.rm = T) < 4) {max(daterange2[,1:ncol(daterange2)], na.rm = T)} else {5}
# xbreaks <- if(ncol(barp) > 10) {seq(1, ncol(barp), 2)} else {1:ncol(barp)}
# barpmeans <- colMeans(barp)
# axis(1, at = barpmeans[xbreaks], labels = rownames(daterange2)[xbreaks], las = 2)
# axis(2, at = pretty(0:max(daterange2[,1:ncol(daterange2)], na.rm = T), ybreaks))
# box()
# daterange2$Date <- as.Date(row.names(daterange2))
daterange3 <- melt(daterange2, id.vars = "Date", variable.name = ".")
# p <- ggplot(data = daterange3, aes(x = Date, y = value, color = ., fill = .)) +
# geom_bar(stat='identity') +
# ylab("Number of fish arrivals per day") +
# #xlim(range(daterange$Day)) +
# #geom_line(data= daterange2_flow, aes(x = Date, y = parameter_value/500), color = alpha("#947FFF", alpha = 0.5))+
# #scale_x_date(date_breaks = "5 days") +
# #scale_y_continuous(name = "Number of fish arrivals per day",
# # Add a second axis and specify its features
# # sec.axis = sec_axis(~.*500, name="Second Axis")) +
# theme_bw() +
# theme(panel.border = element_rect(colour = "black", fill=NA))
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
#add_lines(x=~daterange2_flow$Date, y=~daterange2_flow$parameter_value, line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE, inherit=FALSE) %>%
layout(showlegend = T,
barmode = "dodge",
xaxis = list(title = "Date", mirror=T,ticks='outside',showline=T), yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks='outside',showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50)
)
}else{
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
}
2.6 (BETA Test) Detections in the OMR corridor for the duration of tag life
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_tower <- detects_study[detects_study$general_location == "TowerBridge",]
if (nrow(detects_tower) == 0){
WR.surv <- data.frame("Release"=NA, "Survival (%)"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA, "Detection efficiency (%)"=NA)
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
study_count <- nrow(study_tagcodes)
## Only do survival to Sac for now
test <- detects_study[which(detects_study$river_km > 168 & detects_study$river_km < 175),]
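## Note: the 168-175 river km window is intended to bracket the downtown Sacramento receivers (e.g. TowerBridge and I80-50_Br) used for this minimum-survival estimate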
## Create inp for survival estimation
inp <- as.data.frame(reshape2::dcast(test, TagCode ~ river_km, fun.aggregate = length))
## Sort columns by river km in descending order
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
if(gen_loc_sites <2){
WR.surv <- data.frame("Release"=NA, "Survival (%)"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA, "Detection efficiency (%)"=NA)
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}else{
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
test$Release <- factor(test$Release, levels = groups)
inp[,groups] <- 0
for (i in groups) {
inp[as.character(inp$Release) == i, i] <- 1
}
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
if(length(groups) > 1){
## Make sure the factor levels put a release that has detections first; if the first release in the factor order has zero detections, the model goes haywire
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = factor(inp$Release, levels = names(sort(table(test$Release),decreasing = T))), stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(c("ALL", names(sort(table(test$Release),decreasing = T))), WR.surv)
}
if(length(unique(inp[,groups])) < 2){
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ", 1,sep = "")
write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
WRinp <- convert.inp("WRinp.inp")
WR.process <- process.data(WRinp, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(c("ALL", groups), WR.surv)
}
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}
}
| Release | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) |
|---|---|---|---|---|---|
| ALL | 1.1 | 0.0 | 1.1 | 1.1 | 100 |
| Release 2 | 1.1 | 0.4 | 0.6 | 2.1 | NA |
| Release 1 | 1.0 | 0.6 | 0.3 | 2.9 | NA |
## Once Georgiana Sl receivers are back online, remove "eval = F" from header
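## Reference note: in the Rmd source this chunk's header carries the option eval = F (i.e. a header of the form {r, eval = F}); deleting that option re-enables evaluation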
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
route_results_possible <- F
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
results_short <- data.frame("Measure"=NA, "Estimate"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(results_short, row.names = F, "html", caption = "3.2 Reach-specific survival and probability of entering Georgiana Slough") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
## Only do survival to Georg split for now
test2 <- detects_study[detects_study$general_location %in% c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
## We can only do multistate model if there is at least one detection in each route
if(nrow(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2"),]) == 0 |
nrow(test2[test2$general_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"),]) == 0){
results_short <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(results_short, row.names = F, "html", caption = "3.2 Reach-specific survival and probability of entering Georgiana Slough") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}else{
## Make tagcode character
study_tagcodes$TagCode <- as.character(study_tagcodes$TagCode)
## Make a crosstab query with frequencies for all tag/location combination
test2$general_location <- factor(test2$general_location, levels = c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"))
test2$TagCode <- factor(test2$TagCode, levels = study_tagcodes$TagCode)
mytable <- table(test2$TagCode, test2$general_location) # A will be rows, B will be columns
## Mark every tag/location combination with at least one detection as "A". You could instead require a minimum cutoff of 2 detections by first zeroing out single detections (see the commented sketch below)
mytable[mytable>0] <- "A"
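## Sketch (not run): to require at least 2 detections before crediting a site, recode the raw counts
## first, e.g.:
# mytable[mytable < 2] <- 0     # drop single detections
# mytable[mytable >= 2] <- "A"  # credit only sites with 2+ detections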
## Order in order of rkm
mytable2 <- mytable[, c("ButteBrRT","TowerBridge", "I80-50_Br", "Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2")]
## Now sort the crosstab rows alphabetically
mytable2 <- mytable2[order(row.names(mytable2)),]
mytable2[which(mytable2[, "Sac_BlwGeorgiana"]=="A"), "Sac_BlwGeorgiana"] <- "A"
mytable2[which(mytable2[, "Sac_BlwGeorgiana2"]=="A"), "Sac_BlwGeorgiana2"] <- "A"
mytable2[which(mytable2[, "Georgiana_Slough1"]=="A"), "Georgiana_Slough1"] <- "B"
mytable2[which(mytable2[, "Georgiana_Slough2"]=="A"), "Georgiana_Slough2"] <- "B"
## Make a crosstab query with frequencies for all weekly Release groups
#test2$Release <- factor(test2$Release)
#mytable3 <- table(test2$TagCode, test2$Release) # A will be rows, B will be columns
## Change all frequencies bigger than 1 to 1. Here you could change your minimum cutoff to 2 detections, and then make another command that changes all detections=1 to 0
#mytable3[mytable3>0] <- 1
## Order in order of rkm
#mytable4 <- mytable3[, order(colnames(mytable3))]
## Now sort the crosstab rows alphabetically
#mytable4 <- mytable4[order(row.names(mytable4)),]
## Now order the study_tagcodes table the same way
study_tagcodes <- study_tagcodes[order(study_tagcodes$TagCode),]
## Paste together (concatenate) the data from each column of the crosstab into one string per row, add to tagging_meta.
## For this step, make sure both are sorted by FishID
study_tagcodes$inp_part1 <- apply(mytable2[,1:3],1,paste,collapse="")
study_tagcodes$inp_partA <- apply(mytable2[,4:5],1,paste,collapse="")
study_tagcodes$inp_partB <- apply(mytable2[,6:7],1,paste,collapse="")
#study_tagcodes$inp_group <- apply(mytable4,1,paste,collapse=" ")
## We need to have a way of picking which route to assign to a fish if it was detected by both georg and blw-georg recvs
## We will say that the last detection at that junction is what determines the route it took
## find last detection at each genloc
departure <- aggregate(list(depart = test2$DateTime_PST), by = list(TagCode = test2$TagCode, last_location = test2$general_location), FUN = max)
## subset for just juncture locations
departure <- departure[departure$last_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2", "Georgiana_Slough1", "Georgiana_Slough2"),]
## Find genloc of last known detection per tag
last_depart <- aggregate(list(depart = departure$depart), by = list(TagCode = departure$TagCode), FUN = max)
last_depart1 <- merge(last_depart, departure)
study_tagcodes <- merge(study_tagcodes, last_depart1[,c("TagCode", "last_location")], by = "TagCode", all.x = T)
## Assume the Sacramento mainstem is the default route; fish detected in neither route get "00" in the inp, so the assignment doesn't matter for them
study_tagcodes$inp_final <- paste("A",study_tagcodes$inp_part1, study_tagcodes$inp_partA," 1 ;", sep = "")
## now put in exceptions...fish that were seen in georgiana last
study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_final"] <- paste("A",study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_part1"], study_tagcodes[study_tagcodes$last_location %in% c("Georgiana_Slough1", "Georgiana_Slough2"), "inp_partB"]," 1 ;", sep = "")
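## Added note: each multistate history starts with "A" (all fish are released in the mainstem state), followed by A/B/0 per downstream occasion (A = mainstem detection, B = Georgiana Slough detection, 0 = not detected), and ends with " 1 ;" (a frequency of 1 plus the MARK line terminator)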
## At this point, some fish might not have been deemed to ever take a route based on last visit analysis. If so, model can't be run
if(any(grepl(pattern = "A", study_tagcodes$inp_final)==T) & any(grepl(pattern = "B", study_tagcodes$inp_final)==T)){
write.table(study_tagcodes$inp_final,"WRinp_multistate.inp",row.names = F, col.names = F, quote = F)
WRinp <- convert.inp("WRinp_multistate.inp")
dp <- process.data(WRinp, model="Multistrata")
ddl <- make.design.data(dp)
#### p ####
# Can't be seen at 2B or 3B or 4B (butte, tower or I80)
ddl$p$fix=NA
ddl$p$fix[ddl$p$stratum == "B" & ddl$p$time %in% c(2,3,4)]=0
#### Psi ####
# Only 1 transition allowed:
# from A to B at time interval 4 to 5
ddl$Psi$fix=0
# A to B can only happen over the interval from occasion 4 to 5 (time == 4)
ddl$Psi$fix[ddl$Psi$stratum=="A"&
ddl$Psi$tostratum=="B" & ddl$Psi$time==4]=NA
#### Phi a.k.a. S ####
ddl$S$fix=NA
# None in B for reaches 1,2,3,4 and fixing it to 1 for 5 (between two georg lines). All getting fixed to 1
ddl$S$fix[ddl$S$stratum=="B" & ddl$S$time %in% c(1,2,3,4,5)]=1
# For route A, fixing it to 1 for 5 (between two blw_georg lines)
ddl$S$fix[ddl$S$stratum=="A" & ddl$S$time==5]=1
## We use -1 at beginning of formula to remove intercept. This is because different routes probably shouldn't share the same intercept
p.timexstratum=list(formula=~-1+stratum:time)
Psi.stratumxtime=list(formula=~-1+stratum:time)
S.stratumxtime=list(formula=~-1+stratum:time)
## Run model a first time
S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, silent = T, output = F)
## Identify any parameter estimates at 1, which would likely have bad SE estimates.
profile.intervals <- which(S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$estimate %in% c(0,1) & !S.timexstratum.p.timexstratum.Psi.timexstratum$results$real$fixed == "Fixed")
## Rerun model using profile interval estimation for the tricky parameters
S.timexstratum.p.timexstratum.Psi.timexstratum=mark(dp,ddl, model.parameters=list(S=S.stratumxtime,p= p.timexstratum,Psi=Psi.stratumxtime), realvcv = T, profile.int = profile.intervals, silent = T, output = F)
results <- S.timexstratum.p.timexstratum.Psi.timexstratum$results$real
results_short <- results[rownames(results) %in% c("S sA g1 c1 a0 o1 t1",
"S sA g1 c1 a1 o2 t2",
"S sA g1 c1 a2 o3 t3",
"S sA g1 c1 a3 o4 t4",
"p sA g1 c1 a1 o1 t2",
"p sA g1 c1 a2 o2 t3",
"p sA g1 c1 a3 o3 t4",
"p sA g1 c1 a4 o4 t5",
"p sB g1 c1 a4 o4 t5",
"Psi sA toB g1 c1 a3 o4 t4"
),]
results_short <- round(results_short[,c("estimate", "se", "lcl", "ucl")] * 100,1)
## Now find estimate and CIs for AtoA route at junction
Psilist=get.real(S.timexstratum.p.timexstratum.Psi.timexstratum,"Psi",vcv=TRUE)
Psivalues=Psilist$estimates
routes <- TransitionMatrix(Psivalues[Psivalues$time==4 & Psivalues$cohort==1,],vcv.real=Psilist$vcv.real)
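## Added note: routes$TransitionMat["A","B"] is the estimated probability that a fish at the junction enters Georgiana Slough (state B); it is drawn on the junction map in section 3.3 below, with lcl/ucl.TransitionMat supplying the 95% confidence bounds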
results_short$Measure <- c("Survival from release to Butte City","Survival from Butte City to TowerBridge (minimum estimate since fish may have taken Yolo Bypass)", "Survival from TowerBridge to I80-50_Br", "% arrived from I80-50_Br to Georgiana Slough confluence (not survival because fish may have taken Sutter/Steamboat Slough)","Detection probability at Butte City",
"Detection probability at TowerBridge", "Detection probability at I80-50_Br", "Detection probability at Blw_Georgiana", "Detection probability at Georgiana Slough",
"Routing probability into Georgiana Slough (Conditional on fish arriving to junction)")
results_short <- results_short[,c("Measure", "estimate", "se", "lcl", "ucl")]
colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(results_short, row.names = F, "html", caption = "3.2 Reach-specific survival and probability of entering Georgiana Slough") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
route_results_possible <- T
} else {
results_short <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(results_short) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(results_short, row.names = F, "html", caption = "3.2 Reach-specific survival and probability of entering Georgiana Slough") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}
}
}
## Once Georgiana Sl receivers are back online, remove "eval = F" from header
##____________________________________________________________________________
## If you don't have access to local files, uncomment and run next lines of code
#download.file("https://raw.githubusercontent.com/CalFishTrack/real-time/master/data/georg.png",destfile = "georg.png", quiet = T, mode = "wb")
##________________________________________________________________________________
georg <- readPNG("georg.png")
par(mar=c(2,0,0,0))
#Set up the plot area
plot(1:2, type='n', xlab="", ylab="", xaxt = "n", yaxt = "n")
#Get the plot information so the image will fill the plot box, and draw it
lim <- par()
rasterImage(georg, lim$usr[1], lim$usr[3], lim$usr[2], lim$usr[4])
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
legend(x = 1.55,y = 1.6,legend = "No detections yet",col = "white", box.col = "light gray", bg = "light gray")
legend(x = 1.55,y = 1.45,legend = "No detections yet",col = "white", box.col = "light gray", bg = "light gray")
}else if (route_results_possible == F){
legend(x = 1.55,y = 1.6,legend = "Too few detections",col = "white", box.col = "light gray", bg = "light gray")
legend(x = 1.55,y = 1.45,legend = "Too few detections",col = "white", box.col = "light gray", bg = "light gray")
}else{
legend(x = 1.55,y = 1.6,legend = paste(round(routes$TransitionMat["A","A"],3)*100,"% (",round(routes$lcl.TransitionMat["A","A"],3)*100,"-",round(routes$ucl.TransitionMat["A","A"],3)*100,")", sep =""),col = "white", box.col = "light gray", bg = "light gray")
legend(1.55,1.45, legend = paste(round(routes$TransitionMat["A","B"],3)*100,"% (",round(routes$lcl.TransitionMat["A","B"],3)*100,"-",round(routes$ucl.TransitionMat["A","B"],3)*100,")", sep =""), box.col = "light gray", bg = "light gray")
}
mtext(text = "3.3 Routing Probabilities at Georgiana Slough Junction (with 95% C.I.s)", cex = 1.3, side = 1, line = 0.2, adj = 0)
## Once Georgiana Sl receivers are back online, remove "eval = F" from header
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
plot(1:2, type = "n",xaxt = "n", yaxt = "n", xlab = "Range of days study fish were present at Georgiana Sl Junction", ylab = "Routing probability into Georgiana Slough at the junction")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
}else if (route_results_possible == F){
plot(1:2, type = "n",xaxt = "n", yaxt = "n", xlab = "Range of days study fish were present at Georgiana Sl Junction", ylab = "Routing probability into Georgiana Slough at the junction")
text(1.5,1.5, labels = "TOO FEW DETECTIONS", cex = 2)
}else{
library(repmis)
trytest <- try(source_data("https://code.usgs.gov/crrl_qfes/Enhanced_Acoustic_Telemetry_Project/raw/master/EAT_data_2021.Rdata?raw=True"))
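## source_data() (repmis) downloads and loads the .Rdata workspace from the USGS repository; it is
## expected to provide the STARS model objects used below (e.g. psi_GeoCond, the daily routing predictions).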
if (inherits(trytest, "try-error")){
plot(1:2, type = "n",xaxt = "n", yaxt = "n", xlab = "Range of days study fish were present at Georgiana Sl Junction", ylab = "Routing probability into Georgiana Slough at the junction")
text(1.5,1.5, labels = "ERROR DOWNLOADING STARS", cex = 2)
}else{
## First, find the first and last arrival dates at the Georgiana Slough junction receivers for this study
min_georg <- as.Date(format(min(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2","Georgiana_Slough1", "Georgiana_Slough2"),"DateTime_PST"]), "%Y-%m-%d"))
max_georg <- as.Date(format(max(test2[test2$general_location %in% c("Sac_BlwGeorgiana", "Sac_BlwGeorgiana2","Georgiana_Slough1", "Georgiana_Slough2"),"DateTime_PST"]), "%Y-%m-%d"))
psi_study <- psi_GeoCond[psi_GeoCond$Date <= max_georg & psi_GeoCond$Date >=min_georg-1,]
plot(psi_study$Date, psi_study$psi_geo.50, ylim = c(0,1), xlim = c(min_georg, max_georg), type = "n", xaxt = "n", xlab = "Range of days study fish were present at Georgiana Sl Junction", ylab = "Routing probability into Georgiana Slough at the junction")
polygon(c(psi_study$Date, rev(psi_study$Date)),
c(psi_study$psi_geo.10,rev(psi_study$psi_geo.90)), density = 200, col ='grey90')
lines(psi_study$Date, psi_study$psi_geo.50, lty = 3)
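## Overlay the empirical routing estimate from Table 3.2 (last row of results_short, converted
## back from percent to a proportion) at the midpoint of the plotted date range, with its 95% CI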
points(mean(psi_study$Date), tail(results_short$Estimate,1)/100, pch = 16, cex = 1.3)
arrows(mean(psi_study$Date), tail(results_short$`95% lower C.I.`,1)/100, mean(psi_study$Date), tail(results_short$`95% upper C.I.`,1)/100, length=0.05, angle=90, code=3)
axis(side=1, at=psi_study$Date, labels=format(psi_study$Date, '%b-%d'))
legend("topright", legend = c('STARS daily predictions during study (w/ 90% CI)', 'Empirical estimate over study period (w/ 95% CI)'),
bty = "n",
col = c("black","black"),
lty = c(3,1),
fill = c("grey90", NA),
border = c(NA,NA),
pch = c(NA,16),
seg.len =0.8,
cex= 1.2
)
}
}
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
try(benicia <- read.csv("benicia_surv.csv", stringsAsFactors = F))
detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]
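## Monitoring window ends at the earlier of today and the last release date plus 1.5 x the rated tag life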
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
if (nrow(detects_benicia) == 0){
if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
WR.surv <- data.frame("Release"="ALL", "estimate"=0, "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
}else{
WR.surv <- data.frame("Release"=NA, "estimate"="NO DETECTIONS YET", "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
}
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = "3.5 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}else if (length(table(detects_benicia$general_location)) == 1){
if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
WR.surv <- data.frame("Release"="ALL", "estimate"=round(length(unique(detects_benicia$TagCode))/length(unique(detects_study$TagCode))*100,1), "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
}else{
WR.surv <- data.frame("Release"=NA, "estimate"="NOT ENOUGH DETECTIONS", "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
}
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = "3.5 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
## Only do survival to Benicia here
test3 <- detects_study[which(detects_study$river_km < 53),]
## Create inp for survival estimation
inp <- as.data.frame(reshape2::dcast(test3, TagCode ~ river_km, fun.aggregate = length))
# Count number of general detection locations (genlocs)
gen_loc_sites <- ncol(inp)-1
## Sort detection columns by river km in descending order
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
groups_w_detects <- names(table(test3$Release))
inp[,groups] <- 0
for (i in groups) {
inp[as.character(inp$Release) == i, i] <- 1
}
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
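## For orientation, a minimal sketch (hypothetical values, not part of the analysis) of the
## capture-history format built above: a leading "1" marks the release occasion, followed by
## one 0/1 per receiver site ordered upstream to downstream.
demo_dethist <- matrix(c(0, 1, 0, 1,
                         1, 1, 1, 0), nrow = 2, byrow = TRUE)
demo_ch <- apply(demo_dethist, 1, function(x) paste0("1", paste(x, collapse = "")))
## demo_ch is c("10101", "11110"): fish 1 was missed at sites 1 and 3, fish 2 was missed at site 4
rm(demo_dethist, demo_ch)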
if(length(groups) > 1){
## Make sure the factor levels put a release group with detections first; if the first release in factor order has zero detections, the model goes haywire
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = inp$Release, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
inp.df <- inp.df[inp.df$rel %in% groups_w_detects,]
inp.df$rel <- factor(inp.df$rel, levels = groups_w_detects)
if(length(groups_w_detects) > 1){
WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
}else{
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
}
WR.surv <- cbind(Release = "ALL",round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- cbind(Release = groups_w_detects, round(WR.mark.rel$results$real[seq(from=1,to=length(groups_w_detects)*2,by = 2),c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- merge(WR.surv.rel, data.frame(Release = groups), all.y = T)
WR.surv.rel[is.na(WR.surv.rel$estimate),"estimate"] <- 0
WR.surv <- rbind(WR.surv, WR.surv.rel)
}else{
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.surv <- cbind(Release = c("ALL", groups),round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
}
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = "3.5 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}
Release Group | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) |
---|---|---|---|---|---|
ALL | 0.1 | 0.1 | 0.0 | 0.6 | 100 |
Release 1 | 0.0 | NA | NA | NA | NA |
Release 2 | 0.1 | 0.0 | 0.1 | 0.1 | NA |
if(exists("benicia")==T & is.numeric(WR.surv1[1,2])){
## Find mean release time per release group, and ALL
reltimes <- aggregate(list(RelDT = study_tagcodes$release_time), by = list(Release = study_tagcodes$Release), FUN = mean)
reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$release_time)))
## Assign whether the results are tentative or final
quality <- "tentative"
if(endtime < as.Date(format(Sys.time(), "%Y-%m-%d"))) { quality <- "final"}
WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = '1970-01-01')
benicia$RelDT <- as.POSIXct(benicia$RelDT)
## remove old benicia record for this studyID
benicia <- benicia[!benicia$StudyID == unique(detects_study$Study_ID),]
benicia <- rbind(benicia, data.frame(WR.surv, StudyID = unique(detects_study$Study_ID), data_quality = quality))
write.csv(benicia, "benicia_surv.csv", row.names = F, quote = F)
}
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
try(Delta <- read.csv("Delta_surv.csv", stringsAsFactors = F))
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
WR.surv1 <- data.frame("Measure"=NA, "Estimate"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(WR.surv1) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(WR.surv1, row.names = F, "html", caption = "3.6 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}else{
test4 <- detects_study[detects_study$general_location %in% c("TowerBridge", "I80-50_Br", "Benicia_west", "Benicia_east"),]
if(nrow(test4[test4$general_location =="Benicia_west",]) == 0 |
nrow(test4[test4$general_location =="Benicia_east",]) == 0){
WR.surv1 <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(WR.surv1) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(WR.surv1, row.names = F, "html", caption = "3.6 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}else{
inp <- as.data.frame(reshape2::dcast(test4, TagCode ~ general_location, fun.aggregate = length))
## add together detections at Tower and I80 to ensure good detection entering Delta
if("I80-50_Br" %in% colnames(inp) & "TowerBridge" %in% colnames(inp)){
inp$`I80-50_Br` <- inp$`I80-50_Br` + inp$TowerBridge
}else if("TowerBridge" %in% colnames(inp)){
inp$`I80-50_Br` <- inp$TowerBridge
}
## Keep detection columns in descending river km order; TowerBridge is dropped since its detections were pooled into I80-50_Br above
inp <- inp[,c("TagCode","I80-50_Br", "Benicia_east", "Benicia_west")]
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
groups_w_detects <- names(table(detects_study[which(detects_study$river_km < 53),"Release"]))
inp[,groups] <- 0
for (i in groups) {
inp[as.character(inp$Release) == i, i] <- 1
}
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
# if(length(groups) > 1){
# inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",apply(inp[,groups], 1, paste, collapse=" ")," ;",sep = "")
# }else{
# inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ",inp[,groups]," ;",sep = "")
# }
if(length(groups) > 1){
## Make sure the factor levels put a release group with detections first; if the first release in factor order has zero detections, the model goes haywire
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = inp$Release, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
inp.df <- inp.df[inp.df$rel %in% groups_w_detects,]
inp.df$rel <- factor(inp.df$rel, levels = groups_w_detects)
if(length(groups_w_detects) > 1){
WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
}else{
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
}
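## In this model the occasions are release, the Sacramento-area line (TowerBridge pooled into I80-50_Br),
## and the two Benicia lines, so real[2, ] below is survival over the second interval,
## i.e. the through-Delta (Sacramento to Benicia) estimate reported in this section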
WR.surv <- cbind(Release = "ALL",round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- cbind(Release = groups_w_detects, round(WR.mark.rel$results$real[seq(from=2,to=length(groups_w_detects)*3,by = 3),c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- merge(WR.surv.rel, data.frame(Release = groups), all.y = T)
WR.surv.rel[is.na(WR.surv.rel$estimate),"estimate"] <- 0
WR.surv <- rbind(WR.surv, WR.surv.rel)
}else{
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.surv <- cbind(Release = c("ALL", groups),round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1))
}
#
#
# # write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
#
# if(length(groups) > 1){
#
# inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = factor(inp$Release, levels = names(sort(table(test$Release),decreasing = T))), stringsAsFactors = F)
#
# WRinp <- convert.inp("WRinp.inp", group.df=data.frame(rel=groups))
# WR.process <- process.data(WRinp, model="CJS", begin.time=1, groups = "rel")
#
# WR.ddl <- make.design.data(WR.process)
#
# WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
#
# WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
#
# WR.surv <- round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1)
# WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=2,to=length(groups)*3,by = 3),c("estimate", "se", "lcl", "ucl")] * 100,1))
#
# }else{
#
# WRinp <- convert.inp("WRinp.inp")
# WR.process <- process.data(WRinp, model="CJS", begin.time=1)
#
#
# WR.ddl <- make.design.data(WR.process)
#
# WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
#
# WR.surv <- round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1)
#
# }
# WR.surv <- cbind(Release = c("ALL", groups), WR.surv)
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(WR.surv1, row.names = F, "html", caption = "3.6 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
if(exists("Delta")==T & is.numeric(WR.surv1[1,2])){
reltimes <- aggregate(list(RelDT = study_tagcodes$release_time), by = list(Release = study_tagcodes$Release), FUN = mean)
reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$release_time)))
WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = '1970-01-01')
Delta$RelDT <- as.POSIXct(Delta$RelDT)
## Remove old Delta record for this study ID
Delta <- Delta[!Delta$StudyID %in% unique(detects_study$Study_ID),]
Delta <- rbind(Delta, data.frame(WR.surv, StudyID = unique(detects_study$Study_ID), data_quality = quality))
write.csv(Delta, "Delta_surv.csv", row.names = F, quote = F)
}
}
}
Release Group | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. |
---|---|---|---|---|
ALL | 7.7 | 7.4 | 1.1 | 39.1 |
Release 1 | 0.0 | NA | NA | NA |
Release 2 | 10.0 | 9.5 | 1.4 | 46.7 |
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
"No detections yet"
} else {
arrivals <- aggregate(list(DateTime_PST = detects_study$DateTime_PST), by = list(general_location = detects_study$general_location, TagCode = detects_study$TagCode), FUN = min)
tag_stats <- aggregate(list(First_arrival = arrivals$DateTime_PST),
by= list(general_location = arrivals$general_location),
FUN = min)
tag_stats <- merge(tag_stats,
aggregate(list(Mean_arrival = arrivals$DateTime_PST),
by= list(general_location = arrivals$general_location),
FUN = mean),
by = c("general_location"))
tag_stats <- merge(tag_stats,
aggregate(list(Last_arrival = arrivals$DateTime_PST),
by= list(general_location = arrivals$general_location),
FUN = max),
by = c("general_location"))
tag_stats <- merge(tag_stats,
aggregate(list(Fish_count = arrivals$TagCode),
by= list(general_location = arrivals$general_location),
FUN = function(x) {length(unique(x))}),
by = c("general_location"))
tag_stats$Percent_arrived <- round(tag_stats$Fish_count/nrow(study_tagcodes) * 100,2)
tag_stats <- merge(tag_stats, unique(detects_study[,c("general_location", "river_km")]))
tag_stats <- tag_stats[order(tag_stats$river_km, decreasing = T),]
tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")
tag_stats <- tag_stats[is.na(tag_stats$First_arrival)==F,]
print(kable(tag_stats, row.names = F,
caption = "4.1 Detections for all releases combined",
"html") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
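## A minimal illustrative sketch (not part of the report tables): per-fish travel time in days
## from release to a site of interest, assuming study_tagcodes carries TagCode and release_time
## as used elsewhere in this document. TowerBridge is used here purely as an example site.
travel_demo <- merge(arrivals[arrivals$general_location == "TowerBridge", c("TagCode", "DateTime_PST")],
                     study_tagcodes[, c("TagCode", "release_time")], by = "TagCode")
travel_demo$travel_days <- as.numeric(difftime(travel_demo$DateTime_PST, travel_demo$release_time, units = "days"))
## summary(travel_demo$travel_days) would summarize travel times; left unprinted here
rm(travel_demo)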
for (j in sort(unique(study_tagcodes$Release))) {
if(nrow(detects_study[detects_study$Release == j,]) > 0 ) {
temp <- detects_study[detects_study$Release == j,]
arrivals1 <- aggregate(list(DateTime_PST = temp$DateTime_PST), by = list(general_location = temp$general_location, TagCode = temp$TagCode), FUN = min)
rel_count <- nrow(study_tagcodes[study_tagcodes$Release == j,])
tag_stats1 <- aggregate(list(First_arrival = arrivals1$DateTime_PST),
by= list(general_location = arrivals1$general_location),
FUN = min)
tag_stats1 <- merge(tag_stats1,
aggregate(list(Mean_arrival = arrivals1$DateTime_PST),
by= list(general_location = arrivals1$general_location),
FUN = mean),
by = c("general_location"))
tag_stats1 <- merge(tag_stats1,
aggregate(list(Last_arrival = arrivals1$DateTime_PST),
by= list(general_location = arrivals1$general_location),
FUN = max),
by = c("general_location"))
tag_stats1 <- merge(tag_stats1,
aggregate(list(Fish_count = arrivals1$TagCode),
by= list(general_location = arrivals1$general_location),
FUN = function(x) {length(unique(x))}),
by = c("general_location"))
tag_stats1$Percent_arrived <- round(tag_stats1$Fish_count/rel_count * 100,2)
tag_stats1 <- merge(tag_stats1, unique(detects_study[,c("general_location", "river_km")]))
tag_stats1 <- tag_stats1[order(tag_stats1$river_km, decreasing = T),]
tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")] <- format(tag_stats1[,c("First_arrival", "Mean_arrival", "Last_arrival")], tz = "Etc/GMT+8")
tag_stats1 <- tag_stats1[is.na(tag_stats1$First_arrival)==F,]
final_stats <- kable(tag_stats1, row.names = F,
caption = paste("4.2 Detections for",j,"release groups", sep = " "),
"html")
print(kable_styling(final_stats, bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
cat("\n\n\\pagebreak\n")
print(paste("No detections for",j,"release group yet", sep=" "), quote = F)
cat("\n\n\\pagebreak\n")
}
}
}
general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
---|---|---|---|---|---|---|
Blw_Salt_RT | 2022-03-18 00:16:24 | 2022-03-22 15:35:37 | 2022-05-10 09:02:12 | 457 | 38.08 | 457.000 |
Abv_Otter_Island | 2022-03-18 12:30:18 | 2022-03-24 06:15:14 | 2022-05-11 01:34:12 | 304 | 25.33 | 419.600 |
ButteBrRT | 2022-03-21 04:16:20 | 2022-03-27 13:13:54 | 2022-04-19 00:59:27 | 72 | 6.00 | 344.108 |
MeridianBr | 2022-04-14 16:25:03 | 2022-04-19 11:50:00 | 2022-04-27 03:05:02 | 15 | 1.25 | 290.848 |
TowerBridge | 2022-03-26 21:12:29 | 2022-04-06 22:18:12 | 2022-05-12 01:57:19 | 13 | 1.08 | 172.000 |
I80-50_Br | 2022-03-26 21:43:36 | 2022-04-03 12:32:35 | 2022-04-20 23:22:23 | 11 | 0.92 | 170.748 |
Sac_BlwGeorgiana | 2022-04-04 13:36:24 | 2022-04-04 13:36:24 | 2022-04-04 13:36:24 | 1 | 0.08 | 119.058 |
Sac_BlwGeorgiana2 | 2022-04-04 13:50:32 | 2022-04-04 13:50:32 | 2022-04-04 13:50:32 | 1 | 0.08 | 118.398 |
Benicia_east | 2022-04-08 13:43:43 | 2022-04-08 13:43:43 | 2022-04-08 13:43:43 | 1 | 0.08 | 52.240 |
Benicia_west | 2022-04-08 13:48:25 | 2022-04-08 13:48:25 | 2022-04-08 13:48:25 | 1 | 0.08 | 52.040 |
general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
---|---|---|---|---|---|---|
Blw_Salt_RT | 2022-03-18 00:16:24 | 2022-03-21 22:25:42 | 2022-04-20 05:17:01 | 137 | 43.91 | 457.000 |
Abv_Otter_Island | 2022-03-18 12:30:18 | 2022-03-23 03:56:09 | 2022-04-19 06:50:30 | 98 | 31.41 | 419.600 |
ButteBrRT | 2022-03-21 04:16:20 | 2022-03-27 09:27:46 | 2022-04-17 21:15:37 | 20 | 6.41 | 344.108 |
MeridianBr | 2022-04-18 02:20:47 | 2022-04-18 06:29:04 | 2022-04-18 10:37:22 | 2 | 0.64 | 290.848 |
TowerBridge | 2022-04-03 11:09:46 | 2022-04-05 13:22:39 | 2022-04-09 12:39:43 | 3 | 0.96 | 172.000 |
I80-50_Br | 2022-04-03 11:43:03 | 2022-04-03 14:18:22 | 2022-04-03 16:53:41 | 2 | 0.64 | 170.748 |
general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
---|---|---|---|---|---|---|
Blw_Salt_RT | 2022-03-18 01:27:31 | 2022-03-22 22:56:33 | 2022-05-10 09:02:12 | 320 | 36.04 | 457.000 |
Abv_Otter_Island | 2022-03-18 13:07:47 | 2022-03-24 18:46:27 | 2022-05-11 01:34:12 | 206 | 23.20 | 419.600 |
ButteBrRT | 2022-03-21 07:08:24 | 2022-03-27 14:40:53 | 2022-04-19 00:59:27 | 52 | 5.86 | 344.108 |
MeridianBr | 2022-04-14 16:25:03 | 2022-04-19 16:20:55 | 2022-04-27 03:05:02 | 13 | 1.46 | 290.848 |
TowerBridge | 2022-03-26 21:12:29 | 2022-04-07 08:10:52 | 2022-05-12 01:57:19 | 10 | 1.13 | 172.000 |
I80-50_Br | 2022-03-26 21:43:36 | 2022-04-03 12:09:04 | 2022-04-20 23:22:23 | 9 | 1.01 | 170.748 |
Sac_BlwGeorgiana | 2022-04-04 13:36:24 | 2022-04-04 13:36:24 | 2022-04-04 13:36:24 | 1 | 0.11 | 119.058 |
Sac_BlwGeorgiana2 | 2022-04-04 13:50:32 | 2022-04-04 13:50:32 | 2022-04-04 13:50:32 | 1 | 0.11 | 118.398 |
Benicia_east | 2022-04-08 13:43:43 | 2022-04-08 13:43:43 | 2022-04-08 13:43:43 | 1 | 0.11 | 52.240 |
Benicia_west | 2022-04-08 13:48:25 | 2022-04-08 13:48:25 | 2022-04-08 13:48:25 | 1 | 0.11 | 52.040 |
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
## THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
"No detections yet"
} else {
arrivals <- aggregate(list(DateTime_PST = detects_study$DateTime_PST), by = list(general_location = detects_study$general_location, TagCode = detects_study$TagCode), FUN = min)
beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F)
beacon_by_day$day <- as.Date(beacon_by_day$day)
gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
arrivals$day <- as.Date(format(arrivals$DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8"))
arrivals_per_day <- aggregate(list(New_arrivals = arrivals$TagCode), by = list(day = arrivals$day, general_location = arrivals$general_location), length)
arrivals_per_day$day <- as.Date(arrivals_per_day$day)
## Now subset to only look at data for the correct beacon for that day
beacon_by_day <- as.data.frame(beacon_by_day[which(beacon_by_day$TagCode == beacon_by_day$beacon),])
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
## Now only keep beacon by day for days since fish were released
beacon_by_day <- beacon_by_day[beacon_by_day$day >= as.Date(min(study_tagcodes$release_time)) & beacon_by_day$day <= endtime,]
beacon_by_day <- merge(beacon_by_day, gen_locs[,c("location", "general_location","rkm")], by = "location", all.x = T)
arrivals_per_day <- merge(unique(beacon_by_day[,c("general_location", "day", "rkm")]), arrivals_per_day, all.x = T, by = c("general_location", "day"))
arrivals_per_day$day <- factor(arrivals_per_day$day)
## Remove bench test and other NA locations
arrivals_per_day <- arrivals_per_day[!arrivals_per_day$general_location == "Bench_test",]
arrivals_per_day <- arrivals_per_day[is.na(arrivals_per_day$general_location) == F,]
## Change order of data to plot decreasing river_km
arrivals_per_day <- arrivals_per_day[order(arrivals_per_day$rkm, decreasing = T),]
arrivals_per_day$general_location <- factor(arrivals_per_day$general_location, unique(arrivals_per_day$general_location))
#
# ggplot(data=arrivals_per_day, aes(x=general_location, y=fct_rev(as_factor(day)))) +
# geom_tile(fill = "lightgray", color = "black") +
# geom_text(aes(label=New_arrivals)) +
# labs(x="General Location", y = "Date") +
# theme(panel.background = element_blank(), axis.text.x = element_text(angle = 90, hjust = 1))
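## Build a day x receiver-location crosstab of new arrivals. After the two replacements below,
## a blank cell means the receiver was reporting that day but no new study fish arrived, while
## NA means no receiver data were available for that location on that day.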
crosstab <- xtabs(formula = arrivals_per_day$New_arrivals ~ arrivals_per_day$day + arrivals_per_day$general_location, addNA =T)
crosstab[is.na(crosstab)] <- ""
crosstab[crosstab==0] <- NA
crosstab <- as.data.frame.matrix(crosstab)
#colnames(crosstab) <- c("Butte Br", "Tower Br", "I8050 Br", "Old River", "Middle River", "CVP Tanks", "Georg Slough1", "Sac_Blw Georg1", "Georg Slough2", "Sac_Blw Georg2", "Benicia East", "Benicia West")
kable(crosstab, align = "c") %>%
kable_styling(c("striped", "condensed"), font_size = 11, full_width = F, position = "left") %>%
#row_spec(0, angle = -45) %>%
column_spec(column = 1:ncol(crosstab),width_min = "50px",border_left = T, border_right = T) %>%
column_spec(1, bold = T, width_min = "75px")%>%
scroll_box(height = "700px")
}
Date | Blw_Salt_RT | Abv_Otter_Island | ButteBrRT | MeridianBr | TowerBridge | I80-50_Br | Old River | MiddleRiver | Clifton_Court_US_Radial_Gates | Holland_Cut_Quimby | CVP_Tank | CVP_Trash_Rack_1 | Clifton_Court_Intake_Canal | Old_River_Quimby | Sac_BlwGeorgiana | Sac_BlwGeorgiana2 | Benicia_east | Benicia_west |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2022-03-16 | NA | |||||||||||||||||
2022-03-17 | NA | |||||||||||||||||
2022-03-18 | 35 | 9 | NA | |||||||||||||||
2022-03-19 | 84 | 18 | NA | |||||||||||||||
2022-03-20 | 139 | 68 | NA | |||||||||||||||
2022-03-21 | 109 | 76 | 6 | NA | ||||||||||||||
2022-03-22 | 25 | 55 | 14 | NA | ||||||||||||||
2022-03-23 | 7 | 20 | 18 | NA | ||||||||||||||
2022-03-24 | 4 | 7 | 7 | NA | ||||||||||||||
2022-03-25 | 1 | 1 | 5 | NA | ||||||||||||||
2022-03-26 | 3 | 5 | 1 | NA | 1 | 1 | ||||||||||||
2022-03-27 | 1 | NA | 2 | 2 | ||||||||||||||
2022-03-28 | 1 | 1 | 1 | NA | 1 | 1 | ||||||||||||
2022-03-29 | 1 | 4 | 1 | NA | 1 | 1 | ||||||||||||
2022-03-30 | 3 | 1 | NA | |||||||||||||||
2022-03-31 | 1 | 1 | 1 | NA | ||||||||||||||
2022-04-01 | 6 | 4 | 4 | NA | ||||||||||||||
2022-04-02 | 3 | 2 | NA | 1 | 1 | |||||||||||||
2022-04-03 | 5 | 2 | 2 | NA | 3 | 3 | ||||||||||||
2022-04-04 | 3 | 4 | 2 | NA | 1 | 1 | ||||||||||||
2022-04-05 | 3 | 1 | 1 | NA | ||||||||||||||
2022-04-06 | 1 | 2 | 1 | NA | ||||||||||||||
2022-04-07 | 3 | 3 | NA | |||||||||||||||
2022-04-08 | 1 | NA | 1 | 1 | ||||||||||||||
2022-04-09 | 1 | 2 | NA | 1 | ||||||||||||||
2022-04-10 | 2 | NA | ||||||||||||||||
2022-04-11 | 1 | 1 | NA | |||||||||||||||
2022-04-12 | 2 | 1 | 2 | NA | ||||||||||||||
2022-04-13 | NA | |||||||||||||||||
2022-04-14 | 1 | 1 | 1 | |||||||||||||||
2022-04-15 | 3 | 2 | ||||||||||||||||
2022-04-16 | 1 | 5 | 1 | |||||||||||||||
2022-04-17 | 2 | 2 | 2 | 2 | ||||||||||||||
2022-04-18 | 3 | 3 | 4 | |||||||||||||||
2022-04-19 | 1 | 1 | 1 | 2 | 1 | 1 | ||||||||||||
2022-04-20 | 1 | 1 | 1 | 1 | ||||||||||||||
2022-04-21 | 1 | |||||||||||||||||
2022-04-22 | ||||||||||||||||||
2022-04-23 | 1 | 1 | ||||||||||||||||
2022-04-24 | ||||||||||||||||||
2022-04-25 | ||||||||||||||||||
2022-04-26 | 1 | |||||||||||||||||
2022-04-27 | 1 | |||||||||||||||||
2022-04-28 | ||||||||||||||||||
2022-04-29 | ||||||||||||||||||
2022-04-30 | 1 | |||||||||||||||||
2022-05-01 | ||||||||||||||||||
2022-05-02 | ||||||||||||||||||
2022-05-03 | ||||||||||||||||||
2022-05-04 | ||||||||||||||||||
2022-05-05 | ||||||||||||||||||
2022-05-06 | 1 | |||||||||||||||||
2022-05-07 | ||||||||||||||||||
2022-05-08 | ||||||||||||||||||
2022-05-09 | ||||||||||||||||||
2022-05-10 | 1 | |||||||||||||||||
2022-05-11 | 1 | |||||||||||||||||
2022-05-12 | 1 | |||||||||||||||||
2022-05-13 | ||||||||||||||||||
2022-05-14 | ||||||||||||||||||
2022-05-15 | ||||||||||||||||||
2022-05-16 | ||||||||||||||||||
2022-05-17 | ||||||||||||||||||
2022-05-18 | ||||||||||||||||||
2022-05-19 | ||||||||||||||||||
2022-05-20 | ||||||||||||||||||
2022-05-21 | ||||||||||||||||||
2022-05-22 | ||||||||||||||||||
2022-05-23 | ||||||||||||||||||
2022-05-24 | ||||||||||||||||||
2022-05-25 | ||||||||||||||||||
2022-05-26 | ||||||||||||||||||
2022-05-27 | ||||||||||||||||||
2022-05-28 | ||||||||||||||||||
2022-05-29 | ||||||||||||||||||
2022-05-30 | ||||||||||||||||||
2022-05-31 | ||||||||||||||||||
2022-06-01 | ||||||||||||||||||
2022-06-02 | ||||||||||||||||||
2022-06-03 | ||||||||||||||||||
2022-06-04 | ||||||||||||||||||
2022-06-05 | ||||||||||||||||||
2022-06-06 | ||||||||||||||||||
2022-06-07 | NA | |||||||||||||||||
2022-06-08 | NA | |||||||||||||||||
2022-06-09 | NA | |||||||||||||||||
2022-06-10 | NA | |||||||||||||||||
2022-06-11 | NA | |||||||||||||||||
2022-06-12 | NA | |||||||||||||||||
2022-06-13 | NA | |||||||||||||||||
2022-06-14 | NA | |||||||||||||||||
2022-06-15 | NA | |||||||||||||||||
2022-06-16 | NA | |||||||||||||||||
2022-06-17 | NA | NA | ||||||||||||||||
2022-06-18 | NA | NA | ||||||||||||||||
2022-06-19 | NA | NA | ||||||||||||||||
2022-06-20 | NA | NA | ||||||||||||||||
2022-06-21 | NA | NA | ||||||||||||||||
2022-06-22 | NA | NA | ||||||||||||||||
2022-06-23 | NA | NA | ||||||||||||||||
2022-06-24 | NA | NA | ||||||||||||||||
2022-06-25 | NA | NA | ||||||||||||||||
2022-06-26 | NA | NA | ||||||||||||||||
2022-06-27 | NA | NA | ||||||||||||||||
2022-06-28 | NA | NA | ||||||||||||||||
2022-06-29 | NA | NA | ||||||||||||||||
2022-06-30 | NA | NA |
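## Clear the workspace and delete the temporary MARK input/output files created by RMark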
rm(list = ls())
cleanup(ask = F)
For questions or comments, please contact cyril.michel@noaa.gov