The study is complete; all tags were no longer active as of 2023-06-30. All times are in Pacific Standard Time.
The study began on 2022-12-13 10:00:00. Tagging details are given below:
Release | First_release_time | Last_release_time | Number_fish_released | Release_location | Release_rkm | Mean_length | Mean_weight |
---|---|---|---|---|---|---|---|
Week 1 | 2022-12-13 10:00:00 | 2022-12-14 10:00:00 | 200 | RBDD_Rel | 461.57 | 147.8 | 35.7 |
Week 2 | 2023-02-01 10:45:00 | 2023-02-02 10:00:00 | 200 | RBDD_Rel | 461.57 | 87.1 | 7.0 |
Week 3 | 2023-03-01 10:58:00 | 2023-03-02 10:22:00 | 200 | RBDD_Rel | 461.57 | 95.0 | 9.2 |
Week 4 | 2023-03-15 10:30:00 | 2023-03-16 10:05:00 | 200 | RBRiverPark_Rel | 463.70 | 96.3 | 9.9 |
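As a point of reference, a release summary like the table above can be rebuilt from the tagging metadata with a short dplyr pipeline. The sketch below uses a small toy data frame; the column names (Release, release_time, length, weight) are assumed stand-ins for the fields carried in study_tagcodes and are not copied from the report's own code.
library(dplyr)
# Toy stand-in for the tagging metadata (column names are assumed for illustration)
tags <- data.frame(
  Release      = rep(c("Week 1", "Week 2"), each = 3),
  release_time = as.POSIXct(c("2022-12-13 10:00", "2022-12-13 14:00", "2022-12-14 10:00",
                              "2023-02-01 10:45", "2023-02-01 15:00", "2023-02-02 10:00"),
                            tz = "Etc/GMT+8"),
  length       = c(150, 146, 147, 88, 86, 87),
  weight       = c(36.0, 35.5, 35.6, 7.1, 6.9, 7.0))
tags %>%
  group_by(Release) %>%
  summarise(First_release_time   = min(release_time),
            Last_release_time    = max(release_time),
            Number_fish_released = n(),
            Mean_length          = round(mean(length), 1),
            Mean_weight          = round(mean(weight), 1))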
library(leaflet)
library(maps)
library(htmlwidgets)
library(leaflet.extras)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
## THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
cat("No detections yet")
gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F) %>% filter(is.na(stop))
leaflet(data = gen_locs[is.na(gen_locs$stop),]) %>%
# setView(-72.14600, 43.82977, zoom = 8) %>%
addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
# Marker data are from the sites data frame. We need the ~ symbols
# to indicate the columns of the data frame.
addMarkers(~longitude, ~latitude, label = ~general_location, group = "Receiver Sites", popup = ~location) %>%
# addAwesomeMarkers(~lon_dd, ~lat_dd, label = ~locality, group = "Sites", icon=icons) %>%
addScaleBar(position = "bottomleft") %>%
addLayersControl(
baseGroups = c("Street Map", "Satellite", "Relief"),
overlayGroups = c("Receiver Sites"),
options = layersControlOptions(collapsed = FALSE)) %>%
addSearchFeatures(targetGroups = c("Receiver Sites"))
} else {
gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F) %>%
mutate(day = as.Date(day)) %>%
# Subset to only look at data for the correct beacon for that day
filter(TagCode == beacon) %>%
# Only keep beacon by day for days since fish were released
filter(day >= as.Date(min(study_tagcodes$release_time)) & day <= endtime) %>%
dplyr::left_join(., gen_locs[,c("location", "general_location","rkm")], by = "location")
arrivals_per_day <- detects_study %>%
group_by(general_location, TagCode) %>%
summarise(DateTime_PST = min(DateTime_PST, na.rm = T)) %>%
arrange(TagCode, general_location) %>%
mutate(day = as.Date(DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8")) %>%
group_by(day, general_location) %>%
summarise(New_arrivals = length(TagCode)) %>%
na.omit() %>%
mutate(day = as.Date(day)) %>%
dplyr::left_join(unique(beacon_by_day[,c("general_location", "day", "rkm")]), .,
by = c("general_location", "day")) %>%
arrange(general_location, day) %>%
mutate(day = as.factor(day)) %>%
filter(general_location != "Bench_test") %>% # Remove bench test
filter(!(is.na(general_location))) # Remove NA locations
## Remove sites that were not operational the whole time
#### FOR THE SEASONAL SURVIVAL PAGE, KEEP ALL SITES SINCE PEOPLE WANT TO SEE DETECTIONS OF LATER FISH AT NEWLY
#### DEPLOYED SPOTS
gen_locs_days_in_oper <- arrivals_per_day %>%
group_by(general_location) %>%
summarise(days_in_oper = length(day))
#gen_locs_days_in_oper <- gen_locs_days_in_oper[gen_locs_days_in_oper$days_in_oper ==
# max(gen_locs_days_in_oper$days_in_oper),]
arrivals_per_day_in_oper <- arrivals_per_day %>%
filter(general_location %in% gen_locs_days_in_oper$general_location)
fish_per_site <- arrivals_per_day_in_oper %>%
group_by(general_location) %>%
summarise(fish_count = sum(New_arrivals, na.rm=T))
gen_locs_mean_coords <- gen_locs %>%
filter(is.na(stop) & general_location %in% fish_per_site$general_location) %>%
group_by(general_location) %>%
summarise(latitude = mean(latitude), # estimate mean lat and lons for each genloc
longitude = mean(longitude))
fish_per_site <- merge(fish_per_site, gen_locs_mean_coords)
if(!is.na(release_stats$Release_lat[1])){
leaflet(data = fish_per_site) %>%
addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
# Marker data are from the sites data frame. We need the ~ symbols
# to indicate the columns of the data frame.
addPulseMarkers(lng = fish_per_site$longitude, lat = fish_per_site$latitude, label = ~fish_count,
labelOptions = labelOptions(noHide = T, textsize = "15px"), group = "Receiver Sites",
popup = ~general_location, icon = makePulseIcon(heartbeat = 1.3)) %>%
addCircleMarkers(data = release_stats, ~Release_lon, ~Release_lat, label = ~Number_fish_released, stroke = F, color = "blue", fillOpacity = 1,
group = "Release Sites", popup = ~Release_location, labelOptions = labelOptions(noHide = T, textsize = "15px")) %>%
addScaleBar(position = "bottomleft") %>%
addLegend("bottomright", labels = c("Receivers", "Release locations"), colors = c("red","blue")) %>%
addLayersControl(baseGroups = c("Street Map", "Satellite", "Relief"), options = layersControlOptions(collapsed = FALSE))
} else {
leaflet(data = fish_per_site) %>%
addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
# Marker data are from the sites data frame. We need the ~ symbols
# to indicate the columns of the data frame.
addPulseMarkers(lng = fish_per_site$longitude, lat = fish_per_site$latitude, label = ~fish_count,
labelOptions = labelOptions(noHide = T, textsize = "15px"), group = "Receiver Sites",
popup = ~general_location, icon = makePulseIcon(heartbeat = 1.3)) %>%
addScaleBar(position = "bottomleft") %>%
addLayersControl(baseGroups = c("Street Map", "Satellite", "Relief"),
options = layersControlOptions(collapsed = FALSE))
}
}
2.1 Map of unique fish detections at operational realtime detection locations
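The map above labels each receiver group with the number of unique fish detected there; the chunk derives that count from each tag's first arrival per receiver group. A minimal sketch of that step on toy data (the column names follow the ones used in the chunk above; the values are illustrative only):
library(dplyr)
dets <- data.frame(
  TagCode          = c("A1", "A1", "A1", "B2", "B2"),
  general_location = c("TowerBridge", "TowerBridge", "Benicia_east", "TowerBridge", "TowerBridge"),
  DateTime_PST     = as.POSIXct(c("2023-03-12 06:30", "2023-03-12 09:00", "2023-03-16 13:11",
                                  "2023-03-12 07:00", "2023-03-13 01:00"), tz = "Etc/GMT+8"))
dets %>%
  group_by(general_location, TagCode) %>%
  summarise(DateTime_PST = min(DateTime_PST), .groups = "drop") %>%   # first arrival per fish and site
  count(general_location, name = "fish_count")                        # unique fish per receiver group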
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) > 0){
detects_study <- detects_study[order(detects_study$TagCode, detects_study$DateTime_PST),]
## Now estimate the time in hours between the previous and next detection, for each detection.
detects_study$prev_genloc <- shift(detects_study$general_location, fill = NA, type = "lag")
#detects_study$prev_genloc <- shift(detects_study$General_Location, fill = NA, type = "lag")
## Now make NA the time diff values when it's between 2 different tagcodes or genlocs
detects_study[which(detects_study$TagCode != shift(detects_study$TagCode, fill = NA, type = "lag")), "prev_genloc"] <- NA
detects_study[which(detects_study$general_location != detects_study$prev_genloc), "prev_genloc"] <- NA
detects_study$mov_score <- 0
detects_study[is.na(detects_study$prev_genloc), "mov_score"] <- 1
detects_study$mov_counter <- cumsum(detects_study$mov_score)
detects_summary <- aggregate(list(first_detect = detects_study$DateTime_PST),
                             by = list(TagCode = detects_study$TagCode, length = detects_study$length,
                                       release_time = detects_study$release_time, mov_counter = detects_study$mov_counter,
                                       general_location = detects_study$general_location, river_km = detects_study$river_km,
                                       release_rkm = detects_study$release_rkm),
                             min)
detects_summary <- detects_summary[is.na(detects_summary$first_detect) == F,]
releases <- aggregate(list(first_detect = detects_summary$release_time),
                      by = list(TagCode = detects_summary$TagCode, length = detects_summary$length,
                                release_time = detects_summary$release_time, release_rkm = detects_summary$release_rkm),
                      min)
releases$river_km <- releases$release_rkm
releases$mov_counter <- NA
releases$general_location <- NA
detects_summary <- rbindlist(list(detects_summary, releases), use.names = T)
detects_summary <- detects_summary[order(detects_summary$TagCode, detects_summary$first_detect),]
starttime <- as.Date(min(detects_study$release_time), "Etc/GMT+8")
## Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d"))+1, max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
#par(mar=c(6, 5, 2, 5) + 0.1)
plot_ly(detects_summary, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_lines(x = ~first_detect, y = ~river_km, color = ~TagCode) %>%
add_markers(x = ~first_detect, y = ~river_km, color = ~TagCode, showlegend = F) %>%
layout(showlegend = T,
xaxis = list(title = "<b> Date <b>", mirror=T,ticks="outside",showline=T, range=c(starttime,endtime)),
yaxis = list(title = "<b> Kilometers upstream of the Golden Gate <b>", mirror=T,ticks="outside",showline=T, range=c(max(detects_study$Rel_rkm)+10, min(gen_locs[is.na(gen_locs$stop),"rkm"])-10)),
legend = list(title=list(text='<b> Tag Code </b>')),
margin=list(l = 50,r = 100,b = 50,t = 50)
)
}else{
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Kilometers upstream of the Golden Gate")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
}
2.2 Waterfall Detection Plot
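A note on the mov_counter trick used in the chunk above: a new counter value starts whenever the tag code or the receiver group changes, so runs of consecutive detections of the same fish at the same site collapse to a single movement when aggregated by mov_counter. The toy example below reproduces the idea with made-up detections; it is an illustration, not part of the report's code.
library(data.table)   # for shift()
d <- data.frame(TagCode          = c("A", "A", "A", "A", "B"),
                general_location = c("Tower", "Tower", "Benicia", "Tower", "Tower"))
d$prev_genloc <- shift(d$general_location, type = "lag")
d$prev_genloc[which(shift(d$TagCode, type = "lag") != d$TagCode)] <- NA   # break at a new tag
d$prev_genloc[which(d$general_location != d$prev_genloc)] <- NA           # break at a new site
d$mov_counter <- cumsum(is.na(d$prev_genloc))   # increments at every break
d
#   TagCode general_location prev_genloc mov_counter
# 1       A            Tower        <NA>           1
# 2       A            Tower       Tower           1
# 3       A          Benicia        <NA>           2
# 4       A            Tower        <NA>           3
# 5       B            Tower        <NA>           4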
_______________________________________________________________________________________________________
library(tidyr)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_3 <- detects_study %>% filter(general_location == "Blw_Salt_RT")
if(nrow(detects_3) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_3 <- detects_3 %>%
dplyr::left_join(., detects_3 %>%
group_by(TagCode) %>%
summarise(first_detect = min(DateTime_PST))) %>%
mutate(Day = as.Date(as.Date(first_detect, "Etc/GMT+8")))
starttime <- as.Date(min(detects_3$release_time), "Etc/GMT+8")
# Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_3$Release))])
tagcount1 <- detects_3 %>%
group_by(Day, Release) %>%
summarise(unique_tags = length(unique(TagCode))) %>%
spread(Release, unique_tags)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == "x"] <- paste(i)
}
}
# Download flow data
flow_day <- readNWISuv(siteNumbers = "11377100", parameterCd="00060", startDate = starttime,
endDate = endtime+1) %>%
mutate(Day = as.Date(format(dateTime, "%Y-%m-%d"))) %>%
group_by(Day) %>%
summarise(parameter_value = mean(X_00060_00000))
## Reorder columns in alphabetical order so the coloring in barplots is consistent
daterange2 <- daterange1[,order(colnames(daterange1))] %>%
dplyr::left_join(., flow_day, by = "Day")
rownames(daterange2) <- daterange2$Day
daterange2$Date <- daterange2$Day
daterange2$Day <- NULL
daterange2_flow <- daterange2 %>% select(Date, parameter_value)
daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))],
id.vars = "Date", variable.name = ".")
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
par(mar=c(6, 5, 2, 5) + 0.1)
ay <- list(
overlaying = "y",
nticks = 5,
color = "#947FFF",
side = "right",
title = "Flow (cfs) at Bend Bridge",
automargin = TRUE
)
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations(text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
add_lines(x=~daterange2_flow$Date,
y=~daterange2_flow$parameter_value,
line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE,
inherit=FALSE) %>%
layout(yaxis2 = ay,showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks="outside",showline=T),
yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks="outside",showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50))
}
2.3 Detections at Salt Creek versus Sacramento River flows at Bend Bridge for duration of tag life
_______________________________________________________________________________________________________
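The chunk above and the two that follow repeat the same construction, changing only the receiver location, the USGS gauge number, and the flow-axis label. As a hedged sketch (the helper name and arguments are illustrative, not part of the report), the duplicated data preparation could be wrapped in one function that returns the daily arrivals per release alongside mean daily flow; the plotly layout would stay as written in the chunks.
library(dplyr)
library(dataRetrieval)   # readNWISuv() for USGS flow, as used in the chunks above
daily_arrivals_with_flow <- function(detects, location, usgs_site, start, end) {
  # First detection of each tag at the chosen receiver group(s), counted per day and release
  first_arrivals <- detects %>%
    filter(general_location %in% location) %>%
    group_by(TagCode, Release) %>%
    summarise(first_detect = min(DateTime_PST), .groups = "drop") %>%
    mutate(Day = as.Date(first_detect, tz = "Etc/GMT+8")) %>%
    count(Day, Release, name = "unique_tags")
  # Mean daily discharge at the paired USGS gauge
  flow_day <- readNWISuv(siteNumbers = usgs_site, parameterCd = "00060",
                         startDate = start, endDate = end + 1) %>%
    mutate(Day = as.Date(format(dateTime, "%Y-%m-%d"))) %>%
    group_by(Day) %>%
    summarise(parameter_value = mean(X_00060_00000), .groups = "drop")
  # One row per day and release (long format), rather than the spread-to-columns layout used above
  data.frame(Day = seq.Date(from = start, to = end, by = "day")) %>%
    left_join(first_arrivals, by = "Day") %>%
    left_join(flow_day, by = "Day")
}
# Example call with the Salt Creek receiver and the Bend Bridge gauge used above:
# daily_arrivals_with_flow(detects_study, "Blw_Salt_RT", "11377100",
#                          as.Date("2022-12-13"), as.Date("2023-06-30"))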
library(tidyr)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_4 <- detects_study %>% filter(general_location == "MeridianBr")
if(nrow(detects_4) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_4 <- detects_4 %>%
dplyr::left_join(., detects_4 %>%
group_by(TagCode) %>%
summarise(first_detect = min(DateTime_PST))) %>%
mutate(Day = as.Date(as.Date(first_detect, "Etc/GMT+8")))
starttime <- as.Date(min(detects_4$release_time), "Etc/GMT+8")
# Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_4$Release))])
tagcount1 <- detects_4 %>%
group_by(Day, Release) %>%
summarise(unique_tags = length(unique(TagCode))) %>%
spread(Release, unique_tags)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == "x"] <- paste(i)
}
}
# Download flow data
flow_day <- readNWISuv(siteNumbers = "11390500", parameterCd="00060", startDate = starttime,
endDate = endtime+1) %>%
mutate(Day = as.Date(format(dateTime, "%Y-%m-%d"))) %>%
group_by(Day) %>%
summarise(parameter_value = mean(X_00060_00000))
## Reorder columns in alphabetical order so the coloring in barplots is consistent
daterange2 <- daterange1[,order(colnames(daterange1))] %>%
dplyr::left_join(., flow_day, by = "Day")
rownames(daterange2) <- daterange2$Day
daterange2$Date <- daterange2$Day
daterange2$Day <- NULL
daterange2_flow <- daterange2 %>% select(Date, parameter_value)
daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))],
id.vars = "Date", variable.name = ".")
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
par(mar=c(6, 5, 2, 5) + 0.1)
ay <- list(
overlaying = "y",
nticks = 5,
color = "#947FFF",
side = "right",
title = "Flow (cfs) at Wilkins Slough",
automargin = TRUE
)
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations(text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
add_lines(x=~daterange2_flow$Date,
y=~daterange2_flow$parameter_value,
line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE,
inherit=FALSE) %>%
layout(yaxis2 = ay,showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks="outside",showline=T),
yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks="outside",showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50))
}
2.4 Detections at Meridian Bridge versus Sacramento River flows at Wilkins Slough for duration of tag life
_______________________________________________________________________________________________________
library(tidyr)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_5 <- detects_study %>% filter(general_location == "TowerBridge")
if(nrow(detects_5) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_5 <- detects_5 %>%
dplyr::left_join(., detects_5 %>%
group_by(TagCode) %>%
summarise(first_detect = min(DateTime_PST))) %>%
mutate(Day = as.Date(as.Date(first_detect, "Etc/GMT+8")))
starttime <- as.Date(min(detects_5$release_time), "Etc/GMT+8")
# Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_5$Release))])
tagcount1 <- detects_5 %>%
group_by(Day, Release) %>%
summarise(unique_tags = length(unique(TagCode))) %>%
spread(Release, unique_tags)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == "x"] <- paste(i)
}
}
# Download flow data
flow_day <- readNWISuv(siteNumbers = "11425500", parameterCd="00060", startDate = starttime,
endDate = endtime+1) %>%
mutate(Day = as.Date(format(dateTime, "%Y-%m-%d"))) %>%
group_by(Day) %>%
summarise(parameter_value = mean(X_00060_00000))
## Reorder columns in alphabetical order so the coloring in barplots is consistent
daterange2 <- daterange1[,order(colnames(daterange1))] %>%
dplyr::left_join(., flow_day, by = "Day")
rownames(daterange2) <- daterange2$Day
daterange2$Date <- daterange2$Day
daterange2$Day <- NULL
daterange2_flow <- daterange2 %>% select(Date, parameter_value)
daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))],
id.vars = "Date", variable.name = ".")
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
par(mar=c(6, 5, 2, 5) + 0.1)
ay <- list(
overlaying = "y",
nticks = 5,
color = "#947FFF",
side = "right",
title = "Flow (cfs) at Verona",
automargin = TRUE
)
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations(text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
add_lines(x=~daterange2_flow$Date,
y=~daterange2_flow$parameter_value,
line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE,
inherit=FALSE) %>%
layout(yaxis2 = ay,showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks="outside",showline=T),
yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks="outside",showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50))
}
2.5 Detections at Tower Bridge (downtown Sacramento) versus Sacramento River flows at Verona for duration of tag life
_______________________________________________________________________________________________________
library(tidyr)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_6 <- detects_study %>% filter(general_location == "Benicia_west" | general_location == "Benicia_east")
if(nrow(detects_6) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_6 <- detects_6 %>%
dplyr::left_join(., detects_6 %>%
group_by(TagCode) %>%
summarise(first_detect = min(DateTime_PST))) %>%
mutate(Day = as.Date(as.Date(first_detect, "Etc/GMT+8")))
starttime <- as.Date(min(detects_6$release_time), "Etc/GMT+8")
# Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_6$Release))])
tagcount1 <- detects_6 %>%
group_by(Day, Release) %>%
summarise(unique_tags = length(unique(TagCode))) %>%
spread(Release, unique_tags)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == "x"] <- paste(i)
}
}
## Reorder columns in alphabetical order so the coloring in barplots is consistent
daterange1 <- daterange1[,order(colnames(daterange1))]
daterange2 <- daterange1
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
daterange2$Date <- as.Date(row.names(daterange2))
daterange3 <- melt(daterange2, id.vars = "Date", variable.name = ".")
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
layout(showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks="outside",showline=T),
yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks="outside",showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50))
}
2.6 Detections at Benicia Bridge for duration of tag life
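The survival tables that follow (3.1-3.3) come from Cormack-Jolly-Seber (CJS) models fit with RMark. As general background (not text from the original report), the CJS likelihood factors each capture history into interval survival probabilities and per-site detection probabilities; for example, a fish released and then detected at the first and third downstream sites but missed at the second contributes

P(1\,1\,0\,1) = \phi_1 p_1 \cdot \phi_2 (1 - p_2) \cdot \phi_3 p_3

where the leading 1 is the release occasion. Estimating \phi and p jointly is what lets survival be separated from imperfect detection, and the estimates remain minimums because fish that take routes without receivers (e.g., the Yolo Bypass when its weirs overtop, as noted in the table captions) are never observed.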
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_tower <- detects_study %>% filter(general_location == "TowerBridge")
if(nrow(detects_tower) == 0){
WR.surv <- data.frame("Release"=NA, "Survival (%)"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA,
"95% upper C.I."=NA, "Detection efficiency (%)"=NA)
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.",
"95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS
survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken
that route, and therefore this is a minimum estimate of survival") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"),
full_width = F, position = "left"))
} else {
study_count <- nrow(study_tagcodes)
# Only do survival to Sac for now
surv <- detects_study %>% filter(river_km > 168 & river_km < 175)
# calculate mean and SD travel time
travel <- aggregate(list(first_detect = surv$DateTime_PST), by = list(Release = surv$Release, TagCode = surv$TagCode, RelDT = surv$RelDT), min)
travel$days <- as.numeric(difftime(travel$first_detect, travel$RelDT, units = "days"))
travel_final <- aggregate(list(mean_travel_time = travel$days), by = list(Release = travel$Release), mean)
travel_final <- merge(travel_final, aggregate(list(sd_travel_time = travel$days), by = list(Release = travel$Release), sd))
travel_final <- merge(travel_final, aggregate(list(n = travel$days), by = list(Release = travel$Release), length))
travel_final <- rbind(travel_final, data.frame(Release = "ALL", mean_travel_time = mean(travel$days), sd_travel_time = sd(travel$days),n = nrow(travel)))
# Create inp for survival estimation
inp <- as.data.frame(reshape2::dcast(surv, TagCode ~ river_km, fun.aggregate = length))
# Sort columns by river km in descending order
gen_loc_sites <- ncol(inp)-1 # Count number of genlocs
if(gen_loc_sites < 2){
WR.surv <- data.frame("Release"=NA, "Survival (%)"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA,
"95% upper C.I."=NA, "Detection efficiency (%)"=NA)
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.",
"Detection efficiency (%)")
print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS
survival model). If Yolo Bypass Weirs are overtopping during migration, fish may
have taken that route, and therefore this is a minimum estimate of survival") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"),
full_width = F,position = "left"))
} else {
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)] %>%
dplyr::left_join(study_tagcodes, ., by = "TagCode")
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)] %>%
replace(is.na(.), 0) %>%
replace(., . > 0, 1)
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
surv$Release <- factor(surv$Release, levels = groups)
inp[,groups] <- 0
for (i in groups) {
inp[as.character(inp$Release) == i, i] <- 1
}
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
if(length(groups) > 1){
# make sure factor levels have a release that has detections first. if first release in factor order
# has zero detections, model goes haywire
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1,
rel = factor(inp$Release, levels = names(sort(table(surv$Release),decreasing = T))),
stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl,
model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl,
model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)),
silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),
c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(c("ALL", names(sort(table(surv$Release),decreasing = T))), WR.surv)
}
if(length(intersect(colnames(inp),groups)) < 2){
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ", 1,sep = "")
write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
WRinp <- convert.inp("WRinp.inp")
WR.process <- process.data(WRinp, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl,
model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl,
model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),
c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(c("ALL", groups), WR.surv)
}
colnames(WR.surv)[1] <- "Release"
WR.surv <- merge(WR.surv, travel_final, by = "Release", all.x = T)
WR.surv$mean_travel_time <- round(WR.surv$mean_travel_time,1)
WR.surv$sd_travel_time <- round(WR.surv$sd_travel_time,1)
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.",
"95% upper C.I.", "Detection efficiency (%)", "Mean time to Tower (days)", "SD of time to Tower (days)","Count")
WR.surv <- WR.surv %>% arrange(., Release)
print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS
survival model), and travel time. If Yolo Bypass Weirs are overtopping during migration, fish may have taken
that route, and therefore this is a minimum estimate of survival") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"),
full_width = F, position = "left"))
}
}
Release | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) | Mean time to Tower (days) | SD of time to Tower (days) | Count |
---|---|---|---|---|---|---|---|---|
ALL | 24.6 | 1.5 | 21.7 | 27.7 | 90.1 | 27.3 | 12.6 | 195 |
Week 1 | 7.0 | 1.8 | 4.2 | 11.5 | NA | 16.3 | 2.8 | 14 |
Week 2 | 14.7 | 2.5 | 10.4 | 20.4 | NA | 50.8 | 10.7 | 29 |
Week 3 | 21.8 | 2.9 | 16.6 | 28.1 | NA | 30.1 | 8.6 | 43 |
Week 4 | 54.7 | 3.5 | 47.7 | 61.5 | NA | 21.3 | 4.5 | 109 |
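A brief note on the capture-history encoding built above: each fish gets a leading "1" for the release occasion, followed by one 0/1 flag per downstream site ordered upstream to downstream, and that string is what RMark's CJS model consumes. A toy illustration (the site names are placeholders, not the report's columns):
inp2 <- data.frame(TowerBridge = c(1, 0, 1),
                   Benicia     = c(1, 0, 0))
paste0("1", apply(inp2, 1, paste, collapse = ""))
# [1] "111" "100" "110"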
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
try(Delta <- read.csv("Delta_surv.csv", stringsAsFactors = F))
if(nrow(detects_study[is.na(detects_study$DateTime_PST) == F,]) == 0){
WR.surv1 <- data.frame("Measure"=NA, "Estimate"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(WR.surv1) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(WR.surv1, row.names = F, "html", caption = "3.2 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
test4 <- detects_study[detects_study$general_location %in% c("TowerBridge", "I80-50_Br", "Benicia_west", "Benicia_east"),]
if(nrow(test4[test4$general_location =="Benicia_west",]) == 0 | nrow(test4[test4$general_location =="Benicia_east",]) == 0){
WR.surv1 <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(WR.surv1) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(WR.surv1, row.names = F, "html", caption = "3.2 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
# calculate mean and SD travel time
sac <- test4[test4$general_location %in% c("TowerBridge", "I80-50_Br"),]
ben <- test4[test4$general_location %in% c("Benicia_west", "Benicia_east"),]
travel_sac <- aggregate(list(first_detect_sac = sac$DateTime_PST), by = list(Release = sac$Release, TagCode = sac$TagCode), min)
travel_ben <- aggregate(list(first_detect_ben = ben$DateTime_PST), by = list(Release = ben$Release, TagCode = ben$TagCode), min)
travel <- merge(travel_sac, travel_ben, by = c("Release","TagCode"))
travel$days <- as.numeric(difftime(travel$first_detect_ben, travel$first_detect_sac, units = "days"))
travel_final <- aggregate(list(mean_travel_time = travel$days), by = list(Release = travel$Release), mean)
travel_final <- merge(travel_final, aggregate(list(sd_travel_time = travel$days), by = list(Release = travel$Release), sd))
travel_final <- merge(travel_final, aggregate(list(n = travel$days), by = list(Release = travel$Release), length))
travel_final <- rbind(travel_final, data.frame(Release = "ALL", mean_travel_time = mean(travel$days), sd_travel_time = sd(travel$days), n = nrow(travel)))
inp <- as.data.frame(reshape2::dcast(test4, TagCode ~ general_location, fun.aggregate = length))
# add together detections at Tower and I80 to ensure good detection entering Delta
if("I80-50_Br" %in% colnames(inp) & "TowerBridge" %in% colnames(inp)){
inp$`I80-50_Br` <- inp$`I80-50_Br` + inp$TowerBridge
} else if("TowerBridge" %in% colnames(inp)){
inp$`I80-50_Br` <- inp$TowerBridge
}
# Sort columns by river km in descending order, this also removes TowerBridge, no longer needed
inp <- inp[,c("TagCode","I80-50_Br", "Benicia_east", "Benicia_west")]
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
groups_w_detects <- names(table(detects_study[which(detects_study$river_km < 53),"Release"]))
inp[,groups] <- 0
for(i in groups){
inp[as.character(inp$Release) == i, i] <- 1
}
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
if(length(groups) > 1){
# make sure factor levels have a release that has detections first. if first release in factor order has zero detections, model goes haywire
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = inp$Release, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
inp.df <- inp.df[inp.df$rel %in% groups_w_detects,]
inp.df$rel <- factor(inp.df$rel, levels = groups_w_detects)
if(length(groups_w_detects) > 1){
WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)),
silent = T, output = F)
} else {
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
}
WR.surv <- cbind(Release = "ALL",round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- cbind(Release = groups_w_detects,
round(WR.mark.rel$results$real[seq(from=2,to=length(groups_w_detects)*3,by = 3),
c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- merge(WR.surv.rel, data.frame(Release = groups), all.y = T)
WR.surv.rel[is.na(WR.surv.rel$estimate),"estimate"] <- 0
WR.surv <- rbind(WR.surv, WR.surv.rel)
} else {
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
WR.surv <- cbind(Release = c("ALL", groups),round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1))
}
WR.surv1 <- WR.surv
colnames(WR.surv1)[1] <- "Release"
WR.surv1 <- merge(WR.surv1, travel_final, by = "Release", all.x = T)
WR.surv1$mean_travel_time <- round(WR.surv1$mean_travel_time,1)
WR.surv1$sd_travel_time <- round(WR.surv1$sd_travel_time,1)
colnames(WR.surv1) <- c("Release", "Survival (%)", "SE", "95% lower C.I.",
"95% upper C.I.", "Mean Delta passage (days)", "SD of Delta Passage (days)","Count")
#colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(WR.surv1, row.names = F, "html", caption = "3.2 Minimum through-Delta survival, and travel time: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
if(exists("Delta")==T & is.numeric(WR.surv1[1,2])){
reltimes <- aggregate(list(RelDT = study_tagcodes$release_time), by = list(Release = study_tagcodes$Release), FUN = mean)
reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$release_time)))
# Assign whether the results are tentative or final
quality <- "tentative"
if(endtime < as.Date(format(Sys.time(), "%Y-%m-%d"))){
quality <- "final"}
WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = "1970-01-01")
Delta$RelDT <- as.POSIXct(Delta$RelDT)
# remove old benicia record for this studyID
Delta <- Delta[!Delta$StudyID %in% unique(detects_study$Study_ID),]
Delta <- rbind(Delta, data.frame(WR.surv, StudyID = unique(detects_study$Study_ID), data_quality = quality))
write.csv(Delta, "Delta_surv.csv", row.names = F, quote = F)
}
}
}
Release | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Mean Delta passage (days) | SD of Delta Passage (days) | Count |
---|---|---|---|---|---|---|---|
ALL | 81.7 | 2.8 | 75.6 | 86.5 | 4.8 | 1.7 | 159 |
Week 1 | 30.4 | 11.8 | 12.8 | 56.7 | 5.4 | 1.2 | 5 |
Week 2 | 88.3 | 5.8 | 71.6 | 95.8 | 5.3 | 2.5 | 25 |
Week 3 | 81.4 | 5.0 | 69.6 | 89.3 | 4.5 | 1.6 | 32 |
Week 4 | 86.9 | 3.5 | 78.3 | 92.4 | 4.7 | 1.6 | 97 |
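The chunk above (and the Benicia chunk further below) keeps a running CSV of survival estimates: rows for the current study are dropped and the refreshed estimates are appended, flagged "tentative" until the predicted tag life has passed. A small self-contained sketch of that update pattern, with toy study IDs and values standing in for the real Delta_surv.csv contents:
old <- data.frame(StudyID      = c("ExampleStudy_2022", "ExampleStudy_2023"),
                  estimate     = c(30.1, 20.0),
                  data_quality = c("final", "tentative"))
new <- data.frame(StudyID      = "ExampleStudy_2023",
                  estimate     = 50.0,
                  data_quality = if (Sys.Date() > as.Date("2023-06-30")) "final" else "tentative")
old <- old[!old$StudyID %in% new$StudyID, ]   # drop the stale record for this study
updated <- rbind(old, new)                    # append the refreshed record
# write.csv(updated, "Delta_surv.csv", row.names = FALSE, quote = FALSE)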
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
try(benicia <- read.csv("benicia_surv.csv", stringsAsFactors = F))
detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
if(nrow(detects_benicia) == 0){
if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
WR.surv <- data.frame("Release"="ALL", "estimate"=0, "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
} else {
WR.surv <- data.frame("Release"=NA, "estimate"="NO DETECTIONS YET", "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
}
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = "3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else if(length(table(detects_benicia$general_location)) == 1){
if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
WR.surv <- data.frame("Release"="ALL", "estimate"=round(length(unique(detects_benicia$TagCode))/length(unique(detects_study$TagCode))*100,1),
"se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
} else {
WR.surv <- data.frame("Release" = NA, "estimate" = "NOT ENOUGH DETECTIONS", "se" = NA, "lcl" = NA, "ucl" = NA, "Detection_efficiency" = NA)
}
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = "3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
# Only do survival to Benicia here
test3 <- detects_study[which(detects_study$river_km < 53),]
# calculate mean and SD travel time
travel <- aggregate(list(first_detect = test3$DateTime_PST), by = list(Release = test3$Release, TagCode = test3$TagCode, RelDT = test3$RelDT), min)
travel$days <- as.numeric(difftime(travel$first_detect, travel$RelDT, units = "days"))
travel_final <- aggregate(list(mean_travel_time = travel$days), by = list(Release = travel$Release), mean)
travel_final <- merge(travel_final, aggregate(list(sd_travel_time = travel$days), by = list(Release = travel$Release), sd))
travel_final <- merge(travel_final, aggregate(list(n = travel$days), by = list(Release = travel$Release), length))
travel_final <- rbind(travel_final, data.frame(Release = "ALL", mean_travel_time = mean(travel$days), sd_travel_time = sd(travel$days), n = nrow(travel)))
# Create inp for survival estimation
inp <- as.data.frame(reshape2::dcast(test3, TagCode ~ river_km, fun.aggregate = length))
# Sort columns by river km in descending order
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
groups_w_detects <- names(table(test3$Release))
inp[,groups] <- 0
for(i in groups){
inp[as.character(inp$Release) == i, i] <- 1
}
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
if(length(groups) > 1){
# make sure factor levels have a release that has detections first. if first release in factor order has zero detections, model goes haywire
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = inp$Release, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
inp.df <- inp.df[inp.df$rel %in% groups_w_detects,]
inp.df$rel <- factor(inp.df$rel, levels = groups_w_detects)
if(length(groups_w_detects) > 1){
WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
} else {
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
}
WR.surv <- cbind(Release = "ALL",round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- cbind(Release = groups_w_detects, round(WR.mark.rel$results$real[seq(from=1,to=length(groups_w_detects)*2,by = 2),
c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- merge(WR.surv.rel, data.frame(Release = groups), all.y = T)
WR.surv.rel[is.na(WR.surv.rel$estimate),"estimate"] <- 0
WR.surv <- rbind(WR.surv, WR.surv.rel)
} else {
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.surv <- cbind(Release = c("ALL", groups),round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
}
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv1 <- WR.surv
colnames(WR.surv1)[1] <- "Release"
WR.surv1 <- merge(WR.surv1, travel_final, by = "Release", all.x = T)
WR.surv1$mean_travel_time <- round(WR.surv1$mean_travel_time,1)
WR.surv1$sd_travel_time <- round(WR.surv1$sd_travel_time,1)
colnames(WR.surv1) <- c("Release", "Survival (%)", "SE", "95% lower C.I.",
"95% upper C.I.", "Detection efficiency (%)", "Mean time to Benicia (days)", "SD of time to Benicia (days)", "Count")
#colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = "3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model), and travel time") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}
Release | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) | Mean time to Benicia (days) | SD of time to Benicia (days) | Count |
---|---|---|---|---|---|---|---|---|
ALL | 25.3 | 1.5 | 22.4 | 28.4 | 96.4 | 35.3 | 13.9 | 202 |
Week 1 | 2.5 | 1.1 | 1.1 | 5.9 | NA | 23.2 | 2.3 | 5 |
Week 2 | 18.0 | 2.7 | 13.3 | 24.0 | NA | 60.1 | 8.6 | 36 |
Week 3 | 30.5 | 3.3 | 24.5 | 37.3 | NA | 36.6 | 7.0 | 61 |
Week 4 | 50.1 | 3.5 | 43.2 | 57.0 | NA | 26.2 | 4.2 | 100 |
if(exists("benicia")==T & is.numeric(WR.surv1[1,2])){
# Find mean release time per release group, and ALL
reltimes <- aggregate(list(RelDT = study_tagcodes$release_time), by = list(Release = study_tagcodes$Release), FUN = mean)
reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$release_time)))
# Assign whether the results are tentative or final
quality <- "tentative"
if(endtime < as.Date(format(Sys.time(), "%Y-%m-%d"))){
quality <- "final"
}
WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = "1970-01-01")
benicia$RelDT <- as.POSIXct(benicia$RelDT)
# remove old benicia record for this studyID
benicia <- benicia[!benicia$StudyID == unique(detects_study$Study_ID),]
benicia <- rbind(benicia, data.frame(WR.surv, StudyID = unique(detects_study$Study_ID), data_quality = quality))
write.csv(benicia, "benicia_surv.csv", row.names = F, quote = F)
}
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
if(nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
"No detections yet"
} else {
arrivals <- detects_study %>%
group_by(general_location, TagCode) %>%
summarise(DateTime_PST = min(DateTime_PST)) %>%
arrange(TagCode)
tag_stats <- arrivals %>%
group_by(general_location) %>%
summarise(First_arrival = min(DateTime_PST),
Mean_arrival = mean(DateTime_PST),
Last_arrival = max(DateTime_PST),
Fish_count = length(unique(TagCode))) %>%
mutate(Percent_arrived = round(Fish_count/nrow(study_tagcodes) * 100,2)) %>%
dplyr::left_join(., unique(detects_study[,c("general_location", "river_km")])) %>%
arrange(desc(river_km)) %>%
mutate(First_arrival = format(First_arrival, tz = "Etc/GMT+8"),
Mean_arrival = format(Mean_arrival, tz = "Etc/GMT+8"),
Last_arrival = format(Last_arrival, tz = "Etc/GMT+8")) %>%
na.omit()
print(kable(tag_stats, row.names = F,
caption = "4.1 Detections for all releases combined",
"html") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
count <- 0
for(j in sort(unique(study_tagcodes$Release))){
if(nrow(detects_study[detects_study$Release == j,]) > 0){
count <- count + 1
arrivals1 <- detects_study %>%
filter(Release == j) %>%
group_by(general_location, TagCode) %>%
summarise(DateTime_PST = min(DateTime_PST)) %>%
arrange(TagCode)
rel_count <- nrow(study_tagcodes[study_tagcodes$Release == j,])
tag_stats1 <- arrivals1 %>%
group_by(general_location) %>%
summarise(First_arrival = min(DateTime_PST),
Mean_arrival = mean(DateTime_PST),
Last_arrival = max(DateTime_PST),
Fish_count = length(unique(TagCode))) %>%
mutate(Percent_arrived = round(Fish_count/rel_count * 100,2)) %>%
dplyr::left_join(., unique(detects_study[,c("general_location", "river_km")])) %>%
arrange(desc(river_km)) %>%
mutate(First_arrival = format(First_arrival, tz = "Etc/GMT+8"),
Mean_arrival = format(Mean_arrival, tz = "Etc/GMT+8"),
Last_arrival = format(Last_arrival, tz = "Etc/GMT+8")) %>%
na.omit()
final_stats <- kable(tag_stats1, row.names = F,
caption = paste("4.2.", count, " Detections for ", j, " release groups", sep = ""),
"html")
print(kable_styling(final_stats, bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
cat("\n\n\\pagebreak\n")
print(paste("No detections for",j,"release group yet", sep=" "), quote = F)
cat("\n\n\\pagebreak\n")
}
}
}
4.1 Detections for all releases combined
general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
---|---|---|---|---|---|---|
Blw_Salt_RT | 2023-02-02 13:53:08 | 2023-03-05 15:57:16 | 2023-05-30 06:42:28 | 263 | 32.88 | 457.000 |
MeridianBr | 2022-12-16 05:06:58 | 2023-03-11 09:34:23 | 2023-04-18 07:13:42 | 336 | 42.00 | 290.848 |
TowerBridge | 2022-12-21 23:09:49 | 2023-03-27 01:43:24 | 2023-04-19 23:04:43 | 177 | 22.12 | 172.000 |
I80-50_Br | 2022-12-21 23:59:45 | 2023-03-27 08:09:37 | 2023-04-19 23:23:58 | 182 | 22.75 | 170.748 |
Holland_Cut_Quimby | 2023-04-07 02:24:53 | 2023-04-10 00:11:37 | 2023-04-12 21:58:22 | 2 | 0.25 | 145.000 |
Old_River_Quimby | 2023-04-08 12:08:23 | 2023-04-08 12:08:23 | 2023-04-08 12:08:23 | 1 | 0.12 | 141.000 |
Sac_BlwGeorgiana | 2022-12-30 16:52:39 | 2023-04-02 00:21:28 | 2023-04-18 19:05:02 | 114 | 14.25 | 119.058 |
Sac_BlwGeorgiana2 | 2022-12-30 17:06:32 | 2023-04-01 21:15:46 | 2023-04-18 19:15:01 | 117 | 14.62 | 118.398 |
Benicia_east | 2023-01-03 13:46:19 | 2023-04-06 11:08:03 | 2023-04-23 08:19:02 | 195 | 24.38 | 52.240 |
Benicia_west | 2023-01-03 13:50:10 | 2023-04-06 12:12:58 | 2023-04-23 08:20:28 | 192 | 24.00 | 52.040 |
4.2.1 Detections for Week 1 release group
general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
---|---|---|---|---|---|---|
MeridianBr | 2022-12-16 05:06:58 | 2022-12-25 01:08:44 | 2022-12-31 12:08:20 | 15 | 7.5 | 290.848 |
TowerBridge | 2022-12-21 23:09:49 | 2022-12-29 17:11:31 | 2022-12-31 19:07:25 | 13 | 6.5 | 172.000 |
I80-50_Br | 2022-12-21 23:59:45 | 2022-12-30 03:18:43 | 2023-01-04 07:42:39 | 14 | 7.0 | 170.748 |
Sac_BlwGeorgiana | 2022-12-30 16:52:39 | 2023-01-01 19:33:41 | 2023-01-05 18:16:25 | 3 | 1.5 | 119.058 |
Sac_BlwGeorgiana2 | 2022-12-30 17:06:32 | 2023-01-01 19:46:24 | 2023-01-05 18:25:54 | 3 | 1.5 | 118.398 |
Benicia_east | 2023-01-03 13:46:19 | 2023-01-05 23:34:16 | 2023-01-08 20:43:56 | 5 | 2.5 | 52.240 |
Benicia_west | 2023-01-03 13:50:10 | 2023-01-06 01:02:20 | 2023-01-08 20:46:47 | 4 | 2.0 | 52.040 |
4.2.2 Detections for Week 2 release group
general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
---|---|---|---|---|---|---|
Blw_Salt_RT | 2023-02-02 13:53:08 | 2023-02-02 17:29:52 | 2023-02-02 19:09:37 | 7 | 3.5 | 457.000 |
MeridianBr | 2023-02-04 19:47:05 | 2023-02-23 11:31:36 | 2023-04-02 01:45:34 | 79 | 39.5 | 290.848 |
TowerBridge | 2023-02-24 07:28:14 | 2023-03-24 05:37:30 | 2023-04-09 02:35:54 | 27 | 13.5 | 172.000 |
I80-50_Br | 2023-02-24 10:18:17 | 2023-03-25 21:36:46 | 2023-04-09 02:58:52 | 25 | 12.5 | 170.748 |
Sac_BlwGeorgiana | 2023-02-27 23:24:56 | 2023-03-26 04:06:56 | 2023-04-09 18:34:46 | 16 | 8.0 | 119.058 |
Sac_BlwGeorgiana2 | 2023-02-28 00:20:36 | 2023-03-26 03:45:08 | 2023-04-09 18:51:29 | 18 | 9.0 | 118.398 |
Benicia_east | 2023-03-17 13:24:35 | 2023-04-04 09:29:13 | 2023-04-17 06:09:05 | 31 | 15.5 | 52.240 |
Benicia_west | 2023-03-17 13:26:46 | 2023-04-02 23:27:04 | 2023-04-17 06:14:05 | 35 | 17.5 | 52.040 |
4.2.3 Detections for Week 3 release group
general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
---|---|---|---|---|---|---|
Blw_Salt_RT | 2023-03-01 12:03:27 | 2023-03-02 04:20:49 | 2023-03-04 07:30:57 | 192 | 96.0 | 457.000 |
MeridianBr | 2023-03-04 06:27:22 | 2023-03-09 14:11:17 | 2023-04-16 11:58:33 | 128 | 64.0 | 290.848 |
TowerBridge | 2023-03-12 06:30:53 | 2023-04-01 10:37:06 | 2023-04-18 03:00:08 | 37 | 18.5 | 172.000 |
I80-50_Br | 2023-03-13 02:20:51 | 2023-04-01 18:51:04 | 2023-04-18 03:19:48 | 38 | 19.0 | 170.748 |
Sac_BlwGeorgiana | 2023-03-12 17:47:08 | 2023-04-01 21:50:26 | 2023-04-18 19:05:02 | 27 | 13.5 | 119.058 |
Sac_BlwGeorgiana2 | 2023-03-12 17:54:50 | 2023-04-01 20:27:00 | 2023-04-18 19:15:01 | 28 | 14.0 | 118.398 |
Benicia_east | 2023-03-16 13:11:37 | 2023-04-07 11:43:20 | 2023-04-20 20:31:05 | 61 | 30.5 | 52.240 |
Benicia_west | 2023-03-16 13:16:47 | 2023-04-07 08:29:38 | 2023-04-20 20:34:41 | 59 | 29.5 | 52.040 |
4.2.4 Detections for Week 4 release group
general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
---|---|---|---|---|---|---|
Blw_Salt_RT | 2023-03-15 23:29:23 | 2023-03-19 11:59:01 | 2023-05-30 06:42:28 | 64 | 32.0 | 457.000 |
MeridianBr | 2023-03-20 04:40:32 | 2023-04-03 12:08:47 | 2023-04-18 07:13:42 | 114 | 57.0 | 290.848 |
TowerBridge | 2023-03-26 09:30:39 | 2023-04-06 04:58:04 | 2023-04-19 23:04:43 | 100 | 50.0 | 172.000 |
I80-50_Br | 2023-03-26 09:52:51 | 2023-04-06 08:08:04 | 2023-04-19 23:23:58 | 105 | 52.5 | 170.748 |
Holland_Cut_Quimby | 2023-04-07 02:24:53 | 2023-04-10 00:11:37 | 2023-04-12 21:58:22 | 2 | 1.0 | 145.000 |
Old_River_Quimby | 2023-04-08 12:08:23 | 2023-04-08 12:08:23 | 2023-04-08 12:08:23 | 1 | 0.5 | 141.000 |
Sac_BlwGeorgiana | 2023-03-30 00:45:50 | 2023-04-07 15:30:31 | 2023-04-17 03:09:14 | 68 | 34.0 | 119.058 |
Sac_BlwGeorgiana2 | 2023-03-30 00:55:29 | 2023-04-07 15:42:37 | 2023-04-17 03:20:55 | 68 | 34.0 | 118.398 |
Benicia_east | 2023-03-30 14:05:56 | 2023-04-11 02:19:41 | 2023-04-23 08:19:02 | 98 | 49.0 | 52.240 |
Benicia_west | 2023-03-30 14:11:21 | 2023-04-11 03:26:25 | 2023-04-23 08:20:28 | 94 | 47.0 | 52.040 |
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
# THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if(nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
"No detections yet"
} else {
arrivals <- detects_study %>%
group_by(general_location, TagCode) %>%
summarise(DateTime_PST = min(DateTime_PST)) %>%
mutate(day = as.Date(DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8"))
gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F) %>%
mutate(day = as.Date(day)) %>%
filter(TagCode == beacon) %>% # Now subset to only look at data for the correct beacon for that day
filter(day >= as.Date(min(study_tagcodes$release_time)) &
day <= endtime) %>% # Now only keep beacon by day for days since fish were released
dplyr::left_join(., gen_locs[,c("location", "general_location","rkm")], by = "location")
arrivals_per_day <- arrivals %>%
group_by(day, general_location) %>%
summarise(New_arrivals = length(TagCode)) %>%
arrange(general_location) %>% na.omit() %>%
mutate(day = as.Date(day)) %>%
dplyr::left_join(unique(beacon_by_day[,c("general_location", "day", "rkm")]),
., by = c("general_location", "day")) %>%
arrange(general_location, day) %>%
mutate(day = factor(day)) %>%
filter(general_location != "Bench_test") %>% # Remove bench test and other NA locations
filter(!(is.na(general_location))) %>%
arrange(desc(rkm)) %>% # Change order of data to plot decreasing river_km
mutate(general_location = factor(general_location, unique(general_location)))
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
crosstab <- xtabs(formula = arrivals_per_day$New_arrivals ~ arrivals_per_day$day + arrivals_per_day$general_location,
addNA =T)
crosstab[is.na(crosstab)] <- ""
crosstab[crosstab==0] <- NA
crosstab <- as.data.frame.matrix(crosstab)
kable(crosstab, align = "c", caption = "4.3 Fish arrivals per day (\"NA\" means receivers were non-operational)") %>%
kable_styling(c("striped", "condensed"), font_size = 11, full_width = F, position = "left", fixed_thead = TRUE) %>%
column_spec(column = 1:ncol(crosstab),width_min = "50px",border_left = T, border_right = T) %>%
column_spec(1, bold = T, width_min = "75px")%>%
scroll_box(height = "700px")
}
Date | Blw_Salt_RT | MeridianBr | TowerBridge | I80-50_Br | MiddleRiver | Clifton_Court_US_Radial_Gates | Holland_Cut_Quimby | CVP_Tank | CVP_Trash_Rack_1 | Clifton_Court_Intake_Canal | Old_River_Quimby | Sac_BlwGeorgiana | Sac_BlwGeorgiana2 | Benicia_east | Benicia_west |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2022-12-13 | NA | ||||||||||||||
2022-12-14 | NA | ||||||||||||||
2022-12-15 | NA | ||||||||||||||
2022-12-16 | NA | 2 | |||||||||||||
2022-12-17 | NA | 2 | |||||||||||||
2022-12-18 | NA | ||||||||||||||
2022-12-19 | NA | 1 | |||||||||||||
2022-12-20 | NA | ||||||||||||||
2022-12-21 | NA | 1 | 1 | ||||||||||||
2022-12-22 | NA | ||||||||||||||
2022-12-23 | NA | ||||||||||||||
2022-12-24 | NA | ||||||||||||||
2022-12-25 | NA | ||||||||||||||
2022-12-26 | NA | ||||||||||||||
2022-12-27 | NA | 1 | |||||||||||||
2022-12-28 | NA | 8 | |||||||||||||
2022-12-29 | NA | 4 | 4 | ||||||||||||
2022-12-30 | NA | 5 | 5 | 2 | 2 | ||||||||||
2022-12-31 | NA | 1 | 3 | 3 | |||||||||||
2023-01-01 | NA | ||||||||||||||
2023-01-02 | NA | ||||||||||||||
2023-01-03 | NA | 1 | 1 | ||||||||||||
2023-01-04 | NA | 1 | |||||||||||||
2023-01-05 | NA | 1 | 1 | 3 | 2 | ||||||||||
2023-01-06 | NA | ||||||||||||||
2023-01-07 | NA | ||||||||||||||
2023-01-08 | NA | 1 | 1 | ||||||||||||
2023-01-09 | NA | ||||||||||||||
2023-01-10 | NA | ||||||||||||||
2023-01-11 | NA | ||||||||||||||
2023-01-12 | NA | ||||||||||||||
2023-01-13 | NA | ||||||||||||||
2023-01-14 | NA | ||||||||||||||
2023-01-15 | NA | ||||||||||||||
2023-01-16 | NA | ||||||||||||||
2023-01-17 | NA | ||||||||||||||
2023-01-18 | NA | ||||||||||||||
2023-01-19 | NA | ||||||||||||||
2023-01-20 | NA | ||||||||||||||
2023-01-21 | NA | ||||||||||||||
2023-01-22 | NA | ||||||||||||||
2023-01-23 | NA | ||||||||||||||
2023-01-24 | NA | ||||||||||||||
2023-01-25 | NA | ||||||||||||||
2023-01-26 | NA | ||||||||||||||
2023-01-27 | NA | ||||||||||||||
2023-01-28 | NA | ||||||||||||||
2023-01-29 | NA | ||||||||||||||
2023-01-30 | NA | ||||||||||||||
2023-01-31 | NA | ||||||||||||||
2023-02-01 | NA | ||||||||||||||
2023-02-02 | NA | ||||||||||||||
2023-02-03 | NA | ||||||||||||||
2023-02-04 | NA | 1 | |||||||||||||
2023-02-05 | 1 | ||||||||||||||
2023-02-06 | 2 | ||||||||||||||
2023-02-07 | |||||||||||||||
2023-02-08 | |||||||||||||||
2023-02-09 | 5 | ||||||||||||||
2023-02-10 | 5 | ||||||||||||||
2023-02-11 | 7 | ||||||||||||||
2023-02-12 | 2 | ||||||||||||||
2023-02-13 | 7 | ||||||||||||||
2023-02-14 | 4 | ||||||||||||||
2023-02-15 | 2 | ||||||||||||||
2023-02-16 | 2 | ||||||||||||||
2023-02-17 | |||||||||||||||
2023-02-18 | 1 | ||||||||||||||
2023-02-19 | 1 | ||||||||||||||
2023-02-20 | 1 | ||||||||||||||
2023-02-21 | 1 | ||||||||||||||
2023-02-22 | |||||||||||||||
2023-02-23 | |||||||||||||||
2023-02-24 | 2 | 1 | 1 | ||||||||||||
2023-02-25 | 1 | ||||||||||||||
2023-02-26 | 1 | ||||||||||||||
2023-02-27 | 1 | 1 | |||||||||||||
2023-02-28 | 3 | 1 | |||||||||||||
2023-03-01 | 74 | ||||||||||||||
2023-03-02 | 112 | 3 | |||||||||||||
2023-03-03 | 5 | 2 | |||||||||||||
2023-03-04 | 1 | 9 | |||||||||||||
2023-03-05 | 16 | 1 | 1 | ||||||||||||
2023-03-06 | 21 | ||||||||||||||
2023-03-07 | 25 | ||||||||||||||
2023-03-08 | 15 | ||||||||||||||
2023-03-09 | 12 | ||||||||||||||
2023-03-10 | 14 | ||||||||||||||
2023-03-11 | 11 | 3 | 3 | ||||||||||||
2023-03-12 | 4 | 1 | 3 | 3 | |||||||||||
2023-03-13 | 7 | 2 | 1 | 1 | 1 | ||||||||||
2023-03-14 | 3 | 1 | |||||||||||||
2023-03-15 | 1 | 3 | NA | ||||||||||||
2023-03-16 | 48 | 1 | 1 | NA | 1 | 1 | 1 | ||||||||
2023-03-17 | 7 | NA | 1 | 2 | |||||||||||
2023-03-18 | 2 | 1 | NA | ||||||||||||
2023-03-19 | 2 | NA | 1 | 3 | |||||||||||
2023-03-20 | 2 | 3 | 3 | 1 | 1 | ||||||||||
2023-03-21 | 1 | 3 | 2 | 1 | 1 | ||||||||||
2023-03-22 | 1 | 2 | 2 | 1 | 1 | ||||||||||
2023-03-23 | 3 | 3 | 1 | 1 | |||||||||||
2023-03-24 | 4 | 2 | 2 | 2 | 2 | ||||||||||
2023-03-25 | 2 | 2 | 3 | 1 | 1 | 1 | 1 | ||||||||
2023-03-26 | 3 | 4 | 5 | 4 | 4 | ||||||||||
2023-03-27 | 1 | 4 | 5 | 4 | 2 | 1 | 2 | 2 | |||||||
2023-03-28 | 5 | 4 | 5 | 4 | 5 | 1 | 1 | ||||||||
2023-03-29 | 4 | 7 | 6 | 2 | 2 | 4 | 4 | ||||||||
2023-03-30 | 1 | 4 | 5 | 4 | 5 | 8 | 8 | ||||||||
2023-03-31 | 2 | 2 | 1 | 1 | 1 | 8 | 7 | ||||||||
2023-04-01 | 10 | 2 | 2 | 4 | 4 | ||||||||||
2023-04-02 | 14 | 4 | 4 | 2 | 3 | ||||||||||
2023-04-03 | 10 | 12 | 14 | 4 | 4 | 3 | 3 | ||||||||
2023-04-04 | 1 | 20 | 13 | 14 | 9 | 10 | 5 | 5 | |||||||
2023-04-05 | 7 | 18 | 21 | 15 | 15 | 1 | 1 | ||||||||
2023-04-06 | 4 | 15 | 17 | 9 | 9 | 1 | 1 | ||||||||
2023-04-07 | 5 | 11 | 9 | 1 | 9 | 9 | 9 | 9 | |||||||
2023-04-08 | 2 | 11 | 11 | 1 | 8 | 8 | 13 | 12 | |||||||
2023-04-09 | 7 | 8 | 8 | 9 | 9 | 17 | 14 | ||||||||
2023-04-10 | 3 | 3 | 3 | 6 | 6 | 20 | 23 | ||||||||
2023-04-11 | 1 | 2 | 7 | 8 | 3 | 3 | 25 | 24 | |||||||
2023-04-12 | 2 | 1 | 1 | 1 | 3 | 3 | 22 | 22 | |||||||
2023-04-13 | 2 | 2 | 3 | 2 | 2 | 12 | 11 | ||||||||
2023-04-14 | 2 | 3 | 3 | 2 | 2 | 4 | 3 | ||||||||
2023-04-15 | 2 | 2 | 7 | 7 | |||||||||||
2023-04-16 | 1 | 2 | 2 | 1 | 1 | 7 | 6 | ||||||||
2023-04-17 | 2 | 2 | 2 | 2 | |||||||||||
2023-04-18 | 1 | 1 | 1 | 1 | 1 | 2 | 2 | ||||||||
2023-04-19 | 1 | 1 | 2 | 2 | |||||||||||
2023-04-20 | 1 | 1 | |||||||||||||
2023-04-21 | |||||||||||||||
2023-04-22 | |||||||||||||||
2023-04-23 | 1 | 1 | |||||||||||||
2023-04-24 | |||||||||||||||
2023-04-25 | |||||||||||||||
2023-04-26 | |||||||||||||||
2023-04-27 | |||||||||||||||
2023-04-28 | |||||||||||||||
2023-04-29 | 1 | ||||||||||||||
2023-04-30 | |||||||||||||||
2023-05-01 | |||||||||||||||
2023-05-02 | |||||||||||||||
2023-05-03 | |||||||||||||||
2023-05-04 | |||||||||||||||
2023-05-05 | |||||||||||||||
2023-05-06 | |||||||||||||||
2023-05-07 | |||||||||||||||
2023-05-08 | |||||||||||||||
2023-05-09 | |||||||||||||||
2023-05-10 | |||||||||||||||
2023-05-11 | |||||||||||||||
2023-05-12 | |||||||||||||||
2023-05-13 | |||||||||||||||
2023-05-14 | |||||||||||||||
2023-05-15 | |||||||||||||||
2023-05-16 | |||||||||||||||
2023-05-17 | |||||||||||||||
2023-05-18 | |||||||||||||||
2023-05-19 | |||||||||||||||
2023-05-20 | |||||||||||||||
2023-05-21 | |||||||||||||||
2023-05-22 | |||||||||||||||
2023-05-23 | |||||||||||||||
2023-05-24 | |||||||||||||||
2023-05-25 | |||||||||||||||
2023-05-26 |
rm(list = ls())
cleanup(ask = F)
For questions or comments, please contact cyril.michel@noaa.gov