Central Valley Enhanced Acoustic Tagging Project

Water Year 2025 Survival Summary


setwd(file.path(Sys.getenv("USERPROFILE"), "Desktop", "Real-time data massaging", "products"))

latest <- read.csv("latest_download.csv", stringsAsFactors = F)

Study is in progress. Data are current as of 2025-04-25 23:00:00. All times are in Pacific Standard Time.
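The "current as of" stamp is presumably derived from latest_download.csv; a minimal sketch, assuming that file carries a character last_download timestamp column (hypothetical name):

## Hypothetical: derive the data-currency stamp from the download log.
## Assumes a character column `last_download` in latest_download.csv.
as_of <- max(as.POSIXct(latest$last_download,
                        format = "%Y-%m-%d %H:%M:%S", tz = "Etc/GMT+8"))
format(as_of, "%Y-%m-%d %H:%M:%S")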


1.1 Release statistics for entire study year to date

try(setwd(file.path(Sys.getenv("USERPROFILE"), "Desktop", "Real-time data massaging", "products")))

library(knitr)
library(kableExtra)
library(lubridate)
library(data.table)
library(ggplot2)
library(RMark)
library(scales)
library(viridis)
library(forcats)
library(reshape2)
library(png)
library(dataRetrieval)
library(rerddap)
library(plotly)
library(odbc)
library(DBI)
library(dbplyr)
library(dplyr)
library(DT)

detects_study <- fread("study_detections.csv", stringsAsFactors = F,
                       colClasses = c(DateTime_PST = "character", RelDT = "character")) %>%
                 mutate(DateTime_PST = as.POSIXct(DateTime_PST, format = "%Y-%m-%d %H:%M:%S", tz="Etc/GMT+8"),
                        release_time = as.POSIXct(RelDT, format = "%Y-%m-%d %H:%M:%S", tz="Etc/GMT+8")) %>%
                 rename(., weight=Weight, length=Length, release_rkm=Rel_rkm, release_location=Rel_loc, river_km=rkm)
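Note the time zone: "Etc/GMT+8" is POSIX sign-reversed notation for a fixed UTC-8 offset, i.e. Pacific Standard Time with no daylight-saving jumps:

## "Etc/GMT+8" pins timestamps at UTC-8 year-round (the POSIX sign is reversed).
as.POSIXct("2025-07-01 12:00:00", tz = "Etc/GMT+8")  # stays at a -08:00 offset even in July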

## Keep only detections of fish released after 2024-11-10 (Water Year 2025 releases)
detects_study <- detects_study[detects_study$RelDT > "2024-11-10",]

## Connect to the hosted real-time detections database
con <- dbConnect(odbc(),
                 Driver = "SQL Server",
                 Server = "calfishtrack-server.database.windows.net",
                 Database = "realtime_detections",
                 UID = "realtime_user",
                 PWD = "Pass@123",
                 Port = 1433)

# Use dbplyr to pull the qryHexCodes table (tag codes and release metadata) into memory
all_tagcodes <- tbl(con, "qryHexCodes") %>% collect()
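This is the only pull that needs the connection, so if no later chunk reuses con it can be closed once the table is collected:

## Close the database connection now that the tag list is in memory.
dbDisconnect(con)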

study_tagcodes <- as.data.frame(all_tagcodes[all_tagcodes$StudyID %in% unique(detects_study$Study_ID),])
study_tagcodes$tag_expiration <- study_tagcodes$RelDT + (study_tagcodes$tag_life * 60*60*24)
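tag_life is stored in days, so the expiration is the release time plus tag_life × 86,400 seconds. A quick check of the arithmetic with hypothetical values:

## Hypothetical example: a 100-day tag released 2025-01-15 expires 2025-04-25.
rel <- as.POSIXct("2025-01-15 00:00:00", tz = "Etc/GMT+8")
rel + 100 * 60*60*24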

release_stats <- study_tagcodes %>%
  group_by(StudyID) %>%
  summarise(First_release_time = min(RelDT),
            Last_release_time = max(RelDT),
            Number_fish_released = length(unique(TagID_Hex)),
            Mean_length = mean(Length, na.rm=T),
            Mean_weight = mean(Weight, na.rm=T),
            Release_location = ifelse(length(unique(Rel_loc))==1,Rel_loc,"Many"),
            Release_rkm = ifelse(length(unique(Rel_rkm))==1,as.character(Rel_rkm),"Many"),
            Last_tag_expiration = max(tag_expiration)) %>%
  mutate(Mean_length = round(Mean_length, 1),
         Mean_weight = round(Mean_weight, 1),
         First_release_time = format(First_release_time, tz = "Etc/GMT+8"),
         Last_release_time = format(Last_release_time, tz = "Etc/GMT+8"),
         Last_tag_expiration = format(Last_tag_expiration, tz = "Etc/GMT+8")) %>%
  arrange(Last_tag_expiration)

names(release_stats) <- gsub("_", " ", names(release_stats))

release_stats_all <- study_tagcodes %>%
  summarise(First_release_time = min(RelDT),
            Last_release_time = max(RelDT),
            Number_fish_released = length(unique(TagID_Hex)),
            Mean_length = mean(Length, na.rm=T),
            Mean_weight = mean(Weight, na.rm=T),
            Release_location = ifelse(length(unique(Rel_loc))==1,Rel_loc,"Many"),
            Release_rkm = ifelse(length(unique(Rel_rkm))==1,as.character(Rel_rkm),"Many"),
            Last_tag_expiration = max(tag_expiration)) %>%
  mutate(Mean_length = round(Mean_length, 1),
         Mean_weight = round(Mean_weight, 1),
         First_release_time = format(First_release_time, tz = "Etc/GMT+8"),
         Last_release_time = format(Last_release_time, tz = "Etc/GMT+8"),
         Last_tag_expiration = format(Last_tag_expiration, tz = "Etc/GMT+8")) %>%
  arrange(Last_tag_expiration)

names(release_stats_all) <- gsub("_", " ", names(release_stats_all))

datatable(release_stats_all, rownames = FALSE, options = list(dom = 't'))
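The per-study and whole-year tables repeat the same summarise/format logic; a small helper could keep the two in sync (a sketch, not part of the original script; summarise_releases and the trimmed column set are illustrative):

## Hypothetical refactor: shared release-summary logic applied to any grouping.
summarise_releases <- function(df) {
  df %>%
    summarise(First_release_time = format(min(RelDT), tz = "Etc/GMT+8"),
              Last_release_time = format(max(RelDT), tz = "Etc/GMT+8"),
              Number_fish_released = length(unique(TagID_Hex)),
              Mean_length = round(mean(Length, na.rm = TRUE), 1),
              Mean_weight = round(mean(Weight, na.rm = TRUE), 1),
              Last_tag_expiration = format(max(tag_expiration), tz = "Etc/GMT+8"))
}

release_stats     <- study_tagcodes %>% group_by(StudyID) %>% summarise_releases()
release_stats_all <- study_tagcodes %>% summarise_releases()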


1.2 Release statistics per study group for entire study year to date

datatable(release_stats, rownames = FALSE, options = list(dom = 't', pageLength = 100, autoWidth = TRUE))


detects_study <- detects_study[order(detects_study$TagCode, detects_study$DateTime_PST),]
## For each detection, find the general location of the previous detection of the same tag.
detects_study$prev_genloc <- shift(detects_study$general_location, fill = NA, type = "lag")
## Blank out the lagged location when it belongs to a different tag or a different general location.
detects_study[which(detects_study$TagCode != shift(detects_study$TagCode, fill = NA, type = "lag")), "prev_genloc"] <- NA
detects_study[which(detects_study$general_location != detects_study$prev_genloc), "prev_genloc"] <- NA
## Flag the start of each new tag/location run, then number the runs with a cumulative sum.
detects_study$mov_score <- 0
detects_study[is.na(detects_study$prev_genloc), "mov_score"] <- 1
detects_study$mov_counter <- cumsum(detects_study$mov_score)
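The mov_counter step is a standard run-numbering idiom: flag the first row of each run with 1, all other rows with 0, and cumsum() turns the flags into run IDs. A toy illustration:

## Toy illustration of run numbering with cumsum():
locs <- c("A", "A", "B", "B", "B", "A")
new_run <- c(1, ifelse(locs[-1] != locs[-length(locs)], 1, 0))
cumsum(new_run)   # 1 1 2 2 2 3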

detects_summary <- aggregate(list(first_detect = detects_study$DateTime_PST),
                             by = list(Study_ID = detects_study$Study_ID,
                                       TagCode = detects_study$TagCode,
                                       release_time = detects_study$release_time,
                                       mov_counter = detects_study$mov_counter,
                                       general_location = detects_study$general_location,
                                       river_km = detects_study$river_km,
                                       release_rkm = detects_study$release_rkm),
                             min)

detects_summary <- detects_summary[is.na(detects_summary$first_detect) == F,]

## Add one pseudo-detection per fish at its release site and time.
releases <- aggregate(list(first_detect = detects_study$release_time),
                      by = list(Study_ID = detects_study$Study_ID,
                                TagCode = detects_study$TagCode,
                                release_time = detects_study$release_time,
                                release_location = detects_study$release_location,
                                release_rkm = detects_study$release_rkm),
                      min)
releases$river_km <- releases$release_rkm
releases$mov_counter <- NA
releases$general_location <- releases$release_location
releases$release_location <- NULL

detects_summary <- rbindlist(list(detects_summary, releases), use.names = T)
detects_summary <- detects_summary[order(detects_summary$TagCode, detects_summary$first_detect),]

starttime <- as.Date(min(detects_summary$release_time), tz = "Etc/GMT+8")
## endtime is either tomorrow or the latest predicted tag expiration, whichever comes first.
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")) + 1,
               max(as.Date(detects_study$release_time) + as.numeric(detects_study$tag_life)))



p <- ggplot(data = detects_summary, aes(x = first_detect, y = river_km, color = Study_ID, group = TagCode)) +
    geom_path(lwd = 1, alpha = 0.5) +
    geom_point(alpha = 0.5) +
    xlab("Date") +
    ylab("Kilometers upstream of the Golden Gate")+
    #xlim(starttime,endtime) +
    theme_bw()

fig <- ggplotly(p)

fig <- fig %>% toWebGL()

fig
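Because ggplotly() returns an htmlwidget, the same figure can also be written out as a standalone page; a minimal sketch (the filename is hypothetical):

## Optional: save the interactive waterfall plot as a self-contained HTML file.
htmlwidgets::saveWidget(fig, "waterfall_plot.html", selfcontained = TRUE)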

1.3 Waterfall Detection Plot for whole season (color-coded by study)


1.4 Survival to Benicia for all major Chinook salmon tagging efforts (n > 100).


1.5 Through-Delta survival (City of Sacramento to Benicia) for all major Chinook salmon tagging efforts (n > 100).



1.6 Historic survival to Benicia estimates (PROVISIONAL DATA)




1.7 Historic through-Delta survival estimates (PROVISIONAL DATA)




For questions or comments, please contact