Dear all,
I split all the data by their IDs and did the following. It works, but it is very, very slow — any help?
# Locate all NetCDF files of daily flows and prepare an output
# directory for the per-station trend results.
dir <- "C:/daily_flowa/"
files <- list.files(path = dir, pattern = ".*\\.nc$",
                    ignore.case = TRUE, full.names = TRUE)
outdir <- file.path(dir, "trend")
# showWarnings = FALSE: re-running the script must not warn when the
# directory already exists.
dir.create(outdir, showWarnings = FALSE)
#' Read the "Qout" variable from a NetCDF file as a time series.
#'
#' @param filename Path to a .nc file whose "time" dimension has units
#'   of the form "days since YYYY-MM-DD" and which contains a "Qout"
#'   variable (streamflow, presumably -- confirm against the data).
#' @return A `ts` object starting in 1979 (assumed first year of the
#'   record -- TODO confirm).
ncdf_timeseries <- function(filename) {
  nc <- nc_open(filename)
  # Guarantee the file handle is released even if a read below fails;
  # the original only closed the file on the success path.
  on.exit(nc_close(nc), add = TRUE)
  # The time axis origin is embedded in the units string.
  origin <- sub("days since ", "", nc$dim$time$units)
  x1 <- xts(
    data.frame(value = ncvar_get(nc, "Qout")),
    order.by = as.Date(nc$dim$time$vals, origin = origin)
  )
  ts(x1, start = 1979)
}
# Run the Mann-Kendall trend test and Sen's slope estimator for every
# station file, collecting one result row per file.
listOfDataFrames <- vector("list", length(files))  # preallocate
for (i in seq_along(files))
{
  river <- ncdf_timeseries(filename = files[i])
  tren <- mk.test(river)
  slp <- sens.slope(river)
  # Station name = file name without its .nc extension. The original
  # gsub(".nc", "") used an unescaped dot with no anchor, so it could
  # mangle names containing "Xnc", and missed upper-case ".NC".
  sample_filename <- sub("\\.nc$", "", basename(files[i]),
                         ignore.case = TRUE)
  # Build the row as a data.frame directly: the original cbind() of
  # mixed types coerced every column (p-value, Z, slope) to character.
  listOfDataFrames[[i]] <- data.frame(
    name    = sample_filename,
    p_Value = tren$p.value,
    MK_Z    = unname(tren$statistic),
    Slope   = unname(slp$estimates),
    stringsAsFactors = FALSE
  )
  # NOTE(review): the original also built a per-file CSV path `fn` here
  # but never wrote to it; that dead code has been removed.
  message("file ", i , " produced ")
}
# Combine the per-station rows into one table and write the summary.
mktest <- do.call("rbind", listOfDataFrames)
# The original wrote to "C:/dailyflows/", which does not match `dir`
# ("C:/daily_flowa/") and may not exist; write into the trend output
# directory instead so the path is guaranteed valid.
write.csv(mktest, file.path(outdir, "MK_test.csv"), row.names = FALSE)