###### Recommended Practice: Classification using weighted, seasonal VCI data ######
###### (See the "in detail" page for more information)
###### Please follow the comments in the code as well as the step-by-step guide

# Enter the path to a folder where you have enough free space to store your
# MODIS data and the resulting products.
# Please do _not_ use backslashes ('\'). Folder names should not have empty spaces.
dataPath <- "X:/YOUR/PATH/HERE/"

# Enter the path to your TIMESAT 3.3 installation including the subfolder
# "/compiled/Win64" or "/compiled/Linux64" respectively.
# Folder names should not have empty spaces.
TSpath <- "X:/YOUR/PATH/HERE/timesatv3_3/compiled/Win64/"

# Enter the path to your appEEARS download-list (including ".txt"!). If you
# downloaded the data from appEEARS beforehand, enter the path to the data here
# (all data needs to be in one folder).
# Please do _not_ use backslashes ('\'). Folder names should not have empty spaces.
downloadList <- "X:/YOUR/PATH/HERE/DL.txt"

# Hemisphere of your study-area: 1 = north ; 0 = south
hemisphere <- 0

# Quality masking: replace the '1' with a '0' to not apply MODIS pixel
# reliability data.
cloudmask <- 1

# Enter your appEEARS/Earthdata username and password here.
# No account? Register here: https://urs.earthdata.nasa.gov/home
aUsername <- "USER"
aPassword <- "PASSWORD"

# Define the period of analysis with a vector of two components: the first is
# the starting year and the second the final year. If you want to process all
# available years just comment next line with a "#"
#AnalysisPeriod <- c(2001, 2017)

# Define name of the crop shapefile (.shp). This corresponds to an existent
# shapefile that contains polygons with all kinds of crops (or any particular
# type). Do _not_ use extensions. If you have no crop shapefile layer just
# comment next 2 lines. Folder names should not have empty spaces.
#cropLayer <- "mergedcropareas"
#pathCropLayer <- "C:/your/path/here/croplayers/"

# No need to enter more - please mark the code from line 1 to 373 and click on
# 'run' (or press ctrl + enter)
#############################################################
#############################################################
#############################################################

#### Installing all needed packages
# Only installed when missing, so the script does not re-download every
# package on each run.
neededPkgs <- c("zoo", "raster", "curl", "rgdal", "httr", "jsonlite", "rts", "stringr")
for (pkg in neededPkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library(zoo)
library(raster)
library(curl)
library(rgdal)
library(httr)
library(jsonlite)
library(rts)
library(stringr)

# If you downloaded your data from appEEARS beforehand, comment out the
# following download block (through "End of download block").
# Read the download-list: one URL per line.
# ('character' makes the intended scan() read mode explicit)
filesl <- scan(downloadList, what = 'character', sep = '\n')

# Fetching download permission token (HTTP Basic auth against the appEEARS API)
secret <- base64_enc(paste(aUsername, aPassword, sep = ":"))
response <- POST("https://appeears.earthdatacloud.nasa.gov/api/login",
                 add_headers("Authorization" = paste("Basic", gsub("\n", "", secret)),
                             "Content-Type" = "application/x-www-form-urlencoded;charset=UTF-8"),
                 body = "grant_type=client_credentials")
token_response <- prettify(toJSON(content(response), auto_unbox = TRUE))
tokenjson <- parse_json(token_response)

# curl handle carrying the Bearer token for all subsequent downloads
handler <- new_handle()
handle_setheaders(handler, "Authorization" = paste0("Bearer ", tokenjson$token))

# Downloading files (except 'VI quality control' data as they are not needed)
for (d in seq_along(filesl)) {
  if (!grepl("_VI_Quality_", filesl[d])) {
    curl_download(url = filesl[d],
                  destfile = paste0(dataPath, "/", basename(filesl[d])),
                  quiet = TRUE, handle = handler)
  }
  print(paste0('Downloading source data (Step 1 of 6): ',
               round(d / length(filesl) * 100, digits = 2), '%'))
}
##### End of download block

##### Defining function for later
# parts of the chain ##### ##### ##### #####

## Function to read TIMESAT outputs using TIMESAT-tools format.
## rast:       path to a TIMESAT binary output file (16-bit signed integers,
##             little-endian, written row-major)
## base_image: raster supplying dimensions/extent/CRS for the result
## returns:    a RasterLayer with the decoded values
bintiff <- function (rast, base_image){
  timesatbinobj <- file(rast, "rb")
  # Close the connection even if readBin() fails (prevents a connection leak)
  on.exit(close(timesatbinobj), add = TRUE)
  raw_vector <- readBin(timesatbinobj, n = file.info(rast)$size, what = "int",
                        signed = TRUE, endian = "little", size = 2)
  # byrow = TRUE: TIMESAT stores pixels row by row
  data_matrix <- matrix(raw_vector, nrow(base_image), ncol(base_image), byrow = TRUE)
  tiff_raster <- raster(data_matrix, template = base_image)
  return(tiff_raster)
}
##### ##### ##### ##### End of function

# Creating a temp-directory, setting memory limit
dir.create(paste0(dataPath, '/temp/'))
rasterOptions(tmpdir = paste0(dataPath, '/temp/'))
# NOTE: memory.limit() is Windows-only and defunct in R >= 4.2 (emits a
# warning there instead of raising the limit)
memory.limit(50000)

# Listing up all downloaded TIF-files in the data folder
# (escaped dot: only files whose extension is ".tif" match, not e.g. "xtif")
rasterData <- list.files(path = dataPath, pattern = '\\.tif$', recursive = FALSE,
                         ignore.case = TRUE, full.names = TRUE)
rasterFiles <- basename(rasterData)

# Chunk-size for processing (in pixels).
# You can reduce these numbers if you run into RAM-problems early on;
# if so, reduce to 500 or 250. Only reduce the numbers if you have memory
# issues ("Error: cannot allocate vector of size [...]").
chwidth <- 1000
chheight <- 1000

################## STARTING EVI/VCI BLOCK ##################
# Listing all EVI rasters and their corresponding pixel reliability data
EVIrasterData <- rasterData[grepl('EVI', basename(rasterData))]
EVIqc <- rasterData[grepl('pixel_reliability', basename(rasterData))]

# Automatically adjusting chunk size for small scenes if needed:
# should the study-area be small enough to fit into one chunk, the image is
# split into two chunks for the chunk-based code below to work properly
exRST <- raster(EVIrasterData[1])
if (as.numeric(ncol(exRST)) <= chwidth || as.numeric(nrow(exRST)) <= chheight){
  chwidth <- ceiling(ncol(exRST) / 2)
}

# Parsing all DOYs and years from the filenames
# (appEEARS naming convention: characters 34-37 = year, 38-40 = day-of-year)
DOYs <- unique(substr(basename(EVIrasterData), 38, 40))
YEARs <- unique(substr(basename(EVIrasterData), 34, 37))

## VCI: chunk-wise calculation
# Creating output folders
# (the 'if' below gets its condition from the following source line)
if
(!file.exists(paste0(dataPath,'/VCI'))) dir.create(paste0(dataPath,'/VCI'))
if (!file.exists(paste0(dataPath,'/EVI'))) dir.create(paste0(dataPath,'/EVI'))

# Determining chunk-shapefile
# Determining chunks (rounding if necessary)
h <- ceiling(ncol(exRST)/ceiling(ncol(exRST) / chwidth))
v <- ceiling(nrow(exRST)/ceiling(nrow(exRST) / chheight))
# Creating shapefile for each chunk: every cell of the aggregated raster
# becomes one polygon ("tile") of roughly chwidth x chheight source pixels
split <- aggregate(exRST,fact=c(h,v))
split[] <- 1:ncell(split)
splitshp <- rasterToPolygons(split)
names(splitshp) <- 'shapes'
notiles <- ncell(split)

# Filter-values for quality masking: "0" and "1" in the pixel reliability
# bands are accepted as sufficient quality
goodValues <- c(0,1)

# Masking clouds/snow; splitting data into chunks
for (d in 1:length(DOYs)){
  # Filtering all relevant data for this DOY
  vrasterData <- EVIrasterData[grepl(paste0(DOYs[d],'_aid'),EVIrasterData)]
  # ..and their corresponding pixel reliability data
  vQC <- EVIqc[grepl(paste0(DOYs[d],'_aid'),EVIqc)]
  # Reading years of available data
  vYear <- substr(basename(vrasterData),34,37)
  for (y in 1:length(vYear)){
    viPRE <- raster(vrasterData[y])
    # Applying quality mask to each image (if masking was activated):
    # pixels whose reliability value is not in goodValues are set to NA
    if (cloudmask == 1){
      qc <- raster(vQC[y])
      viPRE <- overlay(viPRE, qc, fun = function(x, y) {
        x[!(y %in% goodValues)] <- NA
        return(x)
      })
    }
    ##### Splitting (masked) data into chunks
    for(i in 1:ncell(split)){
      ex <- extent(splitshp[splitshp$shapes==i,])
      exx <- crop(viPRE,ex)
      writeRaster(exx,filename=paste0(dataPath,'/temp/',DOYs[d],'_',vYear[y],'_EVICHUNK',formatC(i, width=3, flag='0')),format='GTiff', overwrite=TRUE)
    }
  }
  # (Progress report)
  print(paste0('Data preparation (VCI) & masking (Step 2 of 6): ',round(d / length(DOYs) * 100, digits=2),'%'))
}

# Applying VCI calculation for each chunk
# List all chunks
EVIchunks <- list.files(path=paste0(dataPath,'/temp/'), pattern='_EVICHUNK', recursive=F, ignore.case=T, full.names=T)
for (d in 1:length(DOYs)){
  for (t in 1:notiles){
    # Filtering relevant chunks for this specific date
    sEVIchunks <-EVIchunks[grepl(DOYs[d],substr(basename(EVIchunks),1,3))]
    sEVIchunks <- sEVIchunks[grepl(paste0('_EVICHUNK',formatC(t, width=3, flag='0')),sEVIchunks)]
    # Listing years of data available for each DOY.
    # Checking which years are available for a chunk, as not every year might
    # be available for a day (in the southern hemisphere)
    vvYear <- substr(basename(sEVIchunks),5,8)
    if (length(sEVIchunks) > 0){
      sT <- stack(sEVIchunks)
      # Removing filler-values from EVI data (MODIS fill value -3000)
      sT[sT<(-2999)] <- NA
      # VCI formula application: per-pixel min/max over all years of this DOY
      # NOTE(review): where vimax == vimin, z is 0 and the division yields
      # non-finite values for those pixels — confirm against original practice
      vimax <- stackApply(sT , rep(1, nlayers (sT)), max, na.rm=T)
      vimin <- stackApply(sT , rep(1, nlayers (sT)), min, na.rm=T)
      z <- vimax - vimin
      VCI <- ((sT -vimin)/z)*100
      # Writing VCI-chunks for each available year
      for (y in 1:length(vvYear)){
        writeRaster(VCI[[y]],filename=paste0(dataPath,'/temp/',DOYs[d],'_',vvYear[y],'_VCICHUNK',formatC(t, width=3, flag='0')),format='GTiff', overwrite=TRUE)
      }
    }
  }
  # (Progress report)
  print(paste0('VCI processing (Step 3 of 6): ',round(d / length(DOYs) * 100, digits=2),'%'))
}

## VCI chunk merging and output
# Listing all created chunks
VCIchunks <- list.files(path=paste0(dataPath,'/temp/'), pattern='_VCICHUNK', recursive=F, ignore.case=T, full.names=T)
# Looping through each available DOY_YEAR combination
# (loop closes on a later source line)
for (y in 1:length(YEARs)){
  for (d in 1:length(DOYs)){
    # Listing relevant chunks (EVI, VCI) for this specific date
    # (quality-masked EVI data is also exported for optional further use)
    sVCIchunks <- VCIchunks[grepl(paste0(DOYs[d],'_',YEARs[y],'_'),VCIchunks)]
    sEVIchunks <- EVIchunks[grepl(paste0(DOYs[d],'_',YEARs[y],'_'),EVIchunks)]
    # Creating raster-list of the EVI chunks
    if (length(sEVIchunks) > 0){
      if (length(sEVIchunks) > 1){
        sMos <- list()
        for (o in 1:length(sEVIchunks)){
          sMos <- append(sMos, raster(sEVIchunks[o]))
        }
        # Mosaicking the EVI chunks.
        # "mean" is only set as a placeholder for the mosaic()-call to work;
        # the raster-chunks are not overlapping
        sMos$fun = mean
        Mos <- do.call(mosaic, sMos)
        # Scale factor of the MODIS EVI product
        Mos <- Mos*0.0001
        # Output of mosaicked,
        # scaled EVI image
        writeRaster(Mos,filename=paste0(dataPath,'/EVI/',DOYs[d],'_',YEARs[y],'_EVI'),format='GTiff', overwrite=TRUE)
      }else{
        # Fail-safe in case only one chunk is available
        exp <- raster(sEVIchunks[1])
        # Scaling EVI values from source-data
        exp <- exp*0.0001
        writeRaster(exp,filename=paste0(dataPath,'/EVI/',DOYs[d],'_',YEARs[y],'_EVI'),format='GTiff', overwrite=TRUE)
      }
    }
    # Creating raster-list of the VCI chunks
    if (length(sVCIchunks) > 0){
      if (length(sVCIchunks) > 1){
        sMos <- list()
        for (o in 1:length(sVCIchunks)){
          sMos <- append(sMos, raster(sVCIchunks[o]))
        }
        # Mosaicking the VCI chunks ("mean" again only a placeholder)
        sMos$fun = mean
        Mos <- do.call(mosaic, sMos)
        # Output: final VCI mosaic
        writeRaster(Mos,filename=paste0(dataPath,'/VCI/',DOYs[d],'_',YEARs[y],'_VCI'),format='GTiff', overwrite=TRUE)
      }else{
        # Fail-safe in case only one chunk is available
        writeRaster(raster(sVCIchunks[1]),filename=paste0(dataPath,'/VCI/',DOYs[d],'_',YEARs[y],'_VCI'),format='GTiff', overwrite=TRUE)
      }
    }
    # (Progress report across the y/d double loop)
    print(paste0('EVI output & VCI output (Step 4 of 6): ',round((d+(length(DOYs)*(y-1)))/(length(DOYs)*length(YEARs))*100, digits=2),'%'))
  }
}

# Removing temp files to free space
tmp <- list.files(path=paste0(dataPath,'/temp/'), recursive=F, ignore.case=T, full.names=T)
file.remove(tmp)

################## STARTING TIMESAT PREPARATION ##################
# Step 1: Changing EVI file-format
# Step 2: Building image-list
# Step 3: Output TIMESAT settings-file

# List all EVI TIFFs
allEVITIF <- list.files(paste0(dataPath,"/EVI/"), full.names=T, pattern=".tif$",recursive=F)
# Creating output folders
if (!file.exists(paste0(dataPath,'/timesatPREP'))) dir.create(paste0(dataPath,'/timesatPREP'))
if (!file.exists(paste0(dataPath,'/forTIMESAT'))) dir.create(paste0(dataPath,'/forTIMESAT'))
if (!file.exists(paste0(dataPath,'/timesat'))) dir.create(paste0(dataPath,'/timesat'))

# Change file-format to binary (IDRISI .rst) for TIMESAT
# (loop body continues on the next source line)
for(i in 1:length(allEVITIF)){
  tc <- raster(allEVITIF[i])
  # NAs can
  # cause trouble with TIMESAT. Replacing with -999
  tc[is.na(tc)] <- -999
  # Switch filename-convention (DOY_YEAR -> YEAR_DOY) for correct sorting
  flname <- paste0(substr(basename(allEVITIF[i]),5,8),"_",substr(basename(allEVITIF[i]),1,3),"_EVI-TS")
  writeRaster(tc, filename=paste0(dataPath,'/timesatPREP/',flname), format="IDRISI", overwrite=T)
  # (Progress report)
  print(paste0("Preparing data for TIMESAT... ", i , "/" , length(allEVITIF)))
}

# List new, TIMESAT-compatible files
allEVITS <- list.files(paste0(dataPath,"/timesatPREP/"), full.names=T, pattern=".rst$",recursive=F)
# Create TIMESAT image-list (first line = number of images, then one path per line)
imglist <- c(length(allEVITS),allEVITS)
fileCr<-file( paste0(dataPath,'/forTIMESAT/TIMESATlist.txt'))
writeLines(imglist,fileCr)
close(fileCr)

################# Recreate TIMESAT settings-file convention - line by line #################
# NOTE: the order and the exact (trailing-space) formatting of these lines is
# dictated by the TIMESAT 3.3 settings-file format - do not reorder them.
setlines <- c()
# 1: Headerline
setlines <- c(setlines, "Version: 3.3 ")
# 2: Jobname
setlines <- c(setlines, "RCMDPract %Job_name (no blanks) ")
setlines <- c(setlines, "1 %Image /series mode (1/0) ")
setlines <- c(setlines, "0 %Trend (1/0) ")
setlines <- c(setlines, "0 %Use mask data (1/0) ")
# 6: Link to Filelist
setlines <- c(setlines, paste0(dataPath,"/forTIMESAT/TIMESATlist.txt %Data file list/name "))
setlines <- c(setlines, paste0("dummy %Mask file list/name "))
setlines <- c(setlines, paste0("3 %Image file type "))
setlines <- c(setlines, paste0("0 %Byte order (1/0) "))
# 7: File dimensions
setlines <- c(setlines, paste0(nrow(raster(allEVITS[1])), " ", ncol(raster(allEVITS[1])) ," %File dimension (nrow ncol) "))
# Processing extent (= whole image)
setlines <- c(setlines, paste0("1 ", nrow(raster(allEVITS[1])), " 1 ", ncol(raster(allEVITS[1])) ," %Processing window (start row stop row start col stop col) "))
# Number of years and images per year.
# Correcting the number of seasons, as seasons start in July of one year and
# end in June of the next year in the southern hemisphere
if (hemisphere == 0){
  ycorr <- 1
}else{
  ycorr <- 0
}
# (call continues on the next source line)
setlines <- c(setlines,
  paste0(length(YEARs)-ycorr," 23 %No. years and no. points per year "))
# Valid value-range (0 to 1 for EVI)
setlines <- c(setlines, paste0("0 1 %Valid data range (lower upper) "))
# More settings... (mostly not relevant for this practice but needed for the
# TIMESAT settings-file)
setlines <- c(setlines, paste0("-1e+06 1e+06 1 %Mask range 1 and weight "))
setlines <- c(setlines, paste0("-1e+06 1e+06 1 %Mask range 2 and weight "))
setlines <- c(setlines, paste0("-1e+06 1e+06 1 %Mask range 3 and weight "))
setlines <- c(setlines, paste0("0 %Amplitude cutoff value "))
setlines <- c(setlines, paste0("0 %Print functions and weights (1/0) "))
setlines <- c(setlines, paste0("1 1 0 %Output files (1/0 1/0 1/0) "))
setlines <- c(setlines, paste0("0 %Use land cover (1/0) "))
setlines <- c(setlines, paste0(" %Name of landcover file "))
setlines <- c(setlines, paste0("0 %Spike method "))
setlines <- c(setlines, paste0("2 %Spike value "))
setlines <- c(setlines, paste0("0 %Spatial half dimension "))
setlines <- c(setlines, paste0("1 %No. of landcover classes "))
setlines <- c(setlines, paste0("************ "))
setlines <- c(setlines, paste0("1 %Land cover code for class 1 "))
setlines <- c(setlines, paste0("1 %Season parameter "))
setlines <- c(setlines, paste0("1 %No. of envelope iterations (1-3) "))
setlines <- c(setlines, paste0("2 %Adaptation strength (1-10) "))
setlines <- c(setlines, paste0("0 -99999 %Force minimum (1/0) and value "))
setlines <- c(setlines, paste0("2 %Fitting method (1-4) "))
setlines <- c(setlines, paste0("1 %Weight update method "))
setlines <- c(setlines, paste0("4 %Window size for Sav-Gol. "))
setlines <- c(setlines, paste0("1 %Spatio-temporal smoothing factor. "))
setlines <- c(setlines, paste0("2 %Spatio-temporal adaptation factor. "))
setlines <- c(setlines, paste0("1 %Season start method "))
# Thresholds for season start/stop. Using standard-values.
TSthr <- 0.5
setlines <- c(setlines, paste0(TSthr, " ", TSthr," %Season start / stop values "))

# Writing TIMESAT settingsfile
fileCr<-file( paste0(dataPath,'/forTIMESAT/TIMESAT_input.set'))
writeLines(setlines,fileCr)
close(fileCr)

# Part I finished
if (length(allEVITS) > 0){
  print("DONE - Please proceed in TIMESAT. Do NOT close this R-Session")
}

######################################################### PART II
################## STARTING TIMESAT POST-PROCESSING ##################

# Reading raw TIMESAT output (Variant B; Timesat-fortran tools)
path_tpaFile <-paste0(TSpath,"/RCMDPract_TS.tpa")
# Path to TIMESAT-seas2img-tool for converting
pTSpath <- gsub("compiled/Win64/","compiled/Win64", TSpath)
seasImg <- gsub("compiled/Win64","timesat_fortran/tools/TSF_seas2img", pTSpath)

# TIMESAT will not calculate phenology for the very first season, hence the
# following correction-variable. For more information, check the TIMESAT manual:
# http://web.nateko.lu.se/timesat/docs/TIMESAT32_software_manual.pdf ; Page 22
if (hemisphere == 1){
  corr <- 1
}else{
  corr <- 2
}

# SeasonIndicators in .tpa file: 1 = SoS, 2 = EoS, Peaktime = 5
noSeasons <- length(YEARs) - corr
setwd(paste0(dataPath,"/temp/"))
base_image = exRST

# Looping through season-indicators and writing output for each
# (loop body continues on the next source line)
for (d in 1:3){
  SeasonIndicator <- d
  # Peaktime is indicator #5
  if (d == 3){
    SeasonIndicator <- 5
  }
  if(SeasonIndicator == 1){
    descrinfo <- "SoS"
  }else if(SeasonIndicator == 2){
    descrinfo <- "EoS"
  }else if(SeasonIndicator == 5){
    descrinfo <- "PoS"
  }
  # Loop through seasons, create output for each.
  # Each season spans 23 composites; index offset skips the first season.
  for (s in 1:noSeasons){
    startIX <- 1 + (23*(s))
    endIX <- 23 + (23*(s))
    print(paste0("Reading/converting ", descrinfo, " ", s,"/",noSeasons,"..."))
    # Calling seas2img via command-prompt
    # (Parameters: infile, seaspar, datemin, datemax, misseason, misspix, nameout, filetype)
    system(paste0("\"",seasImg,"\" \"",path_tpaFile,"\" ",SeasonIndicator, " ", startIX," ",endIX, " 0 0 NSE",SeasonIndicator,"_",s," 2" ), wait=TRUE, show.output.on.console = F)
    # Convert binary to tiff; modulo 23 maps the composite index into a
    # within-year (1-23) position
    r <- bintiff(paste0(dataPath,"/temp/NSE",SeasonIndicator,"_",s,"_season1"),base_image)%%23
    writeRaster(r, filename = paste0(dataPath,"/timesat/",as.numeric(YEARs[s])+1,"_",descrinfo), format="GTiff", overwrite=T)
  }
}

# Parsing Tiffs of Start, Peak, End of Season
timesatRasterALL <- list.files(paste0(dataPath,"/timesat/"), pattern=".tif$", recursive=F, full.names=T)
timesatRasterSOS <- stack(timesatRasterALL[grepl("_SoS",timesatRasterALL)])
timesatRasterPOS <- stack(timesatRasterALL[grepl("_PoS",timesatRasterALL)])
timesatRasterEOS <- stack(timesatRasterALL[grepl("_EoS",timesatRasterALL)])

### Checking for integrity of seasonal metrics
# (Prepare a stack of original data for later)
allDetSeas = timesatRasterSOS
# Filter out TIMESAT-NAs (0 = no season detected)
timesatRasterSOS[timesatRasterSOS == 0] <- NA
timesatRasterPOS[timesatRasterPOS == 0] <- NA
timesatRasterEOS[timesatRasterEOS == 0] <- NA

## Check for difference between Start, Peak, End to remove unreliable data (in days)
# thresh1: minimum allowed difference between peak of the season (POS) and
#          start of the season (SOS) in days
thresh1 <- 20
# thresh2: maximum allowed difference between start of the season (SOS) and
#          end of the season (EOS), or the equivalent length of the season
#          (LOS), in days
thresh2 <- 240
# thresh3: minimum allowed difference between end of the season (EOS) and
#          peak of the season (POS) in days
thresh3 <- 10
# Convert day thresholds to image-index units (16-day composites)
thresh1 <- thresh1/16
thresh2 <- thresh2/16
thresh3 <- thresh3/16

# Detect all pixels for which less than 75% of all possible seasons were
# detected (filters out heavily mixed pixels with less vegetation)
allDetSeas[allDetSeas == 0] <- NA
allDetSeas <- allDetSeas/allDetSeas
nSeasons <- calc(allDetSeas, fun = sum, na.rm=T)
nSeasons[nSeasons<(0.75*length(YEARs)-corr)] <- NA
# Binarize the mask
unrelpxmask <- nSeasons/nSeasons

# Applying checks and masks
for(i in 1:nlayers(timesatRasterSOS)){
  diffT1 <- timesatRasterPOS[[i]] - timesatRasterSOS[[i]]
  diffT2 <- timesatRasterEOS[[i]] - timesatRasterSOS[[i]]
  diffT3 <- timesatRasterEOS[[i]] - timesatRasterPOS[[i]]
  # Preparing Masks
  # NOTE(review): the following span appears corrupted/truncated in this copy
  # of the file — comparison operators and several whole statements are
  # missing (presumably the thresh1/thresh2/thresh3 masking of the three
  # difference rasters, the application of 'unrelpxmask' to the SoS/PoS/EoS
  # stacks, the listing of the VCI files into 'allVCI', the creation of the
  # '/weighted' output folder, the per-season 'for (sy ...)' loop header and
  # the per-year extraction of SoS/PoS/EoS). Recover these statements from the
  # original recommended-practice script before running — as-is this does not
  # parse.
  diffT1[diffT1thresh2] <- NA
  diffT3[diffT3 AnalysisPeriod[1] && sy < AnalysisPeriod[2])) || exists("AnalysisPeriod") == F){
    # Skip the last year in the southern hemisphere (its season is incomplete)
    if (!(sy == max(as.numeric(YEARs)) && hemisphere == 0)){
      # Check for hemisphere, load VCI data for the season
      if (hemisphere == 1){
        lyVCI <- allVCI[grepl(sy,allVCI)]
        dscr <- sy
      }else{
        # Load correct data if southern hemisphere was selected (seasonality shift)
        lyVCI <- c(allVCI[grepl(sy,allVCI)],allVCI[grepl(sy+1,allVCI)])
        if (sy == min(as.numeric(YEARs))){
          lyVCI <- lyVCI[1:23]
        }else{
          lyVCI <- lyVCI[13:35]
        }
        # Adjusting output filenames if southern hemisphere was selected
        dscr <- paste0(sy,"_",sy+1)
      }
      # Stacking all VCI for the year
      yVCI <- stack(lyVCI)

      # Creating VCI means for the three "blocks" of seasonal timing
      ##### Start of season variables
      BlockA <- stack()
      # Iterating through all 23 images of the season
      for (i in 1:23){
        # Read start-of-season dates
        s <- SoS
        # Read dates up to 16 days before peak of season
        e <- (PoS - 1)
        # Check if current iteration lies within the seasonal dates of each pixel
        s[s > i] <- NA
        e[e < i] <- NA
        # Creating a binary mask and applying it to the VCI image
        m <- s*e
        v <- yVCI[[i]]*(m/m)
        # Add layer to stack
        BlockA <- stack(BlockA, v)
      }
      # Create mean of this time-block
      BlockA <- calc(BlockA, fun=mean, na.rm=T)

      ##### Peak of season variables (see comments above)
      BlockB <- stack()
      for (i in 1:23){
        # Extract period of current phenology analysis
        s <- PoS
        e <- PoS
        s[s > i] <- NA
        e[e < i] <- NA
        m <- s*e
        v <- yVCI[[i]]*(m/m)
        BlockB <- stack(BlockB, v)
      }
      BlockB <- calc(BlockB, fun=mean, na.rm=T)

      ##### End of season variables (see comments above)
      BlockC <- stack()
      for (i in 1:23){
        s <- (PoS + 1)
        e <- EoS
        s[s > i] <- NA
        e[e < i] <- NA
        m <- s*e
        v <- yVCI[[i]]*(m/m)
        BlockC <- stack(BlockC, v)
      }
      BlockC <- calc(BlockC, fun=mean, na.rm=T)

      # Weighting: 5-7-1 start-peak-end of season mean
      # Result: weighted VCI by season
      WeightedStack <- stack(BlockA,BlockA,BlockA,BlockA,BlockA,BlockB,BlockB,BlockB,BlockB,BlockB,BlockB,BlockB,BlockC)
      WeightedVCI <- calc(WeightedStack, fun=mean, na.rm=T)

      # Output: five classes (drought classes D4..D0 from low to high VCI)
      cWeightedVCI <- reclassify(WeightedVCI, c(-1, 10, 4, 10, 20, 3, 20, 30, 2, 30,40, 1,40,101,0))
      # Live-plotting of results
      cc <- c("#d4d4d4","#ffff00","#ff7400","#ff0000","#ff0083")
      plot(cWeightedVCI, main=paste0('Output: Season ',dscr, " (Classes D0~D4)"),col=cc)
      # Mask to crop-layer (if defined)
      if (exists("cropLayer")){
        cropFile <- readOGR(dsn = pathCropLayer, layer = cropLayer)
        cWeightedVCI <- crop(cWeightedVCI,cropFile)
        cWeightedVCI<- mask(cWeightedVCI, cropFile)
      }
      writeRaster(cWeightedVCI,filename=paste0(dataPath,"/weighted/",dscr,"_FiveCl.tif"),format="GTiff",overwrite=T)

      # Output: three classes
      dWeightedVCI <- reclassify(WeightedVCI, c(-1, 10, 2, 10, 40, 1, 40, 101, 0))
      if (exists("cropLayer")){
        cropFile <- readOGR(dsn = pathCropLayer, layer = cropLayer)
        dWeightedVCI <- crop(dWeightedVCI,cropFile)
        dWeightedVCI<- mask(dWeightedVCI, cropFile)
      }
      writeRaster(dWeightedVCI,filename=paste0(dataPath,"/weighted/",dscr,"_ThreeCl.tif"),format="GTiff",overwrite=T)

      # (Progress report)
      print(paste0('Weighting indices (Step 6 of 6): ',round((sy-as.numeric(min(YEARs))) / (as.numeric(max(YEARs))-as.numeric(min(YEARs))) * 100, digits=2),'%'))
    }
  }
}

# Deleting all temp files
tmp <- list.files(path=paste0(dataPath,'/temp/'), recursive=F, ignore.case=T, full.names=T)
file.remove(tmp)

# Finished
print(paste0(Sys.time(),' +++++ Finished. You will find the data in the subfolders \'VCI\' and \'weighted\' at ', dataPath,"."))