# Cloud and Shadow Detection Analysis
#
# This script analyzes cloud and shadow detection parameters using the
# diagnostic GeoTIFF files and polygon-based classification to help optimize
# the detection algorithms.

# Load required packages ----
library(terra)
library(sf)
library(dplyr)
library(ggplot2)
library(reshape2)
library(exactextractr) # For accurate polygon extraction

# Define diagnostic directory ----
diagnostic_dir <- "C:/Users/timon/Resilience BV/4020 SCane ESA DEMO - Documenten/General/4020 SCDEMO Team/4020 TechnicalData/WP3/smartcane/cloud_mask_diagnostics_20250515-164357"

# Simple logging function for this standalone script.
# Writes "[LEVEL] message" followed by a newline to stdout.
safe_log <- function(message, level = "INFO") {
  cat("[", level, "] ", message, "\n", sep = "")
}

safe_log("Starting cloud detection analysis on diagnostic rasters")

# Load all diagnostic rasters ----
safe_log("Loading diagnostic raster files...")

# Small helper: every diagnostic layer lives in `diagnostic_dir`, so only the
# file name varies between loads.
read_diagnostic <- function(file_name) {
  terra::rast(file.path(diagnostic_dir, file_name))
}

# Original bands
red_band <- read_diagnostic("diagnostic_red_band.tif")
green_band <- read_diagnostic("diagnostic_green_band.tif")
blue_band <- read_diagnostic("diagnostic_blue_band.tif")
nir_band <- read_diagnostic("diagnostic_nir_band.tif")

# Derived indices
brightness <- read_diagnostic("diagnostic_brightness.tif")
ndvi <- read_diagnostic("diagnostic_ndvi.tif")
blue_ratio <- read_diagnostic("diagnostic_blue_ratio.tif")
green_nir_ratio <- read_diagnostic("diagnostic_green_nir_ratio.tif")
ndwi <- read_diagnostic("diagnostic_ndwi.tif")

# Cloud detection parameters (boolean diagnostic layers)
bright_pixels <- read_diagnostic("param_bright_pixels.tif")
very_bright_pixels <- read_diagnostic("param_very_bright_pixels.tif")
blue_dominant <- read_diagnostic("param_blue_dominant.tif")
low_ndvi <- read_diagnostic("param_low_ndvi.tif")
green_dominant_nir <- terra::rast(file.path(diagnostic_dir, "param_green_dominant_nir.tif"))
high_ndwi <- terra::rast(file.path(diagnostic_dir, "param_high_ndwi.tif"))

# Load shadow detection parameters
dark_pixels <- terra::rast(file.path(diagnostic_dir, "param_dark_pixels.tif"))
very_dark_pixels <- terra::rast(file.path(diagnostic_dir, "param_very_dark_pixels.tif"))
low_nir <- terra::rast(file.path(diagnostic_dir, "param_low_nir.tif"))
shadow_ndvi <- terra::rast(file.path(diagnostic_dir, "param_shadow_ndvi.tif"))
low_red_to_blue <- terra::rast(file.path(diagnostic_dir, "param_low_red_to_blue.tif"))
high_blue_to_nir_ratio <- terra::rast(file.path(diagnostic_dir, "param_high_blue_to_nir_ratio.tif"))
blue_nir_ratio_raw <- terra::rast(file.path(diagnostic_dir, "param_blue_nir_ratio_raw.tif"))
red_blue_ratio_raw <- terra::rast(file.path(diagnostic_dir, "param_red_blue_ratio_raw.tif"))

# Load edge detection parameters
brightness_focal_sd <- terra::rast(file.path(diagnostic_dir, "param_brightness_focal_sd.tif"))
edge_pixels <- terra::rast(file.path(diagnostic_dir, "param_edge_pixels.tif"))

# Load final masks
cloud_mask <- terra::rast(file.path(diagnostic_dir, "mask_cloud.tif"))
shadow_mask <- terra::rast(file.path(diagnostic_dir, "mask_shadow.tif"))
combined_mask <- terra::rast(file.path(diagnostic_dir, "mask_combined.tif"))
dilated_mask <- terra::rast(file.path(diagnostic_dir, "mask_dilated.tif"))

safe_log("Raster data loaded successfully")

# Try to read the classification polygons if they exist
tryCatch({
  # Check if the classes.geojson file exists in the diagnostic directory;
  # if not, fall back to the most recent sibling cloud_mask_diagnostics_*
  # directory that contains one (directory names are timestamped, so a
  # reverse sort puts the newest first).
  classes_file <- file.path(diagnostic_dir, "classes.geojson")
  if (!file.exists(classes_file)) {
    potential_dirs <- list.dirs(path = dirname(diagnostic_dir), full.names = TRUE, recursive = FALSE)
    diagnostic_dirs <- potential_dirs[grepl("cloud_mask_diagnostics_", potential_dirs)]
    # FIX: loop variable renamed from `dir` (shadowed base::dir)
    for (candidate_dir in rev(sort(diagnostic_dirs))) {
      potential_file <- file.path(candidate_dir, "classes.geojson")
      if (file.exists(potential_file)) {
        classes_file <- potential_file
        break
      }
    }
  }

  if (file.exists(classes_file)) {
    safe_log(paste("Using classification polygons from:", classes_file))

    # Load the classification polygons; the GeoJSON stores the class in a
    # `type` attribute, which we rename to `class`.
    classifications <- sf::st_read(classes_file, quiet = TRUE) %>%
      rename(class = type)

    # Remove empty polygons
    classifications <- classifications[!sf::st_is_empty(classifications), ]

    # Ensure a `class_name` column exists; fall back to `class` so that both
    # the exactextractr path and the terra fallback can rely on it.
    if (!("class_name" %in% names(classifications))) {
      classifications$class_name <- classifications$class
    }

    # All layers we want per-polygon statistics for. Logical parameter
    # rasters are converted to 0/1 so their polygon means read as the
    # fraction of flagged pixels.
    extraction_rasters <- list(
      # Original bands
      red = red_band,
      green = green_band,
      blue = blue_band,
      nir = nir_band,
      # Derived indices
      brightness = brightness,
      ndvi = ndvi,
      blue_ratio = blue_ratio,
      green_nir_ratio = green_nir_ratio,
      ndwi = ndwi,
      # Cloud detection parameters
      bright_pixels = terra::ifel(bright_pixels, 1, 0),
      very_bright_pixels = terra::ifel(very_bright_pixels, 1, 0),
      blue_dominant = terra::ifel(blue_dominant, 1, 0),
      low_ndvi = terra::ifel(low_ndvi, 1, 0),
      green_dominant_nir = terra::ifel(green_dominant_nir, 1, 0),
      high_ndwi = terra::ifel(high_ndwi, 1, 0),
      # Shadow detection parameters
      dark_pixels = terra::ifel(dark_pixels, 1, 0),
      very_dark_pixels = terra::ifel(very_dark_pixels, 1, 0),
      low_nir = terra::ifel(low_nir, 1, 0),
      shadow_ndvi = terra::ifel(shadow_ndvi, 1, 0),
      low_red_to_blue = terra::ifel(low_red_to_blue, 1, 0),
      high_blue_to_nir_ratio = terra::ifel(high_blue_to_nir_ratio, 1, 0),
      # Raw ratios recomputed from bands (small offset avoids division by 0)
      blue_nir_ratio_raw = blue_band / (nir_band + 0.01),
      red_blue_ratio_raw = red_band / (blue_band + 0.01),
      # Edge detection parameters
      brightness_focal_sd = brightness_focal_sd,
      edge_pixels = terra::ifel(edge_pixels, 1, 0),
      # Final masks
      cloud_mask = terra::ifel(cloud_mask, 1, 0),
      shadow_mask = terra::ifel(shadow_mask, 1, 0),
      combined_mask = terra::ifel(combined_mask, 1, 0),
      dilated_mask = terra::ifel(dilated_mask, 1, 0)
    )

    # Create a stack of all rasters
    extraction_stack <- terra::rast(extraction_rasters)

    if (requireNamespace("exactextractr", quietly = TRUE)) {
      # Accumulator for one row per polygon, columns "<layer>_mean" plus
      # class / class_name.
      all_stats <- data.frame()

      # FIX: valid_class_ids was used without ever being defined
      valid_class_ids <- unique(classifications$class)
      valid_class_ids <- valid_class_ids[!is.na(valid_class_ids)]
      if (length(valid_class_ids) == 0) {
        safe_log("No valid (non-NA) class IDs found for exactextractr processing.", "WARNING")
      }

      for (class_id in valid_class_ids) {
        # Subset polygons for this class (which() is NA-safe)
        class_polygons_sf <- classifications[which(classifications$class == class_id), ]
        if (nrow(class_polygons_sf) == 0) {
          safe_log(paste("Skipping empty class (no polygons after filtering):", class_id), "WARNING")
          next
        }

        tryCatch({
          safe_log(paste("Processing class:", class_id))

          # Skip classes whose combined bounding box does not overlap the
          # raster extent at all.
          # FIX: SpatExtent is converted to a named numeric vector before
          # character indexing.
          rast_extent <- as.vector(terra::ext(extraction_stack))
          poly_extent <- sf::st_bbox(class_polygons_sf)
          if (poly_extent["xmin"] > rast_extent["xmax"] ||
              poly_extent["xmax"] < rast_extent["xmin"] ||
              poly_extent["ymin"] > rast_extent["ymax"] ||
              poly_extent["ymax"] < rast_extent["ymin"]) {
            safe_log(paste("Skipping class that doesn't overlap with raster:", class_id), "WARNING")
            next
          }

          # exact_extract processes each feature in class_polygons_sf and,
          # with summarize_df = FALSE, returns a list of data frames (one
          # single-row data frame per polygon).
          per_polygon_stats_list <- exactextractr::exact_extract(
            extraction_stack,
            class_polygons_sf,
            function(values, coverage_fraction) {
              # Keep only pixels with > 50% of their area inside the polygon
              valid_pixels_idx <- coverage_fraction > 0.5
              df_filtered <- values[valid_pixels_idx, , drop = FALSE]
              stat_cols <- paste0(names(extraction_stack), "_mean")
              if (nrow(df_filtered) == 0) {
                # No pixels meet the coverage criterion: return an all-NA row
                # with the expected column names so rbind keeps working.
                na_df <- as.data.frame(matrix(NA_real_, nrow = 1, ncol = length(stat_cols)))
                names(na_df) <- stat_cols
                return(na_df)
              }
              # Mean of each layer (column) over the retained pixels
              stats_per_band <- lapply(names(df_filtered), function(band_name) {
                col_data <- df_filtered[[band_name]]
                if (length(col_data) > 0 && sum(!is.na(col_data)) > 0) {
                  setNames(mean(col_data, na.rm = TRUE), paste0(band_name, "_mean"))
                } else {
                  setNames(NA_real_, paste0(band_name, "_mean"))
                }
              })
              as.data.frame(t(do.call(c, stats_per_band)))
            },
            summarize_df = FALSE, # get a list of DFs, one per polygon
            force_df = TRUE       # treat the summary output as a DF
          )

          if (length(per_polygon_stats_list) > 0) {
            class_stats_df <- do.call(rbind, per_polygon_stats_list)
            # Drop rows that are entirely NA (polygons with no valid pixels)
            class_stats_df <- class_stats_df[rowSums(is.na(class_stats_df)) < ncol(class_stats_df), ]
            if (nrow(class_stats_df) > 0) {
              class_stats_df$class <- class_id
              # class_name is assumed consistent within a class_id; take it
              # from the first polygon, falling back to the id itself.
              if ("class_name" %in% names(class_polygons_sf) && nrow(class_polygons_sf) > 0) {
                class_stats_df$class_name <- as.character(class_polygons_sf$class_name[1])
              } else {
                class_stats_df$class_name <- as.character(class_id)
              }
              all_stats <- rbind(all_stats, class_stats_df)
              safe_log(paste("Successfully extracted data for", nrow(class_stats_df), "polygons in class", class_id))
            } else {
              safe_log(paste("No valid data extracted for class (after NA removal):", class_id), "WARNING")
            }
          } else {
            safe_log(paste("No data frames returned by exact_extract for class:", class_id), "WARNING")
          }
        }, error = function(e) {
          safe_log(paste("Error processing class", class_id, "with exact_extract:", e$message), "ERROR")
        })
      }

      # Save the extracted statistics to a CSV file
      if (nrow(all_stats) > 0) {
        stats_file <- file.path(diagnostic_dir, "class_spectral_stats_mean.csv")
        write.csv(all_stats, stats_file, row.names = FALSE)
        safe_log(paste("Saved MEAN spectral statistics by class to:", stats_file))
      } else {
        safe_log("No statistics were generated to save.", "WARNING")
      }

      # Calculate optimized thresholds for cloud/shadow detection
      # (using only _mean columns)
      if (nrow(all_stats) > 0 && ncol(all_stats) > 2) {
        threshold_results <- data.frame(
          parameter = character(),
          best_threshold = numeric(),
          direction = character(),
          target_class = character(),
          vs_class = character(),
          accuracy = numeric(),
          stringsAsFactors = FALSE
        )

        # Class pairs whose separation we want to optimize
        class_pairs <- list(
          # Cloud vs various surfaces
          c("cloud", "crop"),
          c("cloud", "bare_soil_dry"),
          c("cloud", "bare_soil_wet"),
          # Shadow vs various surfaces
          c("shadow_over_crop", "crop"),
          c("shadow_over_bare_soil", "bare_soil_dry"),
          c("shadow_over_bare_soil", "bare_soil_wet")
        )

        # FIX: the original grepped "_mean$" on names(extraction_rasters),
        # whose names carry no suffix, so the result was always empty and no
        # thresholds were ever computed. Use the stats columns instead.
        cloud_params <- grep("_mean$", colnames(all_stats), value = TRUE)
        # Simplified: same parameter set for shadows; adjust if specific
        # shadow parameters are needed.
        shadow_params <- cloud_params

        for (pair in class_pairs) {
          target_class <- pair[1]
          vs_class <- pair[2]
          # Pick the parameter set by whether we're analyzing clouds or shadows
          params_to_check <- if (grepl("cloud", target_class)) cloud_params else shadow_params

          for (param in params_to_check) {
            if (!(param %in% colnames(all_stats))) next
            # which() keeps NA class names from selecting garbage rows
            target_values <- all_stats[which(all_stats$class_name == target_class), param]
            vs_values <- all_stats[which(all_stats$class_name == vs_class), param]
            if (length(target_values) == 0 || length(vs_values) == 0) next

            target_mean <- mean(target_values, na.rm = TRUE)
            target_sd <- sd(target_values, na.rm = TRUE)
            vs_mean <- mean(vs_values, na.rm = TRUE)
            vs_sd <- sd(vs_values, na.rm = TRUE)

            # Direction: does a higher or lower value indicate the target
            # class? Candidate thresholds span the overlap region between the
            # two class distributions.
            if (target_mean > vs_mean) {
              direction <- ">"
              potential_thresholds <- seq(
                min(min(target_values, na.rm = TRUE), vs_mean + 0.5 * vs_sd),
                max(max(vs_values, na.rm = TRUE), target_mean - 0.5 * target_sd),
                length.out = 20
              )
            } else {
              direction <- "<"
              potential_thresholds <- seq(
                min(min(vs_values, na.rm = TRUE), target_mean + 0.5 * target_sd),
                max(max(target_values, na.rm = TRUE), vs_mean - 0.5 * vs_sd),
                length.out = 20
              )
            }

            # Grid search: keep the threshold with the best overall accuracy
            best_accuracy <- 0
            best_threshold <- if (direction == ">") min(potential_thresholds) else max(potential_thresholds)
            for (threshold in potential_thresholds) {
              if (direction == ">") {
                correct_target <- sum(target_values > threshold, na.rm = TRUE)
                correct_vs <- sum(vs_values <= threshold, na.rm = TRUE)
              } else {
                correct_target <- sum(target_values < threshold, na.rm = TRUE)
                correct_vs <- sum(vs_values >= threshold, na.rm = TRUE)
              }
              accuracy <- (correct_target + correct_vs) / (length(target_values) + length(vs_values))
              if (accuracy > best_accuracy) {
                best_accuracy <- accuracy
                best_threshold <- threshold
              }
            }

            threshold_results <- rbind(threshold_results, data.frame(
              parameter = gsub("_mean", "", param),
              best_threshold = best_threshold,
              direction = direction,
              target_class = target_class,
              vs_class = vs_class,
              accuracy = best_accuracy,
              stringsAsFactors = FALSE
            ))
          }
        }

        # Save threshold results
        thresholds_file <- file.path(diagnostic_dir, "optimal_thresholds.csv")
        write.csv(threshold_results, thresholds_file, row.names = FALSE)
        safe_log(paste("Saved optimal threshold recommendations to:", thresholds_file))
      }

      # Generate box plots for key parameters to visualize class differences
      if (requireNamespace("ggplot2", quietly = TRUE) && nrow(all_stats) > 0) {
        mean_cols <- grep("_mean$", colnames(all_stats), value = TRUE)
        if (length(mean_cols) > 0) {
          # Long format: one row per (polygon, parameter) pair
          plot_data <- reshape2::melt(all_stats,
                                      id.vars = c("class", "class_name"),
                                      measure.vars = mean_cols,
                                      variable.name = "parameter",
                                      value.name = "value")

          plots_dir <- file.path(diagnostic_dir, "class_plots")
          dir.create(plots_dir, showWarnings = FALSE, recursive = TRUE)

          # Per-parameter box plots for a hand-picked set of key parameters
          # (intersect() guards against columns that were not extracted)
          key_params_plot <- intersect(c(
            "brightness_mean", "ndvi_mean", "blue_ratio_mean", "ndwi_mean",
            "blue_nir_ratio_raw_mean", "red_blue_ratio_raw_mean"
          ), mean_cols)

          for (param in key_params_plot) {
            param_data <- plot_data[plot_data$parameter == param, ]
            if (nrow(param_data) > 0) {
              param_name <- gsub("_mean", "", param)
              p <- ggplot2::ggplot(param_data, ggplot2::aes(x = class_name, y = value, fill = class_name)) +
                ggplot2::geom_boxplot() +
                ggplot2::theme_minimal() +
                ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1)) +
                ggplot2::labs(
                  title = paste("Distribution of", param_name, "by Land Cover Class"),
                  x = "Class",
                  y = param_name,
                  fill = "Class"
                )
              plot_file <- file.path(plots_dir, paste0("boxplot_", param_name, ".png"))
              ggplot2::ggsave(plot_file, p, width = 10, height = 6, dpi = 150)
            }
          }

          # Faceted summary plot across a small set of parameters
          summary_data <- plot_data[plot_data$parameter %in%
            c("brightness_mean", "ndvi_mean", "blue_nir_ratio_raw_mean", "red_blue_ratio_raw_mean"), ]
          if (nrow(summary_data) > 0) {
            # Clean up parameter names for display
            summary_data$parameter <- gsub("_mean$", "", summary_data$parameter)
            summary_data$parameter <- gsub("_raw$", "", summary_data$parameter)
            p <- ggplot2::ggplot(summary_data, ggplot2::aes(x = class_name, y = value, fill = class_name)) +
              ggplot2::geom_boxplot() +
              ggplot2::facet_wrap(~parameter, scales = "free_y") +
              ggplot2::theme_minimal() +
              ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1)) +
              ggplot2::labs(
                title = "Key Spectral Parameters by Land Cover Class",
                x = "Class",
                y = "Value",
                fill = "Class"
              )
            summary_file <- file.path(plots_dir, "spectral_parameters_summary.png")
            ggplot2::ggsave(summary_file, p, width = 12, height = 8, dpi = 150)
          }

          safe_log(paste("Generated spectral parameter plots in:", plots_dir))
        }
      }
    } else {
      safe_log("Package 'exactextractr' not available. Install it for more accurate polygon extraction.", "WARNING")

      # Fall back to simple extraction using terra (calculating only mean).
      # Produces one long-format row per (class, raster) pair.
      class_stats <- data.frame()
      valid_class_names_fallback <- unique(classifications$class_name)
      valid_class_names_fallback <- valid_class_names_fallback[!is.na(valid_class_names_fallback)]
      if (length(valid_class_names_fallback) == 0) {
        safe_log("No valid (non-NA) class names found for fallback terra::extract processing.", "WARNING")
      }

      for (class_name_fb in valid_class_names_fallback) {
        class_polygons_fb <- classifications[which(classifications$class_name == class_name_fb), ]
        if (nrow(class_polygons_fb) == 0) next
        class_vect_fb <- terra::vect(class_polygons_fb)

        for (i in seq_along(extraction_rasters)) {
          raster_name <- names(extraction_rasters)[i]
          # One mean per polygon (rows) for this raster (single column)
          extracted_values_list <- terra::extract(extraction_rasters[[i]], class_vect_fb,
                                                  fun = mean, na.rm = TRUE, ID = FALSE)
          if (nrow(extracted_values_list) > 0 && ncol(extracted_values_list) > 0) {
            # Average over all polygons in this class for this raster
            mean_val_for_class <- mean(extracted_values_list[[1]], na.rm = TRUE)
            if (!is.na(mean_val_for_class)) {
              class_stats <- rbind(class_stats, data.frame(
                class_name = class_name_fb,
                parameter = paste0(raster_name, "_mean"),
                value = mean_val_for_class
              ))
            }
          }
        }
      }

      # Save the statistics (if any were generated), long format
      if (nrow(class_stats) > 0) {
        stats_file <- file.path(diagnostic_dir, "class_spectral_stats_simple_mean_long.csv")
        write.csv(class_stats, stats_file, row.names = FALSE)
        safe_log(paste("Saved simple MEAN (long format) spectral statistics by class to:", stats_file))
      } else {
        safe_log("No statistics generated by fallback method.", "WARNING")
      }
    }
  } else {
    safe_log("No classification polygons file (classes.geojson) found. Skipping spectral analysis.", "WARNING")
  }
}, error = function(e) {
  safe_log(paste("Error in classification polygon processing or spectral analysis:", e$message), "ERROR")
})

safe_log("Cloud detection analysis script finished.")
# NOTE: the previous rm(list = ls()) workspace wipe (present twice) was
# removed — clearing the caller's global environment from a script is an
# anti-pattern and destroys results the user may still want to inspect.