Enhance functionality and maintainability across multiple scripts
- Updated settings.local.json to include new read permissions for r_app directory. - Adjusted the harvest readiness condition in 80_utils_cane_supply.R to lower the imminent probability threshold. - Removed unused get_status_trigger function from 80_utils_common.R to streamline code. - Added total area analyzed feature in 90_CI_report_with_kpis_agronomic_support.Rmd, including area calculations in summary tables. - Updated translations_90.json to include new keys for total area analyzed and area label. - Created create_field_checklist.R and create_field_checklist.py scripts to generate Excel checklists from GeoJSON data, sorting fields by area and splitting assignments among team members.
This commit is contained in:
parent
003bb8255e
commit
711a005e52
|
|
@ -9,7 +9,8 @@
|
|||
"Bash(/c/Users/timon/AppData/Local/r-miniconda/python.exe -c \":*)",
|
||||
"Bash(python3 -c \":*)",
|
||||
"Bash(Rscript -e \":*)",
|
||||
"Bash(\"/c/Program Files/R/R-4.4.3/bin/x64/Rscript.exe\" -e \":*)"
|
||||
"Bash(\"/c/Program Files/R/R-4.4.3/bin/x64/Rscript.exe\" -e \":*)",
|
||||
"Read(//c/Users/timon/Documents/SmartCane_code/r_app/**)"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
|
|||
186
create_field_checklist.R
Normal file
186
create_field_checklist.R
Normal file
|
|
@ -0,0 +1,186 @@
|
|||
# create_field_checklist.R
# Builds an Excel checklist from pivot.geojson: fields sorted largest to
# smallest, split across Timon/Joey/Dimitra side-by-side on one sheet.

# Ensure required packages are available (installed on first use only).
for (pkg in c("jsonlite", "openxlsx")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg, repos = "https://cloud.r-project.org")
  }
}

library(jsonlite)
library(openxlsx)

# ---- Load GeoJSON ----
# simplifyVector = FALSE keeps the raw nested-list structure so coordinate
# rings remain lists of length-2 numeric vectors (lon, lat).
geojson_path <- "laravel_app/storage/app/angata/pivot.geojson"
gj <- fromJSON(geojson_path, simplifyVector = FALSE)
features <- gj$features
cat(sprintf("Total features: %d\n", length(features)))
|
||||
|
||||
# ---- Shoelace area (degrees²) ----
|
||||
#' Polygon area via the shoelace formula.
#'
#' @param ring List of length-2 numeric vectors `c(lon, lat)`. A GeoJSON ring
#'   that repeats its first vertex at the end is handled correctly (the
#'   duplicate edge contributes zero to the cross-product sum).
#' @return Absolute area in squared degrees; 0 for fewer than 3 vertices.
shoelace <- function(ring) {
  n <- length(ring)
  # Fewer than 3 vertices cannot enclose area; the original loop also
  # yielded 0 here, so this guard only short-circuits degenerate input.
  if (n < 3) return(0)
  # vapply over sapply: type-stable extraction of lon/lat components.
  lons <- vapply(ring, `[[`, numeric(1), 1)
  lats <- vapply(ring, `[[`, numeric(1), 2)
  # Vectorized cross-product sum; `nxt` wraps the last vertex back to 1,
  # replacing the original scalar for-loop with one whole-vector expression.
  nxt <- c(seq_len(n)[-1], 1L)
  abs(sum(lons * lats[nxt] - lons[nxt] * lats)) / 2
}
|
||||
|
||||
# ---- Approx area in m² ----
|
||||
# Convert a shoelace area in squared degrees to square metres using an
# equirectangular approximation at the ring's mean latitude: one degree of
# latitude spans ~R*pi/180 m and a degree of longitude shrinks by cos(lat).
# Adequate for field-sized polygons away from the poles.
area_m2 <- function(ring) {
  earth_radius <- 6371000
  ring_lats <- sapply(ring, `[[`, 2)
  lat_rad <- mean(ring_lats) * pi / 180
  deg_lat_m <- earth_radius * pi / 180
  deg_lon_m <- deg_lat_m * cos(lat_rad)
  shoelace(ring) * deg_lat_m * deg_lon_m
}
|
||||
|
||||
# ---- Compute feature areas ----
|
||||
# Total area (m^2) of a GeoJSON feature. Only outer rings (the first ring of
# each polygon) are summed; holes are ignored, and unknown geometry types
# contribute zero — matching the original accumulator behaviour.
compute_area <- function(feat) {
  geom <- feat$geometry
  if (geom$type == "MultiPolygon") {
    outer_rings <- lapply(geom$coordinates, `[[`, 1)
    sum(vapply(outer_rings, area_m2, numeric(1)))
  } else if (geom$type == "Polygon") {
    area_m2(geom$coordinates[[1]])
  } else {
    0
  }
}
|
||||
|
||||
# ---- Per-field table: name, area (ha), rank ----
field_names <- sapply(features, function(f) f$properties$field)
areas_m2 <- sapply(features, compute_area)
areas_ha <- areas_m2 / 10000  # 1 ha = 10,000 m^2

df <- data.frame(
  field = field_names,
  area_ha = round(areas_ha, 2),
  stringsAsFactors = FALSE
)

# Largest fields first, then attach a 1-based rank for display.
df <- df[order(df$area_ha, decreasing = TRUE), ]
df$rank <- seq_len(nrow(df))

# Quick sanity check on the console.
cat("\nTop 10 fields by area:\n")
print(head(df[, c("rank", "field", "area_ha")], 10))
|
||||
|
||||
# ---- Split: Timon=1st, Joey=2nd, Dimitra=3rd ----
# Round-robin assignment by rank so each person receives a similar mix of
# large and small fields.
idx <- seq_len(nrow(df))
timon <- df[idx %% 3 == 1, ]
joey <- df[idx %% 3 == 2, ]
dimitra <- df[idx %% 3 == 0, ]

cat(sprintf("\nSplit: Timon=%d, Joey=%d, Dimitra=%d\n",
            nrow(timon), nrow(joey), nrow(dimitra)))
|
||||
|
||||
# ---- Build Excel ----
wb <- createWorkbook()
addWorksheet(wb, "Field Checklist")

# Header fill and alternating-row fill colours, one pair per person.
col_timon <- "1F6AA5"
col_joey <- "2E7D32"
col_dimitra <- "7B1FA2"
alt_timon <- "D6E4F0"
alt_joey <- "D7F0D8"
alt_dimitra <- "EDD7F0"

# NOTE: the original also defined `header_font` and `sub_font` styles here,
# but they were never applied anywhere (write_person_block builds its own
# styles inline), so the dead definitions are removed.

# Title row spanning all three person blocks plus spacers (cols 1-14).
writeData(wb, "Field Checklist",
          "Angata Pivot Field Checklist — sorted largest to smallest",
          startRow = 1, startCol = 1)
mergeCells(wb, "Field Checklist", cols = 1:14, rows = 1)
addStyle(wb, "Field Checklist",
         createStyle(fontName = "Calibri", fontSize = 13, textDecoration = "bold",
                     halign = "center", valign = "center",
                     fgFill = "F0F0F0"),
         rows = 1, cols = 1)
setRowHeights(wb, "Field Checklist", rows = 1, heights = 28)
|
||||
|
||||
# Person block writer
|
||||
#' Write one person's checklist block (4 columns) into the worksheet.
#'
#' Layout: row 2 = merged person-name banner, row 3 = sub-headers,
#' rows 4+ = one field per row with alternating background fills.
#'
#' @param wb          openxlsx workbook object (modified in place).
#' @param ws_name     Worksheet name.
#' @param data        data.frame with columns `field` and `area_ha`.
#' @param start_col   First column of the 4-column block.
#' @param hdr_color   Header fill colour (hex string, no leading '#').
#' @param alt_color   Alternating-row fill colour (hex string, no '#').
#' @param person_name Label shown in the merged banner row.
write_person_block <- function(wb, ws_name, data, start_col, hdr_color, alt_color, person_name) {
  end_col <- start_col + 3

  # Person name banner (row 2).
  mergeCells(wb, ws_name, cols = start_col:end_col, rows = 2)
  writeData(wb, ws_name, person_name, startRow = 2, startCol = start_col)
  addStyle(wb, ws_name,
           createStyle(fontName = "Calibri", fontSize = 12, fontColour = "FFFFFF",
                       textDecoration = "bold", halign = "center", valign = "center",
                       fgFill = hdr_color, border = "TopBottomLeftRight",
                       borderColour = "999999"),
           rows = 2, cols = start_col:end_col)

  # Sub-headers (row 3).
  sub_headers <- c("#", "Field", "Area (ha)", "Checked \u2713")
  writeData(wb, ws_name, as.data.frame(t(sub_headers)),
            startRow = 3, startCol = start_col, colNames = FALSE)
  addStyle(wb, ws_name,
           createStyle(fontName = "Calibri", fontSize = 10, fontColour = "FFFFFF",
                       textDecoration = "bold", halign = "center", valign = "center",
                       fgFill = hdr_color, border = "TopBottomLeftRight",
                       borderColour = "999999"),
           rows = 3, cols = start_col:end_col)

  # Build each row style ONCE up front. The original re-created two identical
  # createStyle() objects on every loop iteration; hoisting them is free and
  # produces byte-identical output.
  make_styles <- function(bg) {
    list(
      num = createStyle(fontName = "Calibri", fontSize = 10, halign = "center",
                        fgFill = bg, border = "TopBottomLeftRight",
                        borderColour = "CCCCCC"),
      field = createStyle(fontName = "Calibri", fontSize = 10, halign = "left",
                          fgFill = bg, border = "TopBottomLeftRight",
                          borderColour = "CCCCCC")
    )
  }
  styles_alt <- make_styles(alt_color)
  styles_plain <- make_styles("FFFFFF")

  # Data rows start at worksheet row 4; even data rows get the alternate fill.
  for (i in seq_len(nrow(data))) {
    row_num <- i + 3
    sty <- if (i %% 2 == 0) styles_alt else styles_plain

    writeData(wb, ws_name, i, startRow = row_num, startCol = start_col)
    writeData(wb, ws_name, data$field[i], startRow = row_num, startCol = start_col + 1)
    writeData(wb, ws_name, data$area_ha[i], startRow = row_num, startCol = start_col + 2)
    writeData(wb, ws_name, "", startRow = row_num, startCol = start_col + 3)

    addStyle(wb, ws_name, sty$num, rows = row_num, cols = start_col)
    addStyle(wb, ws_name, sty$field, rows = row_num, cols = start_col + 1)
    addStyle(wb, ws_name, sty$num, rows = row_num, cols = start_col + 2)
    addStyle(wb, ws_name, sty$num, rows = row_num, cols = start_col + 3)
  }
}
|
||||
|
||||
# ---- Assemble the three person blocks ----
write_person_block(wb, "Field Checklist", timon, 1, col_timon, alt_timon, "Timon")
write_person_block(wb, "Field Checklist", joey, 6, col_joey, alt_joey, "Joey")
write_person_block(wb, "Field Checklist", dimitra, 11, col_dimitra, alt_dimitra, "Dimitra")

# Column widths: three 4-column person blocks, thin spacers at cols 5 and 10.
setColWidths(wb, "Field Checklist",
             cols = 1:14,
             widths = c(5, 14, 10, 12, 2, 5, 14, 10, 12, 2, 5, 14, 10, 12))

# Row heights: banner + sub-header, then a uniform height sized to the
# longest of the three assignment lists.
setRowHeights(wb, "Field Checklist", rows = 2:3, heights = c(22, 18))
max_rows <- max(nrow(timon), nrow(joey), nrow(dimitra))
setRowHeights(wb, "Field Checklist", rows = 4:(max_rows + 3), heights = 16)

# Keep title and headers visible while scrolling.
freezePane(wb, "Field Checklist", firstActiveRow = 4)

# ---- Save ----
out_path <- "angata_field_checklist.xlsx"
saveWorkbook(wb, out_path, overwrite = TRUE)
cat(sprintf("\nExcel saved to: %s\n", out_path))
cat(sprintf("Total: %d fields — Timon: %d, Joey: %d, Dimitra: %d\n",
            nrow(df), nrow(timon), nrow(joey), nrow(dimitra)))
|
||||
194
create_field_checklist.py
Normal file
194
create_field_checklist.py
Normal file
|
|
@ -0,0 +1,194 @@
|
|||
"""
|
||||
Creates an Excel checklist from pivot.geojson, with fields sorted largest to smallest,
|
||||
split across Timon (1st), Joey (2nd), Dimitra (3rd) in a single sheet side-by-side.
|
||||
"""
|
||||
import json
|
||||
import math
|
||||
import os
|
||||
|
||||
try:
|
||||
import openpyxl
|
||||
from openpyxl.styles import Font, PatternFill, Alignment, Border, Side
|
||||
from openpyxl.utils import get_column_letter
|
||||
except ImportError:
|
||||
print("Installing openpyxl...")
|
||||
import subprocess, sys
|
||||
subprocess.check_call([sys.executable, "-m", "pip", "install", "openpyxl"])
|
||||
import openpyxl
|
||||
from openpyxl.styles import Font, PatternFill, Alignment, Border, Side
|
||||
from openpyxl.utils import get_column_letter
|
||||
|
||||
|
||||
def shoelace_area(coords):
    """Area in square degrees of a [lon, lat] ring via the shoelace formula.

    A ring whose first vertex is repeated at the end is handled correctly,
    since the duplicate edge contributes zero to the cross-product sum.
    Returns 0.0 for an empty ring.
    """
    total = 0.0
    count = len(coords)
    for idx, (x1, y1) in enumerate(coords):
        x2, y2 = coords[(idx + 1) % count]
        total += x1 * y2 - x2 * y1
    return abs(total) / 2.0
|
||||
|
||||
|
||||
def polygon_area_m2(ring):
    """Approximate area of a [lon, lat] ring in square metres.

    Scales the shoelace area (square degrees) with an equirectangular
    approximation at the ring's mean latitude: one degree of latitude spans
    ~R*pi/180 metres and one degree of longitude shrinks by cos(latitude).
    Adequate for field-sized polygons away from the poles.
    """
    earth_radius_m = 6371000  # mean Earth radius, metres
    mean_lat = sum(pt[1] for pt in ring) / len(ring)
    lat_rad = math.radians(mean_lat)
    metres_per_deg_lat = earth_radius_m * math.pi / 180
    metres_per_deg_lon = metres_per_deg_lat * math.cos(lat_rad)
    return shoelace_area(ring) * metres_per_deg_lat * metres_per_deg_lon
|
||||
|
||||
|
||||
def feature_area(feature):
    """Total area in m^2 of a Polygon or MultiPolygon GeoJSON feature.

    Only outer rings (the first ring of each polygon) are counted; holes
    are ignored. Unknown geometry types contribute zero.
    """
    geom = feature["geometry"]
    gtype = geom["type"]
    if gtype == "MultiPolygon":
        return sum((polygon_area_m2(poly[0]) for poly in geom["coordinates"]), 0.0)
    if gtype == "Polygon":
        return polygon_area_m2(geom["coordinates"][0])
    return 0.0
|
||||
|
||||
|
||||
# ---- Load GeoJSON ----
geojson_path = r"C:\Users\timon\Documents\SmartCane_code\laravel_app\storage\app\angata\pivot.geojson"
with open(geojson_path, "r", encoding="utf-8") as f:
    gj = json.load(f)

features = gj["features"]
print(f"Total features: {len(features)}")

# ---- Compute per-field areas and sort largest-first ----
fields = []
for feat in features:
    field_name = feat["properties"].get("field", "?")
    area = feature_area(feat)
    fields.append({"field": field_name, "area_m2": area, "area_ha": area / 10000})

fields.sort(key=lambda x: x["area_m2"], reverse=True)

# Quick sanity check on the console.
print("\nTop 10 fields by area:")
for i, f in enumerate(fields[:10]):
    print(f" {i+1:3d}. Field {f['field']:15s} {f['area_ha']:.2f} ha")

# Round-robin split so everyone gets a similar mix of large/small fields:
# index 0,3,6,... -> Timon; 1,4,7,... -> Joey; 2,5,8,... -> Dimitra
timon = [f for i, f in enumerate(fields) if i % 3 == 0]
joey = [f for i, f in enumerate(fields) if i % 3 == 1]
dimitra = [f for i, f in enumerate(fields) if i % 3 == 2]

print(f"\nSplit: Timon={len(timon)}, Joey={len(joey)}, Dimitra={len(dimitra)}")
|
||||
|
||||
# ---- Workbook scaffolding ----
wb = openpyxl.Workbook()
ws = wb.active
ws.title = "Field Checklist"

# Per-person palette: header fill + alternating row fill.
colors = {
    "timon": {"header_bg": "1F6AA5", "alt_bg": "D6E4F0"},
    "joey": {"header_bg": "2E7D32", "alt_bg": "D7F0D8"},
    "dimitra": {"header_bg": "7B1FA2", "alt_bg": "EDD7F0"},
}

# Shared fonts / alignment / borders.
white_font = Font(name="Calibri", bold=True, color="FFFFFF", size=11)
black_font = Font(name="Calibri", size=10)
bold_black = Font(name="Calibri", bold=True, size=10)
center_align = Alignment(horizontal="center", vertical="center", wrap_text=True)

thin = Side(style="thin", color="CCCCCC")
border = Border(left=thin, right=thin, top=thin, bottom=thin)

# Layout: cols 1-4 = Timon, 5 = spacer, 6-9 = Joey, 10 = spacer, 11-14 = Dimitra.
# Each tuple: (display name, assigned fields, first column of the block).
persons = [
    ("Timon", timon, 1),
    ("Joey", joey, 6),
    ("Dimitra", dimitra, 11),
]
|
||||
|
||||
# Header row heights (everything shifts down one row when the title row is
# inserted later in the script).
ws.row_dimensions[1].height = 25
ws.row_dimensions[2].height = 20

for name, data, start_col in persons:
    c = colors[name.lower()]
    end_col = start_col + 3

    # Row 1: merged person-name banner.
    ws.merge_cells(
        start_row=1, start_column=start_col,
        end_row=1, end_column=end_col
    )
    cell = ws.cell(row=1, column=start_col, value=name)
    cell.font = white_font
    cell.fill = PatternFill("solid", fgColor=c["header_bg"])
    cell.alignment = center_align
    cell.border = border

    # Row 2: column sub-headers.
    for i, hdr in enumerate(["#", "Field", "Area (ha)", "Checked ✓"]):
        cell = ws.cell(row=2, column=start_col + i, value=hdr)
        cell.font = white_font
        cell.fill = PatternFill("solid", fgColor=c["header_bg"])
        cell.alignment = center_align
        cell.border = border

    # Rows 3+: one field per row; odd data indices get the alternate fill.
    for row_i, field in enumerate(data):
        row_num = row_i + 3
        ws.row_dimensions[row_num].height = 16
        bg = c["alt_bg"] if row_i % 2 == 1 else "FFFFFF"
        fill = PatternFill("solid", fgColor=bg)

        row_values = [row_i + 1, field["field"], round(field["area_ha"], 2), ""]
        for col_i, val in enumerate(row_values):
            cell = ws.cell(row=row_num, column=start_col + col_i, value=val)
            cell.font = black_font
            cell.fill = fill
            cell.border = border
            # Field-name column (offset 1) is left-aligned; the rest centred.
            cell.alignment = Alignment(horizontal="center" if col_i != 1 else "left",
                                       vertical="center")
|
||||
|
||||
# ---- Column widths (cols 5 and 10 are thin spacers) ----
col_widths = {1: 5, 2: 14, 3: 10, 4: 12,   # Timon
              5: 2,                         # spacer
              6: 5, 7: 14, 8: 10, 9: 12,    # Joey
              10: 2,                        # spacer
              11: 5, 12: 14, 13: 10, 14: 12}  # Dimitra
for col, width in col_widths.items():
    ws.column_dimensions[get_column_letter(col)].width = width

# Freeze below the sub-header row (superseded after the title insert below).
ws.freeze_panes = "A3"

# Insert a title row above everything, shifting all content down one row.
ws.insert_rows(1)
ws.merge_cells("A1:N1")
title_cell = ws.cell(row=1, column=1, value="Angata Pivot Field Checklist — sorted largest to smallest")
title_cell.font = Font(name="Calibri", bold=True, size=13, color="1A1A1A")
title_cell.alignment = Alignment(horizontal="center", vertical="center")
title_cell.fill = PatternFill("solid", fgColor="F0F0F0")
ws.row_dimensions[1].height = 28

# Re-freeze now that the headers have moved down one row.
ws.freeze_panes = "A4"

# ---- Save and report ----
out_path = r"C:\Users\timon\Documents\SmartCane_code\angata_field_checklist.xlsx"
wb.save(out_path)
print(f"\nExcel saved to: {out_path}")
print(f"Total fields: {len(fields)}")
print(f" Timon: {len(timon)} fields")
print(f" Joey: {len(joey)} fields")
print(f" Dimitra: {len(dimitra)} fields")
|
||||
|
|
@ -170,7 +170,7 @@ calculate_status_alert <- function(imminent_prob, age_week, mean_ci,
|
|||
|
||||
# Priority 1: HARVEST READY - highest business priority
|
||||
# Field is mature (≥12 months) AND harvest model predicts imminent harvest
|
||||
if (!is.na(imminent_prob) && imminent_prob > 0.5 && !is.na(age_week) && age_week >= 52) {
|
||||
if (!is.na(imminent_prob) && imminent_prob > 0.3 && !is.na(age_week) && age_week >= 52) {
|
||||
return("harvest_ready")
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -650,48 +650,6 @@ get_phase_by_age <- function(age_weeks) {
|
|||
return("Unknown")
|
||||
}
|
||||
|
||||
#' Get status trigger based on CI values and field age
|
||||
get_status_trigger <- function(ci_values, ci_change, age_weeks) {
|
||||
if (is.na(age_weeks) || length(ci_values) == 0) return(NA_character_)
|
||||
|
||||
ci_values <- ci_values[!is.na(ci_values)]
|
||||
if (length(ci_values) == 0) return(NA_character_)
|
||||
|
||||
pct_above_2 <- sum(ci_values > 2) / length(ci_values) * 100
|
||||
pct_at_or_above_2 <- sum(ci_values >= 2) / length(ci_values) * 100
|
||||
ci_cv <- if (mean(ci_values, na.rm = TRUE) > 0) sd(ci_values) / mean(ci_values, na.rm = TRUE) else 0
|
||||
mean_ci <- mean(ci_values, na.rm = TRUE)
|
||||
|
||||
if (age_weeks >= 0 && age_weeks <= 6) {
|
||||
if (pct_at_or_above_2 >= 70) {
|
||||
return("germination_complete")
|
||||
} else if (pct_above_2 > 10) {
|
||||
return("germination_started")
|
||||
}
|
||||
}
|
||||
|
||||
if (age_weeks >= 45) {
|
||||
return("harvest_ready")
|
||||
}
|
||||
|
||||
if (age_weeks > 6 && !is.na(ci_change) && ci_change < -1.5 && ci_cv < 0.25) {
|
||||
return("stress_detected_whole_field")
|
||||
}
|
||||
|
||||
if (age_weeks > 6 && !is.na(ci_change) && ci_change > 1.5) {
|
||||
return("strong_recovery")
|
||||
}
|
||||
|
||||
if (age_weeks >= 4 && age_weeks < 39 && !is.na(ci_change) && ci_change > 0.2) {
|
||||
return("growth_on_track")
|
||||
}
|
||||
|
||||
if (age_weeks >= 39 && age_weeks < 45 && mean_ci > 3.5) {
|
||||
return("maturation_progressing")
|
||||
}
|
||||
|
||||
return(NA_character_)
|
||||
}
|
||||
|
||||
#' Extract planting dates from harvesting data
|
||||
extract_planting_dates <- function(harvesting_data, field_boundaries_sf = NULL) {
|
||||
|
|
|
|||
|
|
@ -590,7 +590,17 @@ if (exists("summary_tables") && !is.null(summary_tables) && length(summary_table
|
|||
if (!is.na(total_fields)) {
|
||||
cat("\n\n", tr_key("tot_fields_analyzed"))
|
||||
}
|
||||
|
||||
|
||||
# Total area analyzed
|
||||
if (exists("field_details_table") && !is.null(field_details_table)) {
|
||||
area_col_name <- paste0("Area_", get_area_unit_label(AREA_UNIT_PREFERENCE))
|
||||
unit_label <- get_area_unit_label(AREA_UNIT_PREFERENCE)
|
||||
if (area_col_name %in% names(field_details_table)) {
|
||||
total_area <- sum(field_details_table[[area_col_name]], na.rm = TRUE)
|
||||
cat("\n\n", tr_key("tot_area_analyzed"))
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
cat(tr_key("kpi_na"))
|
||||
}
|
||||
|
|
@ -609,15 +619,89 @@ if (exists("summary_tables") && !is.null(summary_tables) && length(summary_table
|
|||
tryCatch({
|
||||
# KPI metadata for display
|
||||
kpi_display_order <- list(
|
||||
uniformity = list(display = "Field Uniformity", level_col = "interpretation", count_col = "field_count"),
|
||||
area_change = list(display = "Area Change", level_col = "interpretation", count_col = "field_count"),
|
||||
growth_decline = list(display = "4-Week Trend", level_col = "trend_interpretation", count_col = "field_count"),
|
||||
patchiness = list(display = "Field Patchiness", level_col = "gini_category", count_col = "field_count", detail_col = "patchiness_risk"),
|
||||
tch_forecast = list(display = "TCH Forecasted", level_col = "tch_category", detail_col = "range", count_col = "field_count"),
|
||||
gap_filling = list(display = "Gaps", level_col = "gap_level", count_col = "field_count")
|
||||
uniformity = list(display = "Field Uniformity", level_col = "interpretation", count_col = "field_count", area_col = "area_sum"),
|
||||
area_change = list(display = "Area Change", level_col = "interpretation", count_col = "field_count", area_col = "area_sum"),
|
||||
growth_decline = list(display = "4-Week Trend", level_col = "trend_interpretation", count_col = "field_count", area_col = "area_sum"),
|
||||
patchiness = list(display = "Field Patchiness", level_col = "gini_category", count_col = "field_count", detail_col = "patchiness_risk", area_col = "area_sum"),
|
||||
tch_forecast = list(display = "TCH Forecasted", level_col = "tch_category", count_col = "field_count", detail_col = "range", area_col = "area_sum"),
|
||||
gap_filling = list(display = "Gaps", level_col = "gap_level", count_col = "field_count", area_col = "area_sum")
|
||||
)
|
||||
|
||||
standardize_kpi <- function(df, level_col, count_col, detail_col = NULL) {
|
||||
# Enrich summary_tables with area_sum from field_details_table (mirrors script 91 pattern)
|
||||
area_col_name <- paste0("Area_", get_area_unit_label(AREA_UNIT_PREFERENCE))
|
||||
unit_label <- get_area_unit_label(AREA_UNIT_PREFERENCE)
|
||||
has_area <- exists("field_details_table") && !is.null(field_details_table) &&
|
||||
area_col_name %in% names(field_details_table)
|
||||
if (has_area) {
|
||||
fdt <- field_details_table
|
||||
if (!is.null(summary_tables$uniformity) && "Uniformity_Category" %in% names(fdt)) {
|
||||
summary_tables$uniformity <- summary_tables$uniformity %>%
|
||||
left_join(fdt %>% group_by(interpretation = Uniformity_Category) %>%
|
||||
summarise(area_sum = sum(.data[[area_col_name]], na.rm = TRUE), .groups = "drop"),
|
||||
by = "interpretation")
|
||||
}
|
||||
if (!is.null(summary_tables$area_change) && "Area_Change_Interpretation" %in% names(fdt)) {
|
||||
summary_tables$area_change <- summary_tables$area_change %>%
|
||||
left_join(fdt %>% group_by(interpretation = Area_Change_Interpretation) %>%
|
||||
summarise(area_sum = sum(.data[[area_col_name]], na.rm = TRUE), .groups = "drop"),
|
||||
by = "interpretation")
|
||||
}
|
||||
if (!is.null(summary_tables$growth_decline) && "Trend_Interpretation" %in% names(fdt)) {
|
||||
summary_tables$growth_decline <- summary_tables$growth_decline %>%
|
||||
left_join(fdt %>% group_by(trend_interpretation = Trend_Interpretation) %>%
|
||||
summarise(area_sum = sum(.data[[area_col_name]], na.rm = TRUE), .groups = "drop"),
|
||||
by = "trend_interpretation")
|
||||
}
|
||||
if (!is.null(summary_tables$patchiness) && all(c("Patchiness_Risk", "Gini_Coefficient") %in% names(fdt))) {
|
||||
summary_tables$patchiness <- summary_tables$patchiness %>%
|
||||
left_join(
|
||||
fdt %>%
|
||||
mutate(gini_category = case_when(
|
||||
Gini_Coefficient < 0.2 ~ "Uniform (Gini<0.2)",
|
||||
Gini_Coefficient < 0.4 ~ "Moderate (Gini 0.2-0.4)",
|
||||
TRUE ~ "High (Gini≥0.4)"
|
||||
)) %>%
|
||||
group_by(gini_category, patchiness_risk = Patchiness_Risk) %>%
|
||||
summarise(area_sum = sum(.data[[area_col_name]], na.rm = TRUE), .groups = "drop"),
|
||||
by = c("gini_category", "patchiness_risk")
|
||||
)
|
||||
}
|
||||
if (!is.null(summary_tables$gap_filling) && "Gap_Level" %in% names(fdt)) {
|
||||
summary_tables$gap_filling <- summary_tables$gap_filling %>%
|
||||
left_join(fdt %>% group_by(gap_level = Gap_Level) %>%
|
||||
summarise(area_sum = sum(.data[[area_col_name]], na.rm = TRUE), .groups = "drop"),
|
||||
by = "gap_level")
|
||||
}
|
||||
# TCH forecast: reproduce the same quartile logic used when building summary_tables so we
|
||||
# can assign each field to a tch_category and sum its area
|
||||
if (!is.null(summary_tables$tch_forecast) && "TCH_Forecasted" %in% names(fdt)) {
|
||||
tch_vals <- fdt %>% dplyr::filter(!is.na(TCH_Forecasted)) %>% dplyr::pull(TCH_Forecasted)
|
||||
if (length(tch_vals) > 0) {
|
||||
if (length(unique(tch_vals)) == 1) {
|
||||
area_tch <- fdt %>%
|
||||
dplyr::filter(!is.na(TCH_Forecasted)) %>%
|
||||
dplyr::summarise(area_sum = sum(.data[[area_col_name]], na.rm = TRUE)) %>%
|
||||
dplyr::mutate(tch_category = "All equal")
|
||||
} else {
|
||||
q25 <- quantile(tch_vals, 0.25, na.rm = TRUE)
|
||||
q75 <- quantile(tch_vals, 0.75, na.rm = TRUE)
|
||||
area_tch <- fdt %>%
|
||||
dplyr::filter(!is.na(TCH_Forecasted)) %>%
|
||||
dplyr::mutate(tch_category = dplyr::case_when(
|
||||
TCH_Forecasted >= q75 ~ "Top 25%",
|
||||
TCH_Forecasted >= q25 ~ "Middle 50%",
|
||||
TRUE ~ "Bottom 25%"
|
||||
)) %>%
|
||||
dplyr::group_by(tch_category) %>%
|
||||
dplyr::summarise(area_sum = sum(.data[[area_col_name]], na.rm = TRUE), .groups = "drop")
|
||||
}
|
||||
summary_tables$tch_forecast <- summary_tables$tch_forecast %>%
|
||||
left_join(area_tch, by = "tch_category")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
standardize_kpi <- function(df, level_col, count_col, detail_col = NULL, area_col = NULL) {
|
||||
if (is.null(level_col) || !(level_col %in% names(df)) || is.null(count_col) || !(count_col %in% names(df))) {
|
||||
return(NULL)
|
||||
}
|
||||
|
|
@ -631,10 +715,16 @@ if (exists("summary_tables") && !is.null(summary_tables) && length(summary_table
|
|||
display_level <- df[[level_col]]
|
||||
}
|
||||
|
||||
area_vals <- if (!is.null(area_col) && area_col %in% names(df))
|
||||
round(df[[area_col]], 1)
|
||||
else
|
||||
rep(NA_real_, nrow(df))
|
||||
|
||||
df %>%
|
||||
dplyr::transmute(
|
||||
Level = if (level_col == "trend_interpretation") map_trend_to_arrow(display_level, include_text = TRUE) else as.character(display_level),
|
||||
Count = as.integer(round(as.numeric(.data[[count_col]]))),
|
||||
Area = area_vals,
|
||||
Percent = if (is.na(total)) {
|
||||
NA_real_
|
||||
} else {
|
||||
|
|
@ -652,9 +742,10 @@ if (exists("summary_tables") && !is.null(summary_tables) && length(summary_table
|
|||
kpi_df <- summary_tables[[kpi_key]]
|
||||
if (is.null(kpi_df) || !is.data.frame(kpi_df) || nrow(kpi_df) == 0) next
|
||||
|
||||
# Pass detail_col if it exists in config
|
||||
# Pass detail_col and area_col if present in config
|
||||
detail_col <- if (!is.null(config$detail_col)) config$detail_col else NULL
|
||||
kpi_rows <- standardize_kpi(kpi_df, config$level_col, config$count_col, detail_col)
|
||||
kpi_rows <- standardize_kpi(kpi_df, config$level_col, config$count_col, detail_col,
|
||||
config[["area_col"]])
|
||||
|
||||
if (!is.null(kpi_rows)) {
|
||||
kpi_rows$KPI <- config$display
|
||||
|
|
@ -676,10 +767,14 @@ if (exists("summary_tables") && !is.null(summary_tables) && length(summary_table
|
|||
kpi_group_sizes <- rle(combined_df$KPI_group)$lengths
|
||||
|
||||
display_df <- combined_df %>%
|
||||
dplyr::select(KPI = KPI_display, Level, Count, Percent)
|
||||
dplyr::select(KPI = KPI_display, Level, Count, Area, Percent)
|
||||
|
||||
# Translate the table for visualization
|
||||
names(display_df) <- c(tr_key("KPI"), tr_key("Level"), tr_key("Count"), tr_key("Percent"))
|
||||
names(display_df) <- c(
|
||||
tr_key("KPI"), tr_key("Level"), tr_key("Count"),
|
||||
paste0(tr_key("Area"), " (", unit_label, ")"),
|
||||
tr_key("Percent")
|
||||
)
|
||||
display_df[, 1:2] <- lapply(display_df[, 1:2], function(col) sapply(col, tr_key))
|
||||
ft <- flextable(display_df) %>%
|
||||
merge_v(j = tr_key("KPI")) %>%
|
||||
|
|
@ -1179,11 +1274,15 @@ tryCatch({
|
|||
minus_2_ww <- get_week_year(as.Date(today) - lubridate::weeks(2))
|
||||
minus_3_ww <- get_week_year(as.Date(today) - lubridate::weeks(3))
|
||||
|
||||
message(paste("Processing", nrow(AllPivots_merged), "fields for weeks:",
|
||||
message(paste("Processing", nrow(AllPivots_merged), "fields for weeks:",
|
||||
current_ww$week, minus_1_ww$week, minus_2_ww$week, minus_3_ww$week))
|
||||
|
||||
|
||||
# load_per_field_mosaic() is defined in 90_report_utils.R (sourced above)
|
||||
|
||||
# Pre-compute area column name once (not in scope inside purrr::walk closure)
|
||||
area_col_name <- paste0("Area_", get_area_unit_label(AREA_UNIT_PREFERENCE))
|
||||
unit_label <- get_area_unit_label(AREA_UNIT_PREFERENCE)
|
||||
|
||||
# Iterate through fields using purrr::walk
|
||||
purrr::walk(AllPivots_merged$field, function(field_name) {
|
||||
tryCatch({
|
||||
|
|
@ -1275,6 +1374,14 @@ tryCatch({
|
|||
sprintf("**%s:** %.2f", tr_key("cv_value"), field_kpi$CV),
|
||||
sprintf("**%s:** %.2f", tr_key("mean_ci"), field_kpi$Mean_CI)
|
||||
)
|
||||
|
||||
# Prepend area as first item (static field attribute)
|
||||
if (area_col_name %in% names(field_kpi) && !is.na(field_kpi[[area_col_name]])) {
|
||||
kpi_parts <- c(
|
||||
sprintf("**%s:** %.1f %s", tr_key("Area"), field_kpi[[area_col_name]], unit_label),
|
||||
kpi_parts
|
||||
)
|
||||
}
|
||||
|
||||
# Add Weekly_CI_Change if available (note: capital C and I)
|
||||
if (!is.null(field_kpi$Weekly_CI_Change) && !is.na(field_kpi$Weekly_CI_Change)) {
|
||||
|
|
|
|||
|
|
@ -544,9 +544,11 @@ if (exists("summary_data") && !is.null(summary_data) && "field_analysis" %in% na
|
|||
tryCatch({
|
||||
# Use per-field field_analysis data from RDS (already loaded in load_kpi_data chunk)
|
||||
if (exists("summary_data") && !is.null(summary_data) && "field_analysis" %in% names(summary_data)) {
|
||||
analysis_data <- summary_data$field_analysis %>%
|
||||
select(Field_id, Status_Alert) %>%
|
||||
rename(Status_trigger = Status_Alert) # Rename to Status_trigger for compatibility with hexbin logic
|
||||
fa <- summary_data$field_analysis
|
||||
status_col <- intersect(c("Status_Alert", "Status_trigger"), names(fa))[1]
|
||||
analysis_data <- fa %>%
|
||||
select(Field_id, all_of(status_col)) %>%
|
||||
rename(Status_trigger = all_of(status_col))
|
||||
} else {
|
||||
analysis_data <- tibble(Field_id = character(), Status_trigger = character())
|
||||
}
|
||||
|
|
@ -619,10 +621,9 @@ tryCatch({
|
|||
filter(Status_trigger != "harvest_ready" | is.na(Status_trigger))
|
||||
|
||||
# Generate breaks for color gradients
|
||||
breaks_vec <- c(0, 5, 10, 15, 20, 30, 35)
|
||||
breaks_vec <- c(5, 10, 15, 20, 30, 35)
|
||||
labels_vec <- as.character(breaks_vec)
|
||||
labels_vec[length(labels_vec)] <- ">30"
|
||||
labels_vec[1] <- "0.1"
|
||||
|
||||
# Calculate data bounds for coordinate limits (prevents basemap scale conflicts)
|
||||
# Use actual data bounds without dummy points to avoid column mismatch
|
||||
|
|
@ -635,36 +636,45 @@ tryCatch({
|
|||
ceiling(max(points_processed$Y, na.rm = TRUE) * 20) / 20
|
||||
)
|
||||
|
||||
# ggplot2's StatBinhex computes the hex grid origin as c(min(x), min(y)) of each layer's
|
||||
# own data. Feeding different subsets (points_ready / points_not_ready) to the two layers
|
||||
# gives different origins → misaligned hexes. Fix: pass points_processed to BOTH layers.
|
||||
# For the coloured layer, use ready_area = area_value for harvest-ready fields, 0 elsewhere.
|
||||
# The scale censors 0 → NA → transparent, so non-ready hexes are invisible in that layer.
|
||||
points_processed <- points_processed %>%
|
||||
mutate(ready_area = ifelse(Status_trigger == "harvest_ready", area_value, 0))
|
||||
|
||||
# Create hexbin map with enhanced aesthetics, basemap, and proper legend
|
||||
ggplot() +
|
||||
# OpenStreetMap basemap (zoom=11 appropriate for agricultural fields)
|
||||
ggspatial::annotation_map_tile(type = "osm", zoom = 11, progress = "none", alpha = 0.5) +
|
||||
# Hexbin for NOT ready fields (light background)
|
||||
# Hexbin background: ALL fields as white hexes (anchors the hex grid to the full extent)
|
||||
geom_hex(
|
||||
data = points_not_ready,
|
||||
aes(x = X, y = Y, weight = area_value, alpha = tr_key("hexbin_not_ready")),
|
||||
data = points_processed,
|
||||
aes(x = X, y = Y, weight = area_value, alpha = tr_key("hexbin_not_ready")),
|
||||
binwidth = c(0.012, 0.012),
|
||||
fill = "#ffffff",
|
||||
colour = "#0000009a",
|
||||
linewidth = 0.1
|
||||
) +
|
||||
# Hexbin for READY fields (colored gradient)
|
||||
# Hexbin overlay: same grid (same data), coloured only where ready_area > 0
|
||||
geom_hex(
|
||||
data = points_ready,
|
||||
aes(x = X, y = Y, weight = area_value),
|
||||
data = points_processed,
|
||||
aes(x = X, y = Y, weight = ready_area),
|
||||
binwidth = c(0.012, 0.012),
|
||||
alpha = 0.9,
|
||||
colour = "#0000009a",
|
||||
linewidth = 0.1
|
||||
) +
|
||||
# Color gradient scale for area
|
||||
# Color gradient scale — values of 0 (non-ready hexes) are censored → NA → transparent
|
||||
scale_fill_viridis_b(
|
||||
option = "viridis",
|
||||
direction = -1,
|
||||
breaks = breaks_vec,
|
||||
labels = labels_vec,
|
||||
limits = c(0, 35),
|
||||
oob = scales::squish,
|
||||
limits = c(0.001, 35),
|
||||
oob = scales::oob_censor,
|
||||
na.value = "transparent",
|
||||
name = tr_key("hexbin_legend_acres")
|
||||
) +
|
||||
# Alpha scale for "not ready" status indication
|
||||
|
|
@ -722,7 +732,10 @@ if (exists("summary_data") && !is.null(summary_data) && "field_analysis" %in% na
|
|||
if (is.null(summary_data$field_analysis_summary) || !("field_analysis_summary" %in% names(summary_data)) ||
|
||||
!is.data.frame(summary_data$field_analysis_summary)) {
|
||||
|
||||
# Create summary by aggregating by Status_Alert and Phase categories
|
||||
# Detect which status column is present (Status_Alert from cane_supply, Status_trigger from others)
|
||||
status_col <- intersect(c("Status_Alert", "Status_trigger"), names(field_analysis_df))[1]
|
||||
|
||||
# Create summary by aggregating by status and Phase categories
|
||||
phase_summary <- field_analysis_df %>%
|
||||
filter(!is.na(Phase)) %>%
|
||||
group_by(Phase) %>%
|
||||
|
|
@ -736,21 +749,21 @@ if (exists("summary_data") && !is.null(summary_data) && "field_analysis" %in% na
|
|||
|
||||
# Create Status trigger summary - includes both active alerts and "No active triggers"
|
||||
trigger_summary <- tryCatch({
|
||||
# Active alerts (fields with non-NA Status_Alert)
|
||||
# Active alerts (fields with non-NA status)
|
||||
active_alerts <- field_analysis_df %>%
|
||||
filter(!is.na(Status_Alert), Status_Alert != "") %>%
|
||||
group_by(Status_Alert) %>%
|
||||
filter(!is.na(.data[[status_col]]), .data[[status_col]] != "") %>%
|
||||
group_by(across(all_of(status_col))) %>%
|
||||
summarise(
|
||||
Acreage = sum(Acreage, na.rm = TRUE),
|
||||
Field_count = n_distinct(Field_id),
|
||||
.groups = "drop"
|
||||
) %>%
|
||||
mutate(Category = Status_Alert) %>%
|
||||
mutate(Category = .data[[status_col]]) %>%
|
||||
select(Category, Acreage, Field_count)
|
||||
|
||||
# No active triggers (fields with NA Status_Alert)
|
||||
|
||||
# No active triggers (fields with NA status)
|
||||
no_alerts <- field_analysis_df %>%
|
||||
filter(is.na(Status_Alert) | Status_Alert == "") %>%
|
||||
filter(is.na(.data[[status_col]]) | .data[[status_col]] == "") %>%
|
||||
summarise(
|
||||
Acreage = sum(Acreage, na.rm = TRUE),
|
||||
Field_count = n_distinct(Field_id),
|
||||
|
|
|
|||
|
|
@ -456,8 +456,8 @@ rmarkdown::render(
|
|||
# rmarkdown::render(
|
||||
rmarkdown::render(
|
||||
"r_app/91_CI_report_with_kpis_cane_supply.Rmd",
|
||||
params = list(data_dir = "angata", report_date = as.Date("2026-02-23")),
|
||||
output_file = "SmartCane_Report_cane_supply_angata_2026-02-23_en.docx",
|
||||
params = list(data_dir = "angata", report_date = as.Date("2026-03-17")),
|
||||
output_file = "SmartCane_Report_cane_supply_angata_2026-03-17_en.docx",
|
||||
output_dir = "laravel_app/storage/app/angata/reports"
|
||||
)
|
||||
#
|
||||
|
|
|
|||
|
|
@ -512,6 +512,16 @@
|
|||
"es-mx": "**Total de parcelas analizadas:** {total_fields}",
|
||||
"pt-br": "**Total de campos analisados:** {total_fields}"
|
||||
},
|
||||
"tot_area_analyzed": {
|
||||
"en": "**Total Area Analyzed:** {round(total_area, 1)} {unit_label}",
|
||||
"es-mx": "**Área total analizada:** {round(total_area, 1)} {unit_label}",
|
||||
"pt-br": "**Área total analisada:** {round(total_area, 1)} {unit_label}"
|
||||
},
|
||||
"Area": {
|
||||
"en": "Area",
|
||||
"es-mx": "Área",
|
||||
"pt-br": "Área"
|
||||
},
|
||||
"Medium": {
|
||||
"en": "Medium",
|
||||
"es-mx": "Medio",
|
||||
|
|
|
|||
Loading…
Reference in a new issue