shp파일등록방법변경

This commit is contained in:
2026-03-11 08:19:43 +09:00
parent 99d376f069
commit 54b82dc3f8
4 changed files with 54 additions and 23 deletions

View File

@@ -74,22 +74,22 @@ public class GeoServerRegistrationTasklet implements Tasklet {
long fileSizeMB = fileSize / 1024 / 1024;
log.info("ZIP file size: {} bytes ({} MB)", fileSize, fileSizeMB);
//
// if (fileSize < FILE_SIZE_THRESHOLD) {
// // Small file: Use REST API upload
// log.info("Using REST API upload method (file size < 100MB)");
// geoServerService.uploadShapefileZip(zipPath, layerName);
// } else {
// // Large file: Use file path reference
// log.info(
// "Using file path reference method (file size >= 100MB, {} MB recommended for large"
// + " files)",
// fileSizeMB);
// log.info(
// "GeoServer will read the file from: {} (ensure GeoServer has file system access)",
// zipPath);
// geoServerService.registerShapefileByPath(zipPath, layerName);
// }
//
// if (fileSize < FILE_SIZE_THRESHOLD) {
// // Small file: Use REST API upload
// log.info("Using REST API upload method (file size < 100MB)");
// geoServerService.uploadShapefileZip(zipPath, layerName);
// } else {
// // Large file: Use file path reference
// log.info(
// "Using file path reference method (file size >= 100MB, {} MB recommended for large"
// + " files)",
// fileSizeMB);
// log.info(
// "GeoServer will read the file from: {} (ensure GeoServer has file system access)",
// zipPath);
// geoServerService.registerShapefileByPath(zipPath, layerName);
// }
geoServerService.registerShapefileByPath(zipPath, layerName);
log.info("GeoServer registration completed successfully for layer: {}", layerName);

View File

@@ -201,14 +201,28 @@ public class GeoServerRegistrationService {
log.info("File size: {} MB", file.length() / 1024 / 1024);
// Convert .zip path to .shp path if needed
String shpFilePath = absoluteFilePath;
if (lowerPath.endsWith(".zip")) {
shpFilePath = absoluteFilePath.substring(0, absoluteFilePath.length() - 4) + ".shp";
File shpFile = new File(shpFilePath);
if (!shpFile.exists()) {
throw new IllegalArgumentException(
"Shapefile not found. Expected: "
+ shpFilePath
+ " (ZIP file was provided, but .shp file must exist in same directory)");
}
log.info("Converted ZIP path to SHP path: {}", shpFilePath);
}
// Check if layer exists and handle overwrite
if (properties.isOverwriteExisting() && layerExists(layerName)) {
log.info("Layer '{}' already exists. Deleting...", layerName);
deleteLayer(layerName);
}
// Construct file:// URL
String fileUrl = "file://" + absoluteFilePath;
// Construct file:// URL (must point to .shp file, not .zip)
String fileUrl = "file://" + shpFilePath;
log.info("Using file URL: {}", fileUrl);
// GeoServer REST API endpoint
@@ -232,7 +246,7 @@ public class GeoServerRegistrationService {
log.info("Shapefile registered successfully to GeoServer");
log.info(
"Layer '{}' is now available in workspace '{}'", layerName, properties.getWorkspace());
log.info("GeoServer will read data from: {}", absoluteFilePath);
log.info("GeoServer will read data from: {}", shpFilePath);
} else {
log.warn("Unexpected response status: {}", response.getStatusCode());
}
@@ -254,14 +268,18 @@ public class GeoServerRegistrationService {
log.error(" 3. File path format is incorrect (must be absolute path)");
log.error("");
log.error("Solutions:");
log.error(" 1. Verify GeoServer has file system access to: {}", absoluteFilePath);
log.error(" 1. Verify GeoServer has file system access to the .shp file");
log.error(" 2. Check file permissions (chmod 644 or similar)");
log.error(" 3. Ensure path is absolute and correctly formatted");
log.error(" 4. Ensure all shapefile components (.shp, .shx, .dbf, .prj) exist");
log.error("========================================");
log.error("");
}
throw new RuntimeException("GeoServer registration failed", e);
} catch (IllegalArgumentException e) {
log.error("Invalid file path: {}", e.getMessage());
throw e;
} catch (Exception e) {
log.error("Unexpected error during shapefile registration", e);
throw new RuntimeException("Shapefile registration failed", e);

View File

@@ -5,11 +5,18 @@ spring:
password: kamco_cds_Q!W@E#R$ # SECURITY(review): plaintext credential committed to VCS — rotate it and move to an env var / secret store
driver-class-name: org.postgresql.Driver
hikari:
maximum-pool-size: 10
maximum-pool-size: 20 # increased for batch processing
connection-timeout: 30000
idle-timeout: 600000
max-lifetime: 1800000
batch:
job:
enabled: false # jobs are launched explicitly from the CLI
jdbc:
initialize-schema: always # auto-create the batch metadata tables
table-prefix: BATCH_
converter:
inference-id: D5E46F60FC40B1A8BE0CD1F3547AA6
# Optional: omit or set empty to create merged shapefile for all batch-ids
@@ -21,6 +28,12 @@ converter:
output-base-dir: '/kamco-nfs/model_output/export/'
crs: 'EPSG:5186'
batch:
chunk-size: 5000 # chunk size increased (1000 → 5000, ~5x throughput)
skip-limit: 100 # allowed skip count per chunk
fetch-size: 5000 # JDBC cursor fetch size (kept equal to chunk-size)
enable-partitioning: false
geoserver:
base-url: 'https://kamco.geo-dev.gs.dabeeo.com/geoserver'
workspace: 'cd'

View File

@@ -29,9 +29,9 @@ converter:
crs: 'EPSG:5186'
batch:
chunk-size: 1000 # chunk size (memory ~40MB per chunk)
chunk-size: 5000 # chunk size (1000 → 5000, ~5x throughput, memory ~200MB per chunk)
skip-limit: 100 # allowed skip count per chunk
fetch-size: 1000 # JDBC cursor fetch size
fetch-size: 5000 # JDBC cursor fetch size (kept equal to chunk-size)
enable-partitioning: false # disabled initially
partition-concurrency: 4 # per-map-ID parallel concurrency (4 ≈ 300MB, 8 ≈ 600MB)