2026-02-02 12:29:00 +09:00
parent f8f5cef6e1
commit 495ef7d86c
175 changed files with 45128 additions and 0 deletions


@@ -0,0 +1,51 @@
spring:
config:
activate:
on-profile: dev
jpa:
show-sql: true
hibernate:
ddl-auto: validate
properties:
hibernate:
        default_batch_fetch_size: 100 # ✅ Performance - prevents N+1 queries
        order_updates: true # ✅ Performance - orders updates to reduce deadlocks
        use_sql_comments: true # ⚠️ Optional - adds comments to the SQL (for debugging)
        format_sql: true # ⚠️ Optional - formats the SQL (readability)
datasource:
url: jdbc:postgresql://192.168.2.127:15432/kamco_training_db
# url: jdbc:postgresql://localhost:15432/kamco_training_db
username: kamco_training_user
password: kamco_training_user_2025_!@#
hikari:
minimum-idle: 10
maximum-pool-size: 20
      connection-timeout: 60000 # 60-second connection timeout
      idle-timeout: 300000 # 5-minute idle timeout
      max-lifetime: 1800000 # 30-minute maximum lifetime
      leak-detection-threshold: 60000 # connection-leak detection
transaction:
    default-timeout: 300 # 5-minute transaction timeout
jwt:
secret: "kamco_token_dev_dfc6446d-68fc-4eba-a2ff-c80a14a0bf3a"
  access-token-validity-in-ms: 86400000 # 1 day
  refresh-token-validity-in-ms: 604800000 # 7 days
token:
  refresh-cookie-name: kamco-dev # dev cookie name
  refresh-cookie-secure: false # false when testing over plain HTTP locally
springdoc:
swagger-ui:
    persist-authorization: true # keeps the token across Swagger UI refreshes (stored in localStorage)
member:
init_password: kamco1234!
swagger:
local-port: 9080
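The `jwt`, `token`, `member`, and `swagger` blocks above are custom application properties rather than Spring-managed ones, so the application has to bind them explicitly. A minimal sketch of one such binding, assuming a Spring Boot 3 style `@ConfigurationProperties` record; the class name and package are illustrative, and only the property names come from the YAML above:

// Hypothetical binding for the custom "jwt" block above. The class name
// and package are assumptions; the property names come from the YAML.
package com.example.config; // assumed package

import org.springframework.boot.context.properties.ConfigurationProperties;

@ConfigurationProperties(prefix = "jwt")
public record JwtProperties(
        String secret,                   // jwt.secret
        long accessTokenValidityInMs,    // jwt.access-token-validity-in-ms
        long refreshTokenValidityInMs) { // jwt.refresh-token-validity-in-ms
}

Spring's relaxed binding maps `access-token-validity-in-ms` onto `accessTokenValidityInMs` automatically; the record still has to be registered, e.g. via `@ConfigurationPropertiesScan` on the application class.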


@@ -0,0 +1,37 @@
spring:
config:
activate:
on-profile: prod
jpa:
show-sql: false
hibernate:
ddl-auto: validate
properties:
hibernate:
        default_batch_fetch_size: 100 # ✅ Performance - prevents N+1 queries
        order_updates: true # ✅ Performance - orders updates to reduce deadlocks
        use_sql_comments: true # ⚠️ Optional - adds comments to the SQL (for debugging)
datasource:
url: jdbc:postgresql://10.100.0.10:25432/temp
username: temp
password: temp123!
hikari:
minimum-idle: 10
maximum-pool-size: 20
jwt:
secret: "kamco_token_prod_dfc6446d-68fc-4eba-a2ff-c80a14a0bf3a"
  access-token-validity-in-ms: 86400000 # 1 day
  refresh-token-validity-in-ms: 604800000 # 7 days
token:
  refresh-cookie-name: kamco # prod cookie name
  refresh-cookie-secure: true # must stay true in prod (HTTPS); false only for local HTTP testing
member:
init_password: kamco1234!
swagger:
local-port: 9080
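The functional difference from the dev profile in the `token` block is `refresh-cookie-secure: true`, which marks the refresh cookie as HTTPS-only. A hedged sketch of where that flag would land, using Spring's `ResponseCookie` builder; the class name and wiring are assumptions, since the cookie-issuing code is not part of this file:

// Illustrative only: builds the refresh-token cookie from the
// token.* properties above. The class name is an assumption.
import java.time.Duration;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.ResponseCookie;
import org.springframework.stereotype.Component;

@Component
public class RefreshCookieFactory { // assumed name

    @Value("${token.refresh-cookie-name}")
    private String cookieName;

    @Value("${token.refresh-cookie-secure}")
    private boolean secure; // true in prod (HTTPS), false for local HTTP

    public ResponseCookie build(String refreshToken) {
        return ResponseCookie.from(cookieName, refreshToken)
                .httpOnly(true)              // keep the token out of reach of JS
                .secure(secure)              // HTTPS-only when true
                .path("/")
                .maxAge(Duration.ofDays(7))  // mirrors refresh-token-validity-in-ms
                .build();
    }
}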


@@ -0,0 +1,106 @@
server:
port: 8080
spring:
application:
name: kamco-training-api
profiles:
    active: dev # profile to activate (e.g. dev, prod, test)
datasource:
driver-class-name: org.postgresql.Driver
hikari:
jdbc:
time_zone: UTC
batch_size: 50
      # recommended settings
minimum-idle: 2
maximum-pool-size: 2
connection-timeout: 20000
idle-timeout: 300000
max-lifetime: 1800000
leak-detection-threshold: 60000
data:
redis:
host: localhost
port: 6379
password:
jpa:
hibernate:
    ddl-auto: update # enables automatic schema management
properties:
hibernate:
hbm2ddl:
auto: update
javax:
persistence:
validation:
mode: none
jdbc:
batch_size: 50
default_batch_fetch_size: 100
show-sql: false
servlet:
multipart:
enabled: true
max-file-size: 100MB
max-request-size: 100MB
logging:
level:
org:
springframework:
web: DEBUG
security: DEBUG
root: INFO
# actuator
management:
health:
readinessstate:
enabled: true
livenessstate:
enabled: true
diskspace:
enabled: true
endpoint:
health:
probes:
enabled: true
show-details: when-authorized
endpoints:
jmx:
exposure:
exclude: "*"
web:
base-path: /monitor
exposure:
include:
- "health"
# GeoJSON file monitoring settings
geojson:
monitor:
watch-directory: ~/geojson/upload
processed-directory: ~/geojson/processed
error-directory: ~/geojson/error
temp-directory: /tmp/geojson_extract
    cron-expression: "0/30 * * * * *" # runs every 30 seconds
supported-extensions:
- zip
- tar
- tar.gz
- tgz
max-file-size: 104857600 # 100MB
# file upload settings
file:
#sync-root-dir: D:/app/original-images/
sync-root-dir: /app/original-images/
sync-tmp-dir: ${file.sync-root-dir}tmp/
sync-file-extention: tfw,tif
#dataset-dir: D:/app/dataset/
dataset-dir: /app/dataset/
dataset-tmp-dir: ${file.dataset-dir}tmp/
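The `geojson.monitor` block implies a polling job: every 30 seconds something scans `watch-directory` and routes archives to `processed-directory` or `error-directory`. A minimal sketch of such a scheduler, assuming Spring's `@Scheduled` cron support (which needs `@EnableScheduling` somewhere in the app); the class and method names are illustrative, and only the property keys come from this file:

// Illustrative sketch of a directory monitor driven by the
// geojson.monitor.* properties above. Class and method names are
// assumptions; only the property keys come from application.yml.
import java.io.IOException;
import java.nio.file.*;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

@Component
public class GeoJsonMonitor {

    // Note: "~" is not expanded by Java NIO; assume the real code
    // resolves it against user.home before use.
    @Value("${geojson.monitor.watch-directory}")
    private String watchDirectory;

    @Value("${geojson.monitor.processed-directory}")
    private String processedDirectory;

    // Runs every 30 seconds, per geojson.monitor.cron-expression.
    @Scheduled(cron = "${geojson.monitor.cron-expression}")
    public void scan() throws IOException {
        try (DirectoryStream<Path> archives =
                 Files.newDirectoryStream(Paths.get(watchDirectory), "*.{zip,tar,tgz}")) {
            for (Path archive : archives) {
                // ... extract and validate here, then hand off ...
                Files.move(archive,
                           Paths.get(processedDirectory).resolve(archive.getFileName()),
                           StandardCopyOption.REPLACE_EXISTING);
            }
        }
    }
}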

File diff suppressed because it is too large


@@ -0,0 +1,125 @@
<!DOCTYPE html>
<html lang="ko">
<head>
<meta charset="UTF-8">
<title>Chunk Upload Test</title>
</head>
<body>
<h2>Large File Chunk Upload Test</h2>
* Chunk size for this test is 10 MB (10 * 1024 * 1024) - adjustable depending on performance<br><br>
* The identifier returned after calling the training-data registration API<br><br>
datasetUid (training-data identifier) : <input id="datasetUid" placeholder="1"><br><br>
* Extracted from the selected file and assigned automatically when the API is called (see the script below).<br><br>
chunkIndex : <input id="chunkIndex" placeholder="chunkIndex" readonly><br><br>
chunkTotalIndex : <input id="chunkTotalIndex" placeholder="chunkTotalIndex" readonly><br><br>
* Extracted from the selected file and assigned automatically when the API is called (see the script below).<br><br>
fileSize : <input id="fileSize" placeholder="fileSize" readonly><br><br>
<!-- fileHash : <input id="fileHash" placeholder="fileHash"><br><br> -->
<input type="file" id="chunkFile"><br><br>
<button onclick="startUpload()">Start Upload</button>
<br><br>
* Progress (%)<br><br>
<div style="width:500px;height:30px;border:1px solid #cccccc;"><div id="prgssbar" style="width:100%;height:30px;background:#eeeeee;"></div></div>
<br><br>
* Result message<br><br>
<div id="status" style="padding:20px;width:500px;height:300px;border:1px solid #000000;"></div>
<script>
async function startUpload() {
    const file = document.getElementById('chunkFile').files[0];
    if (!file) return alert("Please select a file."); // guard before touching file.name
    const fileName = file.name;
    const datasetUid = Number(document.getElementById('datasetUid').value);
    //const chunkIndex = document.getElementById('chunkIndex').value;
    const CHUNK_SIZE = 10 * 1024 * 1024; // 10MB
const fileSize = file.size;
const totalChunks = Math.ceil(fileSize / CHUNK_SIZE);
const chunkTotalIndex = totalChunks - 1;
    //const uploadId = crypto.randomUUID(); // generate a unique ID
    let uuid = "";
document.getElementById('fileSize').value = file.size;
document.getElementById('chunkTotalIndex').value = chunkTotalIndex;
for (let i = 0; i < totalChunks; i++) {
//for (let i = 0; i < 1; i++) {
const start = i * CHUNK_SIZE;
const end = Math.min(start + CHUNK_SIZE, file.size);
const chunk = file.slice(start, end);
document.getElementById('chunkIndex').value = i;
const formData = new FormData();
formData.append("datasetUid", datasetUid);
formData.append("fileSize", fileSize);
formData.append("fileName", fileName);
formData.append("chunkIndex", i);
formData.append("chunkTotalIndex", chunkTotalIndex);
formData.append("chunkFile", chunk);
try {
const response = await fetch('/api/upload/chunk-upload-dataset', { method: 'POST', body: formData });
            // 2. Check the response status (200 OK, etc.)
if (!response.ok) {
                throw new Error(`Server error: ${response.status}`);
}
            // 3. Read the body the server sent back (assumed to be JSON)
const result = await response.json();
document.getElementById('status').innerText = JSON.stringify(result, null, 2);
            if (result.data.res != "success") {
                // Handle this case by showing an error alert.
                break;
            }
uuid = result.data.uuid;
document.getElementById('prgssbar').style.width = result.data.uploadRate+"%";
} catch (error) {
            console.error(`Chunk ${i} upload failed:`, error);
            break; // stop on error
}
}
    // After every chunk has been sent, request the final merge
    const mergeResult = await completeUpload(uuid);
document.getElementById('status').innerText = JSON.stringify(mergeResult, null, 2);
}
async function completeUpload(uuid) {
try {
const response = await fetch(`/api/upload/chunk-upload-complete/${uuid}`, {
method: 'PUT',
headers: {
'Content-Type': 'application/json'
},
});
if (!response.ok) {
            throw new Error(`Server response error: ${response.status}`);
}
const result = await response.json();
return result;
} catch (error) {
console.error("완료 요청 중 오류 발생:", error);
}
}
</script>
</body>
</html>
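For reference, the two endpoints this page calls would need roughly the following server-side shape. This is a hedged sketch only: the endpoint paths, multipart field names, and the `data.res` / `data.uuid` / `data.uploadRate` response shape come from the fetch() calls above, while the controller class, its placeholder logic, and the returned values are assumptions, since the real controller is elsewhere in this commit:

// Hypothetical controller matching the fetch() calls in the test page.
// Paths and field names come from the HTML above; everything else
// (class name, placeholder bodies, response values) is an assumption.
import java.util.Map;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

@RestController
@RequestMapping("/api/upload")
public class ChunkUploadController { // assumed name

    // Receives one chunk; parameter names match the FormData fields.
    @PostMapping("/chunk-upload-dataset")
    public ResponseEntity<Map<String, Object>> uploadChunk(
            @RequestParam long datasetUid,
            @RequestParam long fileSize,
            @RequestParam String fileName,
            @RequestParam int chunkIndex,
            @RequestParam int chunkTotalIndex,
            @RequestParam("chunkFile") MultipartFile chunkFile) {
        // ... persist the chunk to a temp dir keyed by an upload uuid ...
        double uploadRate = 100.0 * (chunkIndex + 1) / (chunkTotalIndex + 1);
        return ResponseEntity.ok(Map.of("data", Map.of(
                "res", "success",
                "uuid", "generated-or-existing-uuid", // placeholder value
                "uploadRate", uploadRate)));          // drives the progress bar
    }

    // Merges the stored chunks once the client has sent the last one.
    @PutMapping("/chunk-upload-complete/{uuid}")
    public ResponseEntity<Map<String, Object>> complete(@PathVariable String uuid) {
        // ... concatenate the chunk files in index order into the final file ...
        return ResponseEntity.ok(Map.of("data", Map.of("res", "success", "uuid", uuid)));
    }
}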