375 lines
12 KiB
Dart
375 lines
12 KiB
Dart
// lib/remote/remote_repository.dart
|
|
import 'package:flutter/foundation.dart' show debugPrint;
|
|
import 'package:sqflite/sqflite.dart';
|
|
|
|
import 'remote_models.dart';
|
|
|
|
/// Repository for rows synchronized from a remote photo source.
///
/// Writes remote items into the local `entry` table (tagged `origin=1`),
/// keeps the schema's remote/GPS columns up to date, and offers
/// deduplication utilities keyed on `remoteId`.
class RemoteRepository {
  /// Open sqflite database handle; all queries and transactions run on it.
  final Database db;

  RemoteRepository(this.db);
|
|
|
|
// =========================
|
|
// Helpers PRAGMA / schema
|
|
// =========================
|
|
|
|
/// Adds any column of [columnsAndTypes] that is missing from [table].
///
/// Reads the current schema via `PRAGMA table_info` and issues one
/// `ALTER TABLE ... ADD COLUMN` per absent column. Failures are logged and
/// swallowed on purpose: a partially migrated schema must not crash startup.
/// NOTE: [table] is interpolated into SQL (identifiers cannot be bound
/// parameters) — callers pass trusted, hard-coded table names only.
Future<void> _ensureColumns(
  DatabaseExecutor dbExec, {
  required String table,
  required Map<String, String> columnsAndTypes,
}) async {
  try {
    final info = await dbExec.rawQuery('PRAGMA table_info($table);');
    final present = <String>{for (final row in info) row['name'] as String};

    for (final column in columnsAndTypes.keys) {
      if (present.contains(column)) continue;

      final statement =
          'ALTER TABLE $table ADD COLUMN $column ${columnsAndTypes[column]};';
      try {
        await dbExec.execute(statement);
        debugPrint('[RemoteRepository] executed: $statement');
      } catch (e, st) {
        // Best effort: keep adding the remaining columns.
        debugPrint('[RemoteRepository] failed to execute $statement: $e\n$st');
      }
    }
  } catch (e, st) {
    debugPrint('[RemoteRepository] _ensureColumns($table) error: $e\n$st');
  }
}
|
|
|
|
/// Ensures the GPS and `remote*` columns exist on the `entry` table,
/// then creates a plain (non-unique) index to speed up remoteId lookups.
Future<void> _ensureEntryColumns(DatabaseExecutor dbExec) async {
  const requiredColumns = <String, String>{
    // GPS coordinates
    'latitude': 'REAL',
    'longitude': 'REAL',
    'altitude': 'REAL',
    // Remote-sync fields
    'remoteId': 'TEXT',
    'remotePath': 'TEXT',
    'remoteThumb1': 'TEXT',
    'remoteThumb2': 'TEXT',
    'origin': 'INTEGER',
    'provider': 'TEXT',
    'trashed': 'INTEGER',
  };
  await _ensureColumns(
    dbExec,
    table: 'entry',
    columnsAndTypes: requiredColumns,
  );

  // Ordinary index to speed up lookups on remoteId (uniqueness is
  // enforced separately by ensureUniqueRemoteId).
  try {
    await dbExec.execute(
      'CREATE INDEX IF NOT EXISTS idx_entry_remoteId ON entry(remoteId);',
    );
  } catch (e, st) {
    debugPrint('[RemoteRepository] create index error: $e\n$st');
  }
}
|
|
|
|
// =========================
|
|
// Retry su SQLITE_BUSY
|
|
// =========================
|
|
|
|
/// Whether [e] looks like an SQLITE_BUSY / locked-database error.
///
/// sqflite surfaces these as exceptions whose message contains one of the
/// two marker strings, so a substring check on `toString()` is sufficient.
bool _isBusy(Object e) {
  final message = '$e';
  return message.contains('SQLITE_BUSY') ||
      message.contains('database is locked');
}
|
|
|
|
/// Runs [fn], retrying when SQLite reports a busy/locked database.
///
/// Performs up to 3 attempts with exponential backoff between them
/// (250 → 500 → 1000 ms). Non-busy errors, and the busy error of the
/// final attempt, are rethrown with their original stack trace.
Future<T> _withRetryBusy<T>(Future<T> Function() fn) async {
  const maxAttempts = 3;
  var delay = const Duration(milliseconds: 250);
  for (var i = 0; i < maxAttempts; i++) {
    try {
      return await fn();
    } catch (e) {
      // Last attempt (or a non-busy error): propagate as-is.
      if (!_isBusy(e) || i == maxAttempts - 1) rethrow;
      await Future.delayed(delay);
      delay *= 2; // 250 → 500 → 1000 ms
    }
  }
  // FIX: the loop above always returns or rethrows, so the old trailing
  // `return await fn();` was dead code that — had it ever become
  // reachable — would have issued a silent, un-counted extra attempt.
  // Fail loudly instead.
  throw StateError('unreachable: retry loop must return or rethrow');
}
|
|
|
|
// =========================
|
|
// Normalizzazione SOLO per diagnostica (non cambia cosa salvi)
|
|
// =========================
|
|
|
|
/// Normalizes [p] for diagnostic comparisons only (never for writes):
/// trims whitespace, collapses runs of '/' into one, and guarantees a
/// leading slash. Returns '' for null/empty input.
String _normPath(String? p) {
  if (p == null || p.isEmpty) return '';
  final collapsed = p.trim().replaceAll(RegExp(r'/+'), '/');
  return collapsed.startsWith('/') ? collapsed : '/$collapsed';
}
|
|
|
|
/// Candidato "canonico" (inserisce '/original/' dopo '/photos/<User>/'
|
|
/// se manca). Usato solo per LOG/HINT, NON per scrivere.
|
|
String _canonCandidate(String? rawPath, String fileName) {
|
|
var s = _normPath(rawPath);
|
|
final seg = s.split('/'); // ['', 'photos', '<User>', maybe 'original', ...]
|
|
if (seg.length >= 4 && seg[1] == 'photos' && seg[3] != 'original' && seg[3] != 'thumbs') {
|
|
seg.insert(3, 'original');
|
|
}
|
|
if (fileName.isNotEmpty) {
|
|
seg[seg.length - 1] = fileName;
|
|
}
|
|
return seg.join('/');
|
|
}
|
|
|
|
// =========================
|
|
// Utilities
|
|
// =========================
|
|
|
|
/// Whether [it] is a video, judged by MIME type first and then by a
/// case-insensitive file-extension fallback on the path.
bool _isVideoItem(RemotePhotoItem it) {
  final mime = (it.mimeType ?? '').toLowerCase();
  if (mime.startsWith('video/')) return true;

  const videoExtensions = ['.mp4', '.mov', '.m4v', '.mkv', '.webm'];
  final lowerPath = it.path.toLowerCase();
  return videoExtensions.any(lowerPath.endsWith);
}
|
|
|
|
/// Builds a complete `entry` row from a remote item.
///
/// Values are stored exactly as received from the remote payload — the
/// path is NOT canonicalized (no forced '/original/'). Pass [existingId]
/// to reuse an existing primary key so INSERT OR REPLACE updates in place;
/// null lets SQLite assign a new rowid.
Map<String, Object?> _buildEntryRow(RemotePhotoItem it, {int? existingId}) {
  // ⚠️ No correction applied: save exactly what arrives (current behavior).
  return <String, Object?>{
    'id': existingId,
    'contentId': null, // local-media fields: not applicable to remote rows
    'uri': null,
    'path': it.path,
    'sourceMimeType': it.mimeType,
    'width': it.width,
    'height': it.height,
    'sourceRotationDegrees': null,
    'sizeBytes': it.sizeBytes,
    'title': it.name,
    // Import timestamp (seconds since epoch), not the capture time.
    'dateAddedSecs': DateTime.now().millisecondsSinceEpoch ~/ 1000,
    'dateModifiedMillis': null,
    'sourceDateTakenMillis': it.takenAtUtc?.millisecondsSinceEpoch,
    'durationMillis': it.durationMillis,
    // Visible remote flags (origin=1 marks a remote row).
    'trashed': 0,
    'origin': 1,
    'provider': 'json@patachina',
    // GPS (each may be null)
    'latitude': it.lat,
    'longitude': it.lng,
    'altitude': it.alt,
    // Remote fields (⚠️ "raw" path, '/original/' is not forced in)
    'remoteId': it.id,
    'remotePath': it.path,
    'remoteThumb1': it.thub1, // field name ('thub') comes from the model
    'remoteThumb2': it.thub2,
  };
}
|
|
|
|
/// Maps a [RemoteLocation] onto the `address` table schema.
///
/// [newId] is the primary key of the related `entry` row: address rows
/// share their id with the entry they describe.
Map<String, Object?> _buildAddressRow(int newId, RemoteLocation location) =>
    <String, Object?>{
      'id': newId,
      'addressLine': location.address,
      'countryCode': null, // not provided by the remote payload
      'countryName': location.country,
      'adminArea': location.region,
      'locality': location.city,
    };
|
|
|
|
// =========================
|
|
// Upsert a chunk (DIAGNOSTICA inclusa)
|
|
// =========================
|
|
|
|
/// Upserts [items] into `entry` in chunks of [chunkSize] rows.
///
/// Behavior:
/// - Ensures the remote/GPS columns exist before writing.
/// - Processes images before videos.
/// - Matches existing rows ONLY by `remoteId` (origin=1); the extra
///   path-based queries are diagnostic hints and never drive a write.
/// - On a failed insert, retries the same row without the GPS columns
///   (covers databases where those columns could not be added).
/// - A second pass inserts `address` rows for items carrying a location.
///
/// Each chunk runs in its own transaction with busy-retry; a chunk error
/// is logged and rethrown.
Future<void> upsertAll(List<RemotePhotoItem> items, {int chunkSize = 200}) async {
  debugPrint('RemoteRepository.upsertAll: items=${items.length}');
  if (items.isEmpty) return;

  await _withRetryBusy(() => _ensureEntryColumns(db));

  // Order: images first, then videos.
  final images = <RemotePhotoItem>[];
  final videos = <RemotePhotoItem>[];
  for (final it in items) {
    (_isVideoItem(it) ? videos : images).add(it);
  }
  final ordered = <RemotePhotoItem>[...images, ...videos];

  for (var offset = 0; offset < ordered.length; offset += chunkSize) {
    final end = (offset + chunkSize < ordered.length) ? offset + chunkSize : ordered.length;
    final chunk = ordered.sublist(offset, end);

    try {
      await _withRetryBusy(() => db.transaction((txn) async {
        final batch = txn.batch();

        for (final it in chunk) {
          // === PRE-LOOKUP DIAGNOSTICS ===
          final raw = it.path;
          final norm = _normPath(raw);
          final cand = _canonCandidate(raw, it.name);
          final hasOriginal = raw.contains('/original/');
          final hasLeading = raw.startsWith('/');
          // FIX: guard short remote ids — substring(0, 8) used to throw a
          // RangeError whenever it.id had fewer than 8 characters.
          final ridShort = it.id.length >= 8 ? it.id.substring(0, 8) : it.id;
          debugPrint(
            '[repo-upsert] in: rid=$ridShort name=${it.name} '
            'raw="$raw" (original=${hasOriginal?"Y":"N"}, leading=${hasLeading?"Y":"N"})'
          );

          // Look up an existing record ONLY by remoteId (current behavior).
          int? existingId;
          try {
            final existing = await txn.query(
              'entry',
              columns: ['id'],
              where: 'origin=1 AND remoteId = ?',
              whereArgs: [it.id],
              limit: 1,
            );
            existingId = existing.isNotEmpty ? (existing.first['id'] as int?) : null;
          } catch (e, st) {
            debugPrint('[RemoteRepository] lookup existingId failed for remoteId=${it.id}: $e\n$st');
          }

          // === DIAGNOSTIC HINT: does a "compatible" row exist by path? ===
          // 1) canonical path (with /original/)
          try {
            final byCanon = await txn.query(
              'entry',
              columns: ['id'],
              where: 'origin=1 AND remotePath = ?',
              whereArgs: [cand],
              limit: 1,
            );
            if (byCanon.isNotEmpty && existingId == null) {
              final idCand = byCanon.first['id'];
              debugPrint(
                '[repo-upsert][HINT] trovata riga per CAND-remotePath="$cand" -> id=$idCand '
                '(il lookup corrente per remoteId NON la vede: possibile causa duplicato)'
              );
            }
          } catch (_) {}

          // 2) raw path, slash-normalized only
          try {
            final byNorm = await txn.query(
              'entry',
              columns: ['id'],
              where: 'origin=1 AND remotePath = ?',
              whereArgs: [norm],
              limit: 1,
            );
            if (byNorm.isNotEmpty && existingId == null) {
              final idNorm = byNorm.first['id'];
              debugPrint(
                '[repo-upsert][HINT] trovata riga per RAW-NORM-remotePath="$norm" -> id=$idNorm '
                '(il lookup corrente per remoteId NON la vede: possibile causa duplicato)'
              );
            }
          } catch (_) {}

          // Full row (⚠️ the RAW path is saved, as before).
          final row = _buildEntryRow(it, existingId: existingId);

          // Insert/replace
          try {
            batch.insert(
              'entry',
              row,
              conflictAlgorithm: ConflictAlgorithm.replace,
            );
          } on DatabaseException catch (e, st) {
            debugPrint('[RemoteRepository] batch insert failed for remoteId=${it.id}: $e\n$st');

            // Fallback: retry the same row without GPS columns.
            final rowNoGps = Map<String, Object?>.from(row)
              ..remove('latitude')
              ..remove('longitude')
              ..remove('altitude');

            batch.insert(
              'entry',
              rowNoGps,
              conflictAlgorithm: ConflictAlgorithm.replace,
            );
          }
        }

        await batch.commit(noResult: true);

        // Second pass for address rows (unchanged behavior).
        for (final it in chunk) {
          if (it.location == null) continue;

          try {
            // Re-read the id: the batch may have just created the row.
            final rows = await txn.query(
              'entry',
              columns: ['id'],
              where: 'origin=1 AND remoteId = ?',
              whereArgs: [it.id],
              limit: 1,
            );
            if (rows.isEmpty) continue;
            final newId = rows.first['id'] as int;

            final addr = _buildAddressRow(newId, it.location!);
            await txn.insert(
              'address',
              addr,
              conflictAlgorithm: ConflictAlgorithm.replace,
            );
          } catch (e, st) {
            debugPrint('[RemoteRepository] insert address failed for remoteId=${it.id}: $e\n$st');
          }
        }
      }));
    } catch (e, st) {
      debugPrint('[RemoteRepository] upsert chunk ${offset}..${end - 1} ERROR: $e\n$st');
      rethrow;
    }
  }
}
|
|
|
|
// =========================
|
|
// Unicità & deduplica (immutato)
|
|
// =========================
|
|
|
|
/// Creates (if missing) a partial UNIQUE index on `entry(remoteId)`
/// restricted to remote rows (origin=1), so future upserts cannot
/// introduce duplicate remote ids. Errors are logged, not thrown.
Future<void> ensureUniqueRemoteId() async {
  const ddl = 'CREATE UNIQUE INDEX IF NOT EXISTS uq_entry_remote_remoteId '
      'ON entry(remoteId) WHERE origin=1';
  try {
    await db.execute(ddl);
    debugPrint('[RemoteRepository] ensured UNIQUE index on entry(remoteId) for origin=1');
  } catch (e, st) {
    debugPrint('[RemoteRepository] ensureUniqueRemoteId error: $e\n$st');
  }
}
|
|
|
|
/// Deletes duplicate remote rows, keeping the newest (MAX(id)) row for
/// each `remoteId`. Returns the number of deleted rows, or 0 when the
/// delete fails (the error is logged, not thrown).
Future<int> deduplicateRemotes() async {
  const sql = 'DELETE FROM entry '
      'WHERE origin=1 AND remoteId IS NOT NULL AND id NOT IN ('
      ' SELECT MAX(id) FROM entry '
      ' WHERE origin=1 AND remoteId IS NOT NULL '
      ' GROUP BY remoteId'
      ')';
  try {
    final removed = await db.rawDelete(sql);
    debugPrint('[RemoteRepository] deduplicateRemotes deleted=$removed');
    return removed;
  } catch (e, st) {
    debugPrint('[RemoteRepository] deduplicateRemotes error: $e\n$st');
    return 0;
  }
}
|
|
|
|
/// Removes duplicate remote rows, then enforces uniqueness going forward.
///
/// Order matters: deduplication must run first, because creating the
/// UNIQUE index over data that still contains duplicate remoteIds would
/// fail.
Future<void> sanitizeRemotes() async {
  await deduplicateRemotes();
  await ensureUniqueRemoteId();
}
|
|
|
|
// =========================
|
|
// Utils
|
|
// =========================
|
|
|
|
/// Returns how many remote rows (origin=1) exist in `entry`.
Future<int> countRemote() async {
  final result =
      await db.rawQuery('SELECT COUNT(1) AS c FROM entry WHERE origin=1');
  // COUNT always yields exactly one row; fall back to 0 on a null cell.
  return (result.first['c'] as int?) ?? 0;
}
|
|
}
|