// (extraction metadata: 566 lines, 19 KiB, Dart)
// lib/remote/remote_repository.dart

import 'package:flutter/foundation.dart' show debugPrint;
import 'package:sqflite/sqflite.dart';

import 'remote_db_uris.dart'; // <-- helper per URI fittizi aves-remote://...
import 'remote_models.dart';
/// Repository that mirrors remote photo metadata into the local
/// sqflite `entry` (and `address`) tables.
class RemoteRepository {
  /// Open sqflite database handle all operations run against.
  final Database db;

  RemoteRepository(this.db);
// =========================
// Helpers PRAGMA / schema
// =========================

/// Adds every column of [columnsAndTypes] that is missing from [table].
///
/// Reads the current schema via `PRAGMA table_info`, then issues one
/// `ALTER TABLE ... ADD COLUMN` per missing column. All failures are
/// logged and swallowed: this is a best-effort migration helper.
Future<void> _ensureColumns(
  DatabaseExecutor dbExec, {
  required String table,
  required Map<String, String> columnsAndTypes,
}) async {
  try {
    final info = await dbExec.rawQuery('PRAGMA table_info($table);');
    final present = {for (final r in info) r['name'] as String};

    for (final entry in columnsAndTypes.entries) {
      if (present.contains(entry.key)) continue;
      final sql = 'ALTER TABLE $table ADD COLUMN ${entry.key} ${entry.value};';
      try {
        await dbExec.execute(sql);
        debugPrint('[RemoteRepository] executed: $sql');
      } catch (e, st) {
        // A single failed ALTER must not stop the remaining columns.
        debugPrint('[RemoteRepository] failed to execute $sql: $e\n$st');
      }
    }
  } catch (e, st) {
    debugPrint('[RemoteRepository] _ensureColumns($table) error: $e\n$st');
  }
}
/// Ensures every remote entry (origin=1) has a uri built from remoteId.
///
/// NOTE(review): this pass writes a `remote://` scheme, while
/// backfillRemoteUris() writes `aves-remote://rid/...` — confirm the two
/// schemes are intentionally different.
Future<void> ensureRemoteUris(DatabaseExecutor dbExec) async {
  try {
    await dbExec.execute('''
UPDATE entry
SET uri = 'remote://' || remoteId
WHERE origin = 1
AND remoteId IS NOT NULL
AND remoteId != ''
AND (uri IS NULL OR uri = '' OR uri NOT LIKE 'remote://%');
''');
    debugPrint('[RemoteRepository] ensureRemoteUris: migration applied');
  } catch (e, st) {
    // Best-effort migration: log and continue.
    debugPrint('[RemoteRepository] ensureRemoteUris error: $e\n$st');
  }
}
/// Ensures the GPS and "remote*" columns exist on the `entry` table,
/// then creates a plain (non-unique) index on remoteId for lookups.
Future<void> _ensureEntryColumns(DatabaseExecutor dbExec) async {
  // Column name -> SQLite type.
  const wanted = <String, String>{
    // Core (legacy databases may still miss it)
    'uri': 'TEXT',

    // GPS
    'latitude': 'REAL',
    'longitude': 'REAL',
    'altitude': 'REAL',

    // Remote fields
    'remoteId': 'TEXT',
    'remotePath': 'TEXT',
    'remoteThumb1': 'TEXT',
    'remoteThumb2': 'TEXT',
    'origin': 'INTEGER',
    'provider': 'TEXT',
    'trashed': 'INTEGER',
    'remoteRotation': 'INTEGER',
  };
  await _ensureColumns(dbExec, table: 'entry', columnsAndTypes: wanted);

  // Plain index to speed up remoteId lookups.
  try {
    await dbExec.execute(
      'CREATE INDEX IF NOT EXISTS idx_entry_remoteId ON entry(remoteId);',
    );
  } catch (e, st) {
    debugPrint('[RemoteRepository] create index error: $e\n$st');
  }
}
// =========================
// Retry su SQLITE_BUSY
// =========================

/// Whether [e] looks like a transient SQLITE_BUSY / locked-database
/// error, judged from its string representation.
bool _isBusy(Object e) {
  final msg = '$e';
  return msg.contains('SQLITE_BUSY') || msg.contains('database is locked');
}
/// Runs [fn], retrying up to 3 times with exponential backoff
/// (250 → 500 → 1000 ms) when the failure looks like SQLITE_BUSY.
///
/// Non-busy errors, and the busy error of the final attempt, are
/// rethrown with their original stack trace.
///
/// BUG FIX: the original ended with an unconditional `return await fn();`
/// after the loop — unreachable in practice, but if ever reached it would
/// have performed a silent, unguarded 4th attempt. The loop now rethrows
/// on the final attempt, so no fallthrough call exists.
Future<T> _withRetryBusy<T>(Future<T> Function() fn) async {
  const maxAttempts = 3;
  var delay = const Duration(milliseconds: 250);
  for (var attempt = 1; ; attempt++) {
    try {
      return await fn();
    } catch (e) {
      if (!_isBusy(e) || attempt == maxAttempts) rethrow;
      await Future.delayed(delay);
      delay *= 2; // 250 → 500 → 1000 ms
    }
  }
}
// =========================
// Normalizzazione (solo supporto)
// =========================

/// Normalizes a remote path: trims whitespace, collapses duplicate
/// slashes and guarantees a single leading '/'.
/// Returns '' for null/empty input.
String _normPath(String? p) {
  if (p == null || p.isEmpty) return '';
  final collapsed = p.trim().replaceAll(RegExp(r'/+'), '/');
  return collapsed.startsWith('/') ? collapsed : '/$collapsed';
}
/// Canonical path candidate: inserts '/original/' right after
/// '/photos/<User>/' when neither 'original' nor 'thumbs' is already
/// there, and replaces the last segment with [fileName] (when given).
/// Used for lookup/fallback only.
String _canonCandidate(String? rawPath, String fileName) {
  final normalized = _normPath(rawPath);
  // ['', 'photos', '<User>', maybe 'original', ...]
  final parts = normalized.split('/');
  final needsOriginal = parts.length >= 4 &&
      parts[1] == 'photos' &&
      parts[3] != 'original' &&
      parts[3] != 'thumbs';
  if (needsOriginal) parts.insert(3, 'original');
  if (fileName.isNotEmpty) {
    // NOTE(review): for an empty rawPath this overwrites the single empty
    // segment, yielding just fileName with no leading slash — confirm
    // that is intended.
    parts[parts.length - 1] = fileName;
  }
  return parts.join('/');
}
// =========================
// Utilities
// =========================

/// Whether [it] is a video, judged by MIME type first and by a known
/// video file extension as a fallback.
bool _isVideoItem(RemotePhotoItem it) {
  final mime = (it.mimeType ?? '').toLowerCase();
  if (mime.startsWith('video/')) return true;
  const videoExts = ['.mp4', '.mov', '.m4v', '.mkv', '.webm'];
  final path = it.path.toLowerCase();
  return videoExts.any(path.endsWith);
}
/// Builds the full `entry` row for [it].
///
/// When [existingId] is null, SQLite assigns a fresh rowid on insert;
/// otherwise the REPLACE keeps the existing primary key.
Map<String, Object?> _buildEntryRow(RemotePhotoItem it, {int? existingId}) {
  // ============================================================
  // ORIGINAL REMARK (restore once an ImageProvider is available)
  final syntheticUri = RemoteDbUris.make(remoteId: it.id, remotePath: it.path);
  // ============================================================

  // TEMPORARY FIX: use an HTTP URL based on thub2
  //final syntheticUri = 'https://prova.patachina.it/${it.thub2}';
  //final syntheticUri = 'https://picsum.photos/400';

  // Stable synthetic contentId in [1_000_000_000, 1_900_000_000), derived
  // from remoteId (or path when the id is empty). Keep in sync with the
  // same derivation in backfillRemoteEssentials().
  int makeContentId() {
    final seed = it.id.isNotEmpty ? it.id : it.path;
    final positiveHash = seed.hashCode & 0x7fffffff;
    return 1_000_000_000 + (positiveHash % 900_000_000);
  }

  final nowMs = DateTime.now().millisecondsSinceEpoch;
  final dateModMs = it.takenAtUtc?.millisecondsSinceEpoch ?? nowMs;

  return <String, Object?>{
    'id': existingId,
    'contentId': makeContentId(),

    // Synthetic URI (see remark above)
    'uri': syntheticUri,

    // MIME always set
    'path': it.path,
    'sourceMimeType': it.mimeType ?? 'image/jpeg',

    // width/height always set
    'width': it.width ?? 0,
    'height': it.height ?? 0,

    // rotation always set
    'sourceRotationDegrees': it.rotation ?? 0,

    'sizeBytes': it.sizeBytes,

    'title': it.name,
    'dateAddedSecs': nowMs ~/ 1000,
    'dateModifiedMillis': dateModMs,
    'sourceDateTakenMillis': it.takenAtUtc?.millisecondsSinceEpoch,
    'durationMillis': it.durationMillis,

    'trashed': 0,
    'origin': 1,
    'provider': 'json@patachina',

    'latitude': it.lat,
    'longitude': it.lng,
    'altitude': it.alt,

    'remoteId': it.id,
    'remotePath': it.path,
    'remoteThumb1': it.thub1,
    'remoteThumb2': it.thub2,
    'remoteRotation': it.rotation ?? 0,

    // remoteWidth/remoteHeight always set
    'remoteWidth': it.width ?? 0,
    'remoteHeight': it.height ?? 0,
  };
}
/// Builds an `address` row for entry [newId] from [location].
/// countryCode is not present in the remote payload and stays NULL.
Map<String, Object?> _buildAddressRow(int newId, RemoteLocation location) =>
    <String, Object?>{
      'id': newId,
      'addressLine': location.address,
      'countryCode': null,
      'countryName': location.country,
      'adminArea': location.region,
      'locality': location.city,
    };
// =========================
// Upsert a chunk (con fallback robusti)
// =========================

/// Resolves the primary key of an already-stored remote entry for [it].
///
/// Lookup order: (1) by remoteId, (2) by remotePath equal to the
/// canonical candidate [cand], (3) by remotePath equal to the normalized
/// raw path [norm]. Each failed query is logged and treated as "no
/// match". Returns null when nothing matches.
Future<int?> _findExistingEntryId(
  DatabaseExecutor txn,
  RemotePhotoItem it,
  String cand,
  String norm,
) async {
  // 1) by remoteId
  try {
    final byRemoteId = await txn.query(
      'entry',
      columns: ['id'],
      where: 'origin=1 AND remoteId = ?',
      whereArgs: [it.id],
      limit: 1,
    );
    final id = byRemoteId.isNotEmpty ? byRemoteId.first['id'] as int? : null;
    if (id != null) return id;
  } catch (e, st) {
    debugPrint('[RemoteRepository] lookup by remoteId failed for remoteId=${it.id}: $e\n$st');
  }

  // 2) fallback: remotePath == canonical candidate (/original/ inserted)
  try {
    final byCanon = await txn.query(
      'entry',
      columns: ['id'],
      where: 'origin=1 AND remotePath = ?',
      whereArgs: [cand],
      limit: 1,
    );
    final id = byCanon.isNotEmpty ? byCanon.first['id'] as int? : null;
    if (id != null) return id;
  } catch (e, st) {
    debugPrint('[RemoteRepository] lookup by canonical path failed "$cand": $e\n$st');
  }

  // 3) last fallback: remotePath == raw path with only slashes normalized
  try {
    final byNorm = await txn.query(
      'entry',
      columns: ['id'],
      where: 'origin=1 AND remotePath = ?',
      whereArgs: [norm],
      limit: 1,
    );
    final id = byNorm.isNotEmpty ? byNorm.first['id'] as int? : null;
    if (id != null) return id;
  } catch (e, st) {
    debugPrint('[RemoteRepository] lookup by normalized path failed "$norm": $e\n$st');
  }

  return null;
}

/// Upserts [items] into `entry` in chunks of [chunkSize], images first,
/// then videos. Ensures schema columns and unique indexes up front.
///
/// Rows are written with REPLACE; a row whose insert fails (e.g. on the
/// GPS columns) is retried once without latitude/longitude/altitude.
/// Address rows are written in a second pass inside the same transaction.
/// Chunk failures are logged and rethrown.
Future<void> upsertAll(List<RemotePhotoItem> items, {int chunkSize = 200}) async {
  debugPrint('RemoteRepository.upsertAll: items=${items.length}');
  if (items.isEmpty) return;

  await _withRetryBusy(() => _ensureEntryColumns(db));

  // DB protection: create unique indexes where missing.
  await ensureUniqueRemoteId();
  await ensureUniqueRemotePath();

  // Order: images first, then videos.
  final images = <RemotePhotoItem>[];
  final videos = <RemotePhotoItem>[];
  for (final it in items) {
    (_isVideoItem(it) ? videos : images).add(it);
  }
  final ordered = <RemotePhotoItem>[...images, ...videos];

  for (var offset = 0; offset < ordered.length; offset += chunkSize) {
    final end = (offset + chunkSize < ordered.length) ? offset + chunkSize : ordered.length;
    final chunk = ordered.sublist(offset, end);

    try {
      await _withRetryBusy(() => db.transaction((txn) async {
            final batch = txn.batch();

            for (final it in chunk) {
              // Essential logging (can be silenced after testing).
              final raw = it.path;
              final norm = _normPath(raw);
              final cand = _canonCandidate(raw, it.name);
              // BUG FIX: the original `it.id.substring(0, 8)` threw a
              // RangeError for any remoteId shorter than 8 characters.
              final rid = it.id.length > 8 ? it.id.substring(0, 8) : it.id;
              debugPrint('[repo-upsert] in: rid=$rid name=${it.name} raw="$raw"');

              final existingId = await _findExistingEntryId(txn, it, cand, norm);

              // Full row + REPLACE.
              final row = _buildEntryRow(it, existingId: existingId);

              try {
                batch.insert(
                  'entry',
                  row,
                  conflictAlgorithm: ConflictAlgorithm.replace,
                );
              } on DatabaseException catch (e, st) {
                debugPrint('[RemoteRepository] batch insert failed for remoteId=${it.id}: $e\n$st');

                // Retry once without the GPS columns.
                final rowNoGps = Map<String, Object?>.from(row)
                  ..remove('latitude')
                  ..remove('longitude')
                  ..remove('altitude');

                batch.insert(
                  'entry',
                  rowNoGps,
                  conflictAlgorithm: ConflictAlgorithm.replace,
                );
              }
            }

            await batch.commit(noResult: true);

            // Second pass: address rows (when a location is available).
            for (final it in chunk) {
              final location = it.location;
              if (location == null) continue;

              try {
                final rows = await txn.query(
                  'entry',
                  columns: ['id'],
                  where: 'origin=1 AND remoteId = ?',
                  whereArgs: [it.id],
                  limit: 1,
                );
                if (rows.isEmpty) continue;
                final newId = rows.first['id'] as int;

                await txn.insert(
                  'address',
                  _buildAddressRow(newId, location),
                  conflictAlgorithm: ConflictAlgorithm.replace,
                );
              } catch (e, st) {
                debugPrint('[RemoteRepository] insert address failed for remoteId=${it.id}: $e\n$st');
              }
            }
          }));
    } catch (e, st) {
      debugPrint('[RemoteRepository] upsert chunk ${offset}..${end - 1} ERROR: $e\n$st');
      rethrow;
    }
  }
}
// =========================
// Unicità & deduplica
// =========================

/// UNIQUE partial index on `remoteId`, restricted to remote rows
/// (origin=1). Failure (e.g. pre-existing duplicates) is logged only.
Future<void> ensureUniqueRemoteId() async {
  const sql = 'CREATE UNIQUE INDEX IF NOT EXISTS uq_entry_remote_remoteId '
      'ON entry(remoteId) WHERE origin=1';
  try {
    await db.execute(sql);
    debugPrint('[RemoteRepository] ensured UNIQUE index on entry(remoteId) for origin=1');
  } catch (e, st) {
    debugPrint('[RemoteRepository] ensureUniqueRemoteId error: $e\n$st');
  }
}
/// UNIQUE partial index on `remotePath` (remote rows only) to prevent
/// duplicates. Failure is logged only.
Future<void> ensureUniqueRemotePath() async {
  const sql = 'CREATE UNIQUE INDEX IF NOT EXISTS uq_entry_remote_remotePath '
      'ON entry(remotePath) WHERE origin=1 AND remotePath IS NOT NULL';
  try {
    await db.execute(sql);
    debugPrint('[RemoteRepository] ensured UNIQUE index on entry(remotePath) for origin=1');
  } catch (e, st) {
    debugPrint('[RemoteRepository] ensureUniqueRemotePath error: $e\n$st');
  }
}
/// Dedup by `remoteId`, keeping the newest row (highest id) of each
/// group. Returns the number of deleted rows, or 0 on error.
Future<int> deduplicateRemotes() async {
  const sql = 'DELETE FROM entry '
      'WHERE origin=1 AND remoteId IS NOT NULL AND id NOT IN ('
      ' SELECT MAX(id) FROM entry '
      ' WHERE origin=1 AND remoteId IS NOT NULL '
      ' GROUP BY remoteId'
      ')';
  try {
    final deleted = await db.rawDelete(sql);
    debugPrint('[RemoteRepository] deduplicateRemotes deleted=$deleted');
    return deleted;
  } catch (e, st) {
    debugPrint('[RemoteRepository] deduplicateRemotes error: $e\n$st');
    return 0;
  }
}
/// Dedup by `remotePath` (exact match), keeping the newest row (highest
/// id) of each group. Returns the number of deleted rows, or 0 on error.
Future<int> deduplicateByRemotePath() async {
  const sql = 'DELETE FROM entry '
      'WHERE origin=1 AND remotePath IS NOT NULL AND id NOT IN ('
      ' SELECT MAX(id) FROM entry '
      ' WHERE origin=1 AND remotePath IS NOT NULL '
      ' GROUP BY remotePath'
      ')';
  try {
    final deleted = await db.rawDelete(sql);
    debugPrint('[RemoteRepository] deduplicateByRemotePath deleted=$deleted');
    return deleted;
  } catch (e, st) {
    debugPrint('[RemoteRepository] deduplicateByRemotePath error: $e\n$st');
    return 0;
  }
}
// =========================
// Backfill URI fittizi per remoti legacy
// =========================

/// Sets a synthetic `aves-remote://...` URI on every remote row whose
/// `uri` is NULL/empty. A pure-SQL pass first covers rows that have a
/// `remoteId` (fastest); a Dart loop then handles the remaining rows
/// (no remoteId, but a remotePath) via [RemoteDbUris.make].
///
/// NOTE(review): the SQL pass writes 'aves-remote://rid/...' while
/// ensureRemoteUris() writes 'remote://...' — confirm both schemes are
/// intended to coexist.
Future<void> backfillRemoteUris() async {
  // 1) SQL backfill for rows with a remoteId (fastest path)
  try {
    final updated = await db.rawUpdate(
      "UPDATE entry "
      "SET uri = 'aves-remote://rid/' || replace(remoteId, ' ', '') "
      "WHERE origin=1 AND (uri IS NULL OR trim(uri)='') AND remoteId IS NOT NULL",
    );
    debugPrint('[RemoteRepository] backfill URIs via SQL (remoteId) updated=$updated');
  } catch (e, st) {
    debugPrint('[RemoteRepository] backfill URIs (SQL) error: $e\n$st');
  }

  // 2) Dart loop for the (few) remaining rows without remoteId but with remotePath
  try {
    final rows = await db.rawQuery(
      "SELECT id, remotePath FROM entry "
      "WHERE origin=1 AND (uri IS NULL OR trim(uri)='') "
      "AND (remoteId IS NULL OR trim(remoteId)='') "
      "AND remotePath IS NOT NULL"
    );

    if (rows.isNotEmpty) {
      for (final r in rows) {
        final id = (r['id'] as num).toInt();
        final remotePath = (r['remotePath'] as String?) ?? '';
        await db.update(
          'entry',
          {'uri': RemoteDbUris.make(remotePath: remotePath)},
          where: 'id=?',
          whereArgs: [id],
          conflictAlgorithm: ConflictAlgorithm.ignore,
        );
      }
      debugPrint('[RemoteRepository] backfill URIs via Dart (remotePath) updated=${rows.length}');
    }
  } catch (e, st) {
    debugPrint('[RemoteRepository] backfill URIs (Dart) error: $e\n$st');
  }
}
// =========================
// Backfill ESSENZIALI (contentId, dateModifiedMillis) per remoti legacy
// =========================

/// Backfills a synthetic contentId for remote rows whose contentId is
/// NULL or non-positive, then fills dateModifiedMillis from
/// sourceDateTakenMillis (falling back to "now") where missing.
Future<void> backfillRemoteEssentials() async {
  // 1) Synthetic contentId: same derivation as _buildEntryRow — keep the
  //    two in sync.
  try {
    final rows = await db.rawQuery(
      "SELECT id, remoteId, remotePath FROM entry "
      "WHERE origin=1 AND (contentId IS NULL OR contentId<=0)"
    );
    if (rows.isNotEmpty) {
      for (final r in rows) {
        final id = (r['id'] as num).toInt();
        final rid = (r['remoteId'] as String?) ?? '';
        final rpath = (r['remotePath'] as String?) ?? '';
        final seed = rid.isNotEmpty ? rid : rpath;
        final positiveHash = seed.hashCode & 0x7fffffff; // force positive
        final cid = 1_000_000_000 + (positiveHash % 900_000_000);
        await db.update('entry', {'contentId': cid}, where: 'id=?', whereArgs: [id]);
      }
      debugPrint('[RemoteRepository] backfill contentId updated=${rows.length}');
    }
  } catch (e, st) {
    debugPrint('[RemoteRepository] backfill contentId error: $e\n$st');
  }

  // 2) dateModifiedMillis: prefer sourceDateTakenMillis, else "now".
  try {
    final nowMs = DateTime.now().millisecondsSinceEpoch;
    final updated = await db.rawUpdate(
      "UPDATE entry SET dateModifiedMillis = COALESCE(sourceDateTakenMillis, ?) "
      "WHERE origin=1 AND (dateModifiedMillis IS NULL OR dateModifiedMillis=0)",
      [nowMs],
    );
    debugPrint('[RemoteRepository] backfill dateModifiedMillis updated=$updated');
  } catch (e, st) {
    debugPrint('[RemoteRepository] backfill dateModifiedMillis error: $e\n$st');
  }
}
// =========================
// Helper combinato: pulizia + indici + backfill URI/ESSENZIALI
// =========================

/// One-shot maintenance: dedup remote rows, (re)create the unique
/// indexes, then backfill synthetic URIs and essential columns.
Future<void> sanitizeRemotes() async {
  // Dedup first, so the unique indexes can be created afterwards.
  await deduplicateRemotes();
  await deduplicateByRemotePath(); // optional but useful
  await ensureUniqueRemoteId();
  await ensureUniqueRemotePath();

  // Every remote row gets a synthetic uri...
  await backfillRemoteUris();

  // ...and valid contentId / dateModifiedMillis values.
  await backfillRemoteEssentials();
}
// =========================
// Utils
// =========================

/// Returns how many remote rows (origin=1) exist in `entry`.
Future<int> countRemote() async {
  final rows = await db.rawQuery('SELECT COUNT(1) AS c FROM entry WHERE origin=1');
  // Idiom: Sqflite.firstIntValue is the canonical scalar read (replaces
  // the manual `rows.first['c'] as int?` cast). COUNT never yields NULL,
  // but keep the 0 fallback for safety.
  return Sqflite.firstIntValue(rows) ?? 0;
}
}