Refactor concurrency handling and improve sleep precision in concurrency.dart and header.dart

ImBenji
2025-11-23 17:57:30 +00:00
parent 2de17fe720
commit 747f0bd1ed
5 changed files with 48 additions and 124 deletions


@@ -3,7 +3,12 @@ import 'dart:convert';
 import 'dart:io';
 import 'package:sweepstore/structures.dart';
 import 'helper_extensions.dart';
 import 'helpers.dart';
 
+int roundToNearest16(int value) {
+  int rounded = (value + 15) & ~15;
+  return rounded;
+}
+
 void initialiseSweepstoreHeader(RandomAccessFile file, {
   int concurrentWorkers = 4,
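
For context: despite its name, the new helper rounds up to the next multiple of 16 rather than to the nearest one, since `(value + 15) & ~15` clears the low four bits after adding 15. A minimal standalone sketch, with assertions using the values this commit actually feeds it:

int roundToNearest16(int value) => (value + 15) & ~15;

void main() {
  assert(roundToNearest16(29) == 32); // the new ticket size below
  assert(roundToNearest16(46) == 48); // the new static header offset
  assert(roundToNearest16(48) == 48); // exact multiples are unchanged
  assert(roundToNearest16(17) == 32); // "nearest" would have given 16 here
}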
@@ -200,18 +205,18 @@ class SweepstoreConcurrencyHeaderWriter extends SweepstoreConcurrencyHeader {
 }
 
-const int endOfStaticHeaderOffset = 46;
+final int endOfStaticHeaderOffset = roundToNearest16(46);
 
 class SweepstoreWorkerTicket {
-  static const int _ticketSize = 30;
+  static final int ticketSize = roundToNearest16(29);
 
   final SweepstoreConcurrencyHeader _concurrencyHeader;
   final int ticketIndex;
 
   SweepstoreWorkerTicket(this.ticketIndex, this._concurrencyHeader);
 
   // All offsets are relative to the start of the worker's ticket
-  int get _baseOffset => endOfStaticHeaderOffset + (ticketIndex * _ticketSize);
+  int get _baseOffset => endOfStaticHeaderOffset + (ticketIndex * ticketSize);
 
   // Offset 0 - 4 bytes
   int get identifier {
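
With both constants rounded, every ticket's _baseOffset now lands on a 16-byte boundary: the static header ends at 48 instead of 46, and each ticket occupies 32 bytes instead of 30. A standalone sketch of the arithmetic (baseOffsetFor is a hypothetical free function mirroring the _baseOffset getter above):

int roundToNearest16(int value) => (value + 15) & ~15;

final int endOfStaticHeaderOffset = roundToNearest16(46); // 48
final int ticketSize = roundToNearest16(29);              // 32

int baseOffsetFor(int ticketIndex) =>
    endOfStaticHeaderOffset + (ticketIndex * ticketSize);

void main() {
  // New layout: tickets start at 48, 80, 112, ... (all multiples of 16),
  // where the old 46 + n * 30 layout gave 46, 76, 106, ... (unaligned).
  for (var i = 0; i < 3; i++) {
    print('ticket $i starts at byte ${baseOffsetFor(i)}');
  }
}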
@@ -289,10 +294,10 @@ class SweepstoreWorkerTicket {
     try {
-      _concurrencyHeader._header._file.lockSync(FileLock.blockingExclusive, _baseOffset, _baseOffset + _ticketSize);
+      _concurrencyHeader._header._file.lockSync(FileLock.blockingExclusive, _baseOffset, _baseOffset + ticketSize);
       _concurrencyHeader._header._file.setPositionSync(_baseOffset);
-      List<int> existingBuffer = _concurrencyHeader._header._file.readSync(_ticketSize);
+      List<int> existingBuffer = _concurrencyHeader._header._file.readSync(ticketSize);
       RandomAccessMemory buffer = RandomAccessMemory(existingBuffer);
 
       if (identifier != null) {
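
The lock calls above use dart:io's byte-range locking: RandomAccessFile.lockSync takes a mode plus start and end offsets, so each worker holds an exclusive lock on just its own 32-byte ticket while other workers operate on theirs. A minimal sketch of the pattern (function and parameter names are illustrative):

import 'dart:io';

// Exclusively lock one region of the file, mutate it, and always unlock.
void withRegionLock(
    RandomAccessFile file, int start, int end, void Function() body) {
  file.lockSync(FileLock.blockingExclusive, start, end);
  try {
    body();
  } finally {
    file.unlockSync(start, end);
  }
}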
@@ -324,11 +329,17 @@ class SweepstoreWorkerTicket {
         buffer.writeIntSync(writeSize, 4);
       }
 
+      // Pad the rest of the ticket with zeros if necessary
+      buffer.setPositionSync(30);
+      while (buffer.positionSync() < ticketSize) {
+        buffer.writeIntSync(0, 1);
+      }
+
       _concurrencyHeader._header._file.setPositionSync(_baseOffset);
       _concurrencyHeader._header._file.writeFromSync(buffer.toUint8List());
     } finally {
-      _concurrencyHeader._header._file.unlockSync(_baseOffset, _baseOffset + _ticketSize);
+      _concurrencyHeader._header._file.unlockSync(_baseOffset, _baseOffset + ticketSize);
     }
   }
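
The new padding loop covers the gap the alignment change opens up: the ticket's fields end at byte 30, but ticketSize is now 32, so bytes 30 and 31 are zeroed explicitly rather than left holding stale buffer contents. A sketch of the same step over a plain byte list (padTicket is hypothetical; the real code works through the project's RandomAccessMemory type):

// Zero-fill a ticket buffer from the end of its fields up to ticketSize.
List<int> padTicket(List<int> fieldBytes, int ticketSize) {
  final buffer = List<int>.from(fieldBytes); // bytes 0..29 in this commit
  while (buffer.length < ticketSize) {
    buffer.add(0);                           // bytes 30 and 31
  }
  return buffer;
}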
@@ -337,118 +348,14 @@ class SweepstoreWorkerTicket {
       _concurrencyHeader._header._file.lockSync(
         FileLock.blockingExclusive,
         _baseOffset,
-        _baseOffset + _ticketSize
+        _baseOffset + ticketSize
       );
       // Successfully locked - immediately unlock and return true
-      _concurrencyHeader._header._file.unlockSync(_baseOffset, _baseOffset + _ticketSize);
+      _concurrencyHeader._header._file.unlockSync(_baseOffset, _baseOffset + ticketSize);
       return true;
     } catch (e) {
       // Lock failed - already held by another process
       return false;
     }
   }
 }
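
The probe above works by acquiring the region lock and releasing it immediately; if another process holds the range, the attempt throws and the method reports the ticket as busy. One nuance worth noting: FileLock.exclusive fails immediately when the range is already held, while FileLock.blockingExclusive (used above) waits for the holder to release it. A minimal sketch of the non-blocking variant (names are illustrative):

import 'dart:io';

// Probe whether a byte range is currently lockable, without keeping the lock.
bool isRegionFree(RandomAccessFile file, int start, int end) {
  try {
    file.lockSync(FileLock.exclusive, start, end);
    file.unlockSync(start, end);
    return true;
  } on FileSystemException {
    return false;
  }
}

The remainder of this hunk deletes the deprecated _SweepstoreWorkerTicket duplicate of the class above: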
-
-@deprecated
-class _SweepstoreWorkerTicket {
-  static const int _ticketSize = 30;
-
-  final SweepstoreConcurrencyHeader _concurrencyHeader;
-  final int ticketIndex;
-
-  _SweepstoreWorkerTicket(this.ticketIndex, this._concurrencyHeader);
-
-  // All offsets are relative to the start of the worker's ticket
-  int get _baseOffset => endOfStaticHeaderOffset + (ticketIndex * _ticketSize);
-
-  // Offset 0 - 4 bytes
-  int get identifier {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset);
-    int id = _concurrencyHeader._header._file.readIntSync(4);
-    return id;
-  }
-
-  set identifier(int id) {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset);
-    _concurrencyHeader._header._file.writeIntSync(id, 4);
-    _concurrencyHeader._header._file.flushSync();
-  }
-
-  // Offset 4 - 4 bytes
-  int get workerHeartbeat {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 4);
-    int heartbeat = _concurrencyHeader._header._file.readIntSync(4);
-    return heartbeat;
-  }
-
-  set workerHeartbeat(int heartbeat) {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 4);
-    _concurrencyHeader._header._file.writeIntSync(heartbeat, 4);
-  }
-
-  // Offset 8 - 1 byte
-  SweepstoreTicketState get ticketState {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 8);
-    int stateValue = _concurrencyHeader._header._file.readIntSync(1);
-    return SweepstoreTicketState.values[stateValue];
-  }
-
-  set ticketState(SweepstoreTicketState state) {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 8);
-    _concurrencyHeader._header._file.writeIntSync(state.index, 1);
-    _concurrencyHeader._header._file.flushSync();
-  }
-
-  // Offset 9 - 1 byte
-  SweepstoreTicketOperation get ticketOperation {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 9);
-    int operationValue = _concurrencyHeader._header._file.readIntSync(1);
-    return SweepstoreTicketOperation.values[operationValue];
-  }
-
-  set ticketOperation(SweepstoreTicketOperation operation) {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 9);
-    _concurrencyHeader._header._file.writeIntSync(operation.index, 1);
-  }
-
-  // Offset 10 - 8 bytes
-  int get keyHash {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 10);
-    int hash = _concurrencyHeader._header._file.readIntSync(8);
-    return hash;
-  }
-
-  set keyHash(int hash) {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 10);
-    _concurrencyHeader._header._file.writeIntSync(hash, 8);
-  }
-
-  // Offset 18 - 8 bytes
-  SweepstorePointer get writePointer {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 18);
-    int address = _concurrencyHeader._header._file.readIntSync(8);
-    return SweepstorePointer(address);
-  }
-
-  set writePointer(SweepstorePointer pointer) {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 18);
-    _concurrencyHeader._header._file.writeIntSync(pointer.address, 8);
-  }
-
-  // Offset 26 - 4 bytes
-  int get writeSize {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 26);
-    int size = _concurrencyHeader._header._file.readIntSync(4);
-    return size;
-  }
-
-  set writeSize(int size) {
-    _concurrencyHeader._header._file.setPositionSync(_baseOffset + 26);
-    _concurrencyHeader._header._file.writeIntSync(size, 4);
-  }
-
-  // Helpers
-  void lock() {
-    _concurrencyHeader._header._file.lockSync(FileLock.exclusive, _baseOffset, _baseOffset + _ticketSize);
-  }
-
-  void unlock() {
-    _concurrencyHeader._header._file.unlockSync(_baseOffset, _baseOffset + _ticketSize);
-  }
-}
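
For reference, the byte layout the removed class documents is the same one the surviving SweepstoreWorkerTicket still reads and writes; this commit only grows each ticket's allocated size from 30 to 32 bytes. A hypothetical constants class summarising the offsets from the accessors above (TicketLayout and its field names are illustrative, not part of the codebase):

// Offsets are relative to a ticket's base offset.
class TicketLayout {
  static const int identifier = 0;    // 4 bytes
  static const int heartbeat = 4;     // 4 bytes
  static const int state = 8;         // 1 byte
  static const int operation = 9;     // 1 byte
  static const int keyHash = 10;      // 8 bytes
  static const int writePointer = 18; // 8 bytes
  static const int writeSize = 26;    // 4 bytes
  static const int fieldEnd = 30;     // bytes 30..31 are now zero padding
}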