tav player: seek and pause

This commit is contained in:
minjaesong
2025-10-08 21:25:33 +09:00
parent 9826efd98a
commit 17b5063ef0
4 changed files with 238 additions and 20 deletions

View File

@@ -470,6 +470,75 @@ let decoderDbgInfo = {}
let cueElements = []
let currentCueIndex = -1 // Track current cue position
let iframePositions = [] // Track I-frame positions for seeking: [{offset, frameNum}]
// Function to find nearest I-frame before or at target frame
// Locate the most recent recorded I-frame at or before targetFrame so a
// backward seek can land on it and decode forward from there.
// Relies on iframePositions being ordered by ascending frameNum.
// Returns the matching {offset, frameNum} entry; falls back to the first
// recorded I-frame when targetFrame precedes all entries, or null when
// nothing has been recorded yet.
function findNearestIframe(targetFrame) {
if (iframePositions.length === 0) return null
// Keep the last (i.e. largest) entry whose frameNum does not exceed the target
let best = null
for (const entry of iframePositions) {
if (entry.frameNum <= targetFrame) {
best = entry
}
}
// targetFrame sits before the first I-frame: fall back to the earliest one
return (best === null) ? iframePositions[0] : best
}
// Function to scan forward and find next I-frame at or after target frame
// Scan forward through the packet stream for the first I-frame whose frame
// number is at or after targetFrame. Reads ahead from the current stream
// position and ALWAYS restores it afterwards (via finally) — the caller is
// responsible for the actual seek using the returned offset. Any I-frame
// found is memoised into iframePositions for future seeks.
// Returns {offset, frameNum} on success, or null when no I-frame exists
// before EOF / the next concatenated file header / a read error.
// NOTE(review): currentPos is unused; kept for interface compatibility.
function scanForwardToIframe(targetFrame, currentPos) {
// Remember where we started so the read cursor can be restored
let savedPos = seqread.getReadCount()
try {
let scanFrameCount = frameCount
// Walk packet headers until EOF
while (seqread.getReadCount() < FILE_LENGTH) {
let packetPos = seqread.getReadCount()
let pType = seqread.readOneByte()
// Sync packets carry no payload; only the standard sync advances the frame counter
if (pType === TAV_PACKET_SYNC || pType === TAV_PACKET_SYNC_NTSC) {
if (pType === TAV_PACKET_SYNC) {
scanFrameCount++
}
continue
}
// Found an I-frame at or after the target?
if (pType === TAV_PACKET_IFRAME && scanFrameCount >= targetFrame) {
// Record this I-frame position for future use
iframePositions.push({offset: packetPos, frameNum: scanFrameCount})
return {offset: packetPos, frameNum: scanFrameCount}
}
// Hit the next concatenated file's header: stop scanning
if (pType === TAV_FILE_HEADER_FIRST) {
break
}
// Every other non-sync packet carries a uint32 payload size; skip over it
// (sync types were already handled above, so no need to re-check them)
let payloadSize = seqread.readInt()
seqread.skip(payloadSize)
}
// No I-frame found before EOF / next header
return null
} catch (e) {
// Error or EOF during scan
serial.printerr(`Scan error: ${e}`)
return null
} finally {
// Always restore the original read position (success or failure)
seqread.seek(savedPos)
}
}
// Function to try reading next TAV file header at current position
function tryReadNextTAVHeader() {
@@ -624,6 +693,7 @@ function tryReadNextTAVHeader() {
let lastKey = 0
let skipped = false
let paused = false
// Playback loop - properly adapted from TEV with multi-file support
try {
@@ -642,6 +712,16 @@ try {
stopPlay = true
break
}
else if (keyCode == 62) { // SPACE - pause/resume
paused = !paused
if (paused) {
audio.stop(0)
serial.println(`Paused at frame ${frameCount}`)
} else {
audio.play(0)
serial.println(`Resumed`)
}
}
else if (keyCode == 19 && cueElements.length > 0) { // Up arrow - previous cue
currentCueIndex = (currentCueIndex <= 0) ? cueElements.length - 1 : currentCueIndex - 1
let cue = cueElements[currentCueIndex]
@@ -653,6 +733,10 @@ try {
akku = FRAME_TIME
akku2 = 0.0
audio.purgeQueue(0)
if (paused) {
audio.play(0)
audio.stop(0)
}
skipped = true
}
}
@@ -667,17 +751,70 @@ try {
akku = FRAME_TIME
akku2 = 0.0
audio.purgeQueue(0)
if (paused) {
audio.play(0)
audio.stop(0)
}
skipped = true
}
}
else if (keyCode == 21) { // Left arrow - seek back 5.5s
let targetFrame = Math.max(0, frameCount - Math.floor(header.fps * 5.5))
let seekTarget = findNearestIframe(targetFrame)
if (seekTarget) {
serial.println(`Seeking back to frame ${seekTarget.frameNum} (offset ${seekTarget.offset})`)
seqread.seek(seekTarget.offset)
frameCount = seekTarget.frameNum
akku = FRAME_TIME
akku2 -= 5.5
audio.purgeQueue(0)
if (paused) {
audio.play(0)
audio.stop(0)
}
skipped = true
}
}
else if (keyCode == 22) { // Right arrow - seek forward 5s
let targetFrame = Math.min(header.totalFrames - 1, frameCount + Math.floor(header.fps * 5.0))
// Try to find in already-decoded I-frames first
let seekTarget = findNearestIframe(targetFrame)
// If not found or behind current position, scan forward
if (!seekTarget || seekTarget.frameNum <= frameCount) {
serial.println(`Scanning forward for I-frame near frame ${targetFrame}...`)
seekTarget = scanForwardToIframe(targetFrame, seqread.getReadCount())
}
if (seekTarget && seekTarget.frameNum > frameCount) {
serial.println(`Seeking forward to frame ${seekTarget.frameNum} (offset ${seekTarget.offset})`)
seqread.seek(seekTarget.offset)
frameCount = seekTarget.frameNum
akku = FRAME_TIME
akku2 += 5.0
audio.purgeQueue(0)
if (paused) {
audio.play(0)
audio.stop(0)
}
skipped = true
} else if (!seekTarget) {
serial.println(`No I-frame found ahead`)
}
}
}
lastKey = keyCode
}
if (akku >= FRAME_TIME) {
// Read packet header
var packetType = seqread.readOneByte()
// When paused, just reset accumulator and skip frame processing
if (!paused) {
// Read packet header (record position before reading for I-frame tracking)
let packetOffset = seqread.getReadCount()
var packetType = seqread.readOneByte()
// serial.println(`Packet ${packetType} at offset ${seqread.getReadCount() - 1}`)
@@ -733,6 +870,11 @@ try {
}
else if (packetType === TAV_PACKET_IFRAME || packetType === TAV_PACKET_PFRAME) {
// Record I-frame position for seeking
if (packetType === TAV_PACKET_IFRAME) {
iframePositions.push({offset: packetOffset, frameNum: frameCount})
}
// Video packet
const compressedSize = seqread.readInt()
@@ -870,11 +1012,14 @@ try {
println(`Unknown packet type: 0x${packetType.toString(16)}`)
break
}
} // end of !paused block
}
let t2 = sys.nanoTime()
akku += (t2 - t1) / 1000000000.0
akku2 += (t2 - t1) / 1000000000.0
if (!paused) {
akku += (t2 - t1) / 1000000000.0
akku2 += (t2 - t1) / 1000000000.0
}
// Simple progress display
if (interactive) {
@@ -899,7 +1044,7 @@ try {
fileOrd: (cueElements.length > 0) ? currentCueIndex+1 : currentFileIndex,
resolution: `${header.width}x${header.height}${(isInterlaced) ? 'i' : ''}`,
colourSpace: header.version % 2 == 0 ? "ICtCp" : "YCoCg",
currentStatus: 1
currentStatus: paused ? 2 : 1 // 2 = paused, 1 = playing
}
gui.printBottomBar(guiStatus)
gui.printTopBar(guiStatus, 1)

View File

@@ -659,7 +659,7 @@ try {
// For interlaced: decode current frame into currentFieldAddr
// For display: use prevFieldAddr as current, currentFieldAddr as next
graphics.tevDecode(blockDataPtr, nextFieldAddr, currentFieldAddr, width, decodingHeight, qualityY, qualityCo, qualityCg, trueFrameCount, debugMotionVectors, version, enableDeblocking, enableBoundaryAwareDecoding)
graphics.tevDeinterlace(trueFrameCount, width, decodingHeight, prevFieldAddr, currentFieldAddr, nextFieldAddr, CURRENT_RGB_ADDR, deinterlaceAlgorithm)
graphics.tevDeinterlace(trueFrameCount + 1, width, decodingHeight, prevFieldAddr, currentFieldAddr, nextFieldAddr, CURRENT_RGB_ADDR, deinterlaceAlgorithm)
// Rotate field buffers for next frame: NEXT -> CURRENT -> PREV
rotateFieldBuffers()

View File

@@ -4683,8 +4683,8 @@ class GraphicsJSR223Delegate(private val vm: VM) {
val random = java.util.Random()
for (i in 0 until coeffCount) {
yTile[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
coTile[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
cgTile[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
// coTile[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
// cgTile[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
}
}
@@ -4749,8 +4749,8 @@ class GraphicsJSR223Delegate(private val vm: VM) {
val random = java.util.Random()
for (i in 0 until coeffCount) {
yTile[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
coTile[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
cgTile[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
// coTile[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
// cgTile[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
}
}
@@ -5331,8 +5331,8 @@ class GraphicsJSR223Delegate(private val vm: VM) {
val random = java.util.Random()
for (i in 0 until coeffCount) {
currentY[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
currentCo[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
currentCg[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
// currentCo[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
// currentCg[i] += (random.nextInt(filmGrainLevel * 2 + 1) - filmGrainLevel).toFloat()
}
}

View File

@@ -3433,6 +3433,83 @@ static int detect_still_frame(tav_encoder_t *enc) {
return (changed_pixels == 0);
}
// Detect still frames by comparing quantised DWT coefficients.
// Returns 1 if quantised coefficients are identical (frame is truly still), 0 otherwise.
// Benefits: quality-aware (lower quality = more SKIP frames), pure integer math.
// DISABLED — works in theory but not in practice; kept for reference.
static int detect_still_frame_dwt(tav_encoder_t *enc) {
    if (!enc->previous_coeffs_allocated || enc->intra_only) {
        return 0; // No previous coefficients to compare or intra-only mode
    }

    // Only compare against I-frames to avoid DELTA quantization drift:
    // previous_coeffs are updated by DELTA frames with reconstructed values
    // that accumulate error.
    if (enc->last_frame_packet_type != TAV_PACKET_IFRAME) {
        return 0; // Must compare against clean I-frame, not DELTA reconstruction
    }

    // Get current quantisers (use adjusted quantiser from bitrate control if applicable)
    int qY = enc->bitrate_mode ? quantiser_float_to_int_dithered(enc) : enc->quantiser_y;
    int this_frame_qY = QLUT[qY];
    int this_frame_qCo = QLUT[enc->quantiser_co];
    int this_frame_qCg = QLUT[enc->quantiser_cg];

    // Coefficient count (monoblock mode)
    const int coeff_count = enc->width * enc->height;

    // Quantise current DWT coefficients into the reusable scratch buffers
    int16_t *quantised_y = enc->reusable_quantised_y;
    int16_t *quantised_co = enc->reusable_quantised_co;
    int16_t *quantised_cg = enc->reusable_quantised_cg;

    if (enc->perceptual_tuning) {
        quantise_dwt_coefficients_perceptual_per_coeff(enc, enc->current_dwt_y, quantised_y, coeff_count, this_frame_qY, enc->width, enc->height, enc->decomp_levels, 0, enc->frame_count);
        quantise_dwt_coefficients_perceptual_per_coeff(enc, enc->current_dwt_co, quantised_co, coeff_count, this_frame_qCo, enc->width, enc->height, enc->decomp_levels, 1, enc->frame_count);
        quantise_dwt_coefficients_perceptual_per_coeff(enc, enc->current_dwt_cg, quantised_cg, coeff_count, this_frame_qCg, enc->width, enc->height, enc->decomp_levels, 1, enc->frame_count);
    } else {
        quantise_dwt_coefficients(enc->current_dwt_y, quantised_y, coeff_count, this_frame_qY, enc->dead_zone_threshold, enc->width, enc->height, enc->decomp_levels, 0);
        quantise_dwt_coefficients(enc->current_dwt_co, quantised_co, coeff_count, this_frame_qCo, enc->dead_zone_threshold, enc->width, enc->height, enc->decomp_levels, 1);
        quantise_dwt_coefficients(enc->current_dwt_cg, quantised_cg, coeff_count, this_frame_qCg, enc->dead_zone_threshold, enc->width, enc->height, enc->decomp_levels, 1);
    }

    // Quantise previous DWT coefficients (stored from last I-frame)
    int16_t *prev_quantised_y = malloc(coeff_count * sizeof(int16_t));
    int16_t *prev_quantised_co = malloc(coeff_count * sizeof(int16_t));
    int16_t *prev_quantised_cg = malloc(coeff_count * sizeof(int16_t));
    if (!prev_quantised_y || !prev_quantised_co || !prev_quantised_cg) {
        // Allocation failure: free whatever succeeded and report "not still"
        // (the safe fallback — the encoder simply won't emit a SKIP frame).
        free(prev_quantised_y);
        free(prev_quantised_co);
        free(prev_quantised_cg);
        return 0;
    }

    if (enc->perceptual_tuning) {
        quantise_dwt_coefficients_perceptual_per_coeff(enc, enc->previous_coeffs_y, prev_quantised_y, coeff_count, this_frame_qY, enc->width, enc->height, enc->decomp_levels, 0, enc->frame_count);
        quantise_dwt_coefficients_perceptual_per_coeff(enc, enc->previous_coeffs_co, prev_quantised_co, coeff_count, this_frame_qCo, enc->width, enc->height, enc->decomp_levels, 1, enc->frame_count);
        quantise_dwt_coefficients_perceptual_per_coeff(enc, enc->previous_coeffs_cg, prev_quantised_cg, coeff_count, this_frame_qCg, enc->width, enc->height, enc->decomp_levels, 1, enc->frame_count);
    } else {
        quantise_dwt_coefficients(enc->previous_coeffs_y, prev_quantised_y, coeff_count, this_frame_qY, enc->dead_zone_threshold, enc->width, enc->height, enc->decomp_levels, 0);
        quantise_dwt_coefficients(enc->previous_coeffs_co, prev_quantised_co, coeff_count, this_frame_qCo, enc->dead_zone_threshold, enc->width, enc->height, enc->decomp_levels, 1);
        quantise_dwt_coefficients(enc->previous_coeffs_cg, prev_quantised_cg, coeff_count, this_frame_qCg, enc->dead_zone_threshold, enc->width, enc->height, enc->decomp_levels, 1);
    }

    // Compare quantised coefficients - pure integer math
    int diff_count = 0;
    for (int i = 0; i < coeff_count; i++) {
        if (quantised_y[i] != prev_quantised_y[i] ||
            quantised_co[i] != prev_quantised_co[i] ||
            quantised_cg[i] != prev_quantised_cg[i]) {
            diff_count++;
            // One mismatch already decides the result; the full count is only
            // needed for the verbose diagnostic below.
            if (!enc->verbose) break;
        }
    }

    free(prev_quantised_y);
    free(prev_quantised_co);
    free(prev_quantised_cg);

    if (enc->verbose) {
        printf("Still frame detection (DWT): %d/%d coeffs differ\n", diff_count, coeff_count);
    }

    // If all quantised coefficients match, frames are identical after compression
    return (diff_count == 0);
}
// Main function
int main(int argc, char *argv[]) {
generate_random_filename(TEMP_AUDIO_FILE);
@@ -3892,18 +3969,14 @@ int main(int argc, char *argv[]) {
int is_scene_change = detect_scene_change(enc);
int is_time_keyframe = (frame_count % KEYFRAME_INTERVAL) == 0;
// Check if we can use SKIP mode
// Check if we can use SKIP mode (DWT coefficient-based detection)
int is_still = detect_still_frame(enc);
enc->is_still_frame_cached = is_still; // Cache for use in compress_and_write_frame
// SKIP mode can be used if:
// 1. Frame is still AND
// 2. Previous coeffs allocated AND
// 3. (Last frame was I-frame OR we're continuing a SKIP run)
// SKIP mode can be used if frame is still (detect_still_frame_dwt already checks against I-frame)
// SKIP runs can continue as long as frames remain identical to the reference I-frame
int in_skip_run = enc->used_skip_mode_last_frame;
int can_use_skip = is_still &&
enc->previous_coeffs_allocated &&
(enc->last_frame_packet_type == TAV_PACKET_IFRAME || in_skip_run);
int can_use_skip = is_still && enc->previous_coeffs_allocated;
// During a SKIP run, suppress keyframe timer unless content changes enough to un-skip
// Un-skip threshold is the negation of SKIP threshold: content must change to break the run