Remove C helper, use pure Nim dynlib imports; better defaults

- Replaced avhelper.c with direct {.dynlib.} pragma imports
- Grid off by default
- Higher default gain and time zoom

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
rolandnsharp
2026-04-05 20:08:12 +10:00
parent 2c04d7f034
commit 9d65aa1f94
3 changed files with 72 additions and 198 deletions

View File

@@ -1,34 +1,55 @@
## Audio capture via libavdevice/libavformat (dlopen at runtime). ## Audio capture via libavdevice/libavformat using Nim's dynlib pragma.
## Libraries are loaded at runtime — no dev packages, no C helper file.
import osproc, strutils import osproc, strutils
import scope import scope
# ── libav C helper bindings ────────────────────────────────────────── # ── libav dynlib bindings ────────────────────────────────────────────
{.compile: "avhelper.c".} const
{.passL: "-ldl".} avformat = "libavformat.so(|.61|.60|.59)"
avdevice = "libavdevice.so(|.61|.60|.59)"
avcodec = "libavcodec.so(|.61|.60|.59)"
type type
AVFormatContext = object AVFormatContext = object # opaque
AVPacket = object AVInputFormat = object # opaque
proc av_helper_init(): cint {.importc, cdecl.} # AVPacket layout — must match FFmpeg 5.x/6.x/7.x:
proc av_helper_open_pulse(ctx: ptr ptr AVFormatContext, # buf(8), pts(8), dts(8), data(8), size(4), stream_index(4)
device: cstring): cint {.importc, cdecl.} AVPacket = object
proc av_helper_find_stream_info(ctx: ptr AVFormatContext): cint buf: pointer
{.importc, cdecl.} pts: int64
proc av_helper_find_audio_stream(ctx: ptr AVFormatContext): cint dts: int64
{.importc, cdecl.} data: ptr UncheckedArray[uint8]
proc av_helper_read_frame(ctx: ptr AVFormatContext, size: cint
pkt: ptr AVPacket): cint {.importc, cdecl.} stream_index: cint
proc av_helper_packet_stream(pkt: ptr AVPacket): cint {.importc, cdecl.}
proc av_helper_packet_data(pkt: ptr AVPacket): ptr UncheckedArray[uint8] const AVMEDIA_TYPE_AUDIO = 1.cint
{.importc, cdecl.}
proc av_helper_packet_size(pkt: ptr AVPacket): cint {.importc, cdecl.} proc avdevice_register_all()
proc av_helper_packet_alloc(): ptr AVPacket {.importc, cdecl.} {.importc, dynlib: avdevice, cdecl.}
proc av_helper_packet_unref(pkt: ptr AVPacket) {.importc, cdecl.} proc av_find_input_format(name: cstring): ptr AVInputFormat
proc av_helper_packet_free(pkt: ptr ptr AVPacket) {.importc, cdecl.} {.importc, dynlib: avformat, cdecl.}
proc av_helper_close(ctx: ptr ptr AVFormatContext) {.importc, cdecl.} proc avformat_open_input(ctx: ptr ptr AVFormatContext, url: cstring,
fmt: ptr AVInputFormat, options: pointer): cint
{.importc, dynlib: avformat, cdecl.}
proc avformat_find_stream_info(ctx: ptr AVFormatContext,
options: pointer): cint
{.importc, dynlib: avformat, cdecl.}
proc av_find_best_stream(ctx: ptr AVFormatContext, mediaType: cint,
wanted: cint, related: cint, codec: pointer, flags: cint): cint
{.importc, dynlib: avformat, cdecl.}
proc av_read_frame(ctx: ptr AVFormatContext, pkt: ptr AVPacket): cint
{.importc, dynlib: avformat, cdecl.}
proc avformat_close_input(ctx: ptr ptr AVFormatContext)
{.importc, dynlib: avformat, cdecl.}
proc av_packet_alloc(): ptr AVPacket
{.importc, dynlib: avcodec, cdecl.}
proc av_packet_unref(pkt: ptr AVPacket)
{.importc, dynlib: avcodec, cdecl.}
proc av_packet_free(pkt: ptr ptr AVPacket)
{.importc, dynlib: avcodec, cdecl.}
# ── Monitor source detection ───────────────────────────────────────── # ── Monitor source detection ─────────────────────────────────────────
@@ -58,26 +79,35 @@ proc startAudio*(): AudioCapture =
let monitor = findMonitorSource() let monitor = findMonitorSource()
if monitor.len == 0: return if monitor.len == 0: return
if av_helper_init() < 0: return try:
avdevice_register_all()
except: return
let fmt = av_find_input_format("pulse")
if fmt == nil: return
var ctx: ptr AVFormatContext = nil var ctx: ptr AVFormatContext = nil
if av_helper_open_pulse(addr ctx, monitor.cstring) < 0: return if avformat_open_input(addr ctx, monitor.cstring, fmt, nil) < 0: return
if av_helper_find_stream_info(ctx) < 0: if avformat_find_stream_info(ctx, nil) < 0:
av_helper_close(addr ctx) avformat_close_input(addr ctx)
return return
let idx = av_helper_find_audio_stream(ctx) let idx = av_find_best_stream(ctx, AVMEDIA_TYPE_AUDIO, -1, -1, nil, 0)
let pkt = av_helper_packet_alloc() if idx < 0:
avformat_close_input(addr ctx)
return
let pkt = av_packet_alloc()
if pkt == nil: if pkt == nil:
av_helper_close(addr ctx) avformat_close_input(addr ctx)
return return
AudioCapture(fmtCtx: ctx, packet: pkt, streamIdx: idx.cint, live: true) AudioCapture(fmtCtx: ctx, packet: pkt, streamIdx: idx, live: true)
proc stop*(cap: var AudioCapture) = proc stop*(cap: var AudioCapture) =
if cap.live: if cap.live:
if cap.packet != nil: av_helper_packet_free(addr cap.packet) if cap.packet != nil: av_packet_free(addr cap.packet)
if cap.fmtCtx != nil: av_helper_close(addr cap.fmtCtx) if cap.fmtCtx != nil: avformat_close_input(addr cap.fmtCtx)
proc sourceLabel*(cap: AudioCapture): string = proc sourceLabel*(cap: AudioCapture): string =
if cap.live: "LIVE" else: "NO SIGNAL" if cap.live: "LIVE" else: "NO SIGNAL"
@@ -87,21 +117,18 @@ proc readSamples*(cap: var AudioCapture, scope: var Scope) =
const frameSize = 4 # 2ch × 16-bit const frameSize = 4 # 2ch × 16-bit
# Read one packet — av_read_frame blocks until data arrives, let ret = av_read_frame(cap.fmtCtx, cap.packet)
# which naturally rate-limits the render loop to the audio rate
let ret = av_helper_read_frame(cap.fmtCtx, cap.packet)
if ret < 0: if ret < 0:
scope.sampleCount = 0 scope.sampleCount = 0
return return
if av_helper_packet_stream(cap.packet) != cap.streamIdx: if cap.packet.stream_index != cap.streamIdx:
av_helper_packet_unref(cap.packet) av_packet_unref(cap.packet)
scope.sampleCount = 0 scope.sampleCount = 0
return return
let data = av_helper_packet_data(cap.packet) let data = cap.packet.data
let size = av_helper_packet_size(cap.packet) let frames = min(cap.packet.size div frameSize, scope.samplesL.len.cint)
let frames = min(size div frameSize, scope.samplesL.len)
for i in 0..<frames: for i in 0..<frames:
let off = i * frameSize let off = i * frameSize
@@ -111,4 +138,4 @@ proc readSamples*(cap: var AudioCapture, scope: var Scope) =
scope.samplesR[i] = right.float / 32768.0 scope.samplesR[i] = right.float / 32768.0
scope.sampleCount = frames scope.sampleCount = frames
av_helper_packet_unref(cap.packet) av_packet_unref(cap.packet)

View File

@@ -1,153 +0,0 @@
/* Minimal libav audio capture without requiring dev headers.
Loads libavformat/libavdevice at runtime via dlopen. */
#include <dlfcn.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
/* Opaque handles — we never touch the struct internals from Nim */
typedef void AVFormatContext;
typedef void AVInputFormat;
typedef void AVDictionary;
/* AVPacket partial layout — must match FFmpeg 5.x/6.x/7.x:
buf(8), pts(8), dts(8), data(8), size(4), stream_index(4) */
/* NOTE(review): mirroring only the head of the real AVPacket is
   ABI-sensitive; the offsets above hold for the listed majors, but
   re-verify field order/sizes against any new FFmpeg major release. */
typedef struct {
void *buf;
int64_t pts;
int64_t dts;
uint8_t *data;         /* raw packet bytes, read by the Nim side */
int size;              /* byte count of data */
int stream_index;      /* compared against the chosen audio stream */
/* we don't care about the rest */
} AVPacketHead;
/* Function pointer types matching libav API */
typedef void (*fn_avdevice_register_all)(void);
typedef const AVInputFormat* (*fn_av_find_input_format)(const char*);
typedef int (*fn_avformat_open_input)(AVFormatContext**, const char*,
const AVInputFormat*, AVDictionary**);
typedef int (*fn_avformat_find_stream_info)(AVFormatContext*, AVDictionary**);
typedef void (*fn_avformat_close_input)(AVFormatContext**);
typedef int (*fn_av_read_frame)(AVFormatContext*, AVPacketHead*);
typedef AVPacketHead* (*fn_av_packet_alloc)(void);
typedef void (*fn_av_packet_free)(AVPacketHead**);
typedef void (*fn_av_packet_unref)(AVPacketHead*);
/* Accessors for AVFormatContext fields via known offsets.
We use av_find_best_stream to avoid struct access entirely. */
typedef int (*fn_av_find_best_stream)(AVFormatContext*, int media_type,
int wanted, int related, void**, int flags);
/* Loaded function pointers — resolved by load_libs(); each stays NULL
   until a successful load, so callers must guard before invoking. */
static fn_avdevice_register_all p_avdevice_register_all;
static fn_av_find_input_format p_av_find_input_format;
static fn_avformat_open_input p_avformat_open_input;
static fn_avformat_find_stream_info p_avformat_find_stream_info;
static fn_avformat_close_input p_avformat_close_input;
static fn_av_read_frame p_av_read_frame;
static fn_av_packet_alloc p_av_packet_alloc;
static fn_av_packet_free p_av_packet_free;
static fn_av_packet_unref p_av_packet_unref;
static fn_av_find_best_stream p_av_find_best_stream;
/* dlopen handles: libavformat, libavdevice, and (optionally) libavcodec */
static void *h_format, *h_device, *h_util;
/* 0 = not attempted, 1 = loaded ok, -1 = load failed (result is latched) */
static int loaded = 0;
static int load_libs(void) {
if (loaded) return loaded > 0 ? 0 : -1;
h_format = dlopen("libavformat.so", RTLD_LAZY);
if (!h_format) h_format = dlopen("libavformat.so.60", RTLD_LAZY);
if (!h_format) h_format = dlopen("libavformat.so.59", RTLD_LAZY);
h_device = dlopen("libavdevice.so", RTLD_LAZY);
if (!h_device) h_device = dlopen("libavdevice.so.60", RTLD_LAZY);
if (!h_device) h_device = dlopen("libavdevice.so.59", RTLD_LAZY);
if (!h_format || !h_device) { loaded = -1; return -1; }
p_avdevice_register_all = (fn_avdevice_register_all)
dlsym(h_device, "avdevice_register_all");
p_av_find_input_format = (fn_av_find_input_format)
dlsym(h_format, "av_find_input_format");
p_avformat_open_input = (fn_avformat_open_input)
dlsym(h_format, "avformat_open_input");
p_avformat_find_stream_info = (fn_avformat_find_stream_info)
dlsym(h_format, "avformat_find_stream_info");
p_avformat_close_input = (fn_avformat_close_input)
dlsym(h_format, "avformat_close_input");
p_av_read_frame = (fn_av_read_frame)
dlsym(h_format, "av_read_frame");
p_av_find_best_stream = (fn_av_find_best_stream)
dlsym(h_format, "av_find_best_stream");
p_av_packet_alloc = (fn_av_packet_alloc)
dlsym(h_format, "av_packet_alloc");
if (!p_av_packet_alloc) {
h_util = dlopen("libavcodec.so", RTLD_LAZY);
if (!h_util) h_util = dlopen("libavcodec.so.60", RTLD_LAZY);
if (h_util) p_av_packet_alloc = (fn_av_packet_alloc)
dlsym(h_util, "av_packet_alloc");
}
p_av_packet_free = (fn_av_packet_free)
dlsym(h_format, "av_packet_free");
if (!p_av_packet_free && h_util)
p_av_packet_free = (fn_av_packet_free)dlsym(h_util, "av_packet_free");
p_av_packet_unref = (fn_av_packet_unref)
dlsym(h_format, "av_packet_unref");
if (!p_av_packet_unref && h_util)
p_av_packet_unref = (fn_av_packet_unref)dlsym(h_util, "av_packet_unref");
if (!p_avformat_open_input || !p_av_read_frame ||
!p_av_packet_alloc || !p_av_packet_free) {
loaded = -1;
return -1;
}
loaded = 1;
return 0;
}
/* ── Public API called from Nim ──────────────────────────────── */
int av_helper_init(void) {
if (load_libs() < 0) return -1;
if (p_avdevice_register_all) p_avdevice_register_all();
return 0;
}
/* Open the PulseAudio capture `device` via the "pulse" input format.
   On success *ctx is populated by libav; returns libav's status code
   (negative on error), or -1 when required symbols are missing. */
int av_helper_open_pulse(AVFormatContext **ctx, const char *device) {
    if (p_av_find_input_format == NULL || p_avformat_open_input == NULL)
        return -1;
    const AVInputFormat *pulse = p_av_find_input_format("pulse");
    if (pulse == NULL)
        return -1;
    return p_avformat_open_input(ctx, device, pulse, NULL);
}
/* Best-effort lookup of the audio stream index.  Falls back to
   stream 0 when av_find_best_stream is unavailable or reports an
   error, so the caller always gets a usable index. */
int av_helper_find_audio_stream(AVFormatContext *ctx) {
    if (!p_av_find_best_stream) return 0; /* assume stream 0 */
    const int media_audio = 1; /* AVMEDIA_TYPE_AUDIO */
    int idx = p_av_find_best_stream(ctx, media_audio, -1, -1, NULL, 0);
    if (idx < 0) idx = 0;
    return idx;
}
/* Probe stream parameters.  Treated as optional: reports success (0)
   when the symbol was never resolved. */
int av_helper_find_stream_info(AVFormatContext *ctx) {
    return p_avformat_find_stream_info
        ? p_avformat_find_stream_info(ctx, NULL)
        : 0;
}
/* Read the next packet into pkt; blocks until data arrives.
   Returns libav's status (negative on error/EOF), or -1 if the symbol
   was never resolved.  Previously this was the only helper that did
   not guard its function pointer and would crash if called before a
   successful av_helper_init(); now consistent with the other wrappers. */
int av_helper_read_frame(AVFormatContext *ctx, AVPacketHead *pkt) {
    if (!p_av_read_frame) return -1;
    return p_av_read_frame(ctx, pkt);
}
int av_helper_packet_stream(AVPacketHead *pkt) { return pkt->stream_index; }
uint8_t* av_helper_packet_data(AVPacketHead *pkt) { return pkt->data; }
int av_helper_packet_size(AVPacketHead *pkt) { return pkt->size; }
AVPacketHead* av_helper_packet_alloc(void) { return p_av_packet_alloc(); }
void av_helper_packet_unref(AVPacketHead *pkt) { if (p_av_packet_unref) p_av_packet_unref(pkt); }
void av_helper_packet_free(AVPacketHead **pkt) { if (p_av_packet_free) p_av_packet_free(pkt); }
/* Close the input; libav NULLs *ctx.  No-op when the symbol is missing. */
void av_helper_close(AVFormatContext **ctx) {
    if (!p_avformat_close_input) return;
    p_avformat_close_input(ctx);
}

View File

@@ -29,10 +29,10 @@ proc initScope*(w, h: int): Scope =
samplesL: newSeq[float](4096), samplesL: newSeq[float](4096),
samplesR: newSeq[float](4096), samplesR: newSeq[float](4096),
sampleCount: 0, sampleCount: 0,
gain: 3.0, gain: 5.0,
timeDiv: 1.0, timeDiv: 2.25,
frozen: false, frozen: false,
grid: gsGrid grid: gsOff
) )
proc w*(s: Scope): int = s.phosphor.w proc w*(s: Scope): int = s.phosphor.w