In the future, we can optimize it by seeking when possible.
bflush(f);
bsetpos(f, 0);
}
+
+/*
+ * Skip the next len bytes of input by pulling them through the
+ * direct-read interface and discarding them.  Stops early if the
+ * stream ends before len bytes were consumed — previously a
+ * zero-length read from bdirect_read_prepare() left len unchanged
+ * and the loop spun forever.
+ */
+void
+bskip(struct fastbuf *f, uns len)
+{
+  while (len)
+    {
+      byte *buf;
+      uns l = bdirect_read_prepare(f, &buf);
+      if (!l)			/* EOF: nothing more to skip */
+	break;
+      l = MIN(l, len);
+      bdirect_read_commit(f, buf+l);
+      len -= l;
+    }
+}
void bseek(struct fastbuf *f, sh_off_t pos, int whence);
void bsetpos(struct fastbuf *f, sh_off_t pos);
void brewind(struct fastbuf *f);
+/* Skip len bytes of input by reading and discarding them
+ * (not yet optimized to seek when the stream allows it). */
+void bskip(struct fastbuf *f, uns len);
static inline sh_off_t btell(struct fastbuf *f)
{