Skip to content

Commit

Permalink
Fixes bkaradzic#1671 and fixes bkaradzic#1139. Flush the nanovg draw …
Browse files Browse the repository at this point in the history
…commands whenever the next draw would overflow the uint16_t index type. (bkaradzic#3207)
  • Loading branch information
mcourteaux authored and jay3d committed Dec 7, 2023
1 parent 2e401b2 commit abc284c
Showing 1 changed file with 24 additions and 4 deletions.
28 changes: 24 additions & 4 deletions examples/common/nanovg/nanovg_bgfx.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -556,8 +556,12 @@ namespace
static void fan(uint32_t _start, uint32_t _count)
{
uint32_t numTris = _count-2;
BX_ASSERT(_count >= 3, "less than one triangle");
BX_ASSERT(_start + ((numTris - 1) * 3) + 2 <= UINT16_MAX, "index overflow");
bgfx::TransientIndexBuffer tib;
bgfx::allocTransientIndexBuffer(&tib, numTris*3);
BX_ASSERT(tib.size == numTris*3*(tib.isIndex16 ? 2 : 4), "did not get enough room for indices");

uint16_t* data = (uint16_t*)tib.data;
for (uint32_t ii = 0; ii < numTris; ++ii)
{
Expand Down Expand Up @@ -823,6 +827,7 @@ namespace
return count;
}

/* Return the smaller of two ints. */
static int glnvg__mini(int a, int b)
{
	return (b < a) ? b : a;
}
/* Return the larger of two ints. */
static int glnvg__maxi(int a, int b)
{
	return (b > a) ? b : a;
}

static struct GLNVGcall* glnvg__allocCall(struct GLNVGcontext* gl)
Expand Down Expand Up @@ -856,11 +861,15 @@ namespace

static int glnvg__allocVerts(GLNVGcontext* gl, int n)
{
// Before calling this function, make sure that glnvg__flushIfNeeded()
// is called, before allocating the NVGCall.
int ret = 0;
BX_ASSERT(gl->nverts + n <= UINT16_MAX, "index overflow is imminent, please flush.");
if (gl->nverts+n > gl->cverts)
{
NVGvertex* verts;
int cverts = glnvg__maxi(gl->nverts + n, 4096) + gl->cverts/2; // 1.5x Overallocate
cverts = glnvg__mini(cverts, UINT16_MAX);
verts = (NVGvertex*)bx::realloc(gl->allocator, gl->verts, sizeof(NVGvertex) * cverts);
if (verts == NULL) return -1;
gl->verts = verts;
Expand Down Expand Up @@ -892,6 +901,12 @@ namespace
vtx->v = v;
}

/* Flush the accumulated nanovg draw commands when appending `nverts` more
 * vertices would overflow the 16-bit index range used by the transient
 * index buffers. Call this BEFORE glnvg__allocCall()/glnvg__allocVerts()
 * so a draw call is never split across a flush. */
static void glnvg__flushIfNeeded(struct GLNVGcontext *gl, int nverts) {
	if (gl->nverts + nverts <= UINT16_MAX) {
		return; // still fits in uint16_t index space; nothing to do
	}
	nvgRenderFlush(gl);
}

static void nvgRenderFill(
void* _userPtr
, NVGpaint* paint
Expand All @@ -904,11 +919,13 @@ namespace
)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
int maxverts = glnvg__maxVertCount(paths, npaths) + 6;
glnvg__flushIfNeeded(gl, maxverts);

struct GLNVGcall* call = glnvg__allocCall(gl);
struct NVGvertex* quad;
struct GLNVGfragUniforms* frag;
int i, maxverts, offset;
int i, offset;

call->type = GLNVG_FILL;
call->pathOffset = glnvg__allocPaths(gl, npaths);
Expand All @@ -922,7 +939,6 @@ namespace
}

// Allocate vertices for all the paths.
maxverts = glnvg__maxVertCount(paths, npaths) + 6;
offset = glnvg__allocVerts(gl, maxverts);

for (i = 0; i < npaths; i++)
Expand Down Expand Up @@ -990,9 +1006,11 @@ namespace
)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
int maxverts = glnvg__maxVertCount(paths, npaths);
glnvg__flushIfNeeded(gl, maxverts);

struct GLNVGcall* call = glnvg__allocCall(gl);
int i, maxverts, offset;
int i, offset;

call->type = GLNVG_STROKE;
call->pathOffset = glnvg__allocPaths(gl, npaths);
Expand All @@ -1001,14 +1019,14 @@ namespace
call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);

// Allocate vertices for all the paths.
maxverts = glnvg__maxVertCount(paths, npaths);
offset = glnvg__allocVerts(gl, maxverts);

for (i = 0; i < npaths; i++)
{
struct GLNVGpath* copy = &gl->paths[call->pathOffset + i];
const struct NVGpath* path = &paths[i];
bx::memSet(copy, 0, sizeof(struct GLNVGpath) );
BX_ASSERT(path->nfill == 0, "strokes should not have any fill");
if (path->nstroke)
{
copy->strokeOffset = offset;
Expand All @@ -1027,6 +1045,8 @@ namespace
const struct NVGvertex* verts, int nverts)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
glnvg__flushIfNeeded(gl, nverts);

struct GLNVGcall* call = glnvg__allocCall(gl);
struct GLNVGfragUniforms* frag;

Expand Down

0 comments on commit abc284c

Please sign in to comment.