get texture working

main
Patrick Cleavelin 2024-03-13 18:51:14 -05:00
parent 50ce2372eb
commit 26df55b198
4 changed files with 333 additions and 13 deletions

View File

@@ -1,9 +1,14 @@
struct VertexOutput {
@builtin(position) position: vec4<f32>,
- @location(0) color: vec4<f32>,
+ @location(0) tex_coord: vec2<f32>,
}
+ @group(0) @binding(0)
+ var texture: texture_2d<f32>;
+ @group(0) @binding(1)
+ var texture_sampler: sampler;
@fragment
fn fs_main(input: VertexOutput) -> @location(0) vec4<f32> {
- return input.color;
+ return textureSample(texture, texture_sampler, input.tex_coord);
}

View File

@@ -1,18 +1,18 @@
struct VertexInput {
@location(0) position: vec3<f32>,
- @location(1) color: vec4<f32>,
+ @location(1) tex_coord: vec2<f32>,
}
struct VertexOutput {
@builtin(position) position: vec4<f32>,
- @location(0) color: vec4<f32>,
+ @location(0) tex_coord: vec2<f32>,
}
@vertex
fn vs_main(input: VertexInput) -> VertexOutput {
var out: VertexOutput;
out.position = vec4<f32>(input.position, 1.);
- out.color = input.color;
+ out.tex_coord = input.tex_coord;
return out;
}

View File

@@ -15,13 +15,66 @@
#include <sokol/sokol_glue.h>
#include <sokol/sokol_fetch.h>
#define ARENA_IMPLEMENTATION
#include <tsoding/arena.h>
static Arena default_arena = {0};
static Arena temporary_arena = {0};
static Arena *context_arena = &default_arena;
void *context_alloc(size_t size) {
// assert(context_arena);
return arena_alloc(context_arena, size);
}
typedef struct {
double position[2];
double size[2];
double border_size[2];
} GpuUiRect;
typedef struct {
double atlas_position[2];
double size[2];
double position[2];
} GpuGlyph;
typedef struct {
size_t size;
size_t capacity;
uint8_t *data;
} U8Array;
U8Array new_u8array(size_t capacity) {
return (U8Array) {
.size = 0,
.capacity = capacity,
.data = context_alloc(capacity),
};
}
void push_u8array(U8Array *array, uint8_t *items, size_t num) {
if (array->size + num <= array->capacity) {
memcpy(array->data + array->size, items, num);
array->size += num;
} else {
fprintf(stderr, "failed to push to u8 array, size+num > capacity\n");
}
}
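// Hypothetical usage sketch, not part of this commit: the gpu_glyphs/gpu_ui_rects arrays below are
// sized in multiples of sizeof(GpuGlyph)/sizeof(GpuUiRect), so instance data would presumably be
// staged by copying whole structs into the byte array, e.g.:
static void push_glyph(U8Array *glyphs, GpuGlyph glyph) {
// append the raw bytes of one GpuGlyph; push_u8array rejects the copy if it would exceed capacity
push_u8array(glyphs, (uint8_t *)&glyph, sizeof(glyph));
}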
static struct {
sg_pass_action pass_action;
sg_pipeline pip;
sg_bindings bind;
sg_image text_atlas_image;
sg_sampler text_atlas_sampler;
sg_shader_desc scratch_shader_desc;
U8Array gpu_ui_rects;
U8Array gpu_glyphs;
bool should_exit;
} state;
@@ -37,6 +90,13 @@ void vertex_shader_loaded(const sfetch_response_t *response) {
void fragment_shader_loaded(const sfetch_response_t *response) {
if (response->fetched) {
+ state.scratch_shader_desc.fs = (sg_shader_stage_desc){
+ .source = response->data.ptr,
+ .entry = "fs_main",
+ .images[0].used = true,
+ .samplers[0].used = true,
+ .image_sampler_pairs[0] = { .used = true, .image_slot = 0, .sampler_slot = 0 },
+ };
- state.scratch_shader_desc.fs.source = response->data.ptr;
- state.scratch_shader_desc.fs.entry = "fs_main";
} else if (response->failed) {
@@ -52,10 +112,10 @@ void ed_init() {
float vertices[] = {
// positions colors
- -0.25f, 0.5f, 0.5f, 1.0f, 0.0f, 0.0f, 1.0f,
- 0.5f, 0.5f, 0.5f, 0.0f, 1.0f, 0.0f, 1.0f,
- 0.5f, -0.5f, 0.5f, 0.0f, 0.0f, 1.0f, 1.0f,
- -0.5f, -0.5f, 0.5f, 1.0f, 1.0f, 0.0f, 1.0f,
+ -0.5f, 0.5f, 0.5f, 0.0f, 0.0f,
+ 0.5f, 0.5f, 0.5f, 1.0f, 0.0f,
+ 0.5f, -0.5f, 0.5f, 1.0f, 1.0f,
+ -0.5f, -0.5f, 0.5f, 0.0f, 1.0f,
};
state.bind.vertex_buffers[0] = sg_make_buffer(&(sg_buffer_desc) {
.data = SG_RANGE(vertices)
@@ -88,6 +148,19 @@ void ed_init() {
sfetch_dowork();
}
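// The block below builds a placeholder 256x256 single-channel (R8) atlas with the pixel at
// (4,4) set to 255, then binds its image and a linear-filtering sampler to fragment-stage
// slot 0, matching the image/sampler slots declared in fragment_shader_loaded().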
uint8_t *font_bitmap = context_alloc(256*256 * sizeof(uint8_t));
font_bitmap[4 + 4 * 256] = 255;
state.text_atlas_sampler = sg_make_sampler(&(sg_sampler_desc) { .mag_filter = SG_FILTER_LINEAR });
state.text_atlas_image = sg_make_image(&(sg_image_desc) {
.width = 256,
.height = 256,
.pixel_format = SG_PIXELFORMAT_R8,
.data.subimage[0][0] = { .ptr = font_bitmap, .size = 256*256 * sizeof(uint8_t) },
});
state.bind.fs.images[0] = state.text_atlas_image;
state.bind.fs.samplers[0] = state.text_atlas_sampler;
sg_shader shd = sg_make_shader(&state.scratch_shader_desc);
state.pip = sg_make_pipeline(&(sg_pipeline_desc) {
.shader = shd,
@@ -95,16 +168,21 @@ void ed_init() {
.layout = {
.attrs = {
[0] = { .offset=0, .format=SG_VERTEXFORMAT_FLOAT3 },
- [1] = { .offset=12, .format=SG_VERTEXFORMAT_FLOAT4 },
+ [1] = { .offset=12, .format=SG_VERTEXFORMAT_FLOAT2 },
},
},
});
+ state.gpu_ui_rects = new_u8array(sizeof(GpuUiRect) * 2000);
+ state.gpu_glyphs = new_u8array(sizeof(GpuGlyph) * 8000);
}
void ed_frame() {
sg_begin_pass(&(sg_pass) { .action = state.pass_action, .swapchain = sglue_swapchain() });
- sg_apply_pipeline(state.pip);
- sg_apply_bindings(&state.bind);
- sg_draw(0, 6, 1);
+ {
+ sg_apply_pipeline(state.pip);
+ sg_apply_bindings(&state.bind);
+ sg_draw(0, 6, 1);
+ }
sg_end_pass();
sg_commit();
}

vendor/tsoding/arena.h (vendored, new file, 237 lines)
View File

@@ -0,0 +1,237 @@
// Copyright 2022 Alexey Kutepov <reximkut@gmail.com>
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef ARENA_H_
#define ARENA_H_
#include <stddef.h>
#include <stdint.h>
#ifndef ARENA_ASSERT
#include <assert.h>
#define ARENA_ASSERT assert
#endif
#define ARENA_BACKEND_LIBC_MALLOC 0
#define ARENA_BACKEND_LINUX_MMAP 1
#define ARENA_BACKEND_WIN32_VIRTUALALLOC 2
#define ARENA_BACKEND_WASM_HEAPBASE 3
#ifndef ARENA_BACKEND
#define ARENA_BACKEND ARENA_BACKEND_LIBC_MALLOC
#endif // ARENA_BACKEND
typedef struct Region Region;
struct Region {
Region *next;
size_t count;
size_t capacity;
uintptr_t data[];
};
typedef struct {
Region *begin, *end;
} Arena;
#define REGION_DEFAULT_CAPACITY (8*1024)
Region *new_region(size_t capacity);
void free_region(Region *r);
// TODO: snapshot/rewind capability for the arena
// - Snapshot should be combination of a->end and a->end->count.
// - Rewinding should be restoring a->end and a->end->count from the snapshot and
// setting count-s of all the Region-s after the remembered a->end to 0.
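// A rough sketch (not in the vendored header) of the snapshot/rewind idea above; the names
// Arena_Mark, arena_snapshot and arena_rewind are invented here, and in a real build the
// function bodies would live in the ARENA_IMPLEMENTATION section:
typedef struct {
    Region *end;   // a->end at snapshot time
    size_t count;  // a->end->count at snapshot time
} Arena_Mark;
Arena_Mark arena_snapshot(Arena *a)
{
    Arena_Mark m = { .end = a->end, .count = a->end ? a->end->count : 0 };
    return m;
}
void arena_rewind(Arena *a, Arena_Mark m)
{
    if (m.end == NULL) {
        // snapshot was taken before the first allocation: zero every region
        for (Region *r = a->begin; r != NULL; r = r->next) r->count = 0;
        a->end = a->begin;
        return;
    }
    m.end->count = m.count;                                             // restore the remembered region
    for (Region *r = m.end->next; r != NULL; r = r->next) r->count = 0; // wipe regions allocated after it
    a->end = m.end;                                                     // later allocations resume here
}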
void *arena_alloc(Arena *a, size_t size_bytes);
void *arena_realloc(Arena *a, void *oldptr, size_t oldsz, size_t newsz);
void arena_reset(Arena *a);
void arena_free(Arena *a);
#endif // ARENA_H_
#ifdef ARENA_IMPLEMENTATION
#if ARENA_BACKEND == ARENA_BACKEND_LIBC_MALLOC
#include <stdlib.h>
// TODO: instead of accepting specific capacity new_region() should accept the size of the object we want to fit into the region
// It should be up to new_region() to decide the actual capacity to allocate
Region *new_region(size_t capacity)
{
size_t size_bytes = sizeof(Region) + sizeof(uintptr_t)*capacity;
// TODO: it would be nice if we could guarantee that the regions are allocated by ARENA_BACKEND_LIBC_MALLOC are page aligned
Region *r = malloc(size_bytes);
ARENA_ASSERT(r);
r->next = NULL;
r->count = 0;
r->capacity = capacity;
return r;
}
void free_region(Region *r)
{
free(r);
}
#elif ARENA_BACKEND == ARENA_BACKEND_LINUX_MMAP
#include <unistd.h>
#include <sys/mman.h>
Region *new_region(size_t capacity)
{
size_t size_bytes = sizeof(Region) + sizeof(uintptr_t) * capacity;
Region *r = mmap(NULL, size_bytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
ARENA_ASSERT(r != MAP_FAILED);
r->next = NULL;
r->count = 0;
r->capacity = capacity;
return r;
}
void free_region(Region *r)
{
size_t size_bytes = sizeof(Region) + sizeof(uintptr_t) * r->capacity;
int ret = munmap(r, size_bytes);
ARENA_ASSERT(ret == 0);
}
#elif ARENA_BACKEND == ARENA_BACKEND_WIN32_VIRTUALALLOC
#if !defined(_WIN32)
# error "Current platform is not Windows"
#endif
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#define INV_HANDLE(x) (((x) == NULL) || ((x) == INVALID_HANDLE_VALUE))
Region *new_region(size_t capacity)
{
SIZE_T size_bytes = sizeof(Region) + sizeof(uintptr_t) * capacity;
Region *r = VirtualAllocEx(
GetCurrentProcess(), /* Allocate in current process address space */
NULL, /* Unknown position */
size_bytes, /* Bytes to allocate */
MEM_COMMIT | MEM_RESERVE, /* Reserve and commit allocated page */
PAGE_READWRITE /* Permissions ( Read/Write )*/
);
if (INV_HANDLE(r))
ARENA_ASSERT(0 && "VirtualAllocEx() failed.");
r->next = NULL;
r->count = 0;
r->capacity = capacity;
return r;
}
void free_region(Region *r)
{
if (INV_HANDLE(r))
return;
BOOL free_result = VirtualFreeEx(
GetCurrentProcess(), /* Deallocate from current process address space */
(LPVOID)r, /* Address to deallocate */
0, /* Bytes to deallocate ( Unknown, deallocate entire page ) */
MEM_RELEASE /* Release the page ( And implicitly decommit it ) */
);
if (FALSE == free_result)
ARENA_ASSERT(0 && "VirtualFreeEx() failed.");
}
#elif ARENA_BACKEND == ARENA_BACKEND_WASM_HEAPBASE
# error "TODO: WASM __heap_base backend is not implemented yet"
#else
# error "Unknown Arena backend"
#endif
// TODO: add debug statistic collection mode for arena
// Should collect things like:
// - How many times new_region was called
// - How many times existing region was skipped
// - How many times allocation exceeded REGION_DEFAULT_CAPACITY
void *arena_alloc(Arena *a, size_t size_bytes)
{
size_t size = (size_bytes + sizeof(uintptr_t) - 1)/sizeof(uintptr_t);
if (a->end == NULL) {
ARENA_ASSERT(a->begin == NULL);
size_t capacity = REGION_DEFAULT_CAPACITY;
if (capacity < size) capacity = size;
a->end = new_region(capacity);
a->begin = a->end;
}
while (a->end->count + size > a->end->capacity && a->end->next != NULL) {
a->end = a->end->next;
}
if (a->end->count + size > a->end->capacity) {
ARENA_ASSERT(a->end->next == NULL);
size_t capacity = REGION_DEFAULT_CAPACITY;
if (capacity < size) capacity = size;
a->end->next = new_region(capacity);
a->end = a->end->next;
}
void *result = &a->end->data[a->end->count];
a->end->count += size;
return result;
}
void *arena_realloc(Arena *a, void *oldptr, size_t oldsz, size_t newsz)
{
if (newsz <= oldsz) return oldptr;
void *newptr = arena_alloc(a, newsz);
char *newptr_char = newptr;
char *oldptr_char = oldptr;
for (size_t i = 0; i < oldsz; ++i) {
newptr_char[i] = oldptr_char[i];
}
return newptr;
}
void arena_reset(Arena *a)
{
for (Region *r = a->begin; r != NULL; r = r->next) {
r->count = 0;
}
a->end = a->begin;
}
void arena_free(Arena *a)
{
Region *r = a->begin;
while (r) {
Region *r0 = r;
r = r->next;
free_region(r0);
}
a->begin = NULL;
a->end = NULL;
}
#endif // ARENA_IMPLEMENTATION