//! Generation of GLSL struct definitions and accessor functions.
use std::fmt::Write;
use std::ops::Deref;
use crate::layout::{LayoutModule, LayoutType, LayoutTypeDef};
use crate::parse::{GpuScalar, GpuType};
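/// Generate the GLSL source for a layout module: a `Ref` type, the plain
/// definition, and size/index helpers for each type, followed by read (and,
/// when `module.gpu_write` is set, write) accessor functions.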
pub fn gen_glsl(module: &LayoutModule) -> String {
let mut r = String::new();
writeln!(
&mut r,
"// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense\n"
)
.unwrap();
writeln!(&mut r, "// Code auto-generated by piet-gpu-derive\n").unwrap();
// Note: GLSL needs definitions before uses. We could do a topological sort here,
// but it's easiest for now to just require that ordering in the spec.
for name in &module.def_names {
gen_refdef(&mut r, &name);
}
for name in &module.def_names {
match module.defs.get(name).unwrap() {
(size, LayoutTypeDef::Struct(fields)) => {
gen_struct_def(&mut r, name, fields);
gen_item_def(&mut r, name, size.size);
}
(size, LayoutTypeDef::Enum(en)) => {
gen_enum_def(&mut r, name, en);
gen_item_def(&mut r, name, size.size);
gen_tag_def(&mut r, name);
}
}
}
for name in &module.def_names {
let def = module.defs.get(name).unwrap();
// Modules other than "state" and "scene" access their data through the
// memory (Alloc) interface rather than a plain named buffer.
let is_mem = module.name != "state" && module.name != "scene";
match def {
(_size, LayoutTypeDef::Struct(fields)) => {
gen_struct_read(&mut r, &module.name, &name, is_mem, fields);
if module.gpu_write {
gen_struct_write(&mut r, &module.name, &name, is_mem, fields);
}
}
(_size, LayoutTypeDef::Enum(en)) => {
gen_enum_read(&mut r, &module.name, &name, is_mem, en);
if module.gpu_write {
gen_enum_write(&mut r, &module.name, &name, is_mem, en);
}
}
}
}
r
}
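/// Emit the reference type, e.g. for a hypothetical type named `Foo`:
///
/// ```glsl
/// struct FooRef {
///     uint offset;
/// };
/// ```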
fn gen_refdef(r: &mut String, name: &str) {
writeln!(r, "struct {}Ref {{", name).unwrap();
writeln!(r, " uint offset;").unwrap();
writeln!(r, "}};\n").unwrap();
}
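/// Emit the plain struct definition, e.g. for a hypothetical two-field `Foo`:
///
/// ```glsl
/// struct Foo {
///     float bar;
///     uint baz;
/// };
/// ```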
fn gen_struct_def(r: &mut String, name: &str, fields: &[(String, usize, LayoutType)]) {
writeln!(r, "struct {} {{", name).unwrap();
for (name, _offset, ty) in fields {
writeln!(r, " {} {};", glsl_type(&ty.ty), name).unwrap();
}
writeln!(r, "}};\n").unwrap();
}
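/// Emit one `#define` per variant to serve as its tag value, e.g.
/// `#define Cmd_Fill 1` for a variant `Fill` at index 1 of an enum `Cmd`.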
fn gen_enum_def(r: &mut String, name: &str, variants: &[(String, Vec<(usize, LayoutType)>)]) {
for (i, (var_name, _payload)) in variants.iter().enumerate() {
writeln!(r, "#define {}_{} {}", name, var_name, i).unwrap();
}
}
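/// Emit the size constant and indexing helper, e.g. for a `Foo` of size 8:
///
/// ```glsl
/// #define Foo_size 8
///
/// FooRef Foo_index(FooRef ref, uint index) {
///     return FooRef(ref.offset + index * Foo_size);
/// }
/// ```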
fn gen_item_def(r: &mut String, name: &str, size: usize) {
writeln!(r, "#define {}_size {}\n", name, size).unwrap();
writeln!(
r,
"{}Ref {}_index({}Ref ref, uint index) {{",
name, name, name
)
.unwrap();
writeln!(
r,
" return {}Ref(ref.offset + index * {}_size);",
name, name
)
.unwrap();
writeln!(r, "}}\n").unwrap();
}
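/// Emit the tag struct returned by the `_tag` accessor: the low 16 bits of
/// the first word hold the variant tag, the high 16 bits hold flags.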
fn gen_tag_def(r: &mut String, name: &str) {
writeln!(r, "struct {}Tag {{", name).unwrap();
writeln!(r, " uint tag;").unwrap();
writeln!(r, " uint flags;").unwrap();
writeln!(r, "}};\n").unwrap();
}
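/// Emit the read accessor. Each 4-byte word covered by the struct (as
/// computed by `struct_coverage`) is loaded once, then fields are extracted
/// from the raw words. A sketch of the output for a hypothetical `Foo` with
/// a single f32 field, in a memory (`Alloc`) module:
///
/// ```glsl
/// Foo Foo_read(Alloc a, FooRef ref) {
///     uint ix = ref.offset >> 2;
///     uint raw0 = read_mem(a, ix + 0);
///     Foo s;
///     s.bar = uintBitsToFloat(raw0);
///     return s;
/// }
/// ```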
fn gen_struct_read(
r: &mut String,
bufname: &str,
name: &str,
is_mem: bool,
fields: &[(String, usize, LayoutType)],
) {
write!(r, "{} {}_read(", name, name).unwrap();
if is_mem {
write!(r, "Alloc a, ").unwrap();
}
writeln!(r, "{}Ref ref) {{", name).unwrap();
writeln!(r, " uint ix = ref.offset >> 2;").unwrap();
let coverage = crate::layout::struct_coverage(fields, false);
for (i, fields) in coverage.iter().enumerate() {
if !fields.is_empty() {
if is_mem {
writeln!(r, " uint raw{} = read_mem(a, ix + {});", i, i).unwrap();
} else {
writeln!(r, " uint raw{} = {}[ix + {}];", i, bufname, i).unwrap();
}
}
}
writeln!(r, " {} s;", name).unwrap();
let mut preload: bool = false;
for (name, offset, ty) in fields {
let (setup, extract) = gen_extract(*offset, &ty.ty, preload);
writeln!(r, "{} s.{} = {};", setup, name, extract).unwrap();
if let GpuType::Scalar(GpuScalar::F16) = &ty.ty {
if offset % 4 == 0 {
preload = true;
continue;
}
}
preload = false;
}
writeln!(r, " return s;").unwrap();
writeln!(r, "}}\n").unwrap();
}
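/// Emit the tag accessor plus one `_read` per struct-payload variant. The
/// tag accessor for a hypothetical enum `Cmd` in a memory module looks like:
///
/// ```glsl
/// CmdTag Cmd_tag(Alloc a, CmdRef ref) {
///     uint tag_and_flags = read_mem(a, ref.offset >> 2);
///     return CmdTag(tag_and_flags & 0xffff, tag_and_flags >> 16);
/// }
/// ```
///
/// A variant payload is handled only when it is a single inline struct,
/// optionally preceded by a `TagFlags` slot.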
fn gen_enum_read(
r: &mut String,
bufname: &str,
name: &str,
is_mem: bool,
variants: &[(String, Vec<(usize, LayoutType)>)],
) {
if is_mem {
writeln!(r, "{}Tag {}_tag(Alloc a, {}Ref ref) {{", name, name, name).unwrap();
writeln!(r, " uint tag_and_flags = read_mem(a, ref.offset >> 2);").unwrap();
} else {
writeln!(r, "{}Tag {}_tag({}Ref ref) {{", name, name, name).unwrap();
writeln!(r, " uint tag_and_flags = {}[ref.offset >> 2];", bufname).unwrap();
}
writeln!(
r,
" return {}Tag(tag_and_flags & 0xffff, tag_and_flags >> 16);",
name
)
.unwrap();
writeln!(r, "}}\n").unwrap();
for (var_name, payload) in variants {
let payload_ix = if payload.len() == 1 {
Some(0)
} else if payload.len() == 2 {
if matches!(payload[0].1.ty, GpuType::Scalar(GpuScalar::TagFlags)) {
Some(1)
} else {
None
}
} else {
None
};
if let Some(payload_ix) = payload_ix {
if let GpuType::InlineStruct(structname) = &payload[payload_ix].1.ty {
if is_mem {
writeln!(
r,
"{} {}_{}_read(Alloc a, {}Ref ref) {{",
structname, name, var_name, name
)
.unwrap();
writeln!(
r,
" return {}_read(a, {}Ref(ref.offset + {}));",
structname, structname, payload[payload_ix].0
)
.unwrap();
} else {
writeln!(
r,
"{} {}_{}_read({}Ref ref) {{",
structname, name, var_name, name
)
.unwrap();
writeln!(
r,
" return {}_read({}Ref(ref.offset + {}));",
structname, structname, payload[payload_ix].0
)
.unwrap();
}
writeln!(r, "}}\n").unwrap();
}
}
// TODO: support for variants that aren't one struct.
}
}
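/// Generate a `(setup, extract)` pair for one field: `setup` is zero or more
/// statements emitted before the assignment (currently only the
/// `unpackHalf2x16` of a raw word into a `halvesN` temporary for f16), and
/// `extract` is the expression assigned to the field. For example, a u16 at
/// byte offset 6 yields `("", "raw1 >> 16")`.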
fn gen_extract(offset: usize, ty: &GpuType, preload: bool) -> (String, String) {
match ty {
GpuType::Scalar(scalar) => {
let setup = match scalar {
GpuScalar::F16 => {
if preload {
String::new()
} else {
let ix = offset / 4;
format!(" vec2 halves{} = unpackHalf2x16(raw{});\n", ix, ix)
}
}
_ => String::new(),
};
(setup, gen_extract_scalar(offset, scalar))
}
GpuType::Vector(scalar, size) => {
let is_f16 = matches!(scalar, GpuScalar::F16);
let mut setup = String::new();
let mut extract = glsl_type(ty);
extract.push_str("(");
for i in 0..*size {
if i != 0 {
extract.push_str(", ");
}
if is_f16 && i % 2 == 0 {
let ix = (offset + i * scalar.size()) / 4;
let s = format!(" vec2 halves{} = unpackHalf2x16(raw{});\n", ix, ix);
setup.push_str(&s);
}
let el_offset = offset + i * scalar.size();
extract.push_str(&gen_extract_scalar(el_offset, scalar));
}
extract.push_str(")");
(setup, extract)
}
GpuType::InlineStruct(name) => (
String::new(),
format!(
"{}_read({}Ref({}))",
name,
name,
simplified_add("ref.offset", offset)
),
),
GpuType::Ref(inner) => {
if let GpuType::InlineStruct(name) = inner.deref() {
(
String::new(),
format!(
"{}Ref({})",
name,
gen_extract_scalar(offset, &GpuScalar::U32)
),
)
} else {
panic!("only know how to deal with Ref of struct")
}
}
}
}
fn gen_extract_scalar(offset: usize, ty: &GpuScalar) -> String {
match ty {
GpuScalar::F16 | GpuScalar::F32 => extract_fbits(offset, ty.size()),
GpuScalar::U8 | GpuScalar::U16 | GpuScalar::U32 => extract_ubits(offset, ty.size()),
GpuScalar::I8 | GpuScalar::I16 | GpuScalar::I32 => extract_ibits(offset, ty.size()),
GpuScalar::TagFlags => "0 /* TODO */".into(),
}
}
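/// Unsigned extraction from a raw word, e.g.:
/// - 4 bytes at offset 4: `raw1`
/// - 1 byte at offset 0: `raw0 & 0xff`
/// - 2 bytes at offset 6: `raw1 >> 16` (the mask is elided when the value
///   occupies the top bits)
/// - 1 byte at offset 1: `(raw0 >> 8) & 0xff`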
fn extract_ubits(offset: usize, nbytes: usize) -> String {
if nbytes == 4 {
return format!("raw{}", offset / 4);
}
let mask = (1 << (nbytes * 8)) - 1;
if offset % 4 == 0 {
format!("raw{} & 0x{:x}", offset / 4, mask)
} else if offset % 4 + nbytes == 4 {
format!("raw{} >> {}", offset / 4, (offset % 4) * 8)
} else {
format!("(raw{} >> {}) & 0x{:x}", offset / 4, (offset % 4) * 8, mask)
}
}
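/// Signed extraction: sign-extend by shifting the value to the top of the
/// word and arithmetic-shifting back down, e.g. `int(raw0 << 16) >> 16` for
/// an i16 at offset 0. A value already in the top bits needs only the
/// arithmetic shift, e.g. `int(raw0) >> 16` for an i16 at offset 2.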
fn extract_ibits(offset: usize, nbytes: usize) -> String {
if nbytes == 4 {
return format!("int(raw{})", offset / 4);
}
if offset % 4 + nbytes == 4 {
format!("int(raw{}) >> {}", offset / 4, (offset % 4) * 8)
} else {
format!(
"int(raw{} << {}) >> {}",
offset / 4,
((4 - nbytes) - offset % 4) * 8,
(4 - nbytes) * 8
)
}
}
fn extract_fbits(offset: usize, nbytes: usize) -> String {
match nbytes {
4 => format!("uintBitsToFloat(raw{})", offset / 4),
2 => match offset % 4 {
0 => {
let ix = offset / 4;
format!("halves{}.x", ix)
}
2 => format!("halves{}.y", offset / 4),
_ => panic!("unexpected packing of f16 at offset {}", offset % 4),
},
_ => {
panic!("unexpected extraction of float with nbytes = {}", nbytes);
}
}
}
// Writing
fn is_f16(ty: &GpuType) -> bool {
match ty {
GpuType::Scalar(GpuScalar::F16) => true,
GpuType::Vector(GpuScalar::F16, _) => true,
_ => false,
}
}
fn is_f16_pair(field_ixs: &[usize], fields: &[(String, usize, LayoutType)]) -> bool {
// Check the two fields sharing this word, not every field in the struct.
if field_ixs.len() == 2 {
field_ixs.iter().all(|&ix| is_f16(&fields[ix].2.ty))
} else {
false
}
}
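/// Emit the write accessor, the mirror image of `gen_struct_read`: fields
/// sharing a word are packed with shifts and ORed together, one store per
/// word. A sketch for a hypothetical single-f32 `Foo` in a memory module:
///
/// ```glsl
/// void Foo_write(Alloc a, FooRef ref, Foo s) {
///     uint ix = ref.offset >> 2;
///     write_mem(a, ix + 0, floatBitsToUint(s.bar));
/// }
/// ```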
fn gen_struct_write(
r: &mut String,
bufname: &str,
name: &str,
is_mem: bool,
fields: &[(String, usize, LayoutType)],
) {
write!(r, "void {}_write(", name).unwrap();
if is_mem {
write!(r, "Alloc a, ").unwrap();
}
writeln!(r, "{}Ref ref, {} s) {{", name, name).unwrap();
writeln!(r, " uint ix = ref.offset >> 2;").unwrap();
let coverage = crate::layout::struct_coverage(fields, true);
for (i, field_ixs) in coverage.iter().enumerate() {
let mut pieces = Vec::new();
if is_f16_pair(field_ixs, fields) {
let (ix0, ix1) = (field_ixs[0], field_ixs[1]);
let inner0 = format!("s.{}", fields[ix0].0);
let inner1 = format!("s.{}", fields[ix1].0);
pieces.push(format!("packHalf2x16(vec2({}, {}))", &inner0, &inner1));
} else {
for field_ix in field_ixs {
let (name, offset, ty) = &fields[*field_ix];
match &ty.ty {
GpuType::Scalar(scalar) => {
let inner = format!("s.{}", name);
pieces.push(gen_pack_bits_scalar(scalar, *offset, &inner));
}
GpuType::Vector(scalar, len) => {
let size = scalar.size();
let ix_lo = (i * 4 - offset) / size;
let ix_hi = ((4 + i * 4 - offset) / size).min(*len);
match scalar {
GpuScalar::F16 => {
if ix_hi - ix_lo == 2 {
let inner0 =
format!("s.{}.{}", name, &"xyzw"[ix_lo..ix_lo + 1]);
let inner1 =
format!("s.{}.{}", name, &"xyzw"[ix_lo + 1..ix_hi]);
pieces.push(format!(
"packHalf2x16(vec2({}, {}))",
&inner0, &inner1
));
} else {
let ix = ix_lo;
let scalar_offset = offset + ix * size;
let inner = format!("s.{}.{}", name, &"xyzw"[ix..ix + 1]);
pieces.push(gen_pack_bits_scalar(
scalar,
scalar_offset,
&inner,
));
}
}
_ => {
for ix in ix_lo..ix_hi {
let scalar_offset = offset + ix * size;
let inner = format!("s.{}.{}", name, &"xyzw"[ix..ix + 1]);
pieces.push(gen_pack_bits_scalar(
scalar,
scalar_offset,
&inner,
));
}
}
}
}
GpuType::InlineStruct(structname) => {
writeln!(
r,
" {}_write({}Ref({}), s.{});",
structname,
structname,
simplified_add("ref.offset", *offset),
name
)
.unwrap();
}
GpuType::Ref(_) => pieces.push(format!("s.{}.offset", name)),
}
}
}
if !pieces.is_empty() {
if is_mem {
write!(r, " write_mem(a, ix + {}, ", i).unwrap();
} else {
write!(r, " {}[ix + {}] = ", bufname, i).unwrap();
}
for (j, piece) in pieces.iter().enumerate() {
if j != 0 {
write!(r, " | ").unwrap();
}
write!(r, "{}", piece).unwrap();
}
if is_mem {
write!(r, ")").unwrap();
}
writeln!(r, ";").unwrap();
}
}
writeln!(r, "}}\n").unwrap();
}
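/// Pack one scalar into its position within a 32-bit word, e.g. a u16 at
/// byte offset 2 becomes `(s.x << 16)` and an i8 at offset 1 becomes
/// `((uint(s.x) & 0xff) << 8)`. Signed types narrower than 32 bits are
/// masked after conversion; the top-most slot can skip the mask because the
/// shift discards the sign-extension bits anyway.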
fn gen_pack_bits_scalar(ty: &GpuScalar, offset: usize, inner: &str) -> String {
let shift = (offset % 4) * 8;
let bits = match ty {
// Parenthesized so that a following shift applies to the masked value;
// in GLSL, `<<` binds tighter than `&`.
GpuScalar::F16 => format!("(packHalf2x16(vec2({}, 0.0)) & 0xffff)", inner),
GpuScalar::F32 => format!("floatBitsToUint({})", inner),
// Note: this doesn't mask small unsigned int types; the caller is
// responsible for making sure they don't overflow.
GpuScalar::U8 | GpuScalar::U16 | GpuScalar::U32 => inner.into(),
GpuScalar::I8 => {
if shift == 24 {
format!("uint({})", inner)
} else {
format!("(uint({}) & 0xff)", inner)
}
}
GpuScalar::I16 => {
if shift == 16 {
format!("uint({})", inner)
} else {
format!("(uint({}) & 0xffff)", inner)
}
}
GpuScalar::I32 => format!("uint({})", inner),
GpuScalar::TagFlags => "0".into(),
};
if shift == 0 {
bits
} else {
format!("({} << {})", bits, shift)
}
}
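/// Emit one write accessor per variant. An empty variant just stores the
/// tag, e.g.:
///
/// ```glsl
/// void Cmd_End_write(Alloc a, CmdRef ref) {
///     write_mem(a, ref.offset >> 2, Cmd_End);
/// }
/// ```
///
/// Struct-payload variants store the tag word (ORed with `flags << 16` when
/// a `TagFlags` slot precedes the payload) and then delegate to the payload
/// struct's own `_write`.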
fn gen_enum_write(
r: &mut String,
bufname: &str,
name: &str,
is_mem: bool,
variants: &[(String, Vec<(usize, LayoutType)>)],
) {
for (var_name, payload) in variants {
if payload.is_empty() {
if is_mem {
writeln!(
r,
"void {}_{}_write(Alloc a, {}Ref ref) {{",
name, var_name, name
)
.unwrap();
writeln!(
r,
" write_mem(a, ref.offset >> 2, {}_{});",
name, var_name
)
.unwrap();
} else {
writeln!(r, "void {}_{}_write({}Ref ref) {{", name, var_name, name).unwrap();
writeln!(
r,
" {}[ref.offset >> 2] = {}_{};",
bufname, name, var_name
)
.unwrap();
}
writeln!(r, "}}\n").unwrap();
} else if payload.len() == 1 {
if let GpuType::InlineStruct(structname) = &payload[0].1.ty {
if is_mem {
writeln!(
r,
"void {}_{}_write(Alloc a, {}Ref ref, {} s) {{",
name, var_name, name, structname
)
.unwrap();
writeln!(
r,
" write_mem(a, ref.offset >> 2, {}_{});",
name, var_name
)
.unwrap();
writeln!(
r,
" {}_write(a, {}Ref(ref.offset + {}), s);",
structname, structname, payload[0].0
)
.unwrap();
} else {
writeln!(
r,
"void {}_{}_write({}Ref ref, {} s) {{",
name, var_name, name, structname
)
.unwrap();
writeln!(
r,
" {}[ref.offset >> 2] = {}_{};",
bufname, name, var_name
)
.unwrap();
writeln!(
r,
" {}_write({}Ref(ref.offset + {}), s);",
structname, structname, payload[0].0
)
.unwrap();
}
writeln!(r, "}}\n").unwrap();
}
} else if payload.len() == 2
&& matches!(payload[0].1.ty, GpuType::Scalar(GpuScalar::TagFlags))
{
if let GpuType::InlineStruct(structname) = &payload[1].1.ty {
if is_mem {
writeln!(
r,
"void {}_{}_write(Alloc a, {}Ref ref, uint flags, {} s) {{",
name, var_name, name, structname
)
.unwrap();
writeln!(
r,
" write_mem(a, ref.offset >> 2, (flags << 16) | {}_{});",
name, var_name
)
.unwrap();
writeln!(
r,
" {}_write(a, {}Ref(ref.offset + {}), s);",
structname, structname, payload[1].0
)
.unwrap();
} else {
writeln!(
r,
"void {}_{}_write({}Ref ref, uint flags, {} s) {{",
name, var_name, name, structname
)
.unwrap();
writeln!(
r,
" {}[ref.offset >> 2] = (flags << 16) | {}_{};",
bufname, name, var_name
)
.unwrap();
writeln!(
r,
" {}_write({}Ref(ref.offset + {}), s);",
structname, structname, payload[1].0
)
.unwrap();
}
writeln!(r, "}}\n").unwrap();
}
}
// TODO: support for variants that aren't one struct.
}
}
// Utility functions
fn glsl_type(ty: &GpuType) -> String {
match ty {
GpuType::Scalar(scalar) => glsl_scalar(scalar).into(),
GpuType::Vector(scalar, size) => {
if *size == 1 {
glsl_scalar(scalar).into()
} else {
format!("{}{}", glsl_vecname(scalar), size)
}
}
GpuType::InlineStruct(name) => name.clone(),
GpuType::Ref(inner) => {
if let GpuType::InlineStruct(name) = inner.deref() {
format!("{}Ref", name)
} else {
panic!("only know how to deal with Ref of struct")
}
}
}
}
/// GLSL type that can contain the scalar value.
fn glsl_scalar(s: &GpuScalar) -> &'static str {
match s {
GpuScalar::F16 | GpuScalar::F32 => "float",
GpuScalar::I8 | GpuScalar::I16 | GpuScalar::I32 => "int",
GpuScalar::U8 | GpuScalar::U16 | GpuScalar::U32 | GpuScalar::TagFlags => "uint",
}
}
fn glsl_vecname(s: &GpuScalar) -> &'static str {
match s {
GpuScalar::F16 | GpuScalar::F32 => "vec",
GpuScalar::I8 | GpuScalar::I16 | GpuScalar::I32 => "ivec",
GpuScalar::U8 | GpuScalar::U16 | GpuScalar::U32 | GpuScalar::TagFlags => "uvec",
}
}
/// If `c = 0`, return `"var_name"`, else `"var_name + c"`
fn simplified_add(var_name: &str, c: usize) -> String {
if c == 0 {
String::from(var_name)
} else {
format!("{} + {}", var_name, c)
}
}