Zig is a system programming language which prioritizes optimality, safety, and readability.
Zig is under active development, but it has not yet reached a release milestone.
Latest developments:
// Bring the standard library's I/O namespace into scope.
const io = @import("std").io;
/// Program entry point. `%%` asserts success: if printf returns an error,
/// the program crashes rather than propagating it.
pub fn main(args: [][]u8) -> %void {
%%io.stdout.printf("Hello, world!\n");
}
Build this with:
zig build hello.zig --name hello --export exe
// Import C's stdio directly; `c` becomes a namespace of its declarations.
const c = @c_import(@c_include("stdio.h"));
/// C-ABI entry point (note `export`); built with `--library c` so it links
/// against libc. `&&u8` corresponds to C's `char **argv`.
export fn main(argc: c_int, argv: &&u8) -> c_int {
// c"..." produces a null-terminated C string literal.
c.printf(c"Hello, world!\n");
return 0;
}
Build this with:
zig build hello.zig --name hello --export exe --library c
/// Parse `buf` as an unsigned integer of type T in the given radix.
/// Returns error.InvalidChar for a bad digit and error.Overflow if the
/// value does not fit in T.
pub fn parse_unsigned(inline T: type, buf: []u8, radix: u8) -> %T {
    // Accumulate most-significant digit first: result = result * radix + digit.
    // `%return` propagates any error to the caller, like `%% |err| return err`.
    var result: T = 0;
    for (buf) |ch| {
        const digit = %return char_to_digit(ch, radix);
        result = %return mul_overflow(T, result, radix);
        result = %return add_overflow(T, result, digit);
    }
    return result;
}
pub error InvalidChar;
/// Convert one ASCII character to its digit value (letters are
/// case-insensitive, 'a'/'A' == 10). Returns error.InvalidChar when the
/// character is not a digit/letter or its value is >= radix.
fn char_to_digit(c: u8, radix: u8) -> %u8 {
    const digit = if ('0' <= c && c <= '9') {
        c - '0'
    } else if ('a' <= c && c <= 'z') {
        c - 'a' + 10
    } else if ('A' <= c && c <= 'Z') {
        c - 'A' + 10
    } else {
        return error.InvalidChar;
    };
    // A syntactically valid digit can still be out of range for this radix.
    if (digit >= radix) return error.InvalidChar;
    return digit;
}
pub error Overflow;
/// Checked multiplication: returns a * b, or error.Overflow when the
/// product does not fit in T.
pub fn mul_overflow(inline T: type, a: T, b: T) -> %T {
    var product: T = undefined;
    // @mul_with_overflow returns true when the result wrapped.
    return if (@mul_with_overflow(T, a, b, &product)) error.Overflow else product;
}
/// Checked addition: returns a + b, or error.Overflow when the sum does
/// not fit in T.
pub fn add_overflow(inline T: type, a: T, b: T) -> %T {
    var sum: T = undefined;
    // @add_with_overflow returns true when the result wrapped.
    return if (@add_with_overflow(T, a, b, &sum)) error.Overflow else sum;
}
/// Parse `s` as a base-10 u32, falling back to 42 on any parse error.
fn get_number_with_default(s: []u8) -> u32 {
    // `%% default` substitutes the default value for an error result.
    return parse_unsigned(u32, s, 10) %% 42;
}
/// Parse `s` as a base-10 u32; any parse error crashes the program.
fn get_number_or_crash(s: []u8) -> u32 {
    // `%%` with no handler asserts the operation cannot fail.
    return %%parse_unsigned(u32, s, 10);
}
/// Parse both strings as base-10 u32 values and return their sum,
/// propagating any parse error to the caller.
fn add_two_together_or_return_err(a_str: []u8, b_str: []u8) -> %u32 {
    // `%return` is shorthand for `%% |err| return err`.
    const lhs = %return parse_unsigned(u32, a_str, 10);
    const rhs = %return parse_unsigned(u32, b_str, 10);
    return lhs + rhs;
}
const assert = @import("debug.zig").assert;
const math = @import("math.zig");
const mem = @import("mem.zig");
const Allocator = mem.Allocator;
// Concurrent-modification checks are compiled in only for non-release builds.
const want_modification_safety = !@compile_var("is_release");
// In release builds this becomes `void`, so the safety counter costs nothing.
const debug_u32 = if (want_modification_safety) u32 else void;
/// Convenience constructor: a SmallHashMap with a default of 8
/// preallocated entries.
pub inline fn HashMap(inline K: type, inline V: type, inline hash: fn(key: K)->u32, inline eql: fn(a: K, b: K)->bool) {
SmallHashMap(K, V, hash, eql, 8)
}
/// Open-addressing hash map using robin-hood probing. Starts out with
/// STATIC_SIZE preallocated entries and only touches the allocator once
/// those fill up. `init` must be called before any other operation.
pub struct SmallHashMap(K: type, V: type, hash: fn(key: K)->u32, eql: fn(a: K, b: K)->bool, STATIC_SIZE: usize) {
// The active entry table: either prealloc_entries or a heap allocation.
entries: []Entry,
// Number of used entries.
size: usize,
// Upper bound on any entry's probe distance; bounds lookup/removal scans.
max_distance_from_start_index: usize,
allocator: &Allocator,
// if the hash map is small enough, we use linear search through these
// entries instead of allocating memory
prealloc_entries: [STATIC_SIZE]Entry,
// this is used to detect bugs where a hashtable is edited while an iterator is running.
modification_count: debug_u32,
const Self = SmallHashMap(K, V, hash, eql, STATIC_SIZE);
pub struct Entry {
used: bool,
// How far this entry sits from its ideal (hashed) slot.
distance_from_start_index: usize,
key: K,
value: V,
}
pub struct Iterator {
hm: &Self,
// how many items have we returned
count: usize,
// iterator through the entry array
index: usize,
// used to detect concurrent modification
initial_modification_count: debug_u32,
/// Return the next used entry, or null once all `size` items were seen.
/// Asserts (in safe builds) that the map was not modified since the
/// iterator was created.
pub fn next(it: &Iterator) -> ?&Entry {
if (want_modification_safety) {
assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
}
if (it.count >= it.hm.size) return null;
while (it.index < it.hm.entries.len; it.index += 1) {
const entry = &it.hm.entries[it.index];
if (entry.used) {
// Advance manually: `return` skips the loop's continue expression.
it.index += 1;
it.count += 1;
return entry;
}
}
unreachable{} // no next item
}
}
/// Initialize an empty map that starts out using the preallocated entries.
pub fn init(hm: &Self, allocator: &Allocator) {
hm.entries = hm.prealloc_entries[0...];
hm.allocator = allocator;
hm.size = 0;
hm.max_distance_from_start_index = 0;
for (hm.entries) |*entry| {
entry.used = false;
}
}
/// Free the entry table, but only if the map outgrew its preallocation.
pub fn deinit(hm: &Self) {
if (hm.entries.ptr != &hm.prealloc_entries[0]) {
hm.allocator.free(Entry, hm.entries);
}
}
/// Remove all entries while keeping the current capacity.
pub fn clear(hm: &Self) {
for (hm.entries) |*entry| {
entry.used = false;
}
hm.size = 0;
hm.max_distance_from_start_index = 0;
hm.increment_modification_count();
}
/// Insert `key`/`value`, overwriting any existing entry with the same key.
/// Doubles the table capacity first when the load threshold is reached;
/// can only fail on allocation during that growth.
pub fn put(hm: &Self, key: K, value: V) -> %void {
hm.increment_modification_count();
const resize = if (hm.entries.ptr == &hm.prealloc_entries[0]) {
// preallocated entries table is full
hm.size == hm.entries.len
} else {
// if we get too full (60%), double the capacity
hm.size * 5 >= hm.entries.len * 3
};
if (resize) {
const old_entries = hm.entries;
%return hm.init_capacity(hm.entries.len * 2);
// dump all of the old elements into the new table
for (old_entries) |*old_entry| {
if (old_entry.used) {
hm.internal_put(old_entry.key, old_entry.value);
}
}
if (old_entries.ptr != &hm.prealloc_entries[0]) {
hm.allocator.free(Entry, old_entries);
}
}
hm.internal_put(key, value);
}
/// Look up `key`; returns null when absent.
pub fn get(hm: &Self, key: K) -> ?&Entry {
return hm.internal_get(key);
}
/// Remove `key`, back-shifting the displaced entries that follow it so
/// probe chains stay contiguous. Asserts (crashes) if the key is absent.
pub fn remove(hm: &Self, key: K) {
hm.increment_modification_count();
const start_index = hm.key_to_index(key);
{var roll_over: usize = 0; while (roll_over <= hm.max_distance_from_start_index; roll_over += 1) {
const index = (start_index + roll_over) % hm.entries.len;
var entry = &hm.entries[index];
assert(entry.used); // key not found
if (!eql(entry.key, key)) continue;
// Found it: pull each following displaced entry one slot back toward
// its home until we hit an empty slot or an entry already at home.
while (roll_over < hm.entries.len; roll_over += 1) {
const next_index = (start_index + roll_over + 1) % hm.entries.len;
const next_entry = &hm.entries[next_index];
if (!next_entry.used || next_entry.distance_from_start_index == 0) {
entry.used = false;
hm.size -= 1;
return;
}
*entry = *next_entry;
entry.distance_from_start_index -= 1;
entry = next_entry;
}
unreachable{} // shifting everything in the table
}}
unreachable{} // key not found
}
/// Create an iterator over all used entries. Invalidated by any
/// subsequent modification of the map (checked in safe builds).
pub fn entry_iterator(hm: &Self) -> Iterator {
return Iterator {
.hm = hm,
.count = 0,
.index = 0,
.initial_modification_count = hm.modification_count,
};
}
/// Allocate a fresh, empty heap table of `capacity` entries and reset counters.
fn init_capacity(hm: &Self, capacity: usize) -> %void {
hm.entries = %return hm.allocator.alloc(Entry, capacity);
hm.size = 0;
hm.max_distance_from_start_index = 0;
for (hm.entries) |*entry| {
entry.used = false;
}
}
fn increment_modification_count(hm: &Self) {
if (want_modification_safety) {
hm.modification_count += 1;
}
}
/// Robin-hood insertion: while probing, steal the slot of any resident
/// entry that is closer to its own home than we are, then carry that
/// displaced entry forward and keep probing on its behalf.
fn internal_put(hm: &Self, orig_key: K, orig_value: V) {
var key = orig_key;
var value = orig_value;
const start_index = hm.key_to_index(key);
var roll_over: usize = 0;
var distance_from_start_index: usize = 0;
while (roll_over < hm.entries.len; {roll_over += 1; distance_from_start_index += 1}) {
const index = (start_index + roll_over) % hm.entries.len;
const entry = &hm.entries[index];
if (entry.used && !eql(entry.key, key)) {
if (entry.distance_from_start_index < distance_from_start_index) {
// robin hood to the rescue
const tmp = *entry;
hm.max_distance_from_start_index = math.max(usize,
hm.max_distance_from_start_index, distance_from_start_index);
*entry = Entry {
.used = true,
.distance_from_start_index = distance_from_start_index,
.key = key,
.value = value,
};
key = tmp.key;
value = tmp.value;
distance_from_start_index = tmp.distance_from_start_index;
}
continue;
}
if (!entry.used) {
// adding an entry. otherwise overwriting old value with
// same key
hm.size += 1;
}
hm.max_distance_from_start_index = math.max(usize, distance_from_start_index,
hm.max_distance_from_start_index);
*entry = Entry {
.used = true,
.distance_from_start_index = distance_from_start_index,
.key = key,
.value = value,
};
return;
}
unreachable{} // put into a full map
}
/// Linear probe from the key's home slot; an unused slot or exceeding
/// max_distance_from_start_index means the key is absent.
fn internal_get(hm: &Self, key: K) -> ?&Entry {
const start_index = hm.key_to_index(key);
{var roll_over: usize = 0; while (roll_over <= hm.max_distance_from_start_index; roll_over += 1) {
const index = (start_index + roll_over) % hm.entries.len;
const entry = &hm.entries[index];
if (!entry.used) return null;
if (eql(entry.key, key)) return entry;
}}
return null;
}
/// Map a key to its ideal slot in the current table.
fn key_to_index(hm: &Self, key: K) -> usize {
return usize(hash(key)) % hm.entries.len;
}
}
// A trivial bump allocator backed by a fixed 200-byte static buffer,
// used by the hash map test below.
var global_allocator = Allocator {
.alloc_fn = global_alloc,
.realloc_fn = global_realloc,
.free_fn = global_free,
.context = null,
};
var some_mem: [200]u8 = undefined;
// Next free offset into some_mem; only ever grows (free is a no-op).
var some_mem_index: usize = 0;
/// Hand out the next `n` bytes of the static buffer. Memory is never reclaimed.
/// NOTE(review): there is no explicit capacity check — exhausting the
/// 200-byte buffer trips the slice bounds rather than returning an error;
/// confirm that is acceptable for this demo allocator.
fn global_alloc(self: &Allocator, n: usize) -> %[]u8 {
const result = some_mem[some_mem_index ... some_mem_index + n];
some_mem_index += n;
return result;
}
/// Resize by grabbing a fresh region from the bump allocator and copying
/// the old contents over. The old region is abandoned (never reclaimed).
fn global_realloc(self: &Allocator, old_mem: []u8, new_size: usize) -> %[]u8 {
    const result = %return global_alloc(self, new_size);
    // Copy only as many bytes as fit in the new region. The previous code
    // always copied old_mem.len bytes, overrunning `result` when shrinking
    // (new_size < old_mem.len).
    const copy_len = if (old_mem.len < new_size) old_mem.len else new_size;
    @memcpy(result.ptr, old_mem.ptr, copy_len);
    return result;
}
/// No-op: the bump allocator never reclaims memory.
fn global_free(self: &Allocator, old_mem: []u8) {
}
#attribute("test")
fn basic_hash_map_test() {
// STATIC_SIZE of 4 means the fifth put forces a heap allocation + rehash.
var map: SmallHashMap(i32, i32, hash_i32, eql_i32, 4) = undefined;
map.init(&global_allocator);
defer map.deinit();
%%map.put(1, 11);
%%map.put(2, 22);
%%map.put(3, 33);
%%map.put(4, 44);
%%map.put(5, 55);
// `??` asserts the optional is non-null before dereferencing it.
assert((??map.get(2)).value == 22);
map.remove(2);
// After removal the lookup must come back null.
assert(if (const entry ?= map.get(2)) false else true);
}
/// Hash callback for the test map: reinterpret the i32 key's bits as u32.
fn hash_i32(x: i32) -> u32 {
*(&u32)(&x)
}
/// Key-equality callback for the test map.
fn eql_i32(a: i32, b: i32) -> bool {
    return a == b;
}
const std = @import("std");
const io = std.io;
const str = std.str;
/// `cat` clone: copy each FILE argument (or stdin for "-") to stdout.
/// With no file arguments, copies stdin. Unknown flags print usage.
pub fn main(args: [][]u8) -> %void {
const exe = args[0];
var catted_anything = false;
// args[1...] skips the program name.
for (args[1...]) |arg| {
if (str.eql(arg, "-")) {
// A lone "-" means "cat stdin at this position".
catted_anything = true;
cat_stream(io.stdin) %% |err| return err;
} else if (arg[0] == '-') {
// Any other dash-prefixed argument is treated as an unknown flag.
return usage(exe);
} else {
var is = io.InStream.open(arg) %% |err| {
%%io.stderr.printf("Unable to open file: ");
%%io.stderr.printf(@err_name(err));
%%io.stderr.printf("\n");
return err;
};
defer %%is.close();
catted_anything = true;
cat_stream(is) %% |err| return err;
}
}
// No file arguments at all: behave like plain `cat` and copy stdin.
if (!catted_anything) {
cat_stream(io.stdin) %% |err| return err;
}
io.stdout.flush() %% |err| return err;
}
/// Print a usage message to stderr and return error.Invalid.
fn usage(exe: []u8) -> %void {
%%io.stderr.printf("Usage: ");
%%io.stderr.printf(exe);
%%io.stderr.printf(" [FILE]...\n");
return error.Invalid;
}
/// Copy `is` to stdout in 4 KiB chunks until EOF, reporting any read or
/// write failure to stderr before propagating it.
fn cat_stream(is: io.InStream) -> %void {
var buf: [1024 * 4]u8 = undefined;
while (true) {
const bytes_read = is.read(buf) %% |err| {
%%io.stderr.printf("Unable to read from stream: ");
%%io.stderr.printf(@err_name(err));
%%io.stderr.printf("\n");
return err;
};
// A zero-byte read signals end of stream.
if (bytes_read == 0) {
break;
}
io.stdout.write(buf[0...bytes_read]) %% |err| {
%%io.stderr.printf("Unable to write to stdout: ");
%%io.stderr.printf(@err_name(err));
%%io.stderr.printf("\n");
return err;
};
}
}
/// Compile the primitive (flat-color) and texture shader programs and
/// resolve their attribute/uniform locations. Asserts that no GL error
/// occurred before returning.
pub fn create_all_shaders() -> AllShaders {
var as : AllShaders = undefined;
// Flat-color pipeline: transform the vertex by MVP, fill with a uniform color.
as.primitive = create_shader(
\\#version 150 core
\\
\\in vec3 VertexPosition;
\\
\\uniform mat4 MVP;
\\
\\void main(void) {
\\ gl_Position = vec4(VertexPosition, 1.0) * MVP;
\\}
,
\\#version 150 core
\\
\\out vec4 FragColor;
\\
\\uniform vec4 Color;
\\
\\void main(void) {
\\ FragColor = Color;
\\}
, null);
as.primitive_attrib_position = as.primitive.attrib_location(c"VertexPosition");
as.primitive_uniform_mvp = as.primitive.uniform_location(c"MVP");
as.primitive_uniform_color = as.primitive.uniform_location(c"Color");
// Textured pipeline: pass the texture coordinate through to the sampler.
as.texture = create_shader(
\\#version 150 core
\\
\\in vec3 VertexPosition;
\\in vec2 TexCoord;
\\
\\out vec2 FragTexCoord;
\\
\\uniform mat4 MVP;
\\
\\void main(void)
\\{
\\ FragTexCoord = TexCoord;
\\ gl_Position = vec4(VertexPosition, 1.0) * MVP;
\\}
,
\\#version 150 core
\\
\\in vec2 FragTexCoord;
\\out vec4 FragColor;
\\
\\uniform sampler2D Tex;
\\
\\void main(void)
\\{
\\ FragColor = texture(Tex, FragTexCoord);
\\}
, null);
// The primitive_* locations were already resolved above; the duplicated
// copy-paste lookups that used to follow here have been removed.
as.texture_attrib_tex_coord = as.texture.attrib_location(c"TexCoord");
as.texture_attrib_position = as.texture.attrib_location(c"VertexPosition");
as.texture_uniform_mvp = as.texture.uniform_location(c"MVP");
as.texture_uniform_tex = as.texture.uniform_location(c"Tex");
debug_gl.assert_no_error();
return as;
}
const assert = @import("debug.zig").assert;
const rand_test = @import("rand_test.zig");
// Standard MT19937 (32-bit) parameter set: word type, state size n,
// middle word m, separation point r, twist constant a, tempering
// shift/mask pairs (u,d / s,b / t,c / l), and seeding multiplier f.
pub const MT19937_32 = MersenneTwister(
u32, 624, 397, 31,
0x9908B0DF,
11, 0xFFFFFFFF,
7, 0x9D2C5680,
15, 0xEFC60000,
18, 1812433253);
// 64-bit variant (MT19937-64) with the corresponding parameters.
pub const MT19937_64 = MersenneTwister(
u64, 312, 156, 31,
0xB5026F5AA96619E9,
29, 0x5555555555555555,
17, 0x71D67FFFEDA60000,
37, 0xFFF7EEE000000000,
43, 6364136223846793005);
/// Use `init` to initialize this state.
pub struct Rand {
// Pick the generator matching the native word size.
const Rng = if (@sizeof(usize) >= 8) MT19937_64 else MT19937_32;
rng: Rng,
/// Initialize random state with the given seed.
pub fn init(seed: usize) -> Rand {
var r: Rand = undefined;
r.rng = Rng.init(seed);
return r;
}
/// Get an integer with random bits.
pub fn scalar(r: &Rand, inline T: type) -> T {
if (T == usize) {
return r.rng.get();
} else {
// For other widths, fill a byte buffer and reinterpret it as T.
var result: [@sizeof(T)]u8 = undefined;
r.fill_bytes(result);
return ([]T)(result)[0];
}
}
/// Fill `buf` with randomness.
pub fn fill_bytes(r: &Rand, buf: []u8) {
var bytes_left = buf.len;
// Write one usize worth of random bits at a time while a full word fits.
while (bytes_left >= @sizeof(usize)) {
([]usize)(buf[buf.len - bytes_left...])[0] = r.rng.get();
bytes_left -= @sizeof(usize);
}
// Handle the trailing partial word byte by byte from a scratch buffer.
if (bytes_left > 0) {
var rand_val_array : [@sizeof(usize)]u8 = undefined;
([]usize)(rand_val_array)[0] = r.rng.get();
while (bytes_left > 0) {
buf[buf.len - bytes_left] = rand_val_array[@sizeof(usize) - bytes_left];
bytes_left -= 1;
}
}
}
/// Get a random unsigned integer with even distribution between `start`
/// inclusive and `end` exclusive.
// TODO support signed integers and then rename to "range"
// NOTE(review): assumes end > start; `range` of zero would divide by zero
// below — confirm the caller contract.
pub fn range_unsigned(r: &Rand, inline T: type, start: T, end: T) -> T {
const range = end - start;
// Rejection sampling: discard values from the biased tail so the modulo
// below stays uniform.
const leftover = @max_value(T) % range;
const upper_bound = @max_value(T) - leftover;
var rand_val_array : [@sizeof(T)]u8 = undefined;
while (true) {
r.fill_bytes(rand_val_array);
const rand_val = ([]T)(rand_val_array)[0];
if (rand_val < upper_bound) {
return start + (rand_val % range);
}
}
}
/// Get a floating point value in the range 0.0..1.0.
pub fn float(r: &Rand, inline T: type) -> T {
// TODO Implement this way instead:
// const int = @int_type(false, @sizeof(T) * 8);
// const mask = ((1 << @float_mantissa_bit_count(T)) - 1);
// const rand_bits = r.rng.scalar(int) & mask;
// return @float_compose(T, false, 0, rand_bits) - 1.0
const int_type = @int_type(false, @sizeof(T) * 8);
// Number of distinct outputs: 2^24 for f32, 2^53 for f64 — the largest
// integer ranges exactly representable in each type.
const precision = if (T == f32) {
16777216
} else if (T == f64) {
9007199254740992
} else {
@compile_err("unknown floating point type" ++ @type_name(T))
};
return T(r.range_unsigned(int_type, 0, precision)) / T(precision);
}
}
/// Generic Mersenne Twister. The parameters select the standard MT19937
/// (32-bit) or MT19937-64 variants declared above.
struct MersenneTwister(
int: type, n: usize, m: usize, r: int,
a: int,
u: int, d: int,
s: int, b: int,
t: int, c: int,
l: int, f: int)
{
const Self = MersenneTwister(int, n, m, r, a, u, d, s, b, t, c, l, f);
// Internal state: n words plus the index of the next word to hand out.
array: [n]int,
index: usize,
// TODO improve compile time eval code and then allow this function to be executed at compile time.
#static_eval_enable(false)
/// Seed the state array via the standard MT initialization recurrence
/// using multiplier `f`.
pub fn init(seed: int) -> Self {
var mt: Self = undefined;
// index == n marks the state as exhausted, forcing a regeneration on
// the first call to get().
mt.index = n;
var prev_value = seed;
mt.array[0] = prev_value;
{var i: usize = 1; while (i < n; i += 1) {
prev_value = int(i) +% f *% (prev_value ^ (prev_value >> (int.bit_count - 2)));
mt.array[i] = prev_value;
}};
return mt;
}
/// Return the next random word, regenerating all n state words when the
/// buffer is exhausted.
pub fn get(mt: &Self) -> int {
// mag01[x & 1] is 0 or the twist constant `a`.
const mag01 = []int{0, a};
const LM: int = (1 << r) - 1; // mask for the low r bits
const UM = ~LM; // mask for the remaining high bits
if (mt.index >= n) {
// Twist: combine each word's high bits with the next word's low bits.
var i: usize = 0;
while (i < n - m; i += 1) {
const x = (mt.array[i] & UM) | (mt.array[i + 1] & LM);
mt.array[i] = mt.array[i + m] ^ (x >> 1) ^ mag01[x & 0x1];
}
while (i < n - 1; i += 1) {
const x = (mt.array[i] & UM) | (mt.array[i + 1] & LM);
mt.array[i] = mt.array[i + m - n] ^ (x >> 1) ^ mag01[x & 0x1];
}
// The last word wraps around to array[0].
const x = (mt.array[i] & UM) | (mt.array[0] & LM);
mt.array[i] = mt.array[m - 1] ^ (x >> 1) ^ mag01[x & 0x1];
mt.index = 0;
}
var x = mt.array[mt.index];
mt.index += 1;
// Tempering shifts/masks, applied to the raw state word before output.
x ^= ((x >> u) & d);
x ^= ((x <<% s) & b);
x ^= ((x <<% t) & c);
x ^= (x >> l);
return x;
}
}
#attribute("test")
// Sanity check: float(f32) stays within [0.0, 1.0) across many samples.
fn test_float32() {
var r = Rand.init(42);
{var i: usize = 0; while (i < 1000; i += 1) {
const val = r.float(f32);
assert(val >= 0.0);
assert(val < 1.0);
}}
}
#attribute("test")
// Compare the generator's output against a known-good reference sequence.
fn test_MT19937_64() {
    // `get` mutates the generator state through `&Self`, so the binding
    // must be `var`, not `const`.
    var rng = MT19937_64.init(rand_test.mt64_seed);
    for (rand_test.mt64_data) |value| {
        assert(value == rng.get());
    }
}
#attribute("test")
// Compare the generator's output against a known-good reference sequence.
fn test_MT19937_32() {
    // `get` mutates the generator state through `&Self`, so the binding
    // must be `var`, not `const`.
    var rng = MT19937_32.init(rand_test.mt32_seed);
    for (rand_test.mt32_data) |value| {
        assert(value == rng.get());
    }
}