Commit 06c4b35eb1
Changed files (9)
example/guess_number/main.zig
src/all_types.hpp
src/analyze.cpp
src/codegen.cpp
src/eval.cpp
src/parser.cpp
src/zig_llvm.hpp
std/rand.zig
test/run_tests.cpp
example/guess_number/main.zig
@@ -6,13 +6,11 @@ const os = std.os;
pub fn main(args: [][]u8) -> %void {
%%io.stdout.printf("Welcome to the Guess Number Game in Zig.\n");
- var seed : u32 = undefined;
- const seed_bytes = (&u8)(&seed)[0...4];
- %%os.get_random_bytes(seed_bytes);
+ var seed: [@sizeof(usize)]u8 = undefined;
+ %%os.get_random_bytes(seed);
+ var rand = Rand.init(([]usize)(seed)[0]);
- var rand = Rand.init(seed);
-
- const answer = rand.range_u64(0, 100) + 1;
+ const answer = rand.range_unsigned(u8, 0, 100) + 1;
while (true) {
%%io.stdout.printf("\nGuess a number between 1 and 100: ");
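Note on the seeding change above: the game no longer seeds with a bare u32; it fills a [@sizeof(usize)]u8 buffer from os.get_random_bytes and reinterprets it as a single usize through the new bytes-to-slice cast. A minimal C++ sketch of the same pattern, not part of the commit (get_random_bytes here is a local stand-in for the OS call):

#include <cstdint>
#include <cstring>
#include <random>

// Stand-in for os.get_random_bytes: fill buf with len random bytes.
static void get_random_bytes(uint8_t *buf, size_t len) {
    std::random_device rd;
    for (size_t i = 0; i < len; i += 1)
        buf[i] = static_cast<uint8_t>(rd());
}

static size_t make_seed(void) {
    uint8_t seed_bytes[sizeof(size_t)];   // analogous to [@sizeof(usize)]u8
    get_random_bytes(seed_bytes, sizeof(seed_bytes));

    size_t seed;
    // The Zig code views the byte array as []usize and reads element 0;
    // memcpy is the portable C++ way to express that reinterpretation.
    std::memcpy(&seed, seed_bytes, sizeof(seed));
    return seed;
}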
src/all_types.hpp
@@ -387,6 +387,7 @@ enum CastOp {
CastOpBoolToInt,
CastOpResizeSlice,
CastOpIntToEnum,
+ CastOpBytesToSlice,
};
struct AstNodeFnCallExpr {
@@ -1311,6 +1312,7 @@ struct VariableTableEntry {
int gen_arg_index;
BlockContext *block_context;
LLVMValueRef param_value_ref;
+ bool force_depends_on_compile_var;
};
struct ErrorTableEntry {
src/analyze.cpp
@@ -3038,7 +3038,7 @@ static TypeTableEntry *analyze_var_ref(CodeGen *g, AstNode *source_node, Variabl
ConstExprValue *other_const_val = &get_resolved_expr(var->val_node)->const_val;
if (other_const_val->ok) {
return resolve_expr_const_val_as_other_expr(g, source_node, var->val_node,
- depends_on_compile_var);
+ depends_on_compile_var || var->force_depends_on_compile_var);
}
}
return var->type;
@@ -3959,12 +3959,12 @@ static TypeTableEntry *analyze_while_expr(CodeGen *g, ImportTableEntry *import,
{
assert(node->type == NodeTypeWhileExpr);
- AstNode *condition_node = node->data.while_expr.condition;
+ AstNode **condition_node = &node->data.while_expr.condition;
AstNode *while_body_node = node->data.while_expr.body;
AstNode **continue_expr_node = &node->data.while_expr.continue_expr;
TypeTableEntry *condition_type = analyze_expression(g, import, context,
- g->builtin_types.entry_bool, condition_node);
+ g->builtin_types.entry_bool, *condition_node);
if (*continue_expr_node) {
analyze_expression(g, import, context, g->builtin_types.entry_void, *continue_expr_node);
@@ -3983,7 +3983,7 @@ static TypeTableEntry *analyze_while_expr(CodeGen *g, ImportTableEntry *import,
} else {
// if the condition is a simple constant expression and there are no break statements
// then the return type is unreachable
- ConstExprValue *const_val = &get_resolved_expr(condition_node)->const_val;
+ ConstExprValue *const_val = &get_resolved_expr(*condition_node)->const_val;
if (const_val->ok) {
if (const_val->data.x_bool) {
node->data.while_expr.condition_always_true = true;
@@ -4392,6 +4392,24 @@ static TypeTableEntry *analyze_cast_expr(CodeGen *g, ImportTableEntry *import, B
return resolve_cast(g, context, node, expr_node, wanted_type, CastOpResizeSlice, true);
}
+ // explicit cast from [N]u8 to []T
+ if (is_slice(wanted_type) &&
+ actual_type->id == TypeTableEntryIdArray &&
+ is_u8(actual_type->data.array.child_type))
+ {
+ mark_impure_fn(context);
+ uint64_t child_type_size = type_size(g,
+ wanted_type->data.structure.fields[0].type_entry->data.pointer.child_type);
+ if (actual_type->data.array.len % child_type_size == 0) {
+ return resolve_cast(g, context, node, expr_node, wanted_type, CastOpBytesToSlice, true);
+ } else {
+ add_node_error(g, node,
+ buf_sprintf("unable to convert %s to %s: size mismatch",
+ buf_ptr(&actual_type->name), buf_ptr(&wanted_type->name)));
+ return g->builtin_types.entry_invalid;
+ }
+ }
+
// explicit cast from pointer to another pointer
if ((actual_type->id == TypeTableEntryIdPointer || actual_type->id == TypeTableEntryIdFn) &&
(wanted_type->id == TypeTableEntryIdPointer || wanted_type->id == TypeTableEntryIdFn))
@@ -5062,8 +5080,10 @@ static TypeTableEntry *analyze_builtin_fn_call_expr(CodeGen *g, ImportTableEntry
return g->builtin_types.entry_invalid;
} else {
uint64_t size_in_bytes = type_size(g, type_entry);
+ bool depends_on_compile_var = (type_entry == g->builtin_types.entry_usize ||
+ type_entry == g->builtin_types.entry_isize);
return resolve_expr_const_val_as_unsigned_num_lit(g, node, expected_type,
- size_in_bytes, false);
+ size_in_bytes, depends_on_compile_var);
}
}
case BuiltinFnIdAlignof:
@@ -5461,8 +5481,11 @@ static TypeTableEntry *analyze_fn_call_with_inline_args(CodeGen *g, ImportTableE
ConstExprValue *const_val = &get_resolved_expr(*param_node)->const_val;
if (const_val->ok) {
- add_local_var(g, generic_param_decl_node, decl_node->owner, child_context,
+ VariableTableEntry *var = add_local_var(g, generic_param_decl_node, decl_node->owner, child_context,
&generic_param_decl_node->data.param_decl.name, param_type, true, *param_node);
+ // This generic function instance could be called with anything, so when this variable is read it
+ // needs to know that it depends on compile time variable data.
+ var->force_depends_on_compile_var = true;
} else {
add_node_error(g, *param_node,
buf_sprintf("unable to evaluate constant expression for inline parameter"));
@@ -5552,8 +5575,9 @@ static TypeTableEntry *analyze_generic_fn_call(CodeGen *g, ImportTableEntry *imp
ConstExprValue *const_val = &get_resolved_expr(*param_node)->const_val;
if (const_val->ok) {
- add_local_var(g, generic_param_decl_node, decl_node->owner, child_context,
+ VariableTableEntry *var = add_local_var(g, generic_param_decl_node, decl_node->owner, child_context,
&generic_param_decl_node->data.param_decl.name, param_type, true, *param_node);
+ var->force_depends_on_compile_var = true;
} else {
add_node_error(g, *param_node, buf_sprintf("unable to evaluate constant expression"));
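Note on the cast rule added in this file: an explicit cast from [N]u8 to []T is accepted only when N is a multiple of the element size; otherwise the compiler emits the "size mismatch" error exercised by the new test case in test/run_tests.cpp. A minimal C++ sketch of just that rule, not compiler API (array_len and elem_size are illustrative names):

#include <cstdint>
#include <stdexcept>

// [N]u8 may become []T only if N is a multiple of sizeof(T);
// the resulting slice length is N / sizeof(T).
static uint64_t bytes_to_slice_len(uint64_t array_len, uint64_t elem_size) {
    if (array_len % elem_size != 0)
        throw std::runtime_error("unable to convert: size mismatch");
    return array_len / elem_size;
}

For example, bytes_to_slice_len(5, 4) rejects the conversion, matching the [5]u8 to []u32 failure the new test expects, while bytes_to_slice_len(8, 4) would yield a slice of length 2.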
src/codegen.cpp
@@ -1005,6 +1005,31 @@ static LLVMValueRef gen_cast_expr(CodeGen *g, AstNode *node) {
LLVMBuildStore(g->builder, new_len, dest_len_ptr);
+ return cast_expr->tmp_ptr;
+ }
+ case CastOpBytesToSlice:
+ {
+ assert(cast_expr->tmp_ptr);
+ assert(wanted_type->id == TypeTableEntryIdStruct);
+ assert(wanted_type->data.structure.is_slice);
+ assert(actual_type->id == TypeTableEntryIdArray);
+
+ TypeTableEntry *wanted_pointer_type = wanted_type->data.structure.fields[0].type_entry;
+ TypeTableEntry *wanted_child_type = wanted_pointer_type->data.pointer.child_type;
+
+ set_debug_source_node(g, node);
+
+ int wanted_ptr_index = wanted_type->data.structure.fields[0].gen_index;
+ LLVMValueRef dest_ptr_ptr = LLVMBuildStructGEP(g->builder, cast_expr->tmp_ptr, wanted_ptr_index, "");
+ LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, expr_val, wanted_pointer_type->type_ref, "");
+ LLVMBuildStore(g->builder, src_ptr_casted, dest_ptr_ptr);
+
+ int wanted_len_index = wanted_type->data.structure.fields[1].gen_index;
+ LLVMValueRef len_ptr = LLVMBuildStructGEP(g->builder, cast_expr->tmp_ptr, wanted_len_index, "");
+ LLVMValueRef len_val = LLVMConstInt(g->builtin_types.entry_usize->type_ref,
+ actual_type->data.array.len / type_size(g, wanted_child_type), false);
+ LLVMBuildStore(g->builder, len_val, len_ptr);
+
return cast_expr->tmp_ptr;
}
case CastOpIntToFloat:
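Note on the codegen above: CastOpBytesToSlice fills the slice's two fields directly, bitcasting the array pointer into the slice's pointer field and storing a constant length equal to the byte count divided by the element size. A C++ model of the resulting value, not part of the commit (Slice and bytes_to_slice are illustrative names for the {ptr, len} pair the diff constructs):

#include <cstdint>
#include <cstddef>

// Model of a Zig slice: a pointer plus an element count.
template <typename T>
struct Slice {
    T *ptr;
    size_t len;
};

// Equivalent of CastOpBytesToSlice on a fixed-size byte array:
// reinterpret the data pointer, divide the byte count by sizeof(T).
template <typename T, size_t N>
static Slice<T> bytes_to_slice(uint8_t (&bytes)[N]) {
    static_assert(N % sizeof(T) == 0, "size mismatch");
    return Slice<T>{ reinterpret_cast<T *>(bytes), N / sizeof(T) };
}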
src/eval.cpp
@@ -601,6 +601,7 @@ void eval_const_expr_implicit_cast(CastOp cast_op,
case CastOpPtrToInt:
case CastOpIntToPtr:
case CastOpResizeSlice:
+ case CastOpBytesToSlice:
// can't do it
break;
case CastOpToUnknownSizeArray:
src/parser.cpp
@@ -3301,6 +3301,8 @@ AstNode *ast_clone_subtree_special(AstNode *old_node, uint32_t *next_node_index,
case NodeTypeWhileExpr:
clone_subtree_field(&new_node->data.while_expr.condition, old_node->data.while_expr.condition, next_node_index);
clone_subtree_field(&new_node->data.while_expr.body, old_node->data.while_expr.body, next_node_index);
+ clone_subtree_field(&new_node->data.while_expr.continue_expr,
+ old_node->data.while_expr.continue_expr, next_node_index);
break;
case NodeTypeForExpr:
clone_subtree_field(&new_node->data.for_expr.elem_node, old_node->data.for_expr.elem_node, next_node_index);
src/zig_llvm.hpp
@@ -145,7 +145,6 @@ LLVMZigDISubprogram *LLVMZigCreateFunction(LLVMZigDIBuilder *dibuilder, LLVMZigD
LLVMZigDIType *fn_di_type, bool is_local_to_unit, bool is_definition, unsigned scope_line,
unsigned flags, bool is_optimized, LLVMZigDISubprogram *decl_subprogram);
-
void ZigLLVMFnSetSubprogram(LLVMValueRef fn, LLVMZigDISubprogram *subprogram);
void LLVMZigDIBuilderFinalize(LLVMZigDIBuilder *dibuilder);
std/rand.zig
@@ -1,51 +1,57 @@
-// Mersenne Twister
-const ARRAY_SIZE = 624;
+const assert = @import("debug.zig").assert;
+
+pub const MT19937_32 = MersenneTwister(
+ u32, 624, 397, 31,
+ 0x9908B0DF,
+ 11, 0xFFFFFFFF,
+ 7, 0x9D2C5680,
+ 15, 0xEFC60000,
+ 18, 1812433253);
+
+pub const MT19937_64 = MersenneTwister(
+ u64, 312, 156, 31,
+ 0xB5026F5AA96619E9,
+ 29, 0x5555555555555555,
+ 17, 0x71D67FFFEDA60000,
+ 37, 0xFFF7EEE000000000,
+ 43, 6364136223846793005);
/// Use `init` to initialize this state.
pub struct Rand {
- array: [ARRAY_SIZE]u32,
- index: usize,
+ const Rng = if (@sizeof(usize) >= 8) MT19937_64 else MT19937_32;
+
+ rng: Rng,
/// Initialize random state with the given seed.
- #static_eval_enable(false)
- pub fn init(seed: u32) -> Rand {
+ pub fn init(seed: usize) -> Rand {
var r: Rand = undefined;
- r.index = 0;
- r.array[0] = seed;
- var i : usize = 1;
- var prev_value: u64w = seed;
- while (i < ARRAY_SIZE; i += 1) {
- r.array[i] = @truncate(u32, (prev_value ^ (prev_value << 30)) * 0x6c078965 + u64w(i));
- prev_value = r.array[i];
- }
+ r.rng = Rng.init(seed);
return r;
}
- /// Get 32 bits of randomness.
- pub fn get_u32(r: &Rand) -> u32 {
- if (r.index == 0) {
- r.generate_numbers();
+ /// Get an integer with random bits.
+ pub fn scalar(r: &Rand, inline T: type) -> T {
+ if (T == usize) {
+ return r.rng.get();
+ } else {
+ var result: T = undefined;
+ r.fill_bytes(([]u8)((&result)[0...@sizeof(T)]));
+ return result;
}
-
- // temper the number
- var y : u32 = r.array[r.index];
- y ^= y >> 11;
- y ^= (y >> 7) & 0x9d2c5680;
- y ^= (y >> 15) & 0xefc60000;
- y ^= y >> 18;
-
- r.index = (r.index + 1) % ARRAY_SIZE;
- return y;
}
/// Fill `buf` with randomness.
- pub fn get_bytes(r: &Rand, buf: []u8) {
- var bytes_left = r.get_bytes_aligned(buf);
+ pub fn fill_bytes(r: &Rand, buf: []u8) {
+ var bytes_left = buf.len;
+ while (bytes_left >= @sizeof(usize)) {
+ *((&usize)(&buf[buf.len - bytes_left])) = r.scalar(usize);
+ bytes_left -= @sizeof(usize);
+ }
if (bytes_left > 0) {
- var rand_val_array : [@sizeof(u32)]u8 = undefined;
- *((&u32)(&rand_val_array[0])) = r.get_u32();
+ var rand_val_array : [@sizeof(usize)]u8 = undefined;
+ ([]usize)(rand_val_array)[0] = r.scalar(usize);
while (bytes_left > 0) {
- buf[buf.len - bytes_left] = rand_val_array[@sizeof(u32) - bytes_left];
+ buf[buf.len - bytes_left] = rand_val_array[@sizeof(usize) - bytes_left];
bytes_left -= 1;
}
}
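Note on fill_bytes above: it writes one whole machine word at a time while at least @sizeof(usize) bytes remain, then draws one more word and copies only as many of its trailing bytes as are still needed. A C++ sketch of the same loop, not part of the commit (next_word stands in for r.scalar(usize)):

#include <cstdint>
#include <cstring>
#include <random>

// Stand-in word source for r.scalar(usize).
static size_t next_word(void) {
    static std::mt19937_64 rng(42);
    return static_cast<size_t>(rng());
}

static void fill_bytes(uint8_t *buf, size_t len) {
    size_t bytes_left = len;
    // Whole words first.
    while (bytes_left >= sizeof(size_t)) {
        size_t w = next_word();
        std::memcpy(buf + (len - bytes_left), &w, sizeof(w));
        bytes_left -= sizeof(w);
    }
    // Partial tail: take the trailing bytes_left bytes of one final word,
    // mirroring rand_val_array[@sizeof(usize) - bytes_left ...] above.
    if (bytes_left > 0) {
        size_t w = next_word();
        const uint8_t *word_bytes = reinterpret_cast<const uint8_t *>(&w);
        std::memcpy(buf + (len - bytes_left), word_bytes + (sizeof(w) - bytes_left), bytes_left);
    }
}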
@@ -53,61 +59,119 @@ pub struct Rand {
/// Get a random unsigned integer with even distribution between `start`
/// inclusive and `end` exclusive.
- pub fn range_u64(r: &Rand, start: u64, end: u64) -> u64 {
+ // TODO support signed integers and then rename to "range"
+ pub fn range_unsigned(r: &Rand, inline T: type, start: T, end: T) -> T {
const range = end - start;
- const leftover = @max_value(u64) % range;
- const upper_bound = @max_value(u64) - leftover;
- var rand_val_array : [@sizeof(u64)]u8 = undefined;
+ const leftover = @max_value(T) % range;
+ const upper_bound = @max_value(T) - leftover;
+ var rand_val_array : [@sizeof(T)]u8 = undefined;
while (true) {
- r.get_bytes_aligned(rand_val_array);
- const rand_val = *(&u64)(&rand_val_array[0]);
+ r.fill_bytes(rand_val_array);
+ const rand_val = ([]T)(rand_val_array)[0];
if (rand_val < upper_bound) {
return start + (rand_val % range);
}
}
}
- pub fn float32(r: &Rand) -> f32 {
- const precision = 16777216;
- return f32(r.range_u64(0, precision)) / precision;
+ /// Get a floating point value in the range 0.0..1.0.
+ pub fn float(r: &Rand, inline T: type) -> T {
+ const int_type = @int_type(false, @sizeof(T) * 8, false);
+ // TODO switch statement for constant values
+ const precision = if (T == f32) {
+ 16777216
+ } else if (T == f64) {
+ 9007199254740992
+ } else {
+ @compile_err("unknown floating point type" ++ @type_name(T))
+ };
+ return T(r.range_unsigned(int_type, 0, precision)) / T(precision);
}
+}
+
+struct MersenneTwister(
+ int: type, n: usize, m: usize, r: int,
+ a: int,
+ u: int, d: int,
+ s: int, b: int,
+ t: int, c: int,
+ l: int, f: int)
+{
+ const Self = MersenneTwister(int, n, m, r, a, u, d, s, b, t, c, l, f);
+ const intw = @int_type(int.is_signed, int.bit_count, true);
+
+ array: [n]int,
+ index: usize,
+
+ // TODO improve compile time eval code and then allow this function to be executed at compile time.
+ #static_eval_enable(false)
+ pub fn init(seed: int) -> Self {
+ var mt = Self {
+ .index = n,
+ .array = undefined,
+ };
+
+ var prev_value = seed;
+ mt.array[0] = prev_value;
+ {var i: usize = 1; while (i < n; i += 1) {
+ prev_value = intw(i) + intw(f) * intw(prev_value ^ (prev_value >> (int.bit_count - 2)));
+ mt.array[i] = prev_value;
+ }};
- pub fn boolean(r: &Rand) -> bool {
- return (r.get_u32() & 0x1) == 1;
+ return mt;
}
- fn generate_numbers(r: &Rand) {
- for (r.array) |item, i| {
- const y : u32 = (item & 0x80000000) + (r.array[(i + 1) % ARRAY_SIZE] & 0x7fffffff);
- const untempered : u32 = r.array[(i + 397) % ARRAY_SIZE] ^ (y >> 1);
- r.array[i] = if ((y % 2) == 0) {
- untempered
- } else {
- // y is odd
- untempered ^ 0x9908b0df
- };
+ pub fn get(mt: &Self) -> int {
+ const mag01 = []int{0, a};
+ const LM: int = (1 << r) - 1;
+ const UM = ~LM;
+
+ if (int.bit_count == 64) {
+ assert(LM == 0x7fffffff);
+ assert(UM == 0xffffffff80000000);
+ } else if (int.bit_count == 32) {
+ assert(LM == 0x7fffffff);
+ assert(UM == 0x80000000);
}
- }
- // does not populate the remaining (buf.len % 4) bytes
- fn get_bytes_aligned(r: &Rand, buf: []u8) -> usize {
- var bytes_left = buf.len;
- while (bytes_left >= 4) {
- *((&u32)(&buf[buf.len - bytes_left])) = r.get_u32();
- bytes_left -= @sizeof(u32);
+ if (mt.index >= n) {
+ var i: usize = 0;
+
+ while (i < n - m; i += 1) {
+ const x = (mt.array[i] & UM) | (mt.array[i + 1] & LM);
+ mt.array[i] = mt.array[i + m] ^ (x >> 1) ^ mag01[x & 0x1];
+ }
+
+ while (i < n - 1; i += 1) {
+ const x = (mt.array[i] & UM) | (mt.array[i + 1] & LM);
+ mt.array[i] = mt.array[i + m - n] ^ (x >> 1) ^ mag01[x & 0x1];
+
+ }
+ const x = (mt.array[i] & UM) | (mt.array[0] & LM);
+ mt.array[i] = mt.array[m - 1] ^ (x >> 1) ^ mag01[x & 0x1];
+
+ mt.index = 0;
}
- return bytes_left;
- }
+ var x: intw = mt.array[mt.index];
+ mt.index += 1;
+
+ x ^= ((x >> u) & d);
+ x ^= ((x << s) & b);
+ x ^= ((x << t) & c);
+ x ^= (x >> l);
+
+ return x;
+ }
}
#attribute("test")
fn test_float32() {
var r = Rand.init(42);
- {var i: i32 = 0; while (i < 1000; i += 1) {
- const val = r.float32();
+ {var i: usize = 0; while (i < 1000; i += 1) {
+ const val = r.float(f32);
if (!(val >= 0.0)) unreachable{};
if (!(val < 1.0)) unreachable{};
}}
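Note on range_unsigned above: it avoids modulo bias by rejection, computing upper_bound = @max_value(T) - (@max_value(T) % range) (a multiple of range) and redrawing until the raw value falls below it, so raw % range is evenly distributed. A C++ sketch of the same scheme for 64-bit values, not part of the commit:

#include <cstdint>
#include <limits>
#include <random>

// Uniform value in [start, end), end exclusive, using the same rejection
// scheme as Rand.range_unsigned.
static uint64_t range_unsigned(std::mt19937_64 &rng, uint64_t start, uint64_t end) {
    const uint64_t range = end - start;
    const uint64_t max = std::numeric_limits<uint64_t>::max();
    const uint64_t upper_bound = max - (max % range);   // a multiple of range
    for (;;) {
        const uint64_t raw = rng();                      // uniform over [0, max]
        if (raw < upper_bound)
            return start + (raw % range);
    }
}

The precision constants in the new float() are 2^24 (16777216) for f32 and 2^53 (9007199254740992) for f64, the points at which each type stops representing every integer exactly, so dividing a uniform integer in [0, precision) by precision gives an evenly spaced result in [0.0, 1.0).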
test/run_tests.cpp
@@ -1427,6 +1427,12 @@ export inline fn foo(x: i32, y: i32) -> i32{
)SOURCE", 1, ".tmp_source.zig:2:1: error: extern functions cannot be inline");
*/
+ add_compile_fail_case("convert fixed size array to slice with invalid size", R"SOURCE(
+fn f() {
+ var array: [5]u8 = undefined;
+ var foo = ([]u32)(array)[0];
+}
+ )SOURCE", 1, ".tmp_source.zig:4:22: error: unable to convert [5]u8 to []u32: size mismatch");
}
//////////////////////////////////////////////////////////////////////////////