Commit e53b683bd3

Andrew Kelley <superjoe30@gmail.com>
2018-06-05 04:11:14
Pointer Reform: proper slicing and indexing (#1053)
* Enable slicing for single-item pointers to arrays. * Disable slicing for other single-item pointers. * Enable indexing for single-item pointers to arrays. * Disable indexing for other single-item pointers. See #770; closes #386.
1 parent 32e0dfd
doc/langref.html.in
@@ -1565,7 +1565,7 @@ var foo: u8 align(4) = 100;
 test "global variable alignment" {
     assert(@typeOf(&foo).alignment == 4);
     assert(@typeOf(&foo) == *align(4) u8);
-    const slice = (&foo)[0..1];
+    const slice = (*[1]u8)(&foo)[0..];
     assert(@typeOf(slice) == []align(4) u8);
 }
 
@@ -1671,7 +1671,7 @@ test "using slices for strings" {
 
 test "slice pointer" {
     var array: [10]u8 = undefined;
-    const ptr = &array[0];
+    const ptr = &array;
 
     // You can use slicing syntax to convert a pointer into a slice:
     const slice = ptr[0..5];
@@ -6004,9 +6004,12 @@ const c = @cImport({
       {#code_begin|syntax#}
 const base64 = @import("std").base64;
 
-export fn decode_base_64(dest_ptr: *u8, dest_len: usize,
-    source_ptr: *const u8, source_len: usize) usize
-{
+export fn decode_base_64(
+    dest_ptr: [*]u8,
+    dest_len: usize,
+    source_ptr: [*]const u8,
+    source_len: usize,
+) usize {
     const src = source_ptr[0..source_len];
     const dest = dest_ptr[0..dest_len];
     const base64_decoder = base64.standard_decoder_unsafe;
example/mix_o_files/base64.zig
@@ -1,6 +1,6 @@
 const base64 = @import("std").base64;
 
-export fn decode_base_64(dest_ptr: *u8, dest_len: usize, source_ptr: *const u8, source_len: usize) usize {
+export fn decode_base_64(dest_ptr: [*]u8, dest_len: usize, source_ptr: [*]const u8, source_len: usize) usize {
     const src = source_ptr[0..source_len];
     const dest = dest_ptr[0..dest_len];
     const base64_decoder = base64.standard_decoder_unsafe;
src/all_types.hpp
@@ -83,6 +83,7 @@ enum ConstParentId {
     ConstParentIdStruct,
     ConstParentIdArray,
     ConstParentIdUnion,
+    ConstParentIdScalar,
 };
 
 struct ConstParent {
@@ -100,6 +101,9 @@ struct ConstParent {
         struct {
             ConstExprValue *union_val;
         } p_union;
+        struct {
+            ConstExprValue *scalar_val;
+        } p_scalar;
     } data;
 };
 
@@ -578,6 +582,7 @@ enum CastOp {
     CastOpBytesToSlice,
     CastOpNumLitToConcrete,
     CastOpErrSet,
+    CastOpBitCast,
 };
 
 struct AstNodeFnCallExpr {
src/analyze.cpp
@@ -5158,7 +5158,8 @@ void init_const_slice(CodeGen *g, ConstExprValue *const_val, ConstExprValue *arr
     const_val->type = get_slice_type(g, ptr_type);
     const_val->data.x_struct.fields = create_const_vals(2);
 
-    init_const_ptr_array(g, &const_val->data.x_struct.fields[slice_ptr_index], array_val, start, is_const);
+    init_const_ptr_array(g, &const_val->data.x_struct.fields[slice_ptr_index], array_val, start, is_const,
+            PtrLenUnknown);
     init_const_usize(g, &const_val->data.x_struct.fields[slice_len_index], len);
 }
 
@@ -5169,21 +5170,24 @@ ConstExprValue *create_const_slice(CodeGen *g, ConstExprValue *array_val, size_t
 }
 
 void init_const_ptr_array(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val,
-        size_t elem_index, bool is_const)
+        size_t elem_index, bool is_const, PtrLen ptr_len)
 {
     assert(array_val->type->id == TypeTableEntryIdArray);
     TypeTableEntry *child_type = array_val->type->data.array.child_type;
 
     const_val->special = ConstValSpecialStatic;
-    const_val->type = get_pointer_to_type(g, child_type, is_const);
+    const_val->type = get_pointer_to_type_extra(g, child_type, is_const, false,
+            ptr_len, get_abi_alignment(g, child_type), 0, 0);
     const_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
     const_val->data.x_ptr.data.base_array.array_val = array_val;
     const_val->data.x_ptr.data.base_array.elem_index = elem_index;
 }
 
-ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const) {
+ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const,
+        PtrLen ptr_len)
+{
     ConstExprValue *const_val = create_const_vals(1);
-    init_const_ptr_array(g, const_val, array_val, elem_index, is_const);
+    init_const_ptr_array(g, const_val, array_val, elem_index, is_const, ptr_len);
     return const_val;
 }
 
src/analyze.hpp
@@ -152,8 +152,9 @@ ConstExprValue *create_const_ptr_hard_coded_addr(CodeGen *g, TypeTableEntry *poi
         size_t addr, bool is_const);
 
 void init_const_ptr_array(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val,
-        size_t elem_index, bool is_const);
-ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const);
+        size_t elem_index, bool is_const, PtrLen ptr_len);
+ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index,
+        bool is_const, PtrLen ptr_len);
 
 void init_const_slice(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val,
         size_t start, size_t len, bool is_const);
src/codegen.cpp
@@ -2574,6 +2574,8 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
                 add_error_range_check(g, wanted_type, g->err_tag_type, expr_val);
             }
             return expr_val;
+        case CastOpBitCast:
+            return LLVMBuildBitCast(g->builder, expr_val, wanted_type->type_ref, "");
     }
     zig_unreachable();
 }
@@ -2884,7 +2886,13 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI
 
     bool safety_check_on = ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on;
 
-    if (array_type->id == TypeTableEntryIdArray) {
+    if (array_type->id == TypeTableEntryIdArray ||
+        (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle))
+    {
+        if (array_type->id == TypeTableEntryIdPointer) {
+            assert(array_type->data.pointer.child_type->id == TypeTableEntryIdArray);
+            array_type = array_type->data.pointer.child_type;
+        }
         if (safety_check_on) {
             LLVMValueRef end = LLVMConstInt(g->builtin_types.entry_usize->type_ref,
                     array_type->data.array.len, false);
@@ -3794,7 +3802,12 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
 
     bool want_runtime_safety = instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base);
 
-    if (array_type->id == TypeTableEntryIdArray) {
+    if (array_type->id == TypeTableEntryIdArray ||
+        (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle))
+    {
+        if (array_type->id == TypeTableEntryIdPointer) {
+            array_type = array_type->data.pointer.child_type;
+        }
         LLVMValueRef start_val = ir_llvm_value(g, instruction->start);
         LLVMValueRef end_val;
         if (instruction->end) {
@@ -3835,6 +3848,7 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
 
         return tmp_struct_ptr;
     } else if (array_type->id == TypeTableEntryIdPointer) {
+        assert(array_type->data.pointer.ptr_len == PtrLenUnknown);
         LLVMValueRef start_val = ir_llvm_value(g, instruction->start);
         LLVMValueRef end_val = ir_llvm_value(g, instruction->end);
 
@@ -4812,7 +4826,7 @@ static void ir_render(CodeGen *g, FnTableEntry *fn_entry) {
 
 static LLVMValueRef gen_const_ptr_struct_recursive(CodeGen *g, ConstExprValue *struct_const_val, size_t field_index);
 static LLVMValueRef gen_const_ptr_array_recursive(CodeGen *g, ConstExprValue *array_const_val, size_t index);
-static LLVMValueRef gen_const_ptr_union_recursive(CodeGen *g, ConstExprValue *array_const_val);
+static LLVMValueRef gen_const_ptr_union_recursive(CodeGen *g, ConstExprValue *union_const_val);
 
 static LLVMValueRef gen_parent_ptr(CodeGen *g, ConstExprValue *val, ConstParent *parent) {
     switch (parent->id) {
@@ -4828,6 +4842,10 @@ static LLVMValueRef gen_parent_ptr(CodeGen *g, ConstExprValue *val, ConstParent
                     parent->data.p_array.elem_index);
         case ConstParentIdUnion:
             return gen_const_ptr_union_recursive(g, parent->data.p_union.union_val);
+        case ConstParentIdScalar:
+            render_const_val(g, parent->data.p_scalar.scalar_val, "");
+            render_const_val_global(g, parent->data.p_scalar.scalar_val, "");
+            return parent->data.p_scalar.scalar_val->global_refs->llvm_global;
     }
     zig_unreachable();
 }
@@ -4853,7 +4871,8 @@ static LLVMValueRef gen_const_ptr_array_recursive(CodeGen *g, ConstExprValue *ar
         };
         return LLVMConstInBoundsGEP(base_ptr, indices, 2);
     } else {
-        zig_unreachable();
+        assert(parent->id == ConstParentIdScalar);
+        return base_ptr;
     }
 }
 
src/ir.cpp
@@ -107,6 +107,7 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_
 static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, VariableTableEntry *var);
 static TypeTableEntry *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op);
 static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval);
+static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, uint32_t new_align);
 
 ConstExprValue *const_ptr_pointee(CodeGen *g, ConstExprValue *const_val) {
     assert(const_val->type->id == TypeTableEntryIdPointer);
@@ -6849,7 +6850,11 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
         IrInstruction *free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr);
         IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
         IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle);
-        IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_mem_ptr_maybe);
+        IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node,
+                get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8,
+                    false, false, PtrLenUnknown, get_abi_alignment(irb->codegen, irb->codegen->builtin_types.entry_u8),
+                    0, 0));
+        IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type_unknown_len, coro_mem_ptr_maybe);
         IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false);
         IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var);
         IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr);
@@ -8729,6 +8734,7 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
         case CastOpNoCast:
             zig_unreachable();
         case CastOpErrSet:
+        case CastOpBitCast:
             zig_panic("TODO");
         case CastOpNoop:
             {
@@ -9750,6 +9756,49 @@ static IrInstruction *ir_analyze_err_to_int(IrAnalyze *ira, IrInstruction *sourc
     return result;
 }
 
+static IrInstruction *ir_analyze_ptr_to_array(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *target,
+        TypeTableEntry *wanted_type)
+{
+    assert(wanted_type->id == TypeTableEntryIdPointer);
+    wanted_type = adjust_ptr_align(ira->codegen, wanted_type, target->value.type->data.pointer.alignment);
+    TypeTableEntry *array_type = wanted_type->data.pointer.child_type;
+    assert(array_type->id == TypeTableEntryIdArray);
+    assert(array_type->data.array.len == 1);
+
+    if (instr_is_comptime(target)) {
+        ConstExprValue *val = ir_resolve_const(ira, target, UndefBad);
+        if (!val)
+            return ira->codegen->invalid_instruction;
+
+        assert(val->type->id == TypeTableEntryIdPointer);
+        ConstExprValue *pointee = const_ptr_pointee(ira->codegen, val);
+        if (pointee->special != ConstValSpecialRuntime) {
+            ConstExprValue *array_val = create_const_vals(1);
+            array_val->special = ConstValSpecialStatic;
+            array_val->type = array_type;
+            array_val->data.x_array.special = ConstArraySpecialNone;
+            array_val->data.x_array.s_none.elements = pointee;
+            array_val->data.x_array.s_none.parent.id = ConstParentIdScalar;
+            array_val->data.x_array.s_none.parent.data.p_scalar.scalar_val = pointee;
+
+            IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb,
+                    source_instr->scope, source_instr->source_node);
+            const_instruction->base.value.type = wanted_type;
+            const_instruction->base.value.special = ConstValSpecialStatic;
+            const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialRef;
+            const_instruction->base.value.data.x_ptr.data.ref.pointee = array_val;
+            const_instruction->base.value.data.x_ptr.mut = val->data.x_ptr.mut;
+            return &const_instruction->base;
+        }
+    }
+
+    // pointer to array and pointer to single item are represented the same way at runtime
+    IrInstruction *result = ir_build_cast(&ira->new_irb, target->scope, target->source_node,
+            wanted_type, target, CastOpBitCast);
+    result->value.type = wanted_type;
+    return result;
+}
+
 static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_instr,
     TypeTableEntry *wanted_type, IrInstruction *value)
 {
@@ -10156,6 +10205,30 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
         }
     }
 
+    // explicit cast from *T to *[1]T
+    if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+        actual_type->id == TypeTableEntryIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle)
+    {
+        TypeTableEntry *array_type = wanted_type->data.pointer.child_type;
+        if (array_type->id == TypeTableEntryIdArray && array_type->data.array.len == 1 &&
+            types_match_const_cast_only(ira, array_type->data.array.child_type,
+            actual_type->data.pointer.child_type, source_node).id == ConstCastResultIdOk)
+        {
+            if (wanted_type->data.pointer.alignment > actual_type->data.pointer.alignment) {
+                ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment"));
+                add_error_note(ira->codegen, msg, value->source_node,
+                        buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&actual_type->name),
+                            actual_type->data.pointer.alignment));
+                add_error_note(ira->codegen, msg, source_instr->source_node,
+                        buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&wanted_type->name),
+                            wanted_type->data.pointer.alignment));
+                return ira->codegen->invalid_instruction;
+            }
+            return ir_analyze_ptr_to_array(ira, source_instr, value, wanted_type);
+        }
+    }
+
+
     // explicit cast from undefined to anything
     if (actual_type->id == TypeTableEntryIdUndefLit) {
         return ir_analyze_undefined_to_anything(ira, source_instr, value, wanted_type);
@@ -13162,11 +13235,13 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
     if (type_is_invalid(array_ptr->value.type))
         return ira->codegen->builtin_types.entry_invalid;
 
+    ConstExprValue *orig_array_ptr_val = &array_ptr->value;
+
     IrInstruction *elem_index = elem_ptr_instruction->elem_index->other;
     if (type_is_invalid(elem_index->value.type))
         return ira->codegen->builtin_types.entry_invalid;
 
-    TypeTableEntry *ptr_type = array_ptr->value.type;
+    TypeTableEntry *ptr_type = orig_array_ptr_val->type;
     assert(ptr_type->id == TypeTableEntryIdPointer);
 
     TypeTableEntry *array_type = ptr_type->data.pointer.child_type;
@@ -13177,7 +13252,18 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
 
     if (type_is_invalid(array_type)) {
         return array_type;
-    } else if (array_type->id == TypeTableEntryIdArray) {
+    } else if (array_type->id == TypeTableEntryIdArray ||
+        (array_type->id == TypeTableEntryIdPointer &&
+         array_type->data.pointer.ptr_len == PtrLenSingle &&
+         array_type->data.pointer.child_type->id == TypeTableEntryIdArray))
+    {
+        if (array_type->id == TypeTableEntryIdPointer) {
+            array_type = array_type->data.pointer.child_type;
+            ptr_type = ptr_type->data.pointer.child_type;
+            if (orig_array_ptr_val->special != ConstValSpecialRuntime) {
+                orig_array_ptr_val = const_ptr_pointee(ira->codegen, orig_array_ptr_val);
+            }
+        }
         if (array_type->data.array.len == 0) {
             ir_add_error_node(ira, elem_ptr_instruction->base.source_node,
                     buf_sprintf("index 0 outside array of size 0"));
@@ -13205,7 +13291,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
     } else if (array_type->id == TypeTableEntryIdPointer) {
         if (array_type->data.pointer.ptr_len == PtrLenSingle) {
             ir_add_error_node(ira, elem_ptr_instruction->base.source_node,
-                    buf_sprintf("indexing not allowed on pointer to single item"));
+                    buf_sprintf("index of single-item pointer"));
             return ira->codegen->builtin_types.entry_invalid;
         }
         return_type = adjust_ptr_len(ira->codegen, array_type, elem_ptr_instruction->ptr_len);
@@ -13294,9 +13380,9 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
         }
 
         ConstExprValue *array_ptr_val;
-        if (array_ptr->value.special != ConstValSpecialRuntime &&
-            (array_ptr->value.data.x_ptr.mut != ConstPtrMutRuntimeVar || array_type->id == TypeTableEntryIdArray) &&
-            (array_ptr_val = const_ptr_pointee(ira->codegen, &array_ptr->value)) &&
+        if (orig_array_ptr_val->special != ConstValSpecialRuntime &&
+            (orig_array_ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar || array_type->id == TypeTableEntryIdArray) &&
+            (array_ptr_val = const_ptr_pointee(ira->codegen, orig_array_ptr_val)) &&
             array_ptr_val->special != ConstValSpecialRuntime &&
             (array_type->id != TypeTableEntryIdPointer ||
                 array_ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr))
@@ -13401,7 +13487,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
             } else if (array_type->id == TypeTableEntryIdArray) {
                 ConstExprValue *out_val = ir_build_const_from(ira, &elem_ptr_instruction->base);
                 out_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
-                out_val->data.x_ptr.mut = array_ptr->value.data.x_ptr.mut;
+                out_val->data.x_ptr.mut = orig_array_ptr_val->data.x_ptr.mut;
                 out_val->data.x_ptr.data.base_array.array_val = array_ptr_val;
                 out_val->data.x_ptr.data.base_array.elem_index = index;
                 return return_type;
@@ -17406,14 +17492,29 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio
             byte_alignment, 0, 0);
         return_type = get_slice_type(ira->codegen, slice_ptr_type);
     } else if (array_type->id == TypeTableEntryIdPointer) {
-        TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.pointer.child_type,
-                array_type->data.pointer.is_const, array_type->data.pointer.is_volatile,
-                PtrLenUnknown,
-                array_type->data.pointer.alignment, 0, 0);
-        return_type = get_slice_type(ira->codegen, slice_ptr_type);
-        if (!end) {
-            ir_add_error(ira, &instruction->base, buf_sprintf("slice of pointer must include end value"));
-            return ira->codegen->builtin_types.entry_invalid;
+        if (array_type->data.pointer.ptr_len == PtrLenSingle) {
+            TypeTableEntry *main_type = array_type->data.pointer.child_type;
+            if (main_type->id == TypeTableEntryIdArray) {
+                TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen,
+                        main_type->data.pointer.child_type,
+                        array_type->data.pointer.is_const, array_type->data.pointer.is_volatile,
+                        PtrLenUnknown,
+                        array_type->data.pointer.alignment, 0, 0);
+                return_type = get_slice_type(ira->codegen, slice_ptr_type);
+            } else {
+                ir_add_error(ira, &instruction->base, buf_sprintf("slice of single-item pointer"));
+                return ira->codegen->builtin_types.entry_invalid;
+            }
+        } else {
+            TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.pointer.child_type,
+                    array_type->data.pointer.is_const, array_type->data.pointer.is_volatile,
+                    PtrLenUnknown,
+                    array_type->data.pointer.alignment, 0, 0);
+            return_type = get_slice_type(ira->codegen, slice_ptr_type);
+            if (!end) {
+                ir_add_error(ira, &instruction->base, buf_sprintf("slice of pointer must include end value"));
+                return ira->codegen->builtin_types.entry_invalid;
+            }
         }
     } else if (is_slice(array_type)) {
         TypeTableEntry *ptr_type = array_type->data.structure.fields[slice_ptr_index].type_entry;
@@ -17433,12 +17534,24 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio
         size_t abs_offset;
         size_t rel_end;
         bool ptr_is_undef = false;
-        if (array_type->id == TypeTableEntryIdArray) {
-            array_val = const_ptr_pointee(ira->codegen, &ptr_ptr->value);
-            abs_offset = 0;
-            rel_end = array_type->data.array.len;
-            parent_ptr = nullptr;
+        if (array_type->id == TypeTableEntryIdArray ||
+            (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle))
+        {
+            if (array_type->id == TypeTableEntryIdPointer) {
+                TypeTableEntry *child_array_type = array_type->data.pointer.child_type;
+                assert(child_array_type->id == TypeTableEntryIdArray);
+                parent_ptr = const_ptr_pointee(ira->codegen, &ptr_ptr->value);
+                array_val = const_ptr_pointee(ira->codegen, parent_ptr);
+                rel_end = child_array_type->data.array.len;
+                abs_offset = 0;
+            } else {
+                array_val = const_ptr_pointee(ira->codegen, &ptr_ptr->value);
+                rel_end = array_type->data.array.len;
+                parent_ptr = nullptr;
+                abs_offset = 0;
+            }
         } else if (array_type->id == TypeTableEntryIdPointer) {
+            assert(array_type->data.pointer.ptr_len == PtrLenUnknown);
             parent_ptr = const_ptr_pointee(ira->codegen, &ptr_ptr->value);
             if (parent_ptr->special == ConstValSpecialUndef) {
                 array_val = nullptr;
@@ -17537,7 +17650,7 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio
         if (array_val) {
             size_t index = abs_offset + start_scalar;
             bool is_const = slice_is_const(return_type);
-            init_const_ptr_array(ira->codegen, ptr_val, array_val, index, is_const);
+            init_const_ptr_array(ira->codegen, ptr_val, array_val, index, is_const, PtrLenUnknown);
             if (array_type->id == TypeTableEntryIdArray) {
                 ptr_val->data.x_ptr.mut = ptr_ptr->value.data.x_ptr.mut;
             } else if (is_slice(array_type)) {
std/fmt/errol/index.zig
@@ -59,7 +59,7 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro
                 float_decimal.exp += 1;
 
                 // Re-size the buffer to use the reserved leading byte.
-                const one_before = @intToPtr(*u8, @ptrToInt(&float_decimal.digits[0]) - 1);
+                const one_before = @intToPtr([*]u8, @ptrToInt(&float_decimal.digits[0]) - 1);
                 float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1];
                 float_decimal.digits[0] = '1';
                 return;
std/fmt/index.zig
@@ -278,7 +278,7 @@ pub fn formatAsciiChar(
     comptime Errors: type,
     output: fn (@typeOf(context), []const u8) Errors!void,
 ) Errors!void {
-    return output(context, (&c)[0..1]);
+    return output(context, (*[1]u8)(&c)[0..]);
 }
 
 pub fn formatBuf(
@@ -603,7 +603,7 @@ fn formatIntSigned(
     const uint = @IntType(false, @typeOf(value).bit_count);
     if (value < 0) {
         const minus_sign: u8 = '-';
-        try output(context, (&minus_sign)[0..1]);
+        try output(context, (*[1]u8)(&minus_sign)[0..]);
         const new_value = uint(-(value + 1)) + 1;
         const new_width = if (width == 0) 0 else (width - 1);
         return formatIntUnsigned(new_value, base, uppercase, new_width, context, Errors, output);
@@ -611,7 +611,7 @@ fn formatIntSigned(
         return formatIntUnsigned(uint(value), base, uppercase, width, context, Errors, output);
     } else {
         const plus_sign: u8 = '+';
-        try output(context, (&plus_sign)[0..1]);
+        try output(context, (*[1]u8)(&plus_sign)[0..]);
         const new_value = uint(value);
         const new_width = if (width == 0) 0 else (width - 1);
         return formatIntUnsigned(new_value, base, uppercase, new_width, context, Errors, output);
@@ -648,7 +648,7 @@ fn formatIntUnsigned(
         const zero_byte: u8 = '0';
         var leftover_padding = padding - index;
         while (true) {
-            try output(context, (&zero_byte)[0..1]);
+            try output(context, (*[1]u8)(&zero_byte)[0..]);
             leftover_padding -= 1;
             if (leftover_padding == 0) break;
         }
std/os/index.zig
@@ -1240,7 +1240,7 @@ pub const Dir = struct {
             const next_index = self.index + darwin_entry.d_reclen;
             self.index = next_index;
 
-            const name = (&darwin_entry.d_name)[0..darwin_entry.d_namlen];
+            const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen];
 
             // skip . and .. entries
             if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
@@ -1704,7 +1704,7 @@ pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const []u8) void {
     for (args_alloc) |arg| {
         total_bytes += @sizeOf([]u8) + arg.len;
     }
-    const unaligned_allocated_buf = @ptrCast(*const u8, args_alloc.ptr)[0..total_bytes];
+    const unaligned_allocated_buf = @ptrCast([*]const u8, args_alloc.ptr)[0..total_bytes];
     const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf);
     return allocator.free(aligned_allocated_buf);
 }
std/heap.zig
@@ -24,7 +24,7 @@ fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 {
 fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
     const old_ptr = @ptrCast([*]c_void, old_mem.ptr);
     if (c.realloc(old_ptr, new_size)) |buf| {
-        return @ptrCast(*u8, buf)[0..new_size];
+        return @ptrCast([*]u8, buf)[0..new_size];
     } else if (new_size <= old_mem.len) {
         return old_mem[0..new_size];
     } else {
std/io.zig
@@ -219,12 +219,12 @@ pub fn OutStream(comptime WriteError: type) type {
         }
 
         pub fn writeByte(self: *Self, byte: u8) !void {
-            const slice = (&byte)[0..1];
+            const slice = (*[1]u8)(&byte)[0..];
             return self.writeFn(self, slice);
         }
 
         pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) !void {
-            const slice = (&byte)[0..1];
+            const slice = (*[1]u8)(&byte)[0..];
             var i: usize = 0;
             while (i < n) : (i += 1) {
                 try self.writeFn(self, slice);
std/macho.zig
@@ -164,7 +164,7 @@ fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void {
     return in.stream.readNoEof(([]u8)(result));
 }
 fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void {
-    return readNoEof(in, T, result[0..1]);
+    return readNoEof(in, T, (*[1]T)(result)[0..]);
 }
 
 fn isSymbol(sym: *const Nlist64) bool {
std/mem.zig
@@ -31,14 +31,16 @@ pub const Allocator = struct {
     /// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
     freeFn: fn (self: *Allocator, old_mem: []u8) void,
 
-    fn create(self: *Allocator, comptime T: type) !*T {
+    /// Call destroy with the result
+    pub fn create(self: *Allocator, comptime T: type) !*T {
         if (@sizeOf(T) == 0) return *{};
         const slice = try self.alloc(T, 1);
         return &slice[0];
     }
 
-    // TODO once #733 is solved, this will replace create
-    fn construct(self: *Allocator, init: var) t: {
+    /// Call destroy with the result
+    /// TODO once #733 is solved, this will replace create
+    pub fn construct(self: *Allocator, init: var) t: {
         // TODO this is a workaround for type getting parsed as Error!&const T
         const T = @typeOf(init).Child;
         break :t Error!*T;
@@ -51,17 +53,19 @@ pub const Allocator = struct {
         return ptr;
     }
 
-    fn destroy(self: *Allocator, ptr: var) void {
-        self.free(ptr[0..1]);
+    /// `ptr` should be the return value of `construct` or `create`
+    pub fn destroy(self: *Allocator, ptr: var) void {
+        const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
+        self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]);
     }
 
-    fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
+    pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
         return self.alignedAlloc(T, @alignOf(T), n);
     }
 
-    fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
+    pub fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
         if (n == 0) {
-            return (*align(alignment) T)(undefined)[0..0];
+            return ([*]align(alignment) T)(undefined)[0..0];
         }
         const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
         const byte_slice = try self.allocFn(self, byte_count, alignment);
@@ -73,17 +77,17 @@ pub const Allocator = struct {
         return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
     }
 
-    fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
+    pub fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
         return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
     }
 
-    fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
+    pub fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
         if (old_mem.len == 0) {
             return self.alloc(T, n);
         }
         if (n == 0) {
             self.free(old_mem);
-            return (*align(alignment) T)(undefined)[0..0];
+            return ([*]align(alignment) T)(undefined)[0..0];
         }
 
         const old_byte_slice = ([]u8)(old_mem);
@@ -102,11 +106,11 @@ pub const Allocator = struct {
     /// Reallocate, but `n` must be less than or equal to `old_mem.len`.
     /// Unlike `realloc`, this function cannot fail.
     /// Shrinking to 0 is the same as calling `free`.
-    fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T {
+    pub fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T {
         return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
     }
 
-    fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
+    pub fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
         if (n == 0) {
             self.free(old_mem);
             return old_mem[0..0];
@@ -123,10 +127,10 @@ pub const Allocator = struct {
         return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
     }
 
-    fn free(self: *Allocator, memory: var) void {
+    pub fn free(self: *Allocator, memory: var) void {
         const bytes = ([]const u8)(memory);
         if (bytes.len == 0) return;
-        const non_const_ptr = @intToPtr(*u8, @ptrToInt(bytes.ptr));
+        const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
         self.freeFn(self, non_const_ptr[0..bytes.len]);
     }
 };
std/net.zig
@@ -68,7 +68,7 @@ pub const Address = struct {
 
 pub fn parseIp4(buf: []const u8) !u32 {
     var result: u32 = undefined;
-    const out_ptr = ([]u8)((&result)[0..1]);
+    const out_ptr = ([]u8)((*[1]u32)(&result)[0..]);
 
     var x: u8 = 0;
     var index: u8 = 0;
test/cases/align.zig
@@ -6,7 +6,7 @@ var foo: u8 align(4) = 100;
 test "global variable alignment" {
     assert(@typeOf(&foo).alignment == 4);
     assert(@typeOf(&foo) == *align(4) u8);
-    const slice = (&foo)[0..1];
+    const slice = (*[1]u8)(&foo)[0..];
     assert(@typeOf(slice) == []align(4) u8);
 }
 
@@ -60,7 +60,7 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
 test "implicitly decreasing slice alignment" {
     const a: u32 align(4) = 3;
     const b: u32 align(8) = 4;
-    assert(addUnalignedSlice((&a)[0..1], (&b)[0..1]) == 7);
+    assert(addUnalignedSlice((*[1]u32)(&a)[0..], (*[1]u32)(&b)[0..]) == 7);
 }
 fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
     return a[0] + b[0];
test/cases/array.zig
@@ -115,3 +115,32 @@ test "array len property" {
     var x: [5]i32 = undefined;
     assert(@typeOf(x).len == 5);
 }
+
+test "single-item pointer to array indexing and slicing" {
+    testSingleItemPtrArrayIndexSlice();
+    comptime testSingleItemPtrArrayIndexSlice();
+}
+
+fn testSingleItemPtrArrayIndexSlice() void {
+    var array = "aaaa";
+    doSomeMangling(&array);
+    assert(mem.eql(u8, "azya", array));
+}
+
+fn doSomeMangling(array: *[4]u8) void {
+    array[1] = 'z';
+    array[2..3][0] = 'y';
+}
+
+test "implicit cast single-item pointer" {
+    testImplicitCastSingleItemPtr();
+    comptime testImplicitCastSingleItemPtr();
+}
+
+fn testImplicitCastSingleItemPtr() void {
+    var byte: u8 = 100;
+    const slice = (*[1]u8)(&byte)[0..];
+    slice[0] += 1;
+    assert(byte == 101);
+}
+
test/cases/eval.zig
@@ -418,9 +418,9 @@ test "string literal used as comptime slice is memoized" {
 }
 
 test "comptime slice of undefined pointer of length 0" {
-    const slice1 = (*i32)(undefined)[0..0];
+    const slice1 = ([*]i32)(undefined)[0..0];
     assert(slice1.len == 0);
-    const slice2 = (*i32)(undefined)[100..100];
+    const slice2 = ([*]i32)(undefined)[100..100];
     assert(slice2.len == 0);
 }
 
@@ -508,7 +508,7 @@ test "comptime slice of slice preserves comptime var" {
 test "comptime slice of pointer preserves comptime var" {
     comptime {
         var buff: [10]u8 = undefined;
-        var a = &buff[0];
+        var a = buff[0..].ptr;
         a[0..1][0] = 1;
         assert(buff[0..][0..][0] == 1);
     }
test/cases/misc.zig
@@ -274,7 +274,7 @@ test "generic malloc free" {
 }
 var some_mem: [100]u8 = undefined;
 fn memAlloc(comptime T: type, n: usize) error![]T {
-    return @ptrCast(*T, &some_mem[0])[0..n];
+    return @ptrCast([*]T, &some_mem[0])[0..n];
 }
 fn memFree(comptime T: type, memory: []T) void {}
 
@@ -588,7 +588,7 @@ var global_ptr = &gdt[0];
 
 // can't really run this test but we can make sure it has no compile error
 // and generates code
-const vram = @intToPtr(*volatile u8, 0x20000000)[0..0x8000];
+const vram = @intToPtr([*]volatile u8, 0x20000000)[0..0x8000];
 export fn writeToVRam() void {
     vram[0] = 'X';
 }
test/cases/slice.zig
@@ -1,7 +1,7 @@
 const assert = @import("std").debug.assert;
 const mem = @import("std").mem;
 
-const x = @intToPtr(*i32, 0x1000)[0..0x500];
+const x = @intToPtr([*]i32, 0x1000)[0..0x500];
 const y = x[0x100..];
 test "compile time slice of pointer to hard coded address" {
     assert(@ptrToInt(x.ptr) == 0x1000);
test/compile_errors.zig
@@ -1,13 +1,22 @@
 const tests = @import("tests.zig");
 
 pub fn addCases(cases: *tests.CompileErrorContext) void {
+    cases.add(
+        "slicing single-item pointer",
+        \\export fn entry(ptr: *i32) void {
+        \\    const slice = ptr[0..2];
+        \\}
+    ,
+        ".tmp_source.zig:2:22: error: slice of single-item pointer",
+    );
+
     cases.add(
         "indexing single-item pointer",
         \\export fn entry(ptr: *i32) i32 {
         \\    return ptr[1];
         \\}
     ,
-        ".tmp_source.zig:2:15: error: indexing not allowed on pointer to single item",
+        ".tmp_source.zig:2:15: error: index of single-item pointer",
     );
 
     cases.add(
@@ -144,10 +153,10 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
     cases.add(
         "comptime slice of undefined pointer non-zero len",
         \\export fn entry() void {
-        \\    const slice = (*i32)(undefined)[0..1];
+        \\    const slice = ([*]i32)(undefined)[0..1];
         \\}
     ,
-        ".tmp_source.zig:2:36: error: non-zero length slice of undefined pointer",
+        ".tmp_source.zig:2:38: error: non-zero length slice of undefined pointer",
     );
 
     cases.add(
@@ -3129,14 +3138,16 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
         \\export fn entry() void {
         \\    var foo = Foo { .a = 1, .b = 10 };
         \\    foo.b += 1;
-        \\    bar((&foo.b)[0..1]);
+        \\    bar((*[1]u32)(&foo.b)[0..]);
         \\}
         \\
         \\fn bar(x: []u32) void {
         \\    x[0] += 1;
         \\}
     ,
-        ".tmp_source.zig:9:17: error: expected type '[]u32', found '[]align(1) u32'",
+        ".tmp_source.zig:9:18: error: cast increases pointer alignment",
+        ".tmp_source.zig:9:23: note: '*align(1) u32' has alignment 1",
+        ".tmp_source.zig:9:18: note: '*[1]u32' has alignment 4",
     );
 
     cases.add(