const std = @import("std");
const expect = std.testing.expect;
const builtin = @import("builtin");
const native_arch = builtin.target.cpu.arch;
const assert = std.debug.assert;
// Over-aligned global: u8 normally has ABI alignment 1; align(4) raises it to 4.
var foo: u8 align(4) = 100;

test "global variable alignment" {
    // The requested alignment must be visible both through @typeInfo and in
    // the pointer type itself.
    comptime assert(@typeInfo(@TypeOf(&foo)).pointer.alignment == 4);
    comptime assert(@TypeOf(&foo) == *align(4) u8);
    {
        // Slicing a single-item pointer to the whole object preserves the
        // over-alignment in the resulting pointer-to-array type.
        const slice = @as(*align(4) [1]u8, &foo)[0..];
        comptime assert(@TypeOf(slice) == *align(4) [1]u8);
    }
}
test "large alignment of local constant" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
    // A local const with align(128) must actually land on a 128-byte boundary.
    const x: f32 align(128) = 12.34;
    try std.testing.expect(@intFromPtr(&x) % 128 == 0);
}
test "slicing array of length 1 can not assume runtime index is always zero" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
    var runtime_index: usize = 1;
    _ = &runtime_index; // keep the index runtime-known
    // Slicing from a runtime index must drop the align(4) guarantee: even
    // though the array has length 1, the compiler may not assume index 0,
    // so the result type is plain []u8.
    const slice = @as(*align(4) [1]u8, &foo)[runtime_index..];
    try expect(@TypeOf(slice) == []u8);
    try expect(slice.len == 0);
    // The (empty) slice points one past the 4-aligned array, so address - 1
    // has its low two bits clear.
    try expect(@as(u2, @truncate(@intFromPtr(slice.ptr) - 1)) == 0);
}
test "default alignment allows unspecified in type syntax" {
    // A pointer type written without an align annotation is the very same
    // type as one that spells out the ABI alignment explicitly.
    const Explicit = *align(@alignOf(u32)) u32;
    try expect(*u32 == Explicit);
}
test "implicitly decreasing pointer alignment" {
    // Pointers with alignment 4 and 8 both coerce implicitly down to the
    // align(1) parameters of addUnaligned.
    const a: u32 align(4) = 3;
    const b: u32 align(8) = 4;
    try expect(addUnaligned(&a, &b) == 7);
}
// Sums two u32 values through pointers that carry no alignment guarantee
// (align(1)); callers may pass pointers of any greater alignment.
fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
    const lhs = a.*;
    const rhs = b.*;
    return lhs + rhs;
}
test "@alignCast pointers" {
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    var x: u32 align(4) = 1;
    // expectsOnly1 receives the pointer as align(1) and @alignCasts it back
    // up to align(4) before incrementing through it.
    expectsOnly1(&x);
    try expect(x == 2);
}
// Accepts a minimally-aligned pointer and re-asserts align(4) via @alignCast
// before forwarding; safety-checked illegal behavior if it is not 4-aligned.
fn expectsOnly1(x: *align(1) u32) void {
    const realigned: *align(4) u32 = @alignCast(x);
    expects4(realigned);
}
// Increments the pointee through a pointer guaranteed to be 4-aligned.
fn expects4(x: *align(4) u32) void {
    const bumped = x.* + 1;
    x.* = bumped;
}
test "alignment of struct with pointer has same alignment as usize" {
    // A struct containing a pointer field must be at least pointer-aligned,
    // which is the same as the alignment of usize.
    const WithPointer = struct {
        a: i32,
        b: *i32,
    };
    try expect(@alignOf(WithPointer) == @alignOf(usize));
}
test "alignment and size of structs with 128-bit fields" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    const A = struct {
        x: u128,
    };
    const B = extern struct {
        x: u128,
        y: u8,
    };
    // Expected layout facts are per-architecture ABI properties.
    const expected = switch (builtin.cpu.arch) {
        // Targets where u128 has only 8-byte ABI alignment (but the extern
        // struct B is still padded to 16).
        .amdgcn,
        .arm,
        .armeb,
        .thumb,
        .thumbeb,
        .hexagon,
        .lanai,
        .mips,
        .mipsel,
        .powerpc,
        .powerpcle,
        .riscv32,
        .s390x,
        => .{
            .a_align = 8,
            .a_size = 16,
            .b_align = 16,
            .b_size = 32,
            .u128_align = 8,
            .u128_size = 16,
            .u129_align = 8,
            .u129_size = 24,
        },
        // Targets where u128 is naturally 16-byte aligned.
        .aarch64,
        .aarch64_be,
        .bpfel,
        .bpfeb,
        .mips64,
        .mips64el,
        .nvptx,
        .nvptx64,
        .powerpc64,
        .powerpc64le,
        .sparc,
        .sparc64,
        .riscv64,
        .wasm32,
        .wasm64,
        .x86,
        .x86_64,
        => .{
            .a_align = 16,
            .a_size = 16,
            .b_align = 16,
            .b_size = 32,
            .u128_align = 16,
            .u128_size = 16,
            .u129_align = 16,
            .u129_size = 32,
        },
        else => return error.SkipZigTest,
    };
    // The C backend raises struct alignment to at least 16 — presumably to
    // match max_align_t; see upstream for rationale.
    const min_struct_align = if (builtin.zig_backend == .stage2_c) 16 else 0;
    comptime {
        assert(@alignOf(A) == @max(expected.a_align, min_struct_align));
        assert(@sizeOf(A) == expected.a_size);
        assert(@alignOf(B) == @max(expected.b_align, min_struct_align));
        assert(@sizeOf(B) == expected.b_size);
        assert(@alignOf(u128) == expected.u128_align);
        assert(@sizeOf(u128) == expected.u128_size);
        assert(@alignOf(u129) == expected.u129_align);
        assert(@sizeOf(u129) == expected.u129_size);
    }
}
test "implicitly decreasing slice alignment" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    // Slices of align(4)/align(8) memory coerce implicitly to []align(1).
    const a: u32 align(4) = 3;
    const b: u32 align(8) = 4;
    try expect(addUnalignedSlice(@as(*const [1]u32, &a)[0..], @as(*const [1]u32, &b)[0..]) == 7);
}
// Adds the first elements of two slices whose elements carry no alignment
// guarantee (align(1)).
fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
    const first = a[0];
    const second = b[0];
    return first + second;
}
test "specifying alignment allows pointer cast" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    // align(4) on the byte array makes the @ptrCast to *u32 legal.
    try testBytesAlign(0x33);
}
// Fills a 4-aligned byte array with one repeated byte and reads it back as a
// u32 (expected to be 0x33333333; endianness does not matter because all four
// bytes are equal — the caller passes 0x33).
fn testBytesAlign(b: u8) !void {
    var bytes align(4) = [1]u8{b} ** 4;
    const word_ptr: *u32 = @ptrCast(&bytes[0]);
    try expect(word_ptr.* == 0x33333333);
}
test "@alignCast slices" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    var array align(4) = [_]u32{ 1, 1 };
    const slice = array[0..];
    // Round-trip: coerce the slice down to []align(1), then @alignCast it
    // back up to align(4) and mutate through it.
    sliceExpectsOnly1(slice);
    try expect(slice[0] == 2);
}
// Accepts a minimally-aligned slice and re-asserts align(4) via @alignCast
// before forwarding; safety-checked illegal behavior if it is not 4-aligned.
fn sliceExpectsOnly1(slice: []align(1) u32) void {
    const realigned: []align(4) u32 = @alignCast(slice);
    sliceExpects4(realigned);
}
// Increments the first element through a slice guaranteed to be 4-aligned.
fn sliceExpects4(slice: []align(4) u32) void {
    const bumped = slice[0] + 1;
    slice[0] = bumped;
}
test "return error union with 128-bit integer" {
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    // The u128 payload must survive being packaged in an error union.
    try expect(3 == try give());
}
// Returns a 128-bit integer wrapped in an error union; never fails.
fn give() anyerror!u128 {
    const value: u128 = 3;
    return value;
}
test "page aligned array on stack" {
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    // Large alignment value to make it hard to accidentally pass.
    var array align(0x1000) = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
    var number1: u8 align(16) = 42;
    var number2: u8 align(16) = 43;
    // Page alignment: low 12 address bits must all be zero.
    try expect(@intFromPtr(&array[0]) & 0xFFF == 0);
    try expect(array[3] == 4);
    // 16-byte alignment: low four address bits must be zero.
    try expect(@as(u4, @truncate(@intFromPtr(&number1))) == 0);
    try expect(@as(u4, @truncate(@intFromPtr(&number2))) == 0);
    // The over-aligned locals must still hold their values.
    try expect(number1 == 42);
    try expect(number2 == 43);
}
test "function alignment" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    // function alignment is a compile error on wasm
    if (native_arch.isWasm()) return error.SkipZigTest;

    const S = struct {
        // The alignment may be an arbitrary comptime expression.
        fn alignExpr() align(@sizeOf(usize) * 2) i32 {
            return 1234;
        }
        fn align1() align(1) void {}
        fn align4() align(4) void {}
    };
    // Alignment is part of the function *pointer* type (*align(N) const fn),
    // not of the function body type (fn () T).
    try expect(S.alignExpr() == 1234);
    try expect(@TypeOf(S.alignExpr) == fn () i32);
    try expect(@TypeOf(&S.alignExpr) == *align(@sizeOf(usize) * 2) const fn () i32);
    S.align1();
    try expect(@TypeOf(S.align1) == fn () void);
    try expect(@TypeOf(&S.align1) == *align(1) const fn () void);
    S.align4();
    try expect(@TypeOf(S.align4) == fn () void);
    try expect(@TypeOf(&S.align4) == *align(4) const fn () void);
}
test "implicitly decreasing fn alignment" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    // function alignment is a compile error on wasm
    if (native_arch.isWasm()) return error.SkipZigTest;

    // Pointers to align(8) and align(16) functions both coerce implicitly to
    // the *align(1) const fn parameter.
    try testImplicitlyDecreaseFnAlign(alignedSmall, 1234);
    try testImplicitlyDecreaseFnAlign(alignedBig, 5678);
}
// Calls through a minimally-aligned function pointer and checks the result.
fn testImplicitlyDecreaseFnAlign(ptr: *align(1) const fn () i32, answer: i32) !void {
    try expect(ptr() == answer);
}
// Over-aligned functions used to exercise implicit fn-pointer alignment decrease.
fn alignedSmall() align(8) i32 {
    return 1234;
}
fn alignedBig() align(16) i32 {
    return 5678;
}
test "@alignCast functions" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    // function alignment is a compile error on wasm
    if (native_arch.isWasm()) return error.SkipZigTest;
    if (native_arch.isThumb()) return error.SkipZigTest;
    // Coerce a pointer to an align(4) function down to align(1), then
    // @alignCast it back up and call through it.
    try expect(fnExpectsOnly1(simple4) == 0x19);
}
// Takes a minimally-aligned fn pointer and @alignCasts it back up to align(4).
fn fnExpectsOnly1(ptr: *align(1) const fn () i32) i32 {
    return fnExpects4(@alignCast(ptr));
}
// Calls through a function pointer that is guaranteed 4-aligned.
fn fnExpects4(ptr: *align(4) const fn () i32) i32 {
    return ptr();
}
// align(4) function used as the cast target in the tests above.
fn simple4() align(4) i32 {
    return 0x19;
}
test "runtime-known array index has best alignment possible" {
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    // take full advantage of over-alignment: with a comptime-known index the
    // element pointer's alignment is derived from (array align + offset).
    var array align(4) = [_]u8{ 1, 2, 3, 4 };
    comptime assert(@TypeOf(&array[0]) == *align(4) u8);
    comptime assert(@TypeOf(&array[1]) == *u8);
    comptime assert(@TypeOf(&array[2]) == *align(2) u8);
    comptime assert(@TypeOf(&array[3]) == *u8);
    // because align is too small but we still figure out to use 2
    var bigger align(2) = [_]u64{ 1, 2, 3, 4 };
    comptime assert(@TypeOf(&bigger[0]) == *align(2) u64);
    comptime assert(@TypeOf(&bigger[1]) == *align(2) u64);
    comptime assert(@TypeOf(&bigger[2]) == *align(2) u64);
    comptime assert(@TypeOf(&bigger[3]) == *align(2) u64);
    // because pointer is align 2 and u32 align % 2 == 0 we can assume align 2
    var smaller align(2) = [_]u32{ 1, 2, 3, 4 };
    var runtime_zero: usize = 0;
    _ = &runtime_zero;
    comptime assert(@TypeOf(smaller[runtime_zero..]) == []align(2) u32);
    comptime assert(@TypeOf(smaller[runtime_zero..].ptr) == [*]align(2) u32);
    try testIndex(smaller[runtime_zero..].ptr, 0, *align(2) u32);
    try testIndex(smaller[runtime_zero..].ptr, 1, *align(2) u32);
    try testIndex(smaller[runtime_zero..].ptr, 2, *align(2) u32);
    try testIndex(smaller[runtime_zero..].ptr, 3, *align(2) u32);
    // has to use ABI alignment because index known at runtime only
    try testIndex2(&array, 0, *u8);
    try testIndex2(&array, 1, *u8);
    try testIndex2(&array, 2, *u8);
    try testIndex2(&array, 3, *u8);
}
// Checks the comptime-computed element-pointer type for a runtime index into
// a [*]align(2) u32 pointer.
fn testIndex(smaller: [*]align(2) u32, index: usize, comptime T: type) !void {
    comptime assert(@TypeOf(&smaller[index]) == T);
}
// Same, for a [*]align(4) u8 pointer: a runtime index forces fallback to the
// element's ABI alignment.
fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) !void {
    comptime assert(@TypeOf(&ptr[index]) == T);
}
test "alignment of function with c calling convention" {
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    const a = @alignOf(@TypeOf(nothing));
    var runtime_nothing = &nothing;
    _ = &runtime_nothing; // keep the fn pointer runtime-known
    // Round-trip the fn pointer through *align(a) const u8 and back, then call it.
    const casted1: *align(a) const u8 = @ptrCast(runtime_nothing);
    const casted2: *const fn () callconv(.c) void = @ptrCast(casted1);
    casted2();
}
// No-op with the C calling convention; target of the fn-pointer cast tests.
fn nothing() callconv(.c) void {}
// Struct with default (unspecified) layout; the i128 field gives it high
// natural alignment, exercised by the two "read 128-bit field" tests below.
const DefaultAligned = struct {
    nevermind: u32,
    badguy: i128,
};
test "read 128-bit field from default aligned struct in stack memory" {
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    var default_aligned = DefaultAligned{
        .nevermind = 1,
        .badguy = 12,
    };
    _ = &default_aligned; // keep the value runtime-known
    try expect(12 == default_aligned.badguy);
}
// Global instance of DefaultAligned for the global-memory variant of the test.
var default_aligned_global = DefaultAligned{
    .nevermind = 1,
    .badguy = 12,
};
test "read 128-bit field from default aligned struct in global memory" {
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    // Same as the stack test, but the struct lives in global memory.
    try expect(12 == default_aligned_global.badguy);
}
test "struct field explicit alignment" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
    const S = struct {
        const Node = struct {
            next: *Node,
            massive_byte: u8 align(64),
        };
    };
    var node: S.Node = undefined;
    node.massive_byte = 100;
    try expect(node.massive_byte == 100);
    // A field's explicit alignment propagates into the field-pointer type
    // and into the actual address of the field.
    comptime assert(@TypeOf(&node.massive_byte) == *align(64) u8);
    try expect(@intFromPtr(&node.massive_byte) % 64 == 0);
}
test "align(@alignOf(T)) T does not force resolution of T" {
    // NOTE(review): unconditionally skipped; the body uses async/suspend/
    // @Frame, which are not available in current self-hosted Zig.
    if (true) return error.SkipZigTest; // TODO
    const S = struct {
        const A = struct {
            // Self-referential alignment expression must not force A's resolution.
            a: *align(@alignOf(A)) A,
        };
        fn doTheTest() void {
            suspend {
                resume @frame();
            }
            _ = bar(@Frame(doTheTest));
        }
        fn bar(comptime T: type) *align(@alignOf(T)) T {
            ok = true;
            return undefined;
        }
        var ok = false;
    };
    _ = async S.doTheTest();
    try expect(S.ok);
}
test "align(N) on functions" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) {
        // https://github.com/ziglang/zig/issues/16845
        return error.SkipZigTest;
    }
    // NOTE(review): unreachable while the unconditional stage2_c skip above
    // remains; kept for when that skip is lifted.
    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) {
        // This is not supported on MSVC.
        return error.SkipZigTest;
    }

    // function alignment is a compile error on wasm
    if (native_arch.isWasm()) return error.SkipZigTest;
    if (native_arch.isThumb()) return error.SkipZigTest;
    // The page-aligned function's address must have its low 12 bits clear.
    try expect((@intFromPtr(&overaligned_fn) & (0x1000 - 1)) == 0);
}
// Page-aligned (0x1000) function; its code address is checked by
// "align(N) on functions".
fn overaligned_fn() align(0x1000) i32 {
    return 42;
}
test "comptime alloc alignment" {
    // TODO: it's impossible to test this in Zig today, since comptime vars do not have runtime addresses.
    if (true) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
    comptime var bytes1 = [_]u8{0};
    _ = &bytes1;
    // The align(256) comptime allocation should land on a 256-byte boundary.
    comptime var bytes2 align(256) = [_]u8{0};
    const bytes2_addr = @intFromPtr(&bytes2);
    try expect(bytes2_addr & 0xff == 0);
}
test "@alignCast null" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    // A null optional pointer must pass @alignCast untouched: there is no
    // address for the safety check to inspect.
    var opt_ptr: ?*anyopaque = null;
    _ = &opt_ptr; // keep it runtime-known
    const recast: ?*anyopaque = @alignCast(opt_ptr);
    try expect(recast == null);
}
test "alignment of slice element" {
    // Taking the address of element 0 of an over-aligned slice yields a
    // pointer type that keeps the slice's alignment.
    const bytes: []align(1024) const u8 = undefined;
    try expect(@TypeOf(&bytes[0]) == *align(1024) const u8);
}
test "sub-aligned pointer field access" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    // Originally reported at https://github.com/ziglang/zig/issues/14904
    const Header = extern struct {
        tag: u32,
        bytes_len: u32,
    };
    // Overlay the Header at an odd (1-byte-aligned) offset into the buffer.
    var buf: [9]u8 align(4) = .{ 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    const ptr: *align(1) Header = @ptrCast(buf[1..][0..8]);
    const x = ptr.bytes_len;
    // bytes_len overlays buf[5..9] = { 6, 7, 8, 9 }; the value read depends
    // on the target's byte order.
    switch (builtin.cpu.arch.endian()) {
        .big => try expect(x == 0x06070809),
        .little => try expect(x == 0x09080706),
    }
}
test "alignment of zero-bit types is respected" {
    if (true) return error.SkipZigTest; // TODO
    const S = struct { arr: [0]usize = .{} };
    // Zero-bit types still carry an alignment that addresses must honor.
    comptime assert(@alignOf(void) == 1);
    comptime assert(@alignOf(u0) == 1);
    comptime assert(@alignOf([0]usize) == @alignOf(usize));
    comptime assert(@alignOf(S) == @alignOf(usize));
    var s: S = .{};
    var v32: void align(32) = {};
    var x32: u0 align(32) = 0;
    var s32: S align(32) = .{};
    var zero: usize = 0;
    _ = &zero;
    try expect(@intFromPtr(&s) % @alignOf(usize) == 0);
    try expect(@intFromPtr(&s.arr) % @alignOf(usize) == 0);
    try expect(@intFromPtr(s.arr[zero..zero].ptr) % @alignOf(usize) == 0);
    // Explicit align(32) on zero-bit locals must also be honored.
    try expect(@intFromPtr(&v32) % 32 == 0);
    try expect(@intFromPtr(&x32) % 32 == 0);
    try expect(@intFromPtr(&s32) % 32 == 0);
    try expect(@intFromPtr(&s32.arr) % 32 == 0);
    try expect(@intFromPtr(s32.arr[zero..zero].ptr) % 32 == 0);
}
test "zero-bit fields in extern struct pad fields appropriately" {
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    const S = extern struct {
        x: u8,
        a: [0]u16 = .{},
        y: u8,
    };
    // `a` should give `S` alignment 2, and pad the `arr` field.
    comptime assert(@alignOf(S) == 2);
    comptime assert(@sizeOf(S) == 4);
    comptime assert(@offsetOf(S, "x") == 0);
    // The zero-bit field sits at offset 2, sharing its address with `y`.
    comptime assert(@offsetOf(S, "a") == 2);
    comptime assert(@offsetOf(S, "y") == 2);
    var s: S = .{ .x = 100, .y = 200 };
    try expect(@intFromPtr(&s) % 2 == 0);
    try expect(@intFromPtr(&s.y) - @intFromPtr(&s.x) == 2);
    try expect(@intFromPtr(&s.y) == @intFromPtr(&s.a));
    // @fieldParentPtr must recover the struct from the zero-bit field pointer.
    try expect(@as(*S, @fieldParentPtr("a", &s.a)) == &s);
}
test "function pointer @intFromPtr/@ptrFromInt roundtrip" {
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    // This only succeeds on Thumb if we handle the Thumb bit correctly; if not, the `@ptrFromInt`
    // will incorrectly trip an alignment safety check.
    const nothing_ptr: *const fn () callconv(.c) void = &nothing;
    const nothing_int: usize = @intFromPtr(nothing_ptr);
    const nothing_ptr2: *const fn () callconv(.c) void = @ptrFromInt(nothing_int);
    // The round-tripped pointer must compare equal to the original.
    try std.testing.expectEqual(nothing_ptr, nothing_ptr2);
}
test "function pointer align mask" {
    // Only meaningful on ARM/MIPS targets, where low bits of a code address
    // carry mode information rather than contributing to alignment.
    if (!(builtin.cpu.arch.isArm() or builtin.cpu.arch.isMIPS())) return error.SkipZigTest;
    // Address with a set low bit (e.g. the Thumb bit) must still pass the
    // @alignCast safety check once the mode bits are masked.
    const a: *const fn () callconv(.c) void = @ptrFromInt(0x20202021);
    _ = &a;
    const b: *align(16) const fn () callconv(.c) void = @alignCast(a);
    _ = &b;
}