Commit 07b6a3d335
Changed files (1)
std
math
big
std/math/big/int.zig
@@ -115,13 +115,47 @@ pub const Int = struct {
return !r.isOdd();
}
- fn bitcount(self: Int) usize {
- const u_bit_count = (self.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(self.limbs[self.len - 1]));
- return usize(@boolToInt(!self.positive)) + u_bit_count;
+ // Returns the number of bits required to represent the absolute value of self.
+ fn bitCountAbs(self: Int) usize {
+ return (self.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(self.limbs[self.len - 1]));
}
+ // Returns the number of bits required to represent the integer in twos-complement form.
+ //
+ // If the integer is negative the value returned is the number of bits needed by a signed
+ // integer to represent the value. If positive the value is the number of bits for an
+ // unsigned integer. Any unsigned integer will fit in the signed integer with bitcount
+ // one greater than the returned value.
+ //
+ // e.g. -127 returns 8 as it will fit in an i8. 127 returns 7 since it fits in a u7.
+ fn bitCountTwosComp(self: Int) usize {
+ var bits = self.bitCountAbs();
+
+ // If the entire value has only one bit set (e.g. 0b100000000) then the negation in twos
+ // complement requires one less bit.
+ if (!self.positive) block: {
+ bits += 1;
+
+ if (@popCount(self.limbs[self.len - 1]) == 1) {
+ for (self.limbs[0 .. self.len - 1]) |limb| {
+ if (@popCount(limb) != 0) {
+ break :block;
+ }
+ }
+
+ bits -= 1;
+ }
+ }
+
+ return bits;
+ }
+
+ // Returns the approximate size of the integer in the given base. Negative values account for
+ // the minus sign. This is used for determining the number of characters needed to print the
+ // value. It is inexact and will exceed the given value by 1-2 digits.
pub fn sizeInBase(self: Int, base: usize) usize {
- return (self.bitcount() / math.log2(base)) + 1;
+ const bit_count = usize(@boolToInt(!self.positive)) + self.bitCountAbs();
+ return (bit_count / math.log2(base)) + 1;
}
pub fn set(self: *Int, value: var) Allocator.Error!void {
@@ -189,9 +223,9 @@ pub const Int = struct {
pub fn to(self: Int, comptime T: type) ConvertError!T {
switch (@typeId(T)) {
TypeId.Int => {
- const UT = if (T.is_signed) @IntType(false, T.bit_count - 1) else T;
+ const UT = @IntType(false, T.bit_count);
- if (self.bitcount() > 8 * @sizeOf(UT)) {
+ if (self.bitCountTwosComp() > T.bit_count) {
return error.TargetTooSmall;
}
@@ -208,9 +242,17 @@ pub const Int = struct {
}
if (!T.is_signed) {
- return if (self.positive) r else error.NegativeIntoUnsigned;
+ return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned;
} else {
- return if (self.positive) @intCast(T, r) else -@intCast(T, r);
+ if (self.positive) {
+ return @intCast(T, r);
+ } else {
+ if (math.cast(T, r)) |ok| {
+ return -ok;
+ } else |_| {
+ return @minValue(T);
+ }
+ }
}
},
else => {
@@ -1120,24 +1162,61 @@ test "big.int bitcount + sizeInBase" {
var a = try Int.init(al);
try a.set(0b100);
- debug.assert(a.bitcount() == 3);
+ debug.assert(a.bitCountAbs() == 3);
debug.assert(a.sizeInBase(2) >= 3);
debug.assert(a.sizeInBase(10) >= 1);
+ a.negate();
+ debug.assert(a.bitCountAbs() == 3);
+ debug.assert(a.sizeInBase(2) >= 4);
+ debug.assert(a.sizeInBase(10) >= 2);
+
try a.set(0xffffffff);
- debug.assert(a.bitcount() == 32);
+ debug.assert(a.bitCountAbs() == 32);
debug.assert(a.sizeInBase(2) >= 32);
debug.assert(a.sizeInBase(10) >= 10);
try a.shiftLeft(a, 5000);
- debug.assert(a.bitcount() == 5032);
+ debug.assert(a.bitCountAbs() == 5032);
debug.assert(a.sizeInBase(2) >= 5032);
a.positive = false;
- debug.assert(a.bitcount() == 5033);
+ debug.assert(a.bitCountAbs() == 5032);
debug.assert(a.sizeInBase(2) >= 5033);
}
+test "big.int bitcount/to" {
+ var a = try Int.init(al);
+
+ try a.set(0);
+ debug.assert(a.bitCountTwosComp() == 0);
+
+ // TODO: stack smashing
+ // debug.assert((try a.to(u0)) == 0);
+ // TODO: sigsegv
+ // debug.assert((try a.to(i0)) == 0);
+
+ try a.set(-1);
+ debug.assert(a.bitCountTwosComp() == 1);
+ debug.assert((try a.to(i1)) == -1);
+
+ try a.set(-8);
+ debug.assert(a.bitCountTwosComp() == 4);
+ debug.assert((try a.to(i4)) == -8);
+
+ try a.set(127);
+ debug.assert(a.bitCountTwosComp() == 7);
+ debug.assert((try a.to(u7)) == 127);
+
+ try a.set(-128);
+ debug.assert(a.bitCountTwosComp() == 8);
+ debug.assert((try a.to(i8)) == -128);
+
+ try a.set(-129);
+ debug.assert(a.bitCountTwosComp() == 9);
+ debug.assert((try a.to(i9)) == -129);
+}
+
test "big.int string set" {
var a = try Int.init(al);
try a.setString(10, "120317241209124781241290847124");