Compare commits

No commits in common. "1bd463998cc9acdb60870cd2c274477f1fb8c712" and "882d123fe0c226f3040d510c96fb0e8832443fc1" have entirely different histories.

6 changed files with 15 additions and 89 deletions

File: project task list (path not shown)

@@ -24,13 +24,7 @@ Now in Zig!
 ## v0.0.1
-- [x] Lex numbers
-- [x] Lex identifier
-- [x] Lex datatypes
-- [x] Lex operators
-- [x] Lex single line comments
-- [x] Lex strings
-- [x] Lex grouping signs
+- [x] Lex integers & floating point numbers
 - [ ]

File: build.zig

@@ -101,7 +101,6 @@ pub fn build(b: *std.Build) void {
         "src/01_lexic/token.zig",
         "src/01_lexic/utils.zig",
         "src/01_lexic/grouping.zig",
-        "src/01_lexic/punctiation.zig",
     };
     for (files) |file| {
         const file_unit_test = b.addTest(.{

File: src/01_lexic/punctiation.zig (deleted)

@@ -1,60 +0,0 @@
-const std = @import("std");
-const assert = std.debug.assert;
-
-const token = @import("./token.zig");
-const utils = @import("./utils.zig");
-
-const Token = token.Token;
-const TokenType = token.TokenType;
-const LexError = token.LexError;
-const LexReturn = token.LexReturn;
-
-pub fn lex(input: []const u8, start: usize) LexError!?LexReturn {
-    // there should be at least 1 char
-    assert(start < input.len);
-
-    const c = input[start];
-    const token_type = switch (c) {
-        ',' => TokenType.Comma,
-        '\n' => TokenType.Newline,
-        else => {
-            return null;
-        },
-    };
-
-    return .{ Token.init(input[start .. start + 1], token_type, start), start + 1 };
-}
-
-test "shouldnt lex other things" {
-    const input = "322";
-    const output = try lex(input, 0);
-    try std.testing.expect(output == null);
-}
-
-test "should lex comma" {
-    const input = ",";
-    const output = try lex(input, 0);
-
-    if (output) |tuple| {
-        const t = tuple[0];
-        try std.testing.expectEqualDeep(",", t.value);
-        try std.testing.expectEqual(TokenType.Comma, t.token_type);
-        try std.testing.expectEqual(1, tuple[1]);
-    } else {
-        try std.testing.expect(false);
-    }
-}
-
-test "should lex new line" {
-    const input = "\n";
-    const output = try lex(input, 0);
-
-    if (output) |tuple| {
-        const t = tuple[0];
-        try std.testing.expectEqualDeep("\n", t.value);
-        try std.testing.expectEqual(TokenType.Newline, t.token_type);
-        try std.testing.expectEqual(1, tuple[1]);
-    } else {
-        try std.testing.expect(false);
-    }
-}
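
Each sub-lexer in `01_lexic` appears to share the contract this deleted file implements: take the input and a start index, return `null` on no match, or a tuple of the token and the index just past it. Here is a minimal, self-contained sketch of that convention; the `Token` and `TokenType` shapes are stand-ins inferred from the tests above, not the repo's actual `token.zig`:

const std = @import("std");

// Stand-ins for the shapes exported by token.zig (assumed, not verbatim).
const TokenType = enum { Comma, Newline };
const Token = struct {
    value: []const u8,
    token_type: TokenType,
    start: usize,
};
const LexReturn = struct { Token, usize };

// Same contract as the deleted module: null when nothing matches,
// otherwise the token plus the position just past it.
fn lex(input: []const u8, start: usize) ?LexReturn {
    const c = input[start];
    const token_type: TokenType = switch (c) {
        ',' => .Comma,
        '\n' => .Newline,
        else => return null,
    };
    return .{
        .{ .value = input[start .. start + 1], .token_type = token_type, .start = start },
        start + 1,
    };
}

test "comma token and next position" {
    const result = lex(",x", 0) orelse return error.NoToken;
    try std.testing.expectEqual(TokenType.Comma, result[0].token_type);
    try std.testing.expectEqual(@as(usize, 1), result[1]);
    try std.testing.expect(lex("3", 0) == null);
}

The tuple return lets the driver loop advance its cursor without re-scanning, which is why `tokenize` asserts `tuple[1] > current_pos` before accepting a match.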

File: lexic module (defines tokenize; path not shown)

@@ -8,18 +8,16 @@ const operator = @import("./operator.zig");
 const comment = @import("./comment.zig");
 const string = @import("./string.zig");
 const grouping = @import("./grouping.zig");
-const punctuation = @import("./punctiation.zig");
 
 const TokenType = token.TokenType;
 const Token = token.Token;
 
-// Creates an array list of tokens. The caller is responsible of
-// calling `deinit` to free the array list
-pub fn tokenize(input: []const u8, alloc: std.mem.Allocator) !std.ArrayList(Token) {
+pub fn tokenize(input: []const u8, alloc: std.mem.Allocator) !void {
     const input_len = input.len;
     var current_pos: usize = 0;
 
     var tokens = std.ArrayList(Token).init(alloc);
+    defer tokens.deinit();
 
     while (current_pos < input_len) {
         const actual_next_pos = ignore_whitespace(input, current_pos);
@@ -81,14 +79,6 @@ pub fn tokenize(input: []const u8, alloc: std.mem.Allocator) !std.ArrayList(Token) {
             current_pos = tuple[1];
             try tokens.append(t);
         }
-
-        // lex punctuation
-        else if (try punctuation.lex(input, actual_next_pos)) |tuple| {
-            assert(tuple[1] > current_pos);
-            const t = tuple[0];
-            current_pos = tuple[1];
-            try tokens.append(t);
-        }
         // nothing was matched. fail
         // TODO: instead of failing add an error, ignore all chars
         // until next whitespace, and continue lexing
@@ -99,8 +89,6 @@ pub fn tokenize(input: []const u8, alloc: std.mem.Allocator) !std.ArrayList(Token) {
             break;
         }
     }
-
-    return tokens;
 }
 
 /// Ignores all whitespace on `input` since `start`
@@ -120,12 +108,10 @@ pub fn ignore_whitespace(input: []const u8, start: usize) usize {
 
 test "should insert 1 item" {
     const input = "322";
-    const arrl = try tokenize(input, std.testing.allocator);
-    arrl.deinit();
+    try tokenize(input, std.testing.allocator);
 }
 
 test "should insert 2 item" {
     const input = "322 644";
-    const arrl = try tokenize(input, std.testing.allocator);
-    arrl.deinit();
+    try tokenize(input, std.testing.allocator);
 }
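
The net effect in this file: `tokenize` no longer hands the token list back to the caller; it builds the list, frees it with `defer`, and returns nothing. A hedged before/after sketch of the two ownership conventions (the `Token` type is a placeholder and the bodies are reduced to the allocation pattern):

const std = @import("std");

const Token = struct { value: []const u8 }; // placeholder token type

// Before: the caller owns the returned list and must call deinit.
fn tokenizeOwned(input: []const u8, alloc: std.mem.Allocator) !std.ArrayList(Token) {
    var tokens = std.ArrayList(Token).init(alloc);
    errdefer tokens.deinit(); // freed only if we fail before returning
    try tokens.append(.{ .value = input });
    return tokens;
}

// After: the list never escapes; it is freed on every exit path.
fn tokenizeVoid(input: []const u8, alloc: std.mem.Allocator) !void {
    var tokens = std.ArrayList(Token).init(alloc);
    defer tokens.deinit();
    try tokens.append(.{ .value = input });
}

test "both conventions are leak-free" {
    const owned = try tokenizeOwned("322", std.testing.allocator);
    defer owned.deinit();
    try tokenizeVoid("322", std.testing.allocator);
}

Since the `void` shape gives callers nothing to inspect, the updated tests above shrink to smoke tests that only verify tokenizing does not error or leak.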

File: src/01_lexic/token.zig

@@ -16,7 +16,8 @@ pub const TokenType = enum {
     // punctiation that carries special meaning
     Comma,
     Newline,
-    // Each keyword will have its own token
+    // Others
+    Keyword,
 };
 
 pub const Token = struct {
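
The swapped comment marks a design fork: one history planned a dedicated token type per keyword, while this side folds them into a single `Keyword` variant, presumably disambiguated by the token's lexeme. A sketch of the single-variant approach; the field names and the `fun` keyword are illustrative assumptions, not taken from the repo:

const std = @import("std");

const TokenType = enum { Comma, Newline, Keyword };

const Token = struct {
    value: []const u8,
    token_type: TokenType,
};

// With one Keyword variant, later stages match on the lexeme itself
// instead of on the enum tag.
fn isFunKeyword(t: Token) bool {
    return t.token_type == .Keyword and std.mem.eql(u8, t.value, "fun");
}

test "keyword disambiguated by lexeme" {
    const t = Token{ .value = "fun", .token_type = .Keyword };
    try std.testing.expect(isFunKeyword(t));
}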

File: main module (repl; path not shown)

@@ -27,8 +27,14 @@ fn repl() !void {
     var gpa = std.heap.GeneralPurposeAllocator(.{}){};
     const alloc = gpa.allocator();
 
-    const tokens = try lexic.tokenize(line, alloc);
-    defer tokens.deinit();
+    try lexic.tokenize(line, alloc);
 
     try bw.flush();
 }
+
+test "simple test" {
+    var list = std.ArrayList(i32).init(std.testing.allocator);
+    defer list.deinit(); // try commenting this out and see if zig detects the memory leak!
+    try list.append(42);
+    try std.testing.expectEqual(@as(i32, 42), list.pop());
+}
}