Compare commits


2 Commits

SHA1        Message                                  Date
5b940c9e44  feat: minimal, single digit, int lexer   2024-11-16 06:42:26 -05:00
d8df211f46  feat: read user input                    2024-11-16 06:02:56 -05:00
2 changed files with 66 additions and 3 deletions

src/01_lexic/root.zig (new file, +52)

@@ -0,0 +1,52 @@
const std = @import("std");
const t = std.testing;
const TokenType = enum {
Int,
Float,
};
const Token = struct {
value: []const u8,
token_type: TokenType,
start_pos: usize,
pub fn init(value: []const u8, token_type: TokenType, start: usize) Token {
return Token{
.value = value,
.token_type = token_type,
.start_pos = start,
};
}
};
pub fn tokenize(input: []const u8) !void {
const next_token = try number(input, 0);
_ = next_token;
std.debug.print("tokenize :D {s}\n", .{input});
}
/// Tries to lex a single-digit integer token at `start`.
/// Returns null when there is nothing to match.
fn number(input: []const u8, start: usize) !?Token {
    // Guard against an empty line or a start index past the end of the input.
    if (start >= input.len) {
        return null;
    }
    const first_char = input[start];
    if (!is_digit(first_char)) {
        return null;
    }
    return Token.init(input[start .. start + 1], TokenType.Int, start);
}
fn is_digit(c: u8) bool {
return '0' <= c and c <= '9';
}
test "number lexer" {
const input = "3";
const result = try number(input, 0);
if (result) |r| {
try std.testing.expectEqual("3", r.value);
} else {
try std.testing.expect(false);
}
}
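
Not part of either commit: a minimal sketch of how `number` might later grow past single digits by consuming a whole run of them. The `scan_int` name and its standalone layout are assumptions for illustration; only the `is_digit` check is taken from the code above.

const std = @import("std");

fn is_digit(c: u8) bool {
    return '0' <= c and c <= '9';
}

// Hypothetical extension: scan a run of consecutive digits starting at `start`
// and return the matched slice, or null when the first character is not a digit.
fn scan_int(input: []const u8, start: usize) ?[]const u8 {
    if (start >= input.len or !is_digit(input[start])) return null;
    var end = start + 1;
    while (end < input.len and is_digit(input[end])) : (end += 1) {}
    return input[start..end];
}

test "scan_int consumes a full run of digits" {
    try std.testing.expectEqualStrings("123", scan_int("123+4", 0).?);
    try std.testing.expect(scan_int("abc", 0) == null);
}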

src/main.zig (+14, -3)

@@ -1,11 +1,10 @@
const std = @import("std");
const lexic = @import("./01_lexic/root.zig");
const thp_version: []const u8 = "0.0.0";
pub fn main() !void {
try repl();
// Prints to stderr (it's a shortcut based on `std.io.getStdErr()`)
std.debug.print("All your {s} are belong to us.\n", .{"codebase"});
}
fn repl() !void {
@@ -14,6 +13,18 @@ fn repl() !void {
const stdout = bw.writer();
try stdout.print("The THP REPL, v{s}\n", .{thp_version});
try stdout.print("Enter expressions to evaluate. Enter CTRL-D to exit.\n", .{});
try bw.flush();
const stdin = std.io.getStdIn().reader();
try stdout.print("\nthp => ", .{});
try bw.flush();
const bare_line = try stdin.readUntilDelimiterAlloc(std.heap.page_allocator, '\n', 8192);
defer std.heap.page_allocator.free(bare_line);
const line = std.mem.trim(u8, bare_line, "\r");
try lexic.tokenize(line);
try bw.flush();
}
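
One detail worth noting in the REPL code above: `readUntilDelimiterAlloc` splits on '\n', so a line typed on Windows still carries a trailing '\r', which `std.mem.trim` strips before the text reaches `lexic.tokenize`. A minimal standalone sketch of that behaviour (a test written for illustration, not part of the commit):

const std = @import("std");

test "trim drops the carriage return left over from CRLF input" {
    const bare_line = "3\r"; // what readUntilDelimiterAlloc yields for the input "3\r\n"
    const line = std.mem.trim(u8, bare_line, "\r");
    try std.testing.expectEqualStrings("3", line);
}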