diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..c0688ccc6 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +github: onyx-lang diff --git a/.github/workflows/onyx-build.yml b/.github/workflows/onyx-build.yml index 416829f68..4abfb5327 100644 --- a/.github/workflows/onyx-build.yml +++ b/.github/workflows/onyx-build.yml @@ -16,7 +16,7 @@ on: jobs: setup: name: Set up - runs-on: ubuntu-20.04 + runs-on: ubuntu-24.04 outputs: VERSION: ${{ steps.setup.outputs.VERSION }} DOING_RELEASE: ${{ steps.setup.outputs.DOING_RELEASE }} @@ -41,53 +41,53 @@ jobs: matrix: include: - build: linux-amd64 - os: ubuntu-20.04 + os: ubuntu-24.04 runtime_library: ovmwasm artifact_name: 'onyx-linux-ovm-amd64' - build: linux-amd64 - os: ubuntu-20.04 + os: ubuntu-24.04 runtime_library: wasmer artifact_name: 'onyx-linux-wasmer-amd64' - build: linux-amd64 - os: ubuntu-20.04 + os: ubuntu-24.04 runtime_library: none artifact_name: 'onyx-linux-none-amd64' - build: linux-arm64 - os: ubuntu-20.04 + os: ubuntu-24.04 runtime_library: ovmwasm artifact_name: 'onyx-linux-ovm-arm64' - build: linux-arm64 - os: ubuntu-20.04 + os: ubuntu-24.04 runtime_library: wasmer artifact_name: 'onyx-linux-wasmer-arm64' - build: linux-arm64 - os: ubuntu-20.04 + os: ubuntu-24.04 runtime_library: none artifact_name: 'onyx-linux-none-arm64' - build: darwin-arm64 - os: macos-12 + os: macos-13 runtime_library: none target: aarch64-apple-darwin artifact_name: 'onyx-darwin-none-arm64' - build: darwin-arm64 - os: macos-12 + os: macos-13 runtime_library: wasmer target: aarch64-apple-darwin artifact_name: 'onyx-darwin-wasmer-arm64' - build: darwin-amd64 - os: macos-12 + os: macos-13 runtime_library: none artifact_name: 'onyx-darwin-none-amd64' - build: darwin-amd64 - os: macos-12 + os: macos-13 runtime_library: wasmer artifact_name: 'onyx-darwin-wasmer-amd64' - build: darwin-amd64 - os: macos-12 + os: macos-13 runtime_library: ovmwasm artifact_name: 'onyx-darwin-ovm-amd64' - build: 
darwin-arm64 - os: macos-12 + os: macos-13 runtime_library: ovmwasm target: aarch64-apple-darwin artifact_name: 'onyx-darwin-ovm-arm64' @@ -223,7 +223,7 @@ jobs: cmd.exe /c 'build.bat dist' - name: Upload Artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ${{ matrix.artifact_name }} path: dist @@ -245,7 +245,7 @@ jobs: upload_url: ${{ steps.create_release.outputs.upload_url }} steps: - name: Download artifacts - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4 with: path: artifacts - name: Create Release diff --git a/.gitignore b/.gitignore index ebe28ebfd..4ab77b446 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,9 @@ bin/onyx-run bin/onyx releases/ compiler/onyx +compiler/libonyx.so +compiler/libonyx.dylib +compiler/libonyx.dll runtime/onyx_runtime.so runtime/onyx_runtime.dylib runtime/onyx_runtime.dll diff --git a/CHANGELOG b/CHANGELOG index 3b12ed550..a8f530242 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,19 +1,171 @@ -Nightly Release +Release 0.1.13 +-------------- +8th November 2024 + +Additions: +- Unwrap operator, `x!`. + - Similar to try operator (`x?`), but panics if value is not present. +- "Field update" syntax. A shorthand to change a field of a structure. + - `.{ ..old_value, field = new_value }` +- `onyx watch` now works on MacOS. +- `onyx run-watch` to automatically re-run the program on changes. +- `#wasm_section` directive to add custom sections to the WASM binary. +- Custom commands per project + - Installed in a `.onyx` directory +- `onyx pkg build` can now run shell commands and multi-stage builds. +- `Stalled` compiler hook to allow for code injection when the compiler stalls. 
+- `Slice.map` +- `Slice.map_inplace` +- `Slice.fold1` +- `Slice.get_opt` +- `Iterator.from` +- `iter.group_by` +- `core.alloc.debug` +- `core.os.args` +- `core.crypto.hmac` +- `core.crypto.keys` +- `core.encoding.json.Value.as_entry_array` +- `core.encoding.base64 { encode_url, decode_url }` +- `core.encoding.xml` + +Removals: + +Changes: +- Capture/closure syntax is now `(params) use (captures) ...`. + - `(x: i32) use (variable) -> str { ... }` + - `(x) use (variable) => { ... }` +- `&&` and `||` now short-circuit. +- Fixed-sized arrays (`[N] T`) are now passed by value. +- The size of tag field for unions is now dependent on the number of variants. +- Parsing structs no longer sometimes needs `#type`. +- Renamed `core.alloc.memdebug` to `core.alloc.memwatch`. + +Bugfixes: +- Bug when injecting into a `#distinct` type. +- Many, many random bugs. + + + +Release 0.1.12 +-------------- +19th May 2024 + +Additions: +- Ability to pipe into a method call. + - `x |> y->z() === y->z(x)` +- Ability to pipe into a try operator. + - `x |> y()? === y(x)?` +- Ability to use `_` where `#auto` is used. + - This will become the default soon and `#auto` will be removed. +- `return #from_proc` to return all the way from the procedure. +- Variant of `new` that accepts an r-value and places it in the heap. +- Builtin `panic` procedure that is equivalent to `assert(false, ...)` +- Format parameter "a" that unpacks an `any` and formats its internal value. +- `--generate-name-section` CLI flag + +Removals: +- `Optional.try` as it is incompatible with new semantics of `?`. + +Changes: +- `str.as_str` is now preferred over `string.as_str` +- `str.compare` is now preferred over `string.compare` +- `str.to_cstr_on_stack` is now preferred over `string.to_cstr_on_stack` +- `str.join` is now preferred over `string.join` +- Implementation of `?` for `Optional` and `Result` to return from the enclosing procedure. +- JavaScript file generated by `-r js` is no longer an ES6 module. 
+ +Bugfixes: +- Fixed WASI compilation due to syntax issue. +- Fixed WASI platform `__file_open` implementation for wasmtime. +- Weird edge case when using multiple placeholder arguments in a quick procedure. + +Contributors: + + +Release 0.1.11 +-------------- +21st April 2024 + +Additions: +- Ability to specify where piped arguments are placed using `_`. + - `x |> foo(y, _) == foo(y, x)` +- Alternative syntax for `case #default ...`. You can now just write `case _ ...`. +- Alternative syntax for binding documentation using `///`. +- **Experimental** compiler extensions feature, currently used to create procedural macros. +- `core.misc.any_deep_copy` +- Ability to explicitly specify tag value for tagged unions. + - `Variant as value: type`, i.e. `Foo as 3: i32` + +Removals: +- Deprecated the use of `#default` in case statements. Use `_` instead. +- Removed `iter.take_one`. Use `iter.next` instead. + +Changes: +There are several *breaking* changes in this release related to core library APIs. +- `Iterator.next` now returns `? T` instead of `(T, bool)` +- `io.Stream` uses `Result(T, Error)` for return types instead of `(Error, T)` +- `switch` over a `range` is no longer inclusive by default, since `..=` exists now. +- Enabled optional semicolons by default. + - `//+optional-semicolons` is no longer necessary. + +There are also several non-breaking changes. +- The internal memory layout is different. See pull request #133 for details. + +Bugfixes: + +Contributors: + + + +Release 0.1.10 --------------- -No release date yet +30th March 2024 Additions: -- Added new alternate error format that may become the default in the future. +- JavaScript interop + - `core.js` package for JS FFI. + - `#js` directive to build a JavaScript file during compilation. +- Implicit injections + - `#inject` is no longer required in some cases +- Named return values +- Official builds for Linux AARCH64 +- `Slice` and `Array` structures for placing methods on slices and dynamic arrays. 
+- Range type improvements + - `range64` type + - `..=` operator that is a range, with an inclusive upper end. +- New alternate error format that may become the default in the future. - Use `--error-format v2` or set environment variable `ONYX_ERROR_FORMAT` to be `v2`. +- Auto-disposing locals (experimental) + - `use x := ...` +- Core library functions + - New process spawning API with builder pattern (`os.command`) + - `sync.MutexGuard` + - `sync.Channel` + - `hash.sha1` + - `net.dial` + - `net.resolve` + - integer constants `i8.MIN`, `i64.MAX`, etc. Removals: Changes: +- Revamped CLI + - Shorthand commands (r for run, b for build, etc.) + - Improved appearance + - Better help pages + - Note: flags must now appear before all files +- Better error messages for common issues +- `Array` should be preferred over `core.array` +- `Slice` should be preferred over `core.slice` +- `str` should be preferred over `core.string` Bugfixes: - Fixed compiler crash when trying to iterate over something that is not iterable. +- Fixed wrong implementation of futexes on MacOS. +- Fixed implementation of `platform.__time()` - +Contributors: +- @Syuparn (1 pull request) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..f20535990 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +me@brendanfh.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. 
Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..41f75dd10 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,84 @@ +# Contributing to Onyx + +Thank you for wanting to contribute to Onyx! + +Please read Onyx's [Code of Conduct][code-of-conduct] before contributing, as all contributing members are expected to adhere to it. + +[code-of-conduct]: https://github.com/onyx-lang/onyx/blob/main/CODE_OF_CONDUCT.md + +## Bug Reports + +If you found a bug in Onyx, first ensure there is not already +a similar bug open on Onyx's [GitHub issue tracker][issues]. +If there is not, feel free to open one. + +[issues]: https://github.com/onyx-lang/onyx/issues + +A bug could be anything from a compiler crash, to the wrong +output from a core library function. If you are unsure if +what you found is a bug, open an issue anyway and we'll +discuss from there. + + +## Code Changes + +Contributing code changes to Onyx is welcome by following the process outlined below. + +1. 
Find or open a GitHub issue relevant to the change you are wanting to make + and comment saying you wish to work on this issue. This just helps me know + what is getting done so I know I don't have to worry about something. If + your change would include new functionality, this issue will serve as a place + to discuss the details of what you are adding, to ensure everyone is in agreement + on how the new functionality will work. +2. Create a fork and work on your code change. Commit frequently in small commits. + When making commits, be descriptive in your message. +3. When ready, open a GitHub pull request to merge your changes with the `master` branch. + Once approved, your changes will be tested to ensure they pass CI/CD. +4. I will review the committed request. I reserve the right to provide feedback, + request modifications or deny any requests, with or without an explanation. + There may be multiple rounds of feedback. +5. Once everything is approved, your code will be merged/rebased into the `master` branch. + +### Code Standard + +When contributing code, it is important that your code meets Onyx's code standard. +While not extensive, it does ensure the codebase stays relatively uniform. + +1. Use **4 spaces** per indentation level. +2. Use descriptive identifier names. No one should have to guess what the variable/function is for. +3. Separate a large block of code into small blocks by placing blank lines between distinct parts of the code. +4. The C components of Onyx are intentionally divided into only a handful of files. + You will likely **not** need to create a new `.c` or `.h` file in the repository. + +## Development Environment + +Since Onyx is developed in C, the barrier to entry is quite low. +You simply need a C compiler (gcc, clang and MSVC are known to work). + +The only external dependency you may need is [Wasmer](https://wasmer.io) +installed and accessible by your user. Simply run `wasmer` to ensure it +is working. 
Note, this is not needed if you are on Windows, or compiling +using the `ovmwasm` runtime. + +To compile on Linux and MacOS, you need to source the `settings.sh` file first. +This sets environment variables that are used by the build script. + +```shell +source ./settings.sh +``` + +Now you should be ready to compile the code. Simply run the following on Linux/MacOS. + +```shell +./build.sh compile install +``` + +Or the following on Windows. + +```batch +build.bat +``` + +This will compile the code, place the new version in the `dist` folder, +and then copy the `dist` folder to your `ONYX_PATH`. + diff --git a/README.md b/README.md index a077c3be6..0c33f590c 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,92 @@ -# Onyx +

+ Onyx Logo +
+ An efficient, procedural, and pragmatic programming language +
+
+ + + + + + + + + +
+ + + + + Workflow + + + License + +

-[![Workflow](https://github.com/onyx-lang/onyx/workflows/Build%20and%20Test/badge.svg)](https://github.com/onyx-lang/onyx/actions) -[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg)](https://opensource.org/licenses/BSD-2-Clause) +# The Onyx Programming Language -A simple, yet powerful language for WebAssembly. +Onyx is a general purpose, efficient, procedural and pragmatic programming language for application development. +Onyx compiles solely to WebAssembly through a custom backend, leading to *blazingly-fast* build times. -[Try Online](https://try.onyxlang.io/) | -[Read the Docs](https://onyxlang.io/docs/) | -[Join the Discord](https://discord.gg/bhuN36dqj7) + +```odin +use core {*} + +Person :: struct { age: i32; name: str } + +main :: () { + // Generate a list of 30 random people + people := Iterator.from(0 .. 30) + |> Iterator.map(_ => Person.{ + random.between(1, 10) + random.string(10, alpha_numeric=true) + }) + |> Iterator.collect() + + // Sort them by age + Slice.sort(people, (a, b) => a.age - b.age) + + // Group them by age + group_iter := Iterator.from(people) + |> Iterator.group_by((a, b) => a.age == b.age) + + // Print the groups of people + for group in group_iter { + printf("People of age {}:\n", group.first.age) + + for v in group.second { + printf(" {}\n", v.name) + } + } +} +``` + +# Resources + +### [Onyx Installation](https://onyxlang.io/docs/install) + +Install Onyx on your Linux, MacOS, or Windows system. + +### [Onyx Documentation](https://onyxlang.io/docs) + +Learn about the various aspects of Onyx, from the language semantics and syntax, to the core and third-party packages. + +### [Onyx Book](https://docs.onyxlang.io/book/Overview.html) + +The official source of language syntax and semantics documentation. 
+ +### [Onyx Playground](https://try.onyxlang.io) + +Try Onyx online without installing anything + +### [Onyx Discord](https://discord.com/invite/bhuN36dqj7) + +Discuss with others about Onyx and get quick feedback and help on your Onyx projects. + +# Disclaimer + +> Onyx is still under active development, and some features are subject to change. +> +> Onyx is primarily developed for Linux and MacOS. Windows support exists, but the development experience on Windows is not as refined as the Linux and MacOS development experience. diff --git a/bin/install.sh b/bin/install.sh index fad8592e5..fea6b58fc 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -253,7 +253,7 @@ onyx_install() { printf "The Onyx Programming Language\n\n" fi - onyx_download $1 && onyx_link + onyx_download "$1" "$2" && onyx_link onyx_reset } @@ -336,13 +336,20 @@ onyx_download() { # identify platform based on uname output initArch || return 1 initOS || return 1 - initRuntime || return 1 + + if [ ! -z "$2" ]; then + if [ "$2" = "ovmwasm" ]; then RUNTIME="ovm"; + else RUNTIME="$2"; + fi + else + initRuntime || return 1; + fi # assemble expected release artifact name BINARY="onyx-${OS}-${RUNTIME}-${ARCH}.tar.gz" onyx_install_status "downloading" "onyx-$OS-$RUNTIME-$ARCH" - if [ $# -eq 0 ]; then + if [ -z "$1" ]; then # The version was not provided, assume latest onyx_download_json LATEST_RELEASE "$RELEASES_URL/latest" || return 1 ONYX_RELEASE_TAG=$(echo "${LATEST_RELEASE}" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//') @@ -447,4 +454,4 @@ else INSTALL_DIRECTORY="${ONYX_DIR}" fi -onyx_install $1 # $2 +onyx_install "$1" "$2" diff --git a/build.bat b/build.bat index 5244f69bd..e99e155e8 100644 --- a/build.bat +++ b/build.bat @@ -1,7 +1,7 @@ @echo off REM Compile the compiler -set SOURCE_FILES=compiler/src/onyx.c compiler/src/astnodes.c compiler/src/builtins.c compiler/src/checker.c compiler/src/clone.c compiler/src/doc.c compiler/src/entities.c compiler/src/errors.c compiler/src/lex.c 
compiler/src/parser.c compiler/src/symres.c compiler/src/types.c compiler/src/utils.c compiler/src/wasm_emit.c compiler/src/wasm_runtime.c +set SOURCE_FILES=compiler/src/library_main.c compiler/cli/main.c compiler/src/astnodes.c compiler/src/builtins.c compiler/src/checker.c compiler/src/clone.c compiler/src/doc.c compiler/src/entities.c compiler/src/errors.c compiler/src/lex.c compiler/src/parser.c compiler/src/types.c compiler/src/utils.c compiler/src/wasm_emit.c compiler/src/wasm_runtime.c compiler/src/extensions.c if "%1" == "1" ( set FLAGS=/Od /MTd /Z7 @@ -23,9 +23,19 @@ if %ERRORLEVEL% neq 0 ( exit /b %ERRORLEVEL% ) +set SOURCE_FILES=compiler/src/library_main.c compiler/src/astnodes.c compiler/src/builtins.c compiler/src/checker.c compiler/src/clone.c compiler/src/doc.c compiler/src/entities.c compiler/src/errors.c compiler/src/lex.c compiler/src/parser.c compiler/src/types.c compiler/src/utils.c compiler/src/wasm_emit.c compiler/src/wasm_runtime.c compiler/src/extensions.c +cl.exe %FLAGS% /Icompiler/include /std:c17 /TC %SOURCE_FILES% /link /DLL /IGNORE:4217 %LINK_OPTIONS% /OUT:onyx.dll + +REM Don't continue if we had compilation errors. This prevents CI to succeed. +if %ERRORLEVEL% neq 0 ( + echo Compiler library compilation failed. 
+ exit /b %ERRORLEVEL% +) + del *.pdb > NUL 2> NUL del *.ilk > NUL 2> NUL del *.obj > NUL 2> NUL +del *.exp > NUL 2> NUL del misc\icon_resource.res cl /MT /std:c17 /TC /I compiler/include /I shared/include /D_USRDLL /D_WINDLL runtime\onyx_runtime.c /link /DLL ws2_32.lib bcrypt.lib Synchronization.lib kernel32.lib /OUT:onyx_runtime.dll @@ -50,13 +60,20 @@ if "%1" == "dist" ( copy misc\onyx.sublime-syntax dist\misc\onyx.sublime-syntax copy misc\vscode\onyxlang-0.1.9.vsix dist\misc\onyxlang-0.1.9.vsix - copy onyx_runtime.dll dist\onyx_runtime.dll + mkdir dist\include + copy shared\include\onyx.h dist\include\onyx.h + + mkdir dist\lib + copy onyx_runtime.dll dist\lib\onyx_runtime.dll + copy onyx.dll dist\lib\onyx.dll + copy onyx.lib dist\lib\onyx.lib copy onyx.exe dist\onyx.exe mkdir dist\tools copy scripts\onyx-pkg.onyx dist\tools\onyx-pkg.onyx mkdir dist\tools\pkg_templates copy scripts\default.json dist\tools\pkg_templates\default.json + copy scripts\lsp.wasm dist\tools\lsp.wasm powershell Compress-Archive dist onyx.zip move onyx.zip dist/onyx.zip diff --git a/build.sh b/build.sh index 887722afa..e7273bac4 100755 --- a/build.sh +++ b/build.sh @@ -1,6 +1,6 @@ #!/bin/sh -set -e +# set -e DIST_DIR="./dist" @@ -25,32 +25,35 @@ compile_all() { package_all() { rm -rf "$DIST_DIR" mkdir -p "$DIST_DIR" + mkdir -p "$DIST_DIR/lib" + mkdir -p "$DIST_DIR/include" echo "Installing on '$(uname -a)'" echo "Installing core libs" [ -d "$DIST_DIR/core" ] && rm -r "$DIST_DIR/core" cp -r ./core "$DIST_DIR/core" + case "$(uname)" in + Linux) suffix='so' ;; + *BSD) suffix='so' ;; + Darwin) suffix='dylib' ;; + *) suffix='dll' ;; + esac + echo "Installing core tools" mkdir -p "$DIST_DIR/bin" cp compiler/onyx "$DIST_DIR/bin/" + cp compiler/libonyx.$suffix "$DIST_DIR/lib/" + cp "shared/include/onyx.h" "$DIST_DIR/include/onyx.h" mkdir -p "$DIST_DIR/tools" mkdir -p "$DIST_DIR/tools/pkg_templates" cp ./scripts/onyx-pkg.onyx "$DIST_DIR/tools" cp ./scripts/default.json 
"$DIST_DIR/tools/pkg_templates" + cp ./scripts/lsp.wasm "$DIST_DIR/tools" if [ ! -z ${ONYX_RUNTIME_LIBRARY+x} ]; then echo "Installing runtime library" - mkdir -p "$DIST_DIR/lib" - mkdir -p "$DIST_DIR/include" - - case "$(uname)" in - Linux) suffix='so' ;; - *BSD) suffix='so' ;; - Darwin) suffix='dylib' ;; - *) suffix='dll' ;; - esac [ -f runtime/onyx_runtime.$suffix ] && cp runtime/onyx_runtime.$suffix "$DIST_DIR/lib/" cp "shared/include/onyx_library.h" "$DIST_DIR/include/onyx_library.h" @@ -76,15 +79,11 @@ compress_all() { # Sign the binaries on MacOS [ "$(uname)" = 'Darwin' ] && \ codesign -s - "$DIST_DIR/bin/onyx" && \ + codesign -s - "$DIST_DIR/lib/libonyx.dylib" && \ [ -f "$DIST_DIR/lib/onyx_runtime.dylib" ] && \ codesign -s - "$DIST_DIR/lib/onyx_runtime.dylib" - if [ ! -z ${ONYX_RUNTIME_LIBRARY+x} ]; then - # When including a runtime library, include the lib and include folders - tar -C "$DIST_DIR" -zcvf onyx.tar.gz bin core examples include lib misc tools LICENSE - else - tar -C "$DIST_DIR" -zcvf onyx.tar.gz bin core examples misc tools LICENSE - fi + tar -C "$DIST_DIR" -zcvf onyx.tar.gz bin core examples include lib misc tools LICENSE mv onyx.tar.gz dist/ } diff --git a/compiler/build.sh b/compiler/build.sh index 8627ad5e6..10b837dc7 100755 --- a/compiler/build.sh +++ b/compiler/build.sh @@ -1,6 +1,6 @@ #!/bin/sh -C_FILES="onyx astnodes builtins checker clone doc entities errors lex parser symres types utils wasm_emit " +C_FILES="library_main astnodes builtins checker clone doc entities errors lex parser types utils wasm_emit extensions " LIBS="-lpthread -ldl -lm" INCLUDES="-I./include -I../shared/include -I../shared/include/dyncall" @@ -12,7 +12,7 @@ else FLAGS="$WARNINGS -O3" fi -FLAGS="$FLAGS -DENABLE_DEBUG_INFO" +FLAGS="$FLAGS -DENABLE_DEBUG_INFO -fvisibility=hidden" if [ ! 
-z ${ONYX_TARGET+x} ]; then FLAGS="$FLAGS --target=$ONYX_TARGET" @@ -51,8 +51,25 @@ if [ "$ONYX_USE_DYNCALL" = "1" ] && [ "$ONYX_RUNTIME_LIBRARY" = "ovmwasm" ]; the FLAGS="$FLAGS -DUSE_DYNCALL" fi -echo "Compiling onyx" -$ONYX_CC -o "onyx" \ - $FLAGS $INCLUDES \ - $(echo "$C_FILES" | sed 's/ /\n/g;s/\([a-zA-Z_0-9]*\)\n/src\/\1.c\n/g;s/\n/ /g') \ - $LIBS +case "$(uname)" in + Linux) suffix='so' ;; + *BSD) suffix='so' ;; + Darwin) suffix='dylib' ;; + *) suffix='dll' ;; +esac + +echo "Compiling libonyx.$suffix" +for c_file in $(echo "$C_FILES" | sed 's/ /\n/g;s/\([a-zA-Z_0-9]*\)\n/src\/\1.c\n/g;s/\n/ /g'); do + $ONYX_CC $FLAGS $INCLUDES -fPIC -o $(basename $c_file).o -c $c_file +done + +echo "Compiling onyx executable" +$ONYX_CC $INCLUDES $FLAGS cli/main.c *.o -o onyx $LIBS + +FLAGS="" +if [ ! -z ${ONYX_TARGET+x} ]; then + FLAGS="$FLAGS --target=$ONYX_TARGET" +fi +$ONYX_CC -shared $FLAGS -o "libonyx.$suffix" *.o $LIBS + +rm *.o diff --git a/compiler/cli/error_printing.h b/compiler/cli/error_printing.h new file mode 100644 index 000000000..3f2ed0f35 --- /dev/null +++ b/compiler/cli/error_printing.h @@ -0,0 +1,207 @@ +typedef struct onyx_error_details_t { + onyx_error_t rank; + const char *message; + const char *filename; + int line; + int column; + int length; + char *line_text; +} onyx_error_details_t; + +static void print_error_text(const char *text, b32 color) { + if (!color) { + bh_printf("%s", text); + return; + } + + const char *ch = text; + b32 in_color = 0; + + while (*ch != '\0') { + if (*ch == '\'') { + in_color = !in_color; + if (in_color) bh_printf("\033[92m"); + else bh_printf("\033[0m"); + } else { + bh_printf("%c", *ch); + } + + ch++; + } +} + +static void print_underline(onyx_error_details_t *err, i32 len, i32 first_non_whitespace, b32 colored_printing) { + len = bh_min(len, 1024); + char* pointer_str = alloca(sizeof(char) * len); + memset(pointer_str, ' ', len); + + int c = err->column - 1; + int l = err->length; + + memcpy(pointer_str, err->line_text, 
first_non_whitespace); + memset(pointer_str + c + 1, '~', l - 1); + pointer_str[c] = '^'; + pointer_str[c + l] = 0; + + if (colored_printing) bh_printf("\033[91m"); + bh_printf("%s\n", pointer_str); + if (colored_printing) bh_printf("\033[0m\n"); +} + +static void print_detailed_message_v1(onyx_error_details_t *err, b32 colored_printing) { + bh_printf("(%s:%l,%l) %s\n", err->filename, err->line, err->column, err->message); + + i32 linelength = 0; + i32 first_char = 0; + char* walker = err->line_text; + while (*walker == ' ' || *walker == '\t') first_char++, linelength++, walker++; + while (*walker && *walker != '\n') linelength++, walker++; + + if (colored_printing) bh_printf("\033[90m"); + i32 numlen = bh_printf(" %d | ", err->line); + if (colored_printing) bh_printf("\033[94m"); + bh_printf("%b\n", err->line_text, linelength); + + fori (i, 0, numlen - 1) bh_printf(" "); + print_underline(err, linelength, first_char, colored_printing); +} + +static void print_detailed_message_v2(onyx_error_details_t* err, b32 colored_printing) { + if (colored_printing) { + switch (err->rank) { + case ONYX_ERROR_WARNING: + bh_printf("\033[93mwarning\033[0m: "); + print_error_text(err->message, colored_printing); + bh_printf("\n\033[90m at: %s:%l,%l\033[0m\n", err->filename, err->line, err->column); + break; + + default: + bh_printf("\033[91merror\033[0m: "); + print_error_text(err->message, colored_printing); + bh_printf("\n\033[90m at: %s:%l,%l\033[0m\n", err->filename, err->line, err->column); + break; + } + } else { + switch (err->rank) { + case ONYX_ERROR_WARNING: + bh_printf("warning: "); + print_error_text(err->message, colored_printing); + bh_printf("\n at: %s:%l,%l\n", err->filename, err->line, err->column); + break; + + default: + bh_printf("error: "); + print_error_text(err->message, colored_printing); + bh_printf("\n at: %s:%l,%l\n", err->filename, err->line, err->column); + break; + } + } + + i32 linelength = 0; + i32 first_char = 0; + char* walker = err->line_text; + 
while (*walker == ' ' || *walker == '\t') first_char++, linelength++, walker++; + while (*walker && *walker != '\n') linelength++, walker++; + + char numbuf[32] = {0}; + i32 numlen = bh_snprintf(numbuf, 31, " %d | ", err->line); + + if (colored_printing) bh_printf("\033[90m"); + fori (i, 0, numlen - 3) bh_printf(" "); + bh_printf("|\n%s", numbuf); + if (colored_printing) bh_printf("\033[94m"); + + bh_printf("%b\n", err->line_text, linelength); + + if (colored_printing) bh_printf("\033[90m"); + fori (i, 0, numlen - 3) bh_printf(" "); + bh_printf("| "); + if (colored_printing) bh_printf("\033[94m"); + + print_underline(err, linelength, first_char, colored_printing); +} + +static void print_detailed_message_json(onyx_error_details_t* err, b32 colored_printing) { + bh_printf( + "{\"rank\":%d,\"file\":\"%s\",\"line\":%d,\"column\":%d,\"length\":%d,\"msg\":\"%s\"}", + err->rank, + err->filename, + err->line, + err->column, + err->length, + err->message + ); +} + +static void print_detailed_message(onyx_error_details_t* err, b32 colored_printing, char *error_format) { + if (!err->filename) { + // This makes the assumption that if a file is not specified for an error, + // the error must have come from the command line. 
+ + if (colored_printing) { + bh_printf("\033[91merror\033[0m: "); + bh_printf("%s\n", err->message); + bh_printf("\033[90m at: command line argument\033[0m\n"); + } else { + bh_printf("error: "); + bh_printf("%s\n", err->message); + bh_printf(" at: command line argument\n"); + } + + return; + } + + if (!strcmp(error_format, "v2")) { + print_detailed_message_v2(err, colored_printing); + } + else if (!strcmp(error_format, "v1")) { + print_detailed_message_v1(err, colored_printing); + } + else if (!strcmp(error_format, "json")) { + print_detailed_message_json(err, colored_printing); + } + else { + bh_printf("Unknown error format: '%s'.\n", error_format); + } +} + +void onyx_errors_print(onyx_context_t *ctx, char *error_format, b32 colored_printing, b32 show_all_errors) { + b32 error_format_json = !strcmp(error_format, "json"); + if (error_format_json) bh_printf("["); + + onyx_error_t last_rank = onyx_error_rank(ctx, 0); + fori (i, 0, onyx_error_count(ctx)) { + onyx_error_details_t err; + err.rank = onyx_error_rank(ctx, i); + err.message = onyx_error_message(ctx, i); + err.filename = onyx_error_filename(ctx, i); + err.line = onyx_error_line(ctx, i); + err.column = onyx_error_column(ctx, i); + err.length = onyx_error_length(ctx, i); + + char line_text[256]; + onyx_error_line_text(ctx, i, line_text, 255); + err.line_text = line_text; + + if (!show_all_errors && last_rank != err.rank) break; + if (error_format_json && i != 0) bh_printf(","); + + print_detailed_message(&err, colored_printing, error_format); + + last_rank = err.rank; + } + + if (error_format_json) bh_printf("]"); +} + +b32 onyx_errors_present(onyx_context_t *ctx) { + fori (i, 0, onyx_error_count(ctx)) { + onyx_error_details_t err; + if (onyx_error_rank(ctx, i) >= ONYX_ERROR_WAITING) { + return 1; + } + } + + return 0; +} + diff --git a/compiler/cli/main.c b/compiler/cli/main.c new file mode 100644 index 000000000..f32eb3594 --- /dev/null +++ b/compiler/cli/main.c @@ -0,0 +1,1014 @@ +#define BH_NO_TABLE 
+#include "bh.h" + +#include "onyx.h" +#include +#include + +#if defined(_BH_LINUX) || defined(_BH_DARWIN) + #define C_NORM "\e[0m" + #define C_BOLD "\e[1m" + #define C_RED "\e[91m" + #define C_YELLOW "\e[93m" + #define C_GREY "\e[90m" + #define C_GREEN "\e[33m" + #define C_BLUE "\e[34m" + #define C_LBLUE "\e[94m" +#else + #define C_NORM + #define C_BOLD + #define C_RED + #define C_YELLOW + #define C_GREY + #define C_GREEN + #define C_BLUE + #define C_LBLUE +#endif + +#define DOCSTRING_HEADER \ + "\n" \ + "The toolchain for the " C_BLUE C_BOLD "Onyx" C_NORM " programming language, created by Brendan Hansen.\n" \ + "Learn more at " C_BLUE "https://onyxlang.io" C_NORM ".\n" \ + "\n" + +static const char* top_level_docstring = DOCSTRING_HEADER + C_BOLD "Usage: " C_BLUE "onyx" C_LBLUE " " C_NORM C_YELLOW "[..flags] " C_GREEN "[..args]\n" C_NORM + "\n" + C_BOLD "Commands:\n" C_NORM + C_LBLUE " help " C_NORM "Shows this help message\n" + C_LBLUE " version " C_NORM "Prints version information\n" + "\n" + C_LBLUE " build " C_GREY "files " C_NORM "Compiles an Onyx program into an executable " C_GREY "(onyx b)" C_NORM "\n" + C_LBLUE " check " C_GREY "files " C_NORM "Checks syntax and types of a program\n" +#if defined(_BH_LINUX) || defined(_BH_DARWIN) + C_LBLUE " watch " C_NORM "Continuously rebuilds a program on file changes\n" +#endif + "\n" +#if defined(_BH_LINUX) || defined(_BH_DARWIN) + C_LBLUE " self-upgrade " C_NORM "Upgrade your toolchain\n" +#endif + "\n"; + +static const char *run_commands_docstring = + C_LBLUE " run " C_GREY "files " C_NORM "Compiles and runs an Onyx program " C_GREY "(onyx r)" C_NORM "\n" +#if (defined(_BH_LINUX) || defined(_BH_DARWIN)) + C_LBLUE " run-watch " C_NORM "Continuously rebuilds and runs a program on file changes " C_GREY "(onyx rw)" C_NORM "\n" +#endif + "\n" + C_LBLUE " package " C_GREY "cmd " C_NORM "Package manager " C_GREY "(onyx pkg cmd)" C_NORM "\n" + C_LBLUE " new " C_NORM "Create a new project from a template\n" + C_LBLUE " 
init " C_NORM "Initialize a project in the current directory\n" + C_LBLUE " add " C_GREY "package " C_NORM "Add a package to dependency list " C_GREY "(onyx a)" C_NORM "\n" + C_LBLUE " remove " C_GREY "package " C_NORM "Remove a package from dependency list " C_GREY "(onyx rm)" C_NORM "\n" + C_LBLUE " sync " C_NORM "Synchronize installed packages\n" + "\n"; + +static const char *build_docstring = DOCSTRING_HEADER + C_BOLD "Usage: " C_BLUE "onyx" C_LBLUE " %s " C_NORM C_YELLOW "[..flags] " C_GREEN "files " C_NORM "%s" "\n" + "\n" + C_BOLD "Flags:\n" C_NORM + C_LBLUE " -o, --output " C_GREY "target_file " C_NORM "Specify the target file " C_GREY "(default: out.wasm)\n" + C_LBLUE " -r, --runtime " C_GREY "runtime " C_NORM "Specifies the runtime " C_GREY "(onyx, wasi, js, custom)\n" + C_LBLUE " --map-dir " C_GREY "name:folder " C_NORM "Adds a mapped directory\n" + "\n" + C_LBLUE " --debug " C_NORM "Output a debugable build\n" + C_LBLUE " --feature " C_GREY "feature " C_NORM "Enable an experimental language feature\n" + C_LBLUE " --multi-threaded " C_NORM "Enables multi-threading for this compilation\n" + C_LBLUE " --stack-trace " C_NORM "Enable dynamic stack trace\n" + C_LBLUE " --wasm-mvp " C_NORM "Use only WebAssembly MVP features\n" + "\n" + C_LBLUE " --no-core " C_NORM "Disable automatically including \"core/module\"\n" + C_LBLUE " --no-type-info " C_NORM "Disables generating type information\n" + C_LBLUE " --generate-method-info " C_NORM "Populate method information in type information structures\n" + C_LBLUE " --generate-foreign-info " C_NORM "Generate information for foreign blocks\n" + C_LBLUE " --generate-name-section " C_NORM "Generate the 'name' custom section for better debugging\n" + C_LBLUE " --no-stale-code " C_NORM "Disables use of " C_YELLOW "#allow_stale_code" C_NORM " directive\n" + "\n" + C_LBLUE " --doc " C_NORM "Generate a .odoc file, Onyx's documentation format used by " C_YELLOW "onyx-doc-gen\n" + C_LBLUE " --lspinfo " C_GREY "target_file " 
C_NORM "Generate an LSP information file\n" + "\n" + C_LBLUE " -V, --verbose " C_NORM "Verbose output\n" + C_LBLUE " --no-colors " C_NORM "Disables colors in the error message\n" + C_LBLUE " --error-format " C_GREY "(v1|v2) " C_NORM "Changes the output error format\n" + C_LBLUE " --show-all-errors " C_NORM "Print all errors\n" + C_LBLUE " --print-function-mappings " C_NORM "Prints a mapping from WASM function index to source location\n" + C_LBLUE " --print-static-if-results " C_NORM "Prints the conditional result of every " C_YELLOW "#if" C_NORM " statement\n" + "\n" + C_LBLUE " --no-file-contents " C_NORM "Disables " C_YELLOW "#file_contents" C_NORM " for security\n" + C_LBLUE " --no-compiler-extensions " C_NORM "Disables " C_YELLOW "#compiler_extension" C_NORM " for security\n" + "\n"; + +static const char *self_upgrade_docstring = DOCSTRING_HEADER + C_BOLD "Usage: " C_BLUE "onyx" C_LBLUE " self-upgrade " C_GREEN "[version]\n" C_NORM + "\n" + C_BOLD "Arguments:\n" C_NORM + C_GREEN " version " C_NORM "Specify which version to install. 
Defaults to latest\n"; + + +static b32 is_flag(char *s) { + if (!s) return 0; + return s[0] == '-'; +} + + +typedef enum CompileAction CompileAction; +enum CompileAction { + ONYX_COMPILE_ACTION_COMPILE, + ONYX_COMPILE_ACTION_CHECK, + ONYX_COMPILE_ACTION_RUN, + ONYX_COMPILE_ACTION_RUN_WASM, + ONYX_COMPILE_ACTION_WATCH, + ONYX_COMPILE_ACTION_WATCH_RUN, + ONYX_COMPILE_ACTION_DOCUMENT, + ONYX_COMPILE_ACTION_PACKAGE, + ONYX_COMPILE_ACTION_PRINT_HELP, + ONYX_COMPILE_ACTION_PRINT_VERSION, + ONYX_COMPILE_ACTION_SELF_UPGRADE, +}; + +typedef struct CLIArgs { + CompileAction action; + + u32 verbose_output : 2; + b32 fun_output : 1; + b32 print_function_mappings : 1; + b32 print_static_if_results : 1; + b32 debug_session : 1; + b32 no_colors : 1; + b32 show_all_errors : 1; + + i32 passthrough_argument_count; + char** passthrough_argument_data; + + const char* target_file; + const char* symbol_info_file; + const char* help_subcommand; + + char *error_format; + char *debug_socket; + char *core_installation; + char *upgrade_version; +} CLIArgs; + +#include "./error_printing.h" + +static void print_subcommand_help(const char *subcommand); + +static int32_t cli_args_init(CLIArgs *cli_args) { + memset(cli_args, 0, sizeof(* cli_args)); + + cli_args->target_file = "out.wasm"; + cli_args->error_format = "v2"; + + #if defined(_BH_LINUX) || defined(_BH_DARWIN) + cli_args->no_colors = 0; + cli_args->core_installation = getenv("ONYX_PATH"); + + if (getenv("ONYX_ERROR_FORMAT")) { + cli_args->error_format = getenv("ONYX_ERROR_FORMAT"); + } + #endif + + #ifdef _BH_WINDOWS + cli_args->no_colors = 1; + + bh_allocator alloc = bh_heap_allocator(); + char *tmp_core_installation = bh_alloc_array(alloc, u8, 512); + char *tmp_error_format = bh_alloc_array(alloc, u8, 512); + + if (GetEnvironmentVariableA("ONYX_PATH", tmp_core_installation, 512) > 0) { + cli_args->core_installation = tmp_core_installation; + } + if (GetEnvironmentVariableA("ONYX_ERROR_FORMAT", tmp_error_format, 512) > 0) { + 
cli_args->error_format = tmp_error_format; + } + #endif + + if (cli_args->core_installation == NULL) { + bh_printf(C_RED "error" C_NORM ": ONYX_PATH environment variable is not set. Please set this to the location of your Onyx installation.\n"); + return 0; + } + + return 1; +} + +static int32_t cli_determine_action(CLIArgs *cli_args, int *first_sub_arg, int argc, char *argv[]) { + if (argc == 1 || is_flag(argv[1])) { + cli_args->action = ONYX_COMPILE_ACTION_PRINT_HELP; + return 0; + } + + bh_allocator allocator = bh_heap_allocator(); + cli_args->help_subcommand = argc > 1 ? argv[1] : NULL; + + if (!strcmp(argv[1], "help")) { + cli_args->action = ONYX_COMPILE_ACTION_PRINT_HELP; + cli_args->help_subcommand = argc > 2 ? argv[2] : NULL; + *first_sub_arg = 1; + return 1; + } + + if (!strcmp(argv[1], "version")) { + cli_args->action = ONYX_COMPILE_ACTION_PRINT_VERSION; + *first_sub_arg = 1; + return 1; + } + + if (!strcmp(argv[1], "compile") || !strcmp(argv[1], "build") || !strcmp(argv[1], "b")) { + cli_args->action = ONYX_COMPILE_ACTION_COMPILE; + *first_sub_arg = 2; + return 1; + } + + if (!strcmp(argv[1], "check")) { + cli_args->action = ONYX_COMPILE_ACTION_CHECK; + *first_sub_arg = 2; + return 1; + } + + if (!strcmp(argv[1], "pkg") || !strcmp(argv[1], "package")) { + // Maybe we should consider caching the package WASM file so it doesn't need to be recompiled + // every time? Compilation is very fast, but it would be even snappier if the whole package + // manager didn't need to compile every time. 
+ + cli_args->action = ONYX_COMPILE_ACTION_PACKAGE; + cli_args->passthrough_argument_count = argc - 2; + cli_args->passthrough_argument_data = &argv[2]; + *first_sub_arg = argc; + return 1; + } + + if (!strcmp(argv[1], "add") || !strcmp(argv[1], "a")) { + argv[1] = "add"; + + cli_args->action = ONYX_COMPILE_ACTION_PACKAGE; + cli_args->passthrough_argument_count = argc - 1; + cli_args->passthrough_argument_data = &argv[1]; + *first_sub_arg = argc; + return 1; + } + + if (!strcmp(argv[1], "remove") || !strcmp(argv[1], "rm")) { + argv[1] = "remove"; + + cli_args->action = ONYX_COMPILE_ACTION_PACKAGE; + cli_args->passthrough_argument_count = argc - 1; + cli_args->passthrough_argument_data = &argv[1]; + *first_sub_arg = argc; + return 1; + } + + if (!strcmp(argv[1], "sync") || !strcmp(argv[1], "init") || !strcmp(argv[1], "new")) { + cli_args->action = ONYX_COMPILE_ACTION_PACKAGE; + cli_args->passthrough_argument_count = argc - 1; + cli_args->passthrough_argument_data = &argv[1]; + *first_sub_arg = argc; + return 1; + } + + int32_t has_runtime = strcmp(onyx_version_runtime(), "none") != 0; + + if (has_runtime) { + if (!strcmp(argv[1], "run") || !strcmp(argv[1], "r")) { + cli_args->action = ONYX_COMPILE_ACTION_RUN; + *first_sub_arg = 2; + return 1; + } + } + + #if defined(_BH_LINUX) || defined(_BH_DARWIN) + if (!strcmp(argv[1], "watch")) { + cli_args->action = ONYX_COMPILE_ACTION_WATCH; + *first_sub_arg = 2; + return 1; + } + #endif + + #if (defined(_BH_LINUX) || defined(_BH_DARWIN)) + if (has_runtime) { + if (!strcmp(argv[1], "run-watch") || !strcmp(argv[1], "rw")) { + cli_args->action = ONYX_COMPILE_ACTION_WATCH_RUN; + *first_sub_arg = 2; + return 1; + } + } + #endif + + #if defined(_BH_LINUX) || defined(_BH_DARWIN) + if (!strcmp(argv[1], "self-upgrade")) { + cli_args->action = ONYX_COMPILE_ACTION_SELF_UPGRADE; + *first_sub_arg = 2; + return 1; + } + #endif + + + // First try `./.onyx` for the executable. 
+ char *script_filename = bh_aprintf(allocator, "./.onyx/%s.wasm", argv[1]); + + // If that doesn't exist, then try the core installation. + if (!bh_file_exists(script_filename)) { + script_filename = bh_aprintf(allocator, "%s/tools/%s.wasm", cli_args->core_installation, argv[1]); + } + + if (bh_file_exists(script_filename)) { + cli_args->action = ONYX_COMPILE_ACTION_RUN_WASM; + cli_args->target_file = script_filename; + + cli_args->passthrough_argument_count = argc - 2; + cli_args->passthrough_argument_data = &argv[2]; + return 1; + } + + return 0; +} + +static int32_t cli_parse_compilation_options(CLIArgs *cli_args, onyx_context_t *ctx, int arg_parse_start, int argc, char **argv) { + b32 using_onyx_runtime = 1; + + fori(i, arg_parse_start, argc) { + arg_parse_start = i; + + if (!is_flag(argv[i])) { + // On the first non-flag argument, break to add the files. + break; + } + + if (!strcmp(argv[i], "-o") || !strcmp(argv[i], "--output")) { + cli_args->target_file = argv[++i]; // :InCli + } + else if (!strcmp(argv[i], "--verbose") || !strcmp(argv[i], "-V")) { + cli_args->verbose_output = 1; // :InCli + } + else if (!strcmp(argv[i], "--help")) { + print_subcommand_help(argv[1]); + return 0; + } + else if (!strcmp(argv[i], "-VV")) { + cli_args->verbose_output = 2; // :InCli + } + else if (!strcmp(argv[i], "-VVV")) { + cli_args->verbose_output = 3; // :InCli + } + else if (!strcmp(argv[i], "--print-function-mappings")) { + cli_args->print_function_mappings = 1; // :InCli + } + else if (!strcmp(argv[i], "--print-static-if-results")) { + cli_args->print_static_if_results = 1; // :InCli + } + else if (!strcmp(argv[i], "--no-colors")) { + cli_args->no_colors = 1; // :InCli + } + else if (!strcmp(argv[i], "--no-file-contents")) { + onyx_set_option_int(ctx, ONYX_OPTION_DISABLE_FILE_CONTENTS, 1); + } + else if (!strcmp(argv[i], "--no-compiler-extensions")) { + onyx_set_option_int(ctx, ONYX_OPTION_DISABLE_EXTENSIONS, 1); + } + else if (!strcmp(argv[i], "--wasm-mvp")) { + 
onyx_set_option_int(ctx, ONYX_OPTION_POST_MVP_FEATURES, 0); + } + else if (!strcmp(argv[i], "--multi-threaded")) { + onyx_set_option_int(ctx, ONYX_OPTION_MULTI_THREADING, 1); + } + else if (!strcmp(argv[i], "--generate-foreign-info")) { + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_FOREIGN_INFO, 1); + } + else if (!strcmp(argv[i], "--generate-method-info")) { + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_METHOD_INFO, 1); + } + else if (!strcmp(argv[i], "--generate-name-section")) { + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_NAME_SECTION, 1); + } + else if (!strcmp(argv[i], "--no-type-info")) { + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_TYPE_INFO, 1); + } + else if (!strcmp(argv[i], "--no-core")) { + onyx_set_option_int(ctx, ONYX_OPTION_DISABLE_CORE, 1); + } + else if (!strcmp(argv[i], "--no-stale-code")) { + onyx_set_option_int(ctx, ONYX_OPTION_DISABLE_STALE_CODE, 1); + } + else if (!strcmp(argv[i], "--show-all-errors")) { + cli_args->show_all_errors = 1; // :InCli + } + else if (!strcmp(argv[i], "--error-format")) { + cli_args->error_format = argv[++i]; // :InCli + } + else if (!strcmp(argv[i], "--feature")) { + char *next_arg = argv[++i]; + if (!strcmp(next_arg, "optional-semicolons")) { + onyx_set_option_int(ctx, ONYX_OPTION_OPTIONAL_SEMICOLONS, 1); + } + } + else if (!strcmp(argv[i], "--map-dir")) { + char *arg = argv[++i]; + int len = strnlen(arg, 256); + + char *name = arg; + char *folder = NULL; + fori (i, 0, len) if (arg[i] == ':') { + arg[i] = '\0'; + folder = &arg[i + 1]; + } + + onyx_add_mapped_dir(ctx, name, -1, folder, -1); + } + else if (!strncmp(argv[i], "-D", 2)) { + i32 len = strlen(argv[i]); + i32 j=2; + while (argv[i][j] != '=' && j < len) j++; + + char *key = argv[i] + 2; + int32_t key_len = j - 2; + char *value = argv[i] + j + 1; + int32_t value_len = len - j - 1; + + if (value_len <= 0) { + value = "true"; + value_len = 4; + } + + onyx_add_defined_var(ctx, key, key_len, value, value_len); + } + else if (!strcmp(argv[i], "-r") || 
!strcmp(argv[i], "--runtime")) { + using_onyx_runtime = 0; + i += 1; + if (!strcmp(argv[i], "onyx")) using_onyx_runtime = 1; + else if (!strcmp(argv[i], "wasi")) onyx_set_option_int(ctx, ONYX_OPTION_PLATFORM, ONYX_PLATFORM_WASI); + else if (!strcmp(argv[i], "js")) onyx_set_option_int(ctx, ONYX_OPTION_PLATFORM, ONYX_PLATFORM_JS); + else if (!strcmp(argv[i], "custom")) onyx_set_option_int(ctx, ONYX_OPTION_PLATFORM, ONYX_PLATFORM_CUSTOM); + else { + bh_printf(C_YELLOW "warning" C_NORM ": '%s' is not a valid runtime. Defaulting to 'onyx'.\n", argv[i]); + using_onyx_runtime = 1; + } + } + else if (!strcmp(argv[i], "--doc")) { + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_DOC_INFO, 1); + } + else if (!strcmp(argv[i], "--syminfo")) { + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_SYMBOL_INFO, 1); + cli_args->symbol_info_file = argv[++i]; + } + else if (!strcmp(argv[i], "--lspinfo")) { + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_SYMBOL_INFO, 1); + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_LSP_INFO, 1); + cli_args->symbol_info_file = argv[++i]; + } + else if (!strcmp(argv[i], "--debug")) { + cli_args->debug_session = 1; + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_DEBUG_INFO, 1); + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_STACK_TRACE, 1); + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_NAME_SECTION, 1); + } + else if (!strcmp(argv[i], "--debug-socket")) { + cli_args->debug_session = 1; + cli_args->debug_socket = argv[++i]; // :InCli + } + else if (!strcmp(argv[i], "--debug-info")) { + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_DEBUG_INFO, 1); + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_STACK_TRACE, 1); + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_NAME_SECTION, 1); + } + else if (!strcmp(argv[i], "--stack-trace")) { + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_STACK_TRACE, 1); + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_NAME_SECTION, 1); + } + else if (!strcmp(argv[i], "--perf")) { + onyx_set_option_int(ctx, ONYX_OPTION_COLLECT_PERF, 1); + 
} +#if defined(_BH_LINUX) || defined(_BH_DARWIN) + // NOTE: Fun output is only enabled for Linux because Windows command line + // is not ANSI compatible and for a silly feature, I don't want to learn + // how to properly do arbitrary graphics in it. + else if (!strcmp(argv[i], "--fun") || !strcmp(argv[i], "-F")) { + cli_args->fun_output = 1; // :InCli + } +#endif + else { + bh_printf(C_RED "error" C_NORM ": Unknown flag '%s'.\n", argv[i]); + print_subcommand_help(argv[1]); + return 0; + } + } + + int32_t at_least_one_file = 0; + + fori (i, arg_parse_start, argc) { + if (is_flag(argv[i])) { + if (!strcmp(argv[i], "--")) { + cli_args->passthrough_argument_count = argc - i - 1; + cli_args->passthrough_argument_data = &argv[i + 1]; + break; + } + + // FLAG AFTER SOURCE FILES + bh_printf(C_RED "error" C_NORM ": Flag provided after start of source files.\n"); + bh_printf(C_GREY " hint: Try moving the flag '%s' to be earlier in your command.\n" C_NORM, argv[i]); + return 0; + } + + if (bh_str_ends_with(argv[i], ".wasm") && cli_args->action == ONYX_COMPILE_ACTION_RUN) { + if (at_least_one_file) { + bh_printf(C_RED "error" C_NORM ": Expected only one '.wasm', or multiple '.onyx' files to be given.\n"); + return 0; + } + cli_args->action = ONYX_COMPILE_ACTION_RUN_WASM; + cli_args->target_file = argv[i]; + + cli_args->passthrough_argument_count = argc - i - 1; + cli_args->passthrough_argument_data = &argv[i + 1]; + + at_least_one_file = 1; + break; + } + + at_least_one_file = 1; + onyx_include_file(ctx, argv[i], -1); + } + + if (!at_least_one_file) { + bh_printf(C_RED "error" C_NORM ": No files were provided.\n"); + print_subcommand_help(argv[1]); + return 0; + } + + if (using_onyx_runtime) { + onyx_set_option_int(ctx, ONYX_OPTION_MULTI_THREADING, 1); + } + + return 1; +} + +static void print_subcommand_help(const char *subcommand) { + if (!strcmp(subcommand, "build") || !strcmp(subcommand, "b") + || !strcmp(subcommand, "check") || !strcmp(subcommand, "watch")) { + 
bh_printf(build_docstring, subcommand, ""); + return; + } + + if (!strcmp(subcommand, "run") || !strcmp(subcommand, "r") + || !strcmp(subcommand, "run-watch") || !strcmp(subcommand, "rw")) { + bh_printf(build_docstring, subcommand, "[-- program args]"); + bh_printf( + C_LBLUE " --debug-socket " C_GREY "addr " C_NORM "Specifies the address or port used for the debug server.\n" + ); + return; + } + + if (!strcmp(subcommand, "self-upgrade")) { + bh_printf(self_upgrade_docstring); + return; + } + + bh_printf(C_RED "error" C_NORM ": Unknown command: '%s'\n", subcommand); + bh_printf(C_GREY " hint: Run 'onyx --help' for valid commands.\n"); + exit(1); +} + +static char *get_description_for_subcommand(char *path) { + bh_file_contents contents = bh_file_read_contents(bh_heap_allocator(), path); + + if (contents.data == NULL) return NULL; + + u8 *d = contents.data; + char *out = NULL; + + i32 cursor = 8; // Skip magic bytes and version. + while (cursor < contents.length) { + char kind = d[cursor++]; + int section_length = uleb128_to_uint(d, &cursor); + + // If not a custom section, skip it. 
+ if (kind != 0) { + cursor += section_length; + continue; + } + + int previous_cursor = cursor; + int name_length = uleb128_to_uint(d, &cursor); + + if (strncmp("onyx-command-description", (const char *) &d[cursor], name_length)) { + cursor = previous_cursor + section_length; + continue; + } + + cursor += name_length; + int description_length = section_length - (cursor - previous_cursor); + out = bh_alloc_array(bh_heap_allocator(), char, description_length + 1); + memcpy(out, &d[cursor], description_length); + out[description_length] = 0; + + break; + } + + bh_file_contents_free(&contents); + return out; +} + +static void print_commands_in_directory(char *dir) { + bh_dir d = bh_dir_open(dir); + + if (!d) return; + + bh_dirent ent; + while (bh_dir_read(d, &ent)) { + if (bh_str_ends_with(ent.name, ".wasm")) { + ent.name[ent.name_length - 5] = 0; // Remove the .wasm from the name + bh_printf(C_LBLUE " %s", ent.name); + + fori (i, 0, 21 - ent.name_length) bh_printf(" "); + + char *description = get_description_for_subcommand( + bh_aprintf(bh_heap_allocator(), "%s/%s.wasm", dir, ent.name) + ); + + if (!description) { + bh_printf(C_GREY " Description not provided\n"); + } else { + bh_printf(C_NORM " %s\n", description); + } + } + } + + bh_dir_close(d); +} + +static void print_top_level_docs(CLIArgs *cli_args) { + bh_printf(top_level_docstring); + + int32_t has_runtime = strcmp(onyx_version_runtime(), "none") != 0; + if (has_runtime) { + bh_printf(run_commands_docstring); + } + + bh_printf(C_BOLD "Global custom commands:\n" C_NORM); + print_commands_in_directory( + bh_aprintf(bh_heap_allocator(), "%s/tools", cli_args->core_installation) + ); + + bh_printf(C_NORM C_BOLD "\nLocal custom commands:\n" C_NORM); + print_commands_in_directory("./.onyx"); +} + +static int32_t output_file_to_disk(CLIArgs *cli_args, onyx_context_t *ctx, const char *filename, onyx_output_type_t type) { + int64_t output_length = onyx_output_length(ctx, type); + if (output_length > 0) { + void 
*output = malloc(output_length); + assert(output); + memset(output, 0, output_length); + + onyx_output_write(ctx, type, output); + + bh_file out_file; + if (bh_file_create(&out_file, filename) != BH_FILE_ERROR_NONE) { + bh_printf(C_RED "error" C_NORM ": Failed to open file for writing '%s'\n", filename); + return 0; + } + + bh_file_write(&out_file, output, output_length); + bh_file_close(&out_file); + + free(output); + } + + return 1; +} + +static int32_t output_files_to_disk(CLIArgs *cli_args, onyx_context_t *ctx, const char *filename) { + if (!output_file_to_disk(cli_args, ctx, filename, ONYX_OUTPUT_TYPE_WASM)) return 0; + if (!output_file_to_disk(cli_args, ctx, bh_bprintf("%s.js", filename), ONYX_OUTPUT_TYPE_JS)) return 0; + if (!output_file_to_disk(cli_args, ctx, bh_bprintf("%s.odoc", filename), ONYX_OUTPUT_TYPE_ODOC)) return 0; + + if (cli_args->symbol_info_file) { + if (!output_file_to_disk(cli_args, ctx, cli_args->symbol_info_file, ONYX_OUTPUT_TYPE_OSYM)) return 0; + } + + return 1; +} + +#if defined(_BH_LINUX) || defined(_BH_DARWIN) +#include + +static void perform_self_upgrade(CLIArgs *cli_args, char *version) { + int curl_pid; + bh_file upgrade_script; + + char file_path[512]; + bh_snprintf(file_path, 511, "%s/upgrade.sh", cli_args->core_installation); + + switch (curl_pid = fork()) { + case -1: exit(1); + case 0: + if (bh_file_create(&upgrade_script, file_path)) { + exit(1); + } + + dup2(upgrade_script.fd, STDOUT_FILENO); + bh_file_close(&upgrade_script); + + execlp("curl", "curl", "https://get.onyxlang.io", "-sSfL", NULL); + exit(1); + break; + } + + int status; + waitpid(curl_pid, &status, 0); + + if (status == 0) { + execlp("sh", "sh", file_path, version, onyx_version_runtime(), NULL); + } + + printf("error: Failed to download upgrade script.\n"); + printf(" hint: Ensure you have an active internet connection and 'curl' installed.\n"); +} +#endif + +#if defined(_BH_LINUX) || defined(_BH_DARWIN) +#include +#include + +static bh_file_watch watches; 
+static i32 watch_run_pid = -1; + +static void onyx_watch_stop(int sig) { + bh_file_watch_stop(&watches); +} + +static void onyx_watch_run_executable(const char *target) { + watch_run_pid = fork(); + switch (watch_run_pid) { + case -1: bh_printf("error: fork() failed\n"); break; + case 0: + setpgid(0, getpid()); + close(STDIN_FILENO); + open("/dev/null", O_RDONLY); + execlp("onyx", "onyx", "run", target, NULL); + exit(1); + break; + default: + break; + } +} + +static void onyx_watch(CLIArgs *cli_args, int arg_parse_start, int argc, char **argv) { + signal(SIGINT, onyx_watch_stop); + + b32 run_the_program = cli_args->action == ONYX_COMPILE_ACTION_WATCH_RUN; + + while (1) { + bh_printf("\e[2J\e[?25l\n"); + bh_printf("\e[3;1H"); + + onyx_context_t *ctx = onyx_context_create(); + onyx_add_mapped_dir(ctx, "core", -1, bh_bprintf("%s/core", cli_args->core_installation), -1); + onyx_set_option_int(ctx, ONYX_OPTION_PLATFORM, ONYX_PLATFORM_ONYX); + + if (!cli_parse_compilation_options(cli_args, ctx, arg_parse_start, argc, argv)) { + return; + } + + onyx_options_ready(ctx); + while (onyx_pump(ctx) == ONYX_PUMP_CONTINUE) { + // doing the compilation + } + + i32 error_count = onyx_error_count(ctx); + if (error_count == 0) { + output_files_to_disk(cli_args, ctx, cli_args->target_file); + + bh_printf("\e[92mNo errors!\n"); + } else { + onyx_errors_print(ctx, cli_args->error_format, !cli_args->no_colors, cli_args->show_all_errors); + } + + char time_buf[128] = {0}; + time_t now = time(NULL); + strftime(time_buf, 128, "%X", localtime(&now)); + bh_printf("\e[1;1H\e[30;105m Onyx %d.%d.%d%s \e[30;104m Built %s \e[0m", + onyx_version_major(), + onyx_version_minor(), + onyx_version_patch(), + onyx_version_suffix(), + time_buf + ); + + if (error_count == 0) { + bh_printf("\e[30;102m Errors 0 \e[0m"); + } else { + bh_printf("\e[30;101m Error%s %d \e[0m", bh_num_plural(error_count), error_count); + } + + if (run_the_program && error_count == 0) { + bh_printf("\n\n\nRunning your 
program...\n"); + onyx_watch_run_executable(cli_args->target_file); + } + + watches = bh_file_watch_new(); + + fori (i, 0, onyx_stat(ctx, ONYX_STAT_FILE_COUNT)) { + bh_file_watch_add(&watches, onyx_stat_filepath(ctx, i)); + } + + onyx_context_free(ctx); + + b32 wait_successful = bh_file_watch_wait(&watches); + + if (run_the_program && watch_run_pid > 0) { + int status; + killpg(watch_run_pid, SIGTERM); + waitpid(watch_run_pid, &status, 0); + watch_run_pid = -1; + } + + bh_file_watch_free(&watches); + + if (!wait_successful) { + break; + } + } + + bh_printf("\e[2J\e[1;1H\e[?25h\n"); +} +#endif + +int main(int argc, char *argv[]) { + CLIArgs cli_args; + if (!cli_args_init(&cli_args)) { + return 1; + } + + int32_t arg_parse_start = 0; + int32_t action_was_determined = cli_determine_action(&cli_args, &arg_parse_start, argc, argv); + + if (!action_was_determined || cli_args.action == ONYX_COMPILE_ACTION_PRINT_HELP) { + if (cli_args.help_subcommand) { + print_subcommand_help(cli_args.help_subcommand); + } else { + print_top_level_docs(&cli_args); + } + + return !action_was_determined; + } + + if (cli_args.action == ONYX_COMPILE_ACTION_PRINT_VERSION) { + bh_printf("Onyx tool-chain version %d.%d.%d%s\n", + onyx_version_major(), + onyx_version_minor(), + onyx_version_patch(), + onyx_version_suffix() + ); + bh_printf("Runtime: %s\n", onyx_version_runtime()); + bh_printf("Built: %s\n", onyx_version_build_time()); + + return 0; + } + + #if defined(_BH_LINUX) || defined(_BH_DARWIN) + if (cli_args.action == ONYX_COMPILE_ACTION_SELF_UPGRADE) { + if (arg_parse_start < argc && !is_flag(argv[arg_parse_start])) { + perform_self_upgrade(&cli_args, argv[arg_parse_start]); + } + + if (arg_parse_start < argc && !strcmp(argv[arg_parse_start], "--help")) { + print_subcommand_help(argv[1]); + } + + return 1; + } + + if (cli_args.action == ONYX_COMPILE_ACTION_WATCH || cli_args.action == ONYX_COMPILE_ACTION_WATCH_RUN) { + onyx_watch(&cli_args, arg_parse_start, argc, argv); + return 0; + } + 
#endif + + onyx_context_t *ctx = onyx_context_create(); + onyx_add_mapped_dir(ctx, "core", -1, bh_bprintf("%s/core", cli_args.core_installation), -1); + onyx_set_option_int(ctx, ONYX_OPTION_PLATFORM, ONYX_PLATFORM_ONYX); + + if (cli_args.action == ONYX_COMPILE_ACTION_PACKAGE) { + onyx_set_option_int(ctx, ONYX_OPTION_GENERATE_METHOD_INFO, 1); + onyx_set_option_int(ctx, ONYX_OPTION_MULTI_THREADING, 1); + onyx_include_file(ctx, bh_aprintf(bh_heap_allocator(), "%s/tools/onyx-pkg.onyx", cli_args.core_installation), -1); + + } else { + if (!cli_parse_compilation_options(&cli_args, ctx, arg_parse_start, argc, argv)) { + return 1; + } + } + + if (cli_args.action == ONYX_COMPILE_ACTION_RUN_WASM) { + bh_file_contents wasm_content = bh_file_read_contents(bh_heap_allocator(), cli_args.target_file); + if (wasm_content.length <= 0) { + bh_printf(C_RED "error" C_NORM ": Failed to read '%s'\n", cli_args.target_file); + return 1; + } + + if (cli_args.debug_session) { + onyx_run_wasm_with_debug(wasm_content.data, wasm_content.length, cli_args.passthrough_argument_count, cli_args.passthrough_argument_data, cli_args.debug_socket); + } else { + onyx_run_wasm(wasm_content.data, wasm_content.length, cli_args.passthrough_argument_count, cli_args.passthrough_argument_data); + } + + return 0; + } + + u64 start_time = bh_time_curr(); + + onyx_options_ready(ctx); + while (onyx_pump(ctx) == ONYX_PUMP_CONTINUE) { + fori (i, 0, onyx_event_count(ctx)) { + switch (onyx_event_type(ctx, i)) { + case ONYX_EVENT_LOG: + if (cli_args.verbose_output > 0) { + bh_printf("%s %s\n", + cli_args.no_colors ? 
"INFO " : "\x1b[94mINFO\x1b[0m ", + onyx_event_field_str(ctx, i, "message") + ); + } + break; + + case ONYX_EVENT_ALL_TYPES_CHECKED: + break; + + case ONYX_EVENT_PHASE_START: + break; + + case ONYX_EVENT_SYMBOL_DEFINED: + // bh_printf("DEFINED SYMBOL AT %s:%d,%d\n", + // onyx_event_field_str(ctx, i, "filename"), + // onyx_event_field_int(ctx, i, "line"), + // onyx_event_field_int(ctx, i, "column") + // ); + break; + + case ONYX_EVENT_UNKNOWN: + break; + } + } + } + + u64 duration = bh_time_duration(start_time); + + onyx_errors_print(ctx, cli_args.error_format, !cli_args.no_colors, cli_args.show_all_errors); + if (onyx_errors_present(ctx)) { + return 1; + } + + if (cli_args.verbose_output > 0) { + int tokens = onyx_stat(ctx, ONYX_STAT_TOKEN_COUNT); + int lines = onyx_stat(ctx, ONYX_STAT_LINE_COUNT); + + float tokens_per_sec = (1000.0f * tokens) / duration; + float lines_per_sec = (1000.0f * lines) / duration; + + printf("\nStatistics:\n"); + printf(" Time taken: %lf ms\n", (double) duration); + printf(" Processed %d lines (%f lines/second).\n", lines, lines_per_sec); + printf(" Processed %d tokens (%f tokens/second).\n", tokens, tokens_per_sec); + printf("\n"); + } + + switch (cli_args.action) { + case ONYX_COMPILE_ACTION_RUN: + case ONYX_COMPILE_ACTION_PACKAGE: { + int64_t output_length = onyx_output_length(ctx, ONYX_OUTPUT_TYPE_WASM); + void *output = malloc(output_length); + onyx_output_write(ctx, ONYX_OUTPUT_TYPE_WASM, output); + onyx_context_free(ctx); + + if (cli_args.debug_session) { + onyx_run_wasm_with_debug(output, output_length, cli_args.passthrough_argument_count, cli_args.passthrough_argument_data, cli_args.debug_socket); + } else { + onyx_run_wasm(output, output_length, cli_args.passthrough_argument_count, cli_args.passthrough_argument_data); + } + + free(output); + return 0; + } + + case ONYX_COMPILE_ACTION_CHECK: { + if (cli_args.symbol_info_file) { + if (!output_file_to_disk(&cli_args, ctx, cli_args.symbol_info_file, ONYX_OUTPUT_TYPE_OSYM)) { + 
return 1; + } + } + break; + } + + case ONYX_COMPILE_ACTION_COMPILE: { + if (!output_files_to_disk(&cli_args, ctx, cli_args.target_file)) { + return 1; + } + + break; + } + + default: + break; + } + + onyx_context_free(ctx); + return 0; +} diff --git a/compiler/include/astnodes.h b/compiler/include/astnodes.h index 259540998..85287831e 100644 --- a/compiler/include/astnodes.h +++ b/compiler/include/astnodes.h @@ -3,7 +3,8 @@ #define VERSION_MAJOR 0 #define VERSION_MINOR 1 -#define VERSION_PATCH 9 +#define VERSION_PATCH 14 +#define VERSION_SUFFIX "-preview" #include "stb_ds.h" #include "lex.h" @@ -48,7 +49,7 @@ NODE(DirectiveRemove) \ NODE(DirectiveFirst) \ NODE(DirectiveExportName) \ - NODE(DirectiveThisPackage) \ + NODE(DirectiveWasmSection) \ \ NODE(Return) \ NODE(Jump) \ @@ -118,7 +119,13 @@ NODE(Package) \ NODE(Import) \ \ - NODE(ZeroValue) + NODE(ZeroValue) \ + \ + NODE(JsNode) \ + \ + NODE(CompilerExtension) \ + NODE(ProceduralMacro) \ + NODE(ProceduralExpansion) #define NODE(name) typedef struct Ast ## name Ast ## name; AST_NODES @@ -237,6 +244,7 @@ typedef enum AstKind { Ast_Kind_Directive_First, Ast_Kind_Directive_Export_Name, Ast_Kind_Directive_This_Package, + Ast_Kind_Directive_Wasm_Section, Ast_Kind_Call_Site, Ast_Kind_Code_Block, @@ -251,6 +259,12 @@ typedef enum AstKind { Ast_Kind_Zero_Value, + Ast_Kind_Js_Code, + + Ast_Kind_Compiler_Extension, + Ast_Kind_Procedural_Macro, + Ast_Kind_Procedural_Expansion, + Ast_Kind_Count } AstKind; @@ -263,13 +277,13 @@ typedef enum AstFlags { Ast_Flag_Private_Package = BH_BIT(3), Ast_Flag_Private_File = BH_BIT(4), + Ast_Flag_Block_Returns = BH_BIT(5), + // Expression flags Ast_Flag_Expr_Ignored = BH_BIT(6), Ast_Flag_Address_Taken = BH_BIT(7), // Type flags - Ast_Flag_Type_Is_Resolved = BH_BIT(8), - Ast_Flag_No_Clone = BH_BIT(9), Ast_Flag_Cannot_Take_Addr = BH_BIT(10), @@ -311,7 +325,7 @@ typedef enum AstFlags { Ast_Flag_Constraint_Is_Expression = BH_BIT(28), - Ast_Flag_Has_Been_Scheduled_For_Emit = BH_BIT(29) + 
Ast_Flag_Has_Been_Scheduled_For_Emit = BH_BIT(29), } AstFlags; typedef enum UnaryOp { @@ -321,6 +335,7 @@ typedef enum UnaryOp { Unary_Op_Cast, Unary_Op_Auto_Cast, Unary_Op_Try, + Unary_Op_Unwrap, Unary_Op_Count, } UnaryOp; @@ -366,13 +381,14 @@ typedef enum BinaryOp { Binary_Op_Pipe = 33, Binary_Op_Range = 34, - Binary_Op_Method_Call = 35, + Binary_Op_Range_Equal = 35, + Binary_Op_Method_Call = 36, - Binary_Op_Subscript = 36, - Binary_Op_Subscript_Equals = 37, - Binary_Op_Ptr_Subscript = 38, + Binary_Op_Subscript = 37, + Binary_Op_Subscript_Equals = 38, + Binary_Op_Ptr_Subscript = 39, - Binary_Op_Coalesce = 39, + Binary_Op_Coalesce = 40, Binary_Op_Count } BinaryOp; @@ -613,6 +629,11 @@ typedef struct ForeignReference { AstTyped *import_name; } ForeignReference; +typedef struct ValueWithOffset { + AstTyped *value; + u32 offset; +} ValueWithOffset; + // Base Nodes #define AstNode_base \ @@ -643,7 +664,7 @@ struct AstTyped { AstTyped_base; }; // Expression Nodes struct AstNamedValue { AstTyped_base; AstTyped* value; }; struct AstStrLit { AstTyped_base; u64 data_id; u64 length; b32 is_cstr: 1; }; -struct AstLocal { AstTyped_base; }; +struct AstLocal { AstTyped_base; b32 auto_dispose : 1; }; struct AstDereference { AstTyped_base; AstTyped *expr; }; struct AstSizeOf { AstTyped_base; AstType *so_ast_type; Type *so_type; u64 size; }; struct AstAlignOf { AstTyped_base; AstType *ao_ast_type; Type *ao_type; u64 alignment; }; @@ -742,6 +763,11 @@ struct AstStructLiteral { Arguments args; Type *generated_inferred_type; + + // Value used when you do .{ ..value, val = 123 } + AstTyped *extension_value; + + bh_arr(ValueWithOffset) values_to_initialize; }; struct AstArrayLiteral { AstTyped_base; @@ -757,7 +783,10 @@ struct AstRangeLiteral { // the first sizeof(AstBinaryOp) bytes of this structure must match that of // AstBinaryOp, which means I need this dummy field here. 
// - brendanfh 2020/12/23 - BinaryOp __unused_operation; + union { + BinaryOp __unused_operation; + b32 inclusive: 1; + }; AstTyped *low, *high; // Currently, there is no way to specify this in the grammar, but it is set @@ -770,6 +799,7 @@ struct AstCall { AstTyped_base; Arguments args; + i32 placeholder_argument_position; union { AstTyped *callee; @@ -795,6 +825,7 @@ struct AstDoBlock { AstTyped_base; AstBlock* block; + bh_arr(AstLocal *) named_return_locals; }; struct AstZeroValue { AstTyped_base; @@ -810,7 +841,7 @@ struct AstDirectiveSolidify { }; // Intruction Node -struct AstReturn { AstNode_base; AstTyped* expr; u32 count; }; // Note: This count is one less than it should be, because internal codegen with macros would have to know about this and that is error prone. +struct AstReturn { AstNode_base; AstTyped* expr; u32 count; b32 from_proc: 1; }; // Note: This count is one less than it should be, because internal codegen with macros would have to know about this and that is error prone. 
struct AstJump { AstNode_base; JumpType jump; u32 count; }; // Structure Nodes @@ -825,6 +856,8 @@ struct AstBlock { BlockRule rules; u32 statement_idx; + + OnyxToken *macro_generated_from; }; struct AstDefer { AstNode_base; AstNode *stmt; }; struct AstFor { @@ -1083,6 +1116,8 @@ struct AstUnionType { struct AstUnionVariant { AstTyped_base; bh_arr(AstTyped *) meta_tags; + + AstTyped *explicit_tag_value; }; struct AstPolyUnionType { AstType_base; @@ -1132,16 +1167,24 @@ struct AstDistinctType { }; // Top level nodes -struct AstBinding { AstTyped_base; AstNode* node; OnyxToken *documentation; }; struct AstAlias { AstTyped_base; AstTyped* alias; }; struct AstInclude { AstNode_base; AstTyped* name_node; char* name; b32 recursive: 1; }; +struct AstBinding { + AstTyped_base; + AstNode* node; + + // Used for the old '#doc' scheme + OnyxToken *documentation_token_old; + + // Used for the new `///` scheme + const char *documentation_string; +}; struct AstInjection { AstTyped_base; AstTyped* full_loc; - AstTyped* to_inject; AstTyped* dest; OnyxToken *symbol; - OnyxToken *documentation; + AstBinding *binding; }; struct AstMemRes { AstTyped_base; @@ -1385,10 +1428,12 @@ struct AstFunction { bh_arr(AstParam) params; AstType* return_type; + bh_arr(AstLocal *) named_return_locals; AstBlock *body; char* name; + char* assembly_name; // NOTE: This is NULL, unless this function was generated from a polymorphic // procedure call. Then it is set to the token of the call node. @@ -1440,6 +1485,9 @@ struct AstFunction { b32 is_foreign : 1; b32 is_foreign_dyncall : 1; b32 is_intrinsic : 1; + + b32 named_return_locals_added : 1; + b32 ready_for_body_to_be_checked : 1; }; struct AstCaptureBlock { @@ -1472,7 +1520,6 @@ struct AstPolyQuery { AstFunction *function_header; b32 error_on_fail : 1; // Whether or not to report errors on failing to match. 
- b32 successful_symres : 1; // If something successful happened in symbol resolution }; @@ -1535,6 +1582,18 @@ struct AstDirectiveExportName { b32 created_export_entity : 1; }; +struct AstDirectiveWasmSection { + AstNode_base; + AstTyped *section_name; + AstTyped *section_contents; + + char *name; + char *contents; + u32 length; + + b32 from_file : 1; +}; + struct AstCallSite { AstTyped_base; @@ -1599,8 +1658,49 @@ struct AstForeignBlock { b32 uses_dyncall : 1; }; +struct AstJsNode { + AstNode_base; + + u32 order; + AstTyped *order_expr; + + AstTyped *code; + AstTyped *filepath; +}; + + +struct AstCompilerExtension { + AstNode_base; + + OnyxToken *name; + bh_arr(AstProceduralMacro *) proc_macros; + + i32 extension_id; +}; + +struct AstProceduralMacro { + AstNode_base; + + // name is stored in the `token` + + AstCompilerExtension *extension; +}; + +struct AstProceduralExpansion { + AstTyped_base; + + AstTyped *proc_macro; + OnyxToken *expansion_body; + + u32 expansion_id; +}; + +// Need to forward declare this for later on. +typedef struct Context Context; + + typedef struct EntityJobData { - enum TypeMatch (*func)(void *job_data); + enum TypeMatch (*func)(Context *context, void *job_data); void *job_data; } EntityJobData; @@ -1635,6 +1735,8 @@ typedef enum EntityType { Entity_Type_Static_If, Entity_Type_String_Literal, Entity_Type_File_Contents, + Entity_Type_Compiler_Extension, + Entity_Type_Procedural_Expansion, Entity_Type_Enum, Entity_Type_Enum_Value, Entity_Type_Type_Alias, @@ -1644,8 +1746,8 @@ typedef enum EntityType { Entity_Type_Constraint_Check, Entity_Type_Polymorphic_Proc, Entity_Type_Polymorph_Query, - Entity_Type_Macro, Entity_Type_Foreign_Block, + Entity_Type_Macro, Entity_Type_Foreign_Function_Header, Entity_Type_Temp_Function_Header, // Same as a Function_Header, except it disappears after it checks completely. 
Entity_Type_Function_Header, @@ -1658,6 +1760,7 @@ typedef enum EntityType { Entity_Type_Global, Entity_Type_Overloaded_Function, Entity_Type_Function, + Entity_Type_JS, Entity_Type_Count, } EntityType; @@ -1706,11 +1809,16 @@ typedef struct Entity { AstConstraint *constraint; AstDirectiveLibrary *library; EntityJobData *job_data; + AstJsNode *js; + AstCompilerExtension *compiler_extension; + AstProceduralExpansion *proc_expansion; }; } Entity; typedef struct EntityHeap { bh_arena entity_arena; + bh_allocator allocator; + bh_arr(Entity *) entities; bh_arr(Entity *) quick_unsorted_entities; i32 next_id; @@ -1721,7 +1829,7 @@ typedef struct EntityHeap { i32 all_count[Entity_State_Count][Entity_Type_Count]; } EntityHeap; -void entity_heap_init(EntityHeap* entities); +void entity_heap_init(bh_allocator a, EntityHeap* entities); void entity_heap_insert_existing(EntityHeap* entities, Entity* e); Entity* entity_heap_insert(EntityHeap* entities, Entity e); Entity* entity_heap_top(EntityHeap* entities); @@ -1729,14 +1837,13 @@ void entity_heap_change_top(EntityHeap* entities, Entity* new_top); void entity_heap_remove_top(EntityHeap* entities); void entity_change_type(EntityHeap* entities, Entity *ent, EntityType new_type); void entity_change_state(EntityHeap* entities, Entity *ent, EntityState new_state); -void entity_heap_add_job(EntityHeap *entities, enum TypeMatch (*func)(void *), void *job_data); +void entity_heap_add_job(EntityHeap *entities, enum TypeMatch (*func)(Context *, void *), void *job_data); // If target_arr is null, the entities will be placed directly in the heap. 
-void add_entities_for_node(bh_arr(Entity *)* target_arr, AstNode* node, Scope* scope, Package* package); +void add_entities_for_node(EntityHeap *entities, bh_arr(Entity *)* target_arr, AstNode* node, Scope* scope, Package* package); -void symres_entity(Entity* ent); -void check_entity(Entity* ent); -void emit_entity(Entity* ent); +void check_entity(Context *context, Entity* ent); +void emit_entity(Context *context, Entity* ent); struct Package { char *name; @@ -1757,7 +1864,9 @@ struct Package { bh_arr(Entity *) use_package_entities; // NOTE: This tracks all #package_doc statements used for this package. - bh_arr(OnyxToken *) doc_strings; + bh_arr(OnyxToken *) doc_string_tokens; + + bh_arr(const char *) doc_strings; // NOTE: These are entities that are stored in packages marked with `#allow_stale_code`. // These entities are flushed to the entity heap when the package has been explicit used @@ -1773,9 +1882,11 @@ enum CompileAction { ONYX_COMPILE_ACTION_RUN, ONYX_COMPILE_ACTION_RUN_WASM, ONYX_COMPILE_ACTION_WATCH, + ONYX_COMPILE_ACTION_WATCH_RUN, ONYX_COMPILE_ACTION_DOCUMENT, ONYX_COMPILE_ACTION_PRINT_HELP, - ONYX_COMPILE_ACTION_PRINT_VERSION + ONYX_COMPILE_ACTION_PRINT_VERSION, + ONYX_COMPILE_ACTION_SELF_UPGRADE, }; @@ -1801,6 +1912,11 @@ typedef struct OnyxDocInfo { u32 next_file_id; } OnyxDocInfo; +typedef enum CheckerMode { + CM_Dont_Resolve_Symbols = BH_BIT(1), + CM_Dont_Check_Case_Bodies = BH_BIT(2), + CM_Allow_Init_Expressions = BH_BIT(3), +} CheckerMode; typedef struct CheckerData { b32 expression_types_must_be_known; @@ -1811,8 +1927,37 @@ typedef struct CheckerData { AstCall __op_maybe_overloaded; Entity *current_entity; bh_arr(Type **) expected_return_type_stack; + bh_arr(bh_arr(AstLocal *)) named_return_values_stack; + + u32 current_checking_level; + CheckerMode mode; + + Scope *current_scope; + bh_arr(Scope *) scope_stack; + + b32 resolved_a_symbol; } CheckerData; +typedef struct ClonerData { + u32 clone_depth; + b32 dont_copy_structs; + + 
bh_arr(AstNode *) captured_entities; +} ClonerData; + +typedef struct PolymorphData { + // This flag is used by some of the procedures that try working with polymorphic things, + // but need to wait until more information is known. Instead of passing a out parameter + // into each of these procedures, a single global variable is used instead. If the type + // checker ever gets multi-threaded, this would have to become a threadlocal variable. + b32 flag_to_yield; + + // This flag is used in the very special case that you are passing a polymorphic procedure + // to a polymorphic procedure, and you have enough information to instantiate said procedure + // in order to resolve the type of one of the return values. + b32 doing_nested_polymorph_lookup; +} PolymorphData; + typedef struct ContextCaches { bh_imap implicit_cast_to_bool_cache; } ContextCaches; @@ -1823,30 +1968,62 @@ typedef struct DefinedVariable { } DefinedVariable; +typedef enum ProceduralMacroExpansionKind { + PMEK_Expression, + PMEK_Statement, + PMEK_Top_Level +} ProceduralMacroExpansionKind; + +typedef enum CompilerExtensionState { + COMP_EXT_STATE_SPAWNING, + COMP_EXT_STATE_INITIATING, + COMP_EXT_STATE_READY, + COMP_EXT_STATE_EXPANDING, + COMP_EXT_STATE_HANDLING_HOOK, +} CompilerExtensionState; + +typedef struct CompilerExtension { + u32 id; + + u64 pid; + u64 send_file; + u64 recv_file; + + char *name; + + i32 current_expansion_id; + CompilerExtensionState state; + + Entity *entity; + + bh_arena arena; + + b32 alive : 1; + + b32 supports_stalled_hook : 1; +} CompilerExtension; + typedef struct CompileOptions CompileOptions; struct CompileOptions { bh_allocator allocator; - CompileAction action; - u32 verbose_output : 2; - b32 fun_output : 1; - b32 print_function_mappings : 1; b32 print_static_if_results : 1; - b32 no_colors : 1; b32 no_file_contents : 1; + b32 no_compiler_extensions : 1; b32 use_post_mvp_features : 1; b32 use_multi_threading : 1; b32 generate_foreign_info : 1; b32 generate_type_info : 
1; b32 generate_method_info : 1; + b32 generate_name_section : 1; + b32 generate_odoc : 1; b32 no_core : 1; b32 no_stale_code : 1; b32 show_all_errors : 1; b32 enable_optional_semicolons : 1; - b32 generate_tag_file : 1; b32 generate_symbol_info_file : 1; b32 generate_lsp_info_file : 1; @@ -1854,25 +2031,148 @@ struct CompileOptions { Runtime runtime; - bh_arr(const char *) included_folders; - bh_arr(const char *) files; - const char* target_file; - const char* documentation_file; + bh_arr(bh_mapped_folder) mapped_folders; const char* symbol_info_file; - const char* help_subcommand; - bh_arr(DefinedVariable) defined_variables; - - char* error_format; b32 debug_session; b32 debug_info_enabled; b32 stack_trace_enabled; +}; - i32 passthrough_argument_count; - char** passthrough_argument_data; +typedef struct CompilerBasicTypes CompilerBasicTypes; +struct CompilerBasicTypes { + AstBasicType type_void; + AstBasicType type_bool; + AstBasicType type_i8; + AstBasicType type_u8; + AstBasicType type_i16; + AstBasicType type_u16; + AstBasicType type_i32; + AstBasicType type_u32; + AstBasicType type_i64; + AstBasicType type_u64; + AstBasicType type_f32; + AstBasicType type_f64; + AstBasicType type_rawptr; + AstBasicType type_type_expr; // :TypeExprHack + + AstBasicType type_int_unsized; + AstBasicType type_float_unsized; + + AstBasicType type_i8x16; + AstBasicType type_i16x8; + AstBasicType type_i32x4; + AstBasicType type_i64x2; + AstBasicType type_f32x4; + AstBasicType type_f64x2; + AstBasicType type_v128; + + AstBasicType type_auto_return; +}; + +typedef struct CompilerBuiltins CompilerBuiltins; +struct CompilerBuiltins { + AstGlobal heap_start; + AstGlobal stack_top; + AstGlobal tls_base; + AstGlobal tls_size; + AstGlobal closure_base; + AstGlobal stack_trace; + AstType *string_type; + AstType *cstring_type; + AstType *range_type; + Type *range_type_type; + AstType *range64_type; + Type *range64_type_type; + AstType *vararg_type; + Type *vararg_type_type; + AstTyped 
*context_variable; + AstType *allocator_type; + AstType *iterator_type; + AstType *optional_type; + AstType *callsite_type; + AstType *any_type; + AstType *code_type; + AstType *link_options_type; + AstType *package_id_type; + AstType *stack_trace_type; + AstType *slice_type; + AstType *array_type; + AstTyped *type_table_node; + AstTyped *foreign_blocks_node; + AstType *foreign_block_type; + AstTyped *tagged_procedures_node; + AstTyped *tagged_globals_node; + AstFunction *initialize_data_segments; + AstFunction *run_init_procedures; + AstFunction *closure_block_allocate; + bh_arr(AstFunction *) init_procedures; + AstOverloadedFunction *implicit_bool_cast; + AstOverloadedFunction *dispose_used_local; +}; + +typedef struct TypeStore TypeStore; +struct TypeStore { + bh_imap type_map; + + bh_imap pointer_map; + bh_imap multi_pointer_map; + bh_imap array_map; + bh_imap slice_map; + bh_imap dynarr_map; + bh_imap vararg_map; + Table(u64) func_map; + + Type* basic[Basic_Kind_Count]; + Type* auto_return; +}; + +typedef struct CompilerStats CompilerStats; +struct CompilerStats { + u64 lexer_lines_processed; + u64 lexer_tokens_processed; + + u64 microseconds_per_state[Entity_State_Count]; + u64 microseconds_per_type[Entity_Type_Count]; }; -typedef struct Context Context; +typedef struct SpecialGlobalEntities SpecialGlobalEntities; +struct SpecialGlobalEntities { + u32 remaining; + Entity *runtime_info_types_entity; + Entity *runtime_info_foreign_entity; + Entity *runtime_info_proc_tags_entity; + Entity *runtime_info_global_tags_entity; + Entity *runtime_info_stack_trace_entity; +}; + +typedef struct CompilerEventField { + struct CompilerEventField *next; + char *field; + u32 type; + union { + char *s; + i32 i; + }; +} CompilerEventField; + +typedef struct CompilerEvent { + struct CompilerEvent *next; + + u32 type; + CompilerEventField *first_field; +} CompilerEvent; + +typedef struct EventSystem { + bh_arena event_arena; + bh_allocator event_alloc; + + i32 event_count; + + 
CompilerEvent *first; + CompilerEvent *last; +} EventSystem; + struct Context { Table(Package *) packages; EntityHeap entities; @@ -1884,20 +2184,53 @@ struct Context { bh_arena ast_arena; bh_allocator token_alloc, ast_alloc; + bh_scratch scratch; + bh_allocator scratch_alloc; + + bh_managed_heap heap; + bh_allocator gp_alloc; // General purpose allocator + bh_arr(bh_file_contents) loaded_files; + bh_arr(DefinedVariable) defined_variables; - // NOTE: This is defined in onyxwasm.h + // NOTE: This is defined in wasm_emit.h struct OnyxWasmModule* wasm_module; - - // NOTE: All definitions (bindings, injections, aliases) are - // present in this list when generating CTags. - bh_arr(AstNode *) tag_locations; + bh_buffer generated_wasm_buffer; + bh_buffer generated_js_buffer; + bh_buffer generated_odoc_buffer; + bh_buffer generated_osym_buffer; struct SymbolInfoTable *symbol_info; struct OnyxDocInfo *doc_info; - CheckerData checker; + bh_arr(CompilerExtension) extensions; + u32 next_expansion_id; + + CompilerBuiltins builtins; + CompilerBasicTypes basic_types; + TypeStore types; + + CheckerData checker; + ClonerData cloner; + PolymorphData polymorph; ContextCaches caches; + + + // TODO: Move these + bh_arr(OverloadOption) operator_overloads[Binary_Op_Count]; + bh_arr(OverloadOption) unary_operator_overloads[Unary_Op_Count]; + + // The name is pretty self-descriptive, but this is a node that is returned from things + // like polymorphic_proc_lookup when it is determined that everything works so far, but + // the caller must yield in order to finish checking this polymorphic procedure. + AstTyped node_that_signals_a_yield; + AstTyped node_that_signals_failure; + + // Currently, this only needs to exist so all the scope's symbol array can be freed later. 
+ bh_arr(Scope *) scopes; + + EventSystem events; + OnyxErrors errors; b32 errors_enabled; @@ -1906,90 +2239,25 @@ struct Context { u32 next_type_id; u32 next_entity_id; - u64 lexer_lines_processed; - u64 lexer_tokens_processed; + CompilerStats stats; - u64 microseconds_per_state[Entity_State_Count]; - u64 microseconds_per_type[Entity_Type_Count]; + // HACK + SpecialGlobalEntities special_global_entities; - u32 cycle_almost_detected : 2; + Entity* watermarked_node; + u32 highest_watermark; + + u32 cycle_almost_detected : 3; b32 cycle_detected : 1; b32 builtins_initialized : 1; + b32 wasm_module_linked : 1; }; -extern Context context; - -// NOTE: Basic internal types constructed in the parser -extern AstBasicType basic_type_void; -extern AstBasicType basic_type_bool; -extern AstBasicType basic_type_i8; -extern AstBasicType basic_type_u8; -extern AstBasicType basic_type_i16; -extern AstBasicType basic_type_u16; -extern AstBasicType basic_type_i32; -extern AstBasicType basic_type_u32; -extern AstBasicType basic_type_i64; -extern AstBasicType basic_type_u64; -extern AstBasicType basic_type_f32; -extern AstBasicType basic_type_f64; -extern AstBasicType basic_type_rawptr; -extern AstBasicType basic_type_type_expr; // :TypeExprHack - -extern AstBasicType basic_type_int_unsized; -extern AstBasicType basic_type_float_unsized; - -extern AstBasicType basic_type_i8x16; -extern AstBasicType basic_type_i16x8; -extern AstBasicType basic_type_i32x4; -extern AstBasicType basic_type_i64x2; -extern AstBasicType basic_type_f32x4; -extern AstBasicType basic_type_f64x2; -extern AstBasicType basic_type_v128; - -// HACK -// :AutoReturnType -extern Type type_auto_return; -extern AstBasicType basic_type_auto_return; - -extern AstGlobal builtin_heap_start; -extern AstGlobal builtin_stack_top; -extern AstGlobal builtin_tls_base; -extern AstGlobal builtin_tls_size; -extern AstGlobal builtin_closure_base; -extern AstGlobal builtin_stack_trace; -extern AstType *builtin_string_type; -extern 
AstType *builtin_cstring_type; -extern AstType *builtin_range_type; -extern Type *builtin_range_type_type; -extern AstType *builtin_vararg_type; -extern Type *builtin_vararg_type_type; -extern AstTyped *builtin_context_variable; -extern AstType *builtin_allocator_type; -extern AstType *builtin_iterator_type; -extern AstType *builtin_optional_type; -extern AstType *builtin_callsite_type; -extern AstType *builtin_any_type; -extern AstType *builtin_code_type; -extern AstType *builtin_link_options_type; -extern AstType *builtin_package_id_type; -extern AstType *builtin_stack_trace_type; -extern AstTyped *type_table_node; -extern AstTyped *foreign_blocks_node; -extern AstType *foreign_block_type; -extern AstTyped *tagged_procedures_node; -extern AstTyped *tagged_globals_node; -extern AstFunction *builtin_initialize_data_segments; -extern AstFunction *builtin_run_init_procedures; -extern AstFunction *builtin_closure_block_allocate; -extern bh_arr(AstFunction *) init_procedures; -extern AstOverloadedFunction *builtin_implicit_bool_cast; - - typedef struct BuiltinSymbol { char* package; char* sym; - AstNode* node; + u32 offset; // Offset into the context structure where the builtin node lives } BuiltinSymbol; extern const BuiltinSymbol builtin_symbols[]; @@ -1999,27 +2267,23 @@ typedef struct IntrinsicMap { OnyxIntrinsic intrinsic; } IntrinsicMap; -typedef Table(OnyxIntrinsic) IntrinsicTable; -extern IntrinsicTable intrinsic_table; +extern const IntrinsicMap builtin_intrinsics[]; -extern bh_arr(OverloadOption) operator_overloads[Binary_Op_Count]; -extern bh_arr(OverloadOption) unary_operator_overloads[Unary_Op_Count]; - -void prepare_builtins(); -void initialize_builtins(bh_allocator a); -void initalize_special_globals(); -void introduce_build_options(bh_allocator a); +void prepare_builtins(Context *context); +void initialize_builtins(Context *context); +void initalize_special_globals(Context *context); +void introduce_build_options(Context *context); // NOTE: Useful not 
inlined functions -AstTyped* ast_reduce(bh_allocator a, AstTyped* node); -AstNode* ast_clone(bh_allocator a, void* n); -AstNode* ast_clone_with_captured_entities(bh_allocator a, void* n, bh_arr(AstNode *)* ents); -AstFunction* clone_function_header(bh_allocator a, AstFunction* func); -void clone_function_body(bh_allocator a, AstFunction* dest, AstFunction* source); +AstTyped* ast_reduce(Context *context, AstTyped* node); +AstNode* ast_clone(Context *context, void* n); +AstNode* ast_clone_with_captured_entities(Context *context, void* n, bh_arr(AstNode *)* ents); +AstFunction* clone_function_header(Context *context, AstFunction* func); +void clone_function_body(Context *context, AstFunction* dest, AstFunction* source); -void promote_numlit_to_larger(AstNumLit* num); -b32 convert_numlit_to_type(AstNumLit* num, Type* type); +void promote_numlit_to_larger(Context *context, AstNumLit* num); +b32 convert_numlit_to_type(Context *context, AstNumLit* num, Type* type, b32 permanent); typedef enum TypeMatch { TYPE_MATCH_SUCCESS, @@ -2028,71 +2292,74 @@ typedef enum TypeMatch { TYPE_MATCH_SPECIAL, // Only used for nest polymorph function lookups } TypeMatch; -#define unify_node_and_type(node, type) (unify_node_and_type_((node), (type), 1)) -TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent); +#define unify_node_and_type(ctx, node, type) (unify_node_and_type_((ctx), (node), (type), 1)) +TypeMatch unify_node_and_type_(Context *context, AstTyped** pnode, Type* type, b32 permanent); // resolve_expression_type is a permanent action that modifies // the node in whatever is necessary to cement a type into it. -Type* resolve_expression_type(AstTyped* node); +Type* resolve_expression_type(Context *context, AstTyped* node); // query_expression_type does not modify the node at all, but // does its best to deduce the type of the node without context. 
-Type* query_expression_type(AstTyped *node); +Type* query_expression_type(Context *context, AstTyped *node); -i64 get_expression_integer_value(AstTyped* node, b32 *out_is_valid); -char *get_expression_string_value(AstTyped* node, b32 *out_is_valid); +i64 get_expression_integer_value(Context *context, AstTyped* node, b32 *out_is_valid); +char *get_expression_string_value(Context *context, AstTyped* node, b32 *out_is_valid); -b32 cast_is_legal(Type* from_, Type* to_, char** err_msg); -char* get_function_name(AstFunction* func); +b32 cast_is_legal(Context *context, Type* from_, Type* to_, char** err_msg); +char* get_function_name(Context *context, AstFunction* func); +char* get_function_assembly_name(Context *context, AstFunction* func); +char* generate_name_within_scope(Context *context, Scope *scope, OnyxToken* symbol); -TypeMatch implicit_cast_to_bool(AstTyped **pnode); +TypeMatch implicit_cast_to_bool(Context *context, AstTyped **pnode); AstNode* strip_aliases(AstNode* node); -AstNumLit* make_bool_literal(bh_allocator, b32 b); -AstNumLit* make_int_literal(bh_allocator a, i64 value); -AstNumLit* make_float_literal(bh_allocator a, f64 value); -AstRangeLiteral* make_range_literal(bh_allocator a, AstTyped* low, AstTyped* high); -AstStrLit* make_string_literal(bh_allocator a, OnyxToken *token); -AstBinaryOp* make_binary_op(bh_allocator a, BinaryOp operation, AstTyped* left, AstTyped* right); -AstArgument* make_argument(bh_allocator a, AstTyped* value); -AstFieldAccess* make_field_access(bh_allocator a, AstTyped* node, char* field); -AstAddressOf* make_address_of(bh_allocator a, AstTyped* node); -AstLocal* make_local(bh_allocator a, OnyxToken* token, AstType* type_node); -AstLocal* make_local_with_type(bh_allocator a, OnyxToken* token, Type* type); -AstNode* make_symbol(bh_allocator a, OnyxToken* sym); -AstUnaryOp* make_cast(bh_allocator a, AstTyped* expr, Type* to); -AstZeroValue* make_zero_value(bh_allocator a, OnyxToken *token, Type* type); -AstStructLiteral* 
make_optional_literal_some(bh_allocator a, AstTyped *expr, Type* opt_type); -AstStructLiteral* make_union_variant_of_void(bh_allocator a, Type* union_type, OnyxToken* token, UnionVariant* variant); - -void arguments_initialize(Arguments* args); -b32 fill_in_arguments(Arguments* args, AstNode* provider, char** err_msg, b32 insert_zero_values); -void arguments_ensure_length(Arguments* args, u32 count); -void arguments_copy(Arguments* dest, Arguments* src); -void arguments_clone(Arguments* dest, Arguments* src); -void arguments_deep_clone(bh_allocator a, Arguments* dest, Arguments* src); +AstNumLit* make_bool_literal(Context *context, b32 b); +AstNumLit* make_int_literal(Context *context, i64 value); +AstNumLit* make_float_literal(Context *context, f64 value); +AstRangeLiteral* make_range_literal(Context *context, AstTyped* low, AstTyped* high); +AstStrLit* make_string_literal(Context *context, OnyxToken *token); +AstBinaryOp* make_binary_op(Context *context, BinaryOp operation, AstTyped* left, AstTyped* right); +AstArgument* make_argument(Context *context, AstTyped* value); +AstFieldAccess* make_field_access(Context *context, AstTyped* node, char* field); +AstAddressOf* make_address_of(Context *context, AstTyped* node); +AstLocal* make_local(Context *context, OnyxToken* token, AstType* type_node); +AstLocal* make_local_with_type(Context *context, OnyxToken* token, Type* type); +AstNode* make_symbol(Context *context, OnyxToken* sym); +AstUnaryOp* make_cast(Context *context, AstTyped* expr, Type* to); +AstZeroValue* make_zero_value(Context *context, OnyxToken *token, Type* type); +AstStructLiteral* make_optional_literal_some(Context *context, AstTyped *expr, Type* opt_type); +AstStructLiteral* make_union_variant_of_void(Context *context, Type* union_type, OnyxToken* token, UnionVariant* variant); + +void arguments_initialize(Context *context, Arguments* args); +b32 fill_in_arguments(Context *context, Arguments* args, AstNode* provider, char** err_msg, b32 
insert_zero_values); +void arguments_ensure_length(Context *context, Arguments* args, u32 count); +void arguments_copy(Context *context, Arguments* dest, Arguments* src); +void arguments_clone(Context *context, Arguments* dest, Arguments* src); +void arguments_deep_clone(Context *context, Arguments* dest, Arguments* src); void arguments_remove_baked(Arguments* args); void arguments_clear_baked_flags(Arguments* args); -TypeMatch check_arguments_against_type(Arguments* args, TypeFunction* func_type, VarArgKind* va_kind, +TypeMatch check_arguments_against_type(Context *context, Arguments* args, TypeFunction* func_type, VarArgKind* va_kind, OnyxToken* location, char* func_name, struct OnyxError* error); -i32 get_argument_buffer_size(TypeFunction* type, Arguments* args); +i32 get_argument_buffer_size(Context *, TypeFunction* type, Arguments* args); // GROSS: Using void* to avoid having to cast everything. -const char* node_get_type_name(void* node); +const char* node_get_type_name(Context *context, void* node); -b32 static_if_resolution(AstIf* static_if); +b32 static_if_resolution(Context *context, AstIf* static_if); -void insert_poly_sln_into_scope(Scope* scope, AstPolySolution *sln); -TypeMatch find_polymorphic_sln(AstPolySolution *out, AstPolyParam *param, AstFunction *func, PolyProcLookupMethod pp_lookup, ptr actual, OnyxError* err_msg); -AstFunction* polymorphic_proc_lookup(AstFunction* pp, PolyProcLookupMethod pp_lookup, ptr actual, OnyxToken* tkn); -AstFunction* polymorphic_proc_solidify(AstFunction* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn); -AstNode* polymorphic_proc_try_solidify(AstFunction* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn); -AstFunction* polymorphic_proc_build_only_header(AstFunction* pp, PolyProcLookupMethod pp_lookup, ptr actual); -AstFunction* polymorphic_proc_build_only_header_with_slns(AstFunction* pp, bh_arr(AstPolySolution) slns, b32 error_if_failed); -b32 potentially_convert_function_to_polyproc(AstFunction *func); 
-AstPolyCallType* convert_call_to_polycall(AstCall* call); +void insert_poly_sln_into_scope(Context *context, Scope* scope, AstPolySolution *sln); +TypeMatch find_polymorphic_sln(Context *context, AstPolySolution *out, AstPolyParam *param, AstFunction *func, PolyProcLookupMethod pp_lookup, ptr actual, OnyxError* err_msg); +AstFunction* polymorphic_proc_lookup(Context *context, AstFunction* pp, PolyProcLookupMethod pp_lookup, ptr actual, OnyxToken* tkn); +AstFunction* polymorphic_proc_solidify(Context *context, AstFunction* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn); +AstNode* polymorphic_proc_try_solidify(Context *context, AstFunction* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn); +AstFunction* polymorphic_proc_build_only_header(Context *context, AstFunction* pp, PolyProcLookupMethod pp_lookup, ptr actual); +AstFunction* polymorphic_proc_build_only_header_with_slns(Context *context, AstFunction* pp, bh_arr(AstPolySolution) slns, b32 error_if_failed); +b32 potentially_convert_function_to_polyproc(Context *context, AstFunction *func); +AstPolyCallType* convert_call_to_polycall(Context *context, AstCall* call); +void insert_auto_dispose_call(Context *context, AstLocal *local); typedef struct OverloadReturnTypeCheck { Type *expected_type; @@ -2101,25 +2368,47 @@ typedef struct OverloadReturnTypeCheck { } OverloadReturnTypeCheck; void add_overload_option(bh_arr(OverloadOption)* poverloads, u64 order, AstTyped* overload); -AstTyped* find_matching_overload_by_arguments(bh_arr(OverloadOption) overloads, Arguments* args); -AstTyped* find_matching_overload_by_type(bh_arr(OverloadOption) overloads, Type* type); -void report_unable_to_match_overload(AstCall* call, bh_arr(OverloadOption) overloads); -void report_incorrect_overload_expected_type(Type *given, Type *expected, OnyxToken *overload, OnyxToken *group); -void ensure_overload_returns_correct_type(AstTyped *overload, AstOverloadedFunction *group); +AstTyped* find_matching_overload_by_arguments(Context 
*context, bh_arr(OverloadOption) overloads, Arguments* args); +AstTyped* find_matching_overload_by_type(Context *context, bh_arr(OverloadOption) overloads, Type* type); +void report_unable_to_match_overload(Context *context, AstCall* call, bh_arr(OverloadOption) overloads); +void report_incorrect_overload_expected_type(Context *context, Type *given, Type *expected, OnyxToken *overload, OnyxToken *group); +void ensure_overload_returns_correct_type(Context *context, AstTyped *overload, AstOverloadedFunction *group); + +void expand_macro(Context *context, AstCall** pcall, AstFunction* template); +AstFunction* macro_resolve_header(Context *context, AstMacro* macro, Arguments* args, OnyxToken* callsite, b32 error_if_failed); + +Type* polymorphic_struct_lookup(Context *context, AstPolyStructType* ps_type, bh_arr(AstPolySolution) slns, OnyxFilePos pos, b32 error_if_failed); +Type* polymorphic_union_lookup(Context *context, AstPolyUnionType* pu_type, bh_arr(AstPolySolution) slns, OnyxFilePos pos, b32 error_if_failed); + +b32 resolve_intrinsic_interface_constraint(Context *context, AstConstraint *constraint); + +void track_declaration_for_symbol_info(Context *context, OnyxFilePos, AstNode *); +void track_documentation_for_symbol_info(Context *context, AstNode *, AstBinding *); +void track_resolution_for_symbol_info(Context *context, AstNode *original, AstNode *resolved); -void expand_macro(AstCall** pcall, AstFunction* template); -AstFunction* macro_resolve_header(AstMacro* macro, Arguments* args, OnyxToken* callsite, b32 error_if_failed); -Type* polymorphic_struct_lookup(AstPolyStructType* ps_type, bh_arr(AstPolySolution) slns, OnyxFilePos pos, b32 error_if_failed); -Type* polymorphic_union_lookup(AstPolyUnionType* pu_type, bh_arr(AstPolySolution) slns, OnyxFilePos pos, b32 error_if_failed); +// Compiler Extensions +TypeMatch compiler_extension_start(Context *context, const char *name, const char *containing_filename, Entity *ent, i32 *out_extension_id); +TypeMatch 
compiler_extension_expand_macro( + Context *context, + int extension_id, + ProceduralMacroExpansionKind kind, + const char *macro_name, + OnyxToken *body, + Entity *entity, + AstNode **out_node, + u32 *out_expansion_id, + b32 wait_for_response); +TypeMatch compiler_extension_hook_stalled(Context *context, int extension_id); -b32 resolve_intrinsic_interface_constraint(AstConstraint *constraint); -void track_declaration_for_tags(AstNode *); +// Compiler Events +void compiler_events_init(Context *context); +void compiler_events_clear(Context *context); +CompilerEvent *compiler_event_add(Context *context, u32 event_type); +void compiler_event_add_field_str(Context *context, CompilerEvent *event, char *field, char *value); +void compiler_event_add_field_int(Context *context, CompilerEvent *event, char *field, i32 value); -void track_declaration_for_symbol_info(OnyxFilePos, AstNode *); -void track_documentation_for_symbol_info(AstNode *, OnyxToken *); -void track_resolution_for_symbol_info(AstNode *original, AstNode *resolved); // NOTE: Useful inlined functions static inline b32 is_lval(AstNode* node) { @@ -2168,10 +2457,10 @@ static inline b32 node_is_addressable_literal(AstNode* node) { || (node->kind == Ast_Kind_Array_Literal); } -static inline Type* get_expression_type(AstTyped* expr) { +static inline Type* get_expression_type(Context *context, AstTyped* expr) { switch (expr->kind) { case Ast_Kind_Block: case Ast_Kind_If: case Ast_Kind_While: return NULL; - case Ast_Kind_Typeof: return &basic_types[Basic_Kind_Type_Index]; + case Ast_Kind_Typeof: return context->types.basic[Basic_Kind_Type_Index]; default: return expr->type; } } @@ -2220,13 +2509,13 @@ static inline void convert_polyproc_to_function(AstFunction *func) { func->tags = NULL; } -static inline void convert_function_to_polyproc(AstFunction *func) { +static inline void convert_function_to_polyproc(Context *context, AstFunction *func) { if (func->kind != Ast_Kind_Function) return; func->kind = 
Ast_Kind_Polymorphic_Proc; func->parent_scope_of_poly_proc = func->scope->parent; func->scope = NULL; - if (func->entity) entity_change_type(&context.entities, func->entity, Entity_Type_Polymorphic_Proc); + if (func->entity) entity_change_type(&context->entities, func->entity, Entity_Type_Polymorphic_Proc); } #endif // #ifndef ONYXASTNODES_H diff --git a/compiler/include/doc.h b/compiler/include/doc.h index 8ac81777c..f910313d8 100644 --- a/compiler/include/doc.h +++ b/compiler/include/doc.h @@ -7,12 +7,7 @@ // Onyx Documentation generation void onyx_docs_submit(OnyxDocInfo *docs, AstBinding *binding); -void onyx_docs_emit_odoc(const char *dest); - - -// Tag generation - -void onyx_docs_emit_tags(char *dest); +void onyx_docs_generate_odoc(Context *context, bh_buffer *out_buffer); @@ -24,7 +19,8 @@ struct SymbolInfo { u32 file_id; u32 line; u32 column; - OnyxToken *documentation; + const char *documentation; + u32 documentation_length; }; typedef struct SymbolResolution SymbolResolution; @@ -52,6 +48,6 @@ struct SymbolInfoTable { bh_imap node_to_id; }; -void onyx_docs_emit_symbol_info(const char *dest); +void onyx_docs_emit_symbol_info(Context *context, bh_buffer *out_buffer); #endif diff --git a/compiler/include/errors.h b/compiler/include/errors.h index 5a34083a1..e37a8b5c0 100644 --- a/compiler/include/errors.h +++ b/compiler/include/errors.h @@ -31,18 +31,20 @@ typedef struct OnyxErrors { bh_arr(OnyxError) errors; } OnyxErrors; -extern OnyxErrors msgs; - -void onyx_errors_init(bh_arr(bh_file_contents)* files); -void onyx_errors_enable(); -void onyx_errors_disable(); -b32 onyx_errors_are_enabled(); -void onyx_submit_error(OnyxError error); -void onyx_report_error(OnyxFilePos pos, OnyxErrorRank rank, char * format, ...); -void onyx_submit_warning(OnyxError error); -void onyx_report_warning(OnyxFilePos pos, char* format, ...); -void onyx_errors_print(); -b32 onyx_has_errors(); -void onyx_clear_errors(); +struct Context; + +void onyx_errors_init(struct Context 
*context, bh_arr(bh_file_contents)* files); +void onyx_errors_enable(struct Context *context); +void onyx_errors_disable(struct Context *context); +b32 onyx_errors_are_enabled(struct Context *context); +void onyx_submit_error(struct Context *context, OnyxError error); +void onyx_report_error(struct Context *context, OnyxFilePos pos, OnyxErrorRank rank, char * format, ...); +void onyx_report_warning(struct Context *context, OnyxFilePos pos, char* format, ...); +void onyx_errors_print(struct Context *context); +b32 onyx_has_errors(struct Context *context); +void onyx_clear_errors(struct Context *context); + +#define ONYX_ERROR(pos, rank, ...) (onyx_report_error(context, (pos), (rank), __VA_ARGS__)) +#define ONYX_WARNING(pos, ...) (onyx_report_warning(context, (pos), __VA_ARGS__)) #endif diff --git a/compiler/include/lex.h b/compiler/include/lex.h index 5e2981df1..885552286 100644 --- a/compiler/include/lex.h +++ b/compiler/include/lex.h @@ -1,12 +1,11 @@ #ifndef ONYXLEX_H #define ONYXLEX_H +#ifndef BH_INTERNAL_ALLOCATOR + #define BH_INTERNAL_ALLOCATOR (context->gp_alloc) +#endif #include "bh.h" -// NOTE: Used for global statistics -extern u64 lexer_lines_processed; -extern u64 lexer_tokens_processed; - typedef enum TokenType { Token_Type_Ascii_End = 256, Token_Type_Unknown = 256, @@ -73,6 +72,7 @@ typedef enum TokenType { Token_Type_Sar_Equal, Token_Type_Dot_Dot, + Token_Type_Dot_Dot_Equal, Token_Type_Tilde_Tilde, Token_Type_Question_Question, @@ -86,6 +86,10 @@ typedef enum TokenType { Token_Type_Inserted_Semicolon, + Token_Type_Doc_Comment, + + Token_Type_Proc_Macro_Body, + Token_Type_Count, } TokenType; @@ -106,6 +110,8 @@ typedef struct OnyxToken { } OnyxToken; typedef struct OnyxTokenizer { + struct Context *context; + char *start, *curr, *end; const char* filename; @@ -123,7 +129,7 @@ const char *token_type_name(TokenType tkn_type); const char* token_name(OnyxToken *tkn); void token_toggle_end(OnyxToken* tkn); OnyxToken* onyx_get_token(OnyxTokenizer* 
tokenizer); -OnyxTokenizer onyx_tokenizer_create(bh_allocator allocator, bh_file_contents *fc); +OnyxTokenizer onyx_tokenizer_create(struct Context *context, bh_file_contents *fc); void onyx_tokenizer_free(OnyxTokenizer* tokenizer); void onyx_lex_tokens(OnyxTokenizer* tokenizer); diff --git a/compiler/include/parser.h b/compiler/include/parser.h index 237f5f8fd..dbdb224c9 100644 --- a/compiler/include/parser.h +++ b/compiler/include/parser.h @@ -14,6 +14,7 @@ typedef struct PolymorphicContext { typedef struct OnyxParser { bh_allocator allocator; + Context *context; Package *package; Scope *file_scope; @@ -49,9 +50,12 @@ typedef struct OnyxParser { // Used by `#doc` directives to store their documentation // string. This is then used by binding nodes to capture - // documentation. + // documentation. DEPRECATED OnyxToken *last_documentation_token; + // Used by `///` doc comments + bh_arr(OnyxToken *) documentation_tokens; + u16 tag_depth : 16; b32 hit_unexpected_token : 1; @@ -64,8 +68,11 @@ typedef struct OnyxParser { const char* onyx_ast_node_kind_string(AstKind kind); void* onyx_ast_node_new(bh_allocator alloc, i32 size, AstKind kind); -OnyxParser onyx_parser_create(bh_allocator alloc, OnyxTokenizer *tokenizer); +OnyxParser onyx_parser_create(Context *context, OnyxTokenizer *tokenizer); void onyx_parser_free(OnyxParser* parser); void onyx_parse(OnyxParser *parser); +AstTyped *onyx_parse_expression(OnyxParser *parser, Scope *scope); +AstNode *onyx_parse_statement(OnyxParser *parser, Scope *scope); +void onyx_parse_top_level_statements(OnyxParser *parser, Scope *scope); #endif // #ifndef ONYXPARSER_H diff --git a/compiler/include/types.h b/compiler/include/types.h index a44620186..3b8f83585 100644 --- a/compiler/include/types.h +++ b/compiler/include/types.h @@ -138,10 +138,12 @@ typedef struct UnionVariant { struct AstType *constructed_from; \ bh_arr(struct AstTyped *) meta_tags; \ StructProcessingStatus status; \ + struct Scope* scope; \ }) \ 
TYPE_KIND(PolyStruct, struct { \ char* name; \ bh_arr(struct AstTyped *) meta_tags; \ + struct Scope* scope; \ }) \ TYPE_KIND(Compound, struct { \ u32 count; \ @@ -150,8 +152,11 @@ typedef struct UnionVariant { Type* types[]; \ }) \ TYPE_KIND(Array, struct { Type* elem; u32 size; u32 count; }) \ - TYPE_KIND(Slice, struct { Type *elem; }) \ - TYPE_KIND(DynArray, struct { Type *elem; }) \ + TYPE_KIND(Slice, struct { Type *elem; struct Scope *scope; }) \ + TYPE_KIND(DynArray, struct { \ + Type *elem; \ + struct Scope *scope; \ + }) \ TYPE_KIND(VarArgs, struct { Type *elem; }) \ TYPE_KIND(Enum, struct { \ char* name; \ @@ -161,6 +166,7 @@ typedef struct UnionVariant { TYPE_KIND(Distinct, struct { \ char* name; \ Type* base_type; \ + struct Scope* scope; \ }) \ TYPE_KIND(Union, struct { \ u32 size; \ @@ -173,10 +179,12 @@ typedef struct UnionVariant { struct AstType *constructed_from; \ bh_arr(struct AstTyped *) meta_tags; \ StructProcessingStatus status; \ + struct Scope* scope; \ }) \ TYPE_KIND(PolyUnion, struct { \ char* name; \ bh_arr(struct AstTyped *) meta_tags; \ + struct Scope* scope; \ }) \ @@ -217,50 +225,47 @@ struct Type { }; }; -extern bh_imap type_map; - -extern Type basic_types[]; - +struct Context; struct AstType; struct AstFunction; struct AstCompound; struct AstStructLiteral; -void types_init(); -void types_dump_type_info(); -Type* type_lookup_by_id(u32 id); +void types_init(struct Context *context); +void types_dump_type_info(struct Context *context); +Type* type_lookup_by_id(struct Context *context, u32 id); -b32 types_are_compatible(Type* t1, Type* t2); +b32 types_are_compatible(struct Context *context, Type* t1, Type* t2); u32 type_size_of(Type* type); u32 type_alignment_of(Type* type); -Type* type_build_from_ast(bh_allocator alloc, struct AstType* type_node); -Type* type_build_implicit_type_of_struct_literal(bh_allocator alloc, struct AstStructLiteral* lit, b32 is_query); +Type* type_build_from_ast(struct Context *context, struct AstType* 
type_node); +Type* type_build_implicit_type_of_struct_literal(struct Context *context, struct AstStructLiteral* lit, b32 is_query); -Type* type_build_function_type(bh_allocator alloc, struct AstFunction* func); -Type* type_build_compound_type(bh_allocator alloc, struct AstCompound* compound); +Type* type_build_function_type(struct Context *context, struct AstFunction* func); +Type* type_build_compound_type(struct Context *context, struct AstCompound* compound); -Type* type_make_pointer(bh_allocator alloc, Type* to); -Type* type_make_multi_pointer(bh_allocator alloc, Type* to); -Type* type_make_array(bh_allocator alloc, Type* to, u32 count); -Type* type_make_slice(bh_allocator alloc, Type* of); -Type* type_make_dynarray(bh_allocator alloc, Type* of); -Type* type_make_varargs(bh_allocator alloc, Type* of); -Type* type_make_optional(bh_allocator alloc, Type* of); +Type* type_make_pointer(struct Context *context, Type* to); +Type* type_make_multi_pointer(struct Context *context, Type* to); +Type* type_make_array(struct Context *context, Type* to, u32 count); +Type* type_make_slice(struct Context *context, Type* of); +Type* type_make_dynarray(struct Context *context, Type* of); +Type* type_make_varargs(struct Context *context, Type* of); +Type* type_make_optional(struct Context *context, Type* of); -void build_linear_types_with_offset(Type* type, bh_arr(TypeWithOffset)* pdest, u32 offset); -b32 type_struct_member_apply_use(bh_allocator alloc, Type *s_type, StructMember *smem); +void build_linear_types_with_offset(struct Context *context, Type* type, bh_arr(TypeWithOffset)* pdest, u32 offset); +b32 type_struct_member_apply_use(struct Context *context, Type *s_type, StructMember *smem); -const char* type_get_unique_name(Type* type); -const char* type_get_name(Type* type); +const char* type_get_unique_name(struct Context *context, Type* type); +const char* type_get_name(struct Context *context, Type* type); u32 type_get_alignment_log2(Type* type); Type* 
type_get_contained_type(Type* type); b32 type_is_ready_for_lookup(Type* type); -b32 type_lookup_member(Type* type, char* member, StructMember* smem); -b32 type_lookup_member_by_idx(Type* type, i32 idx, StructMember* smem); +b32 type_lookup_member(struct Context *context, Type* type, char* member, StructMember* smem); +b32 type_lookup_member_by_idx(struct Context *context, Type* type, i32 idx, StructMember* smem); i32 type_linear_member_count(Type* type); -b32 type_linear_member_lookup(Type* type, i32 idx, TypeWithOffset* two); +b32 type_linear_member_lookup(struct Context *context, Type* type, i32 idx, TypeWithOffset* two); i32 type_get_idx_of_linear_member_with_offset(Type* type, u32 offset); b32 type_struct_is_simple(Type* type); diff --git a/compiler/include/utils.h b/compiler/include/utils.h index e6701e52f..819995993 100644 --- a/compiler/include/utils.h +++ b/compiler/include/utils.h @@ -2,34 +2,29 @@ #include "astnodes.h" -extern bh_scratch global_scratch; -extern bh_allocator global_scratch_allocator; - -extern bh_managed_heap global_heap; -extern bh_allocator global_heap_allocator; - const char* onyx_ast_node_kind_string(AstKind kind); -Package* package_lookup(char* package_name); -Package* package_lookup_or_create(char* package_name, Scope* parent_scope, bh_allocator alloc, OnyxFilePos pos); -void package_track_use_package(Package* package, Entity* entity); -void package_reinsert_use_packages(Package* package); -void package_mark_as_used(Package* package); - -Scope* scope_create(bh_allocator a, Scope* parent, OnyxFilePos created_at); -void scope_include(Scope* target, Scope* source, OnyxFilePos pos); -b32 symbol_introduce(Scope* scope, OnyxToken* tkn, AstNode* symbol); -b32 symbol_raw_introduce(Scope* scope, char* tkn, OnyxFilePos pos, AstNode* symbol); -void symbol_builtin_introduce(Scope* scope, char* sym, AstNode *node); -void symbol_subpackage_introduce(Package *parent, char* sym, AstPackage *node); -AstNode* symbol_raw_resolve_no_ascend(Scope* scope, 
char* sym); -AstNode* symbol_raw_resolve(Scope* start_scope, char* sym); -AstNode* symbol_resolve(Scope* start_scope, OnyxToken* tkn); -AstNode* try_symbol_raw_resolve_from_node(AstNode* node, char* symbol); -AstNode* try_symbol_resolve_from_node(AstNode* node, OnyxToken* token); -AstNode* try_symbol_raw_resolve_from_type(Type *type, char* symbol); -Scope *get_scope_from_node(AstNode *node); -Scope *get_scope_from_node_or_create(AstNode *node); +Package* package_lookup(Context *context, char* package_name); +Package* package_lookup_or_create(Context *context, char* package_name, Scope* parent_scope, OnyxFilePos pos); +void package_track_use_package(Context *context, Package* package, Entity* entity); +void package_reinsert_use_packages(Context *context, Package* package); +void package_mark_as_used(Context *context, Package* package); + +Scope* scope_create(Context *context, Scope* parent, OnyxFilePos created_at); +void scope_include(Context *context, Scope* target, Scope* source, OnyxFilePos pos); +b32 symbol_introduce(Context *context, Scope* scope, OnyxToken* tkn, AstNode* symbol); +b32 symbol_raw_introduce(Context *context, Scope* scope, char* tkn, OnyxFilePos pos, AstNode* symbol); +void symbol_builtin_introduce(Context *context, Scope* scope, char* sym, AstNode *node); +void symbol_subpackage_introduce(Context *context, Package *parent, char* sym, AstPackage *node); +AstNode* symbol_raw_resolve_no_ascend(Context *context, Scope* scope, char* sym); +AstNode* symbol_raw_resolve(Context *context, Scope* start_scope, char* sym); +AstNode* symbol_resolve(Context *context, Scope* start_scope, OnyxToken* tkn); +AstNode* try_symbol_raw_resolve_from_node(Context *context, AstNode* node, char* symbol); +AstNode* try_symbol_resolve_from_node(Context *context, AstNode* node, OnyxToken* token); +AstNode* try_symbol_raw_resolve_from_type(Context *context, Type *type, char* symbol); +AstNode* try_symbol_resolve_from_type(Context *context, Type *type, OnyxToken *token); 
+Scope *get_scope_from_node(Context *context, AstNode *node); +Scope *get_scope_from_node_or_create(Context *context, AstNode *node); void build_all_overload_options(bh_arr(OverloadOption) overloads, bh_imap* all_overloads); @@ -38,9 +33,6 @@ u32 char_to_base16_value(char x); // Returns the length after processing the string. i32 string_process_escape_seqs(char* dest, char* src, i32 len); -u32 levenshtein_distance(const char *str1, const char *str2); -char *find_closest_symbol_in_scope_and_parents(Scope *scope, char *sym); -char *find_closest_symbol_in_node(AstNode *node, char *sym); - -extern AstTyped node_that_signals_a_yield; -extern AstTyped node_that_signals_failure; +u32 levenshtein_distance(Context *context, const char *str1, const char *str2); +char *find_closest_symbol_in_scope_and_parents(Context *context, Scope *scope, char *sym); +char *find_closest_symbol_in_node(Context *context, AstNode *node, char *sym); diff --git a/compiler/include/wasm_emit.h b/compiler/include/wasm_emit.h index c947ba396..ffdb51702 100644 --- a/compiler/include/wasm_emit.h +++ b/compiler/include/wasm_emit.h @@ -544,6 +544,7 @@ typedef struct WasmFunc { LocalAllocator locals; bh_arr(WasmInstruction) code; OnyxToken *location; + char *name; } WasmFunc; typedef struct WasmGlobal { @@ -583,6 +584,12 @@ typedef struct WasmDatum { ptr data; } WasmDatum; +typedef struct WasmCustomSection { + char *name; + char *contents; + u32 len; +} WasmCustomSection; + typedef enum DatumPatchInfoKind { Datum_Patch_Instruction, Datum_Patch_Data, @@ -698,7 +705,13 @@ typedef struct ForRemoveInfo { i32 remove_func_type_idx; } ForRemoveInfo; +typedef struct JsPartial { + u32 order; + char *code; +} JsPartial; + typedef struct OnyxWasmModule { + Context *context; bh_allocator allocator; bh_arena *extended_instr_data; @@ -756,6 +769,10 @@ typedef struct OnyxWasmModule { u32 memory_min_size; u32 memory_max_size; + Table(WasmCustomSection) custom_sections; + + bh_arr(JsPartial) js_partials; + // NOTE: Set 
of things used when compiling; not part of the actual module u32 export_count; u32 next_type_idx; @@ -768,11 +785,18 @@ typedef struct OnyxWasmModule { i32 *tls_size_ptr; i32 *heap_start_ptr; u64 stack_base_idx; + u64 stack_restore_idx; + u64 stack_return_location_idx; u64 closure_base_idx; u64 stack_trace_idx; CallingConvention curr_cc; i32 null_proc_func_idx; + i32 global_type_table_data_id; + i32 type_info_size; + i32 *type_info_entry_count; + bh_arr(i32) types_enqueued_for_info; + b32 has_stack_locals : 1; b32 doing_linking : 1; @@ -803,17 +827,19 @@ typedef struct OnyxWasmLinkOptions { u32 memory_max_size; } OnyxWasmLinkOptions; -b32 onyx_wasm_build_link_options_from_node(OnyxWasmLinkOptions *opts, struct AstTyped *node); +b32 onyx_wasm_build_link_options_from_node(Context *context, OnyxWasmLinkOptions *opts, struct AstTyped *node); -OnyxWasmModule onyx_wasm_module_create(bh_allocator alloc); -void onyx_wasm_module_link(OnyxWasmModule *module, OnyxWasmLinkOptions *options); +void onyx_wasm_module_initialize(Context *context, OnyxWasmModule *module); +void onyx_wasm_module_link(Context *context, OnyxWasmModule *module, OnyxWasmLinkOptions *options); void onyx_wasm_module_free(OnyxWasmModule* module); void onyx_wasm_module_write_to_buffer(OnyxWasmModule* module, bh_buffer* buffer); void onyx_wasm_module_write_to_file(OnyxWasmModule* module, bh_file file); +void onyx_wasm_module_write_js_partials_to_buffer(OnyxWasmModule* module, bh_buffer* buffer); +void onyx_wasm_module_write_js_partials_to_file(OnyxWasmModule* module, bh_file file); #ifdef ONYX_RUNTIME_LIBRARY -void onyx_run_initialize(b32 debug_enabled); -b32 onyx_run_wasm(bh_buffer code_buffer, int argc, char *argv[]); +void onyx_run_initialize(b32 debug_enabled, const char *debug_socket); +b32 onyx_run_wasm_code(bh_buffer code_buffer, int argc, char *argv[]); #endif #ifdef ENABLE_DEBUG_INFO diff --git a/compiler/src/astnodes.c b/compiler/src/astnodes.c index 0a3381bdb..f79bf362e 100644 --- 
a/compiler/src/astnodes.c +++ b/compiler/src/astnodes.c @@ -104,6 +104,7 @@ static const char* ast_node_names[] = { "FIRST", "EXPORT NAME", "THIS PACKAGE", + "WASM SECTION", "CALL SITE", "CODE BLOCK", @@ -117,6 +118,12 @@ static const char* ast_node_names[] = { "FOREIGN BLOCK", "ZERO VALUE", + "JS CODE", + + "COMPILER EXTENSION", + "PROCEDURAL MACRO", + "PROCEDURAL EXPANSION", + "AST_NODE_KIND_COUNT", }; @@ -135,7 +142,7 @@ const char *binaryop_string[Binary_Op_Count] = { "&=", "|=", "^=", "<<=", ">>=", ">>>=", "NONE", - "|>", "..", "->", + "|>", "..", "..=", "->", "[]", "[]=", "^[]", @@ -165,6 +172,8 @@ const char* entity_type_strings[Entity_Type_Count] = { "Static If", "String Literal", "File Contents", + "CompilerExtension", + "Procedural Expansion", "Enum", "Enum Value", "Type Alias", @@ -188,21 +197,22 @@ const char* entity_type_strings[Entity_Type_Count] = { "Global", "Overloaded_Function", "Function", + "JS", }; -AstNumLit* ast_reduce_type_compare(bh_allocator a, AstBinaryOp* node) { - AstType* left = (AstType *) ast_reduce(a, node->left); - AstType* right = (AstType *) ast_reduce(a, node->right); +AstNumLit* ast_reduce_type_compare(Context *context, AstBinaryOp* node) { + AstType* left = (AstType *) ast_reduce(context, node->left); + AstType* right = (AstType *) ast_reduce(context, node->right); - Type* left_type = type_build_from_ast(context.ast_alloc, left); - Type* right_type = type_build_from_ast(context.ast_alloc, right); + Type* left_type = type_build_from_ast(context, left); + Type* right_type = type_build_from_ast(context, right); - AstNumLit* res = onyx_ast_node_new(a, sizeof(AstNumLit), Ast_Kind_NumLit); + AstNumLit* res = onyx_ast_node_new(context->ast_alloc, sizeof(AstNumLit), Ast_Kind_NumLit); res->token = node->token; res->flags |= node->flags; res->flags |= Ast_Flag_Comptime; - res->type_node = (AstType *) &basic_type_bool; - res->type = &basic_types[Basic_Kind_Bool]; + res->type_node = (AstType *) &context->basic_types.type_bool; + res->type 
= context->types.basic[Basic_Kind_Bool]; res->next = node->next; switch (node->operation) { @@ -244,23 +254,35 @@ AstNumLit* ast_reduce_type_compare(bh_allocator a, AstBinaryOp* node) { } \ break; -AstNumLit* ast_reduce_binop(bh_allocator a, AstBinaryOp* node) { - AstNumLit* left = (AstNumLit *) ast_reduce(a, node->left); - AstNumLit* right = (AstNumLit *) ast_reduce(a, node->right); +AstNumLit* ast_reduce_binop(Context *context, AstBinaryOp* node) { + AstNumLit* left = (AstNumLit *) ast_reduce(context, node->left); + AstNumLit* right = (AstNumLit *) ast_reduce(context, node->right); if (node_is_type((AstNode *) left) && node_is_type((AstNode *) right)) { if (node->operation == Binary_Op_Equal || node->operation == Binary_Op_Not_Equal) { - return (AstNumLit *) ast_reduce_type_compare(a, node); + return (AstNumLit *) ast_reduce_type_compare(context, node); } } + if (left->kind != Ast_Kind_NumLit) { + b32 valid = 0; + AstNumLit *tmp = make_int_literal(context, get_expression_integer_value(context, (AstTyped *) left, &valid)); + if (valid) left = tmp; + } + + if (right->kind != Ast_Kind_NumLit) { + b32 valid = 0; + AstNumLit *tmp = make_int_literal(context, get_expression_integer_value(context, (AstTyped *) right, &valid)); + if (valid) right = tmp; + } + if (left->kind != Ast_Kind_NumLit || right->kind != Ast_Kind_NumLit) { node->left = (AstTyped *) left; node->right = (AstTyped *) right; return (AstNumLit *) node; } - AstNumLit* res = onyx_ast_node_new(a, sizeof(AstNumLit), Ast_Kind_NumLit); + AstNumLit* res = onyx_ast_node_new(context->ast_alloc, sizeof(AstNumLit), Ast_Kind_NumLit); res->token = node->token; res->flags |= node->flags; res->flags |= Ast_Flag_Comptime; @@ -317,16 +339,16 @@ AstNumLit* ast_reduce_binop(bh_allocator a, AstBinaryOp* node) { res->value.l = op (operand)->value.l; \ } -AstTyped* ast_reduce_unaryop(bh_allocator a, AstUnaryOp* unop) { +AstTyped* ast_reduce_unaryop(Context *context, AstUnaryOp* unop) { // GROSS - AstNumLit* operand = 
(AstNumLit *) ast_reduce(a, unop->expr); + AstNumLit* operand = (AstNumLit *) ast_reduce(context, unop->expr); unop->expr = (AstTyped *) operand; if (operand->kind != Ast_Kind_NumLit) { return (AstTyped *) unop; } - AstNumLit* res = onyx_ast_node_new(a, sizeof(AstNumLit), Ast_Kind_NumLit); + AstNumLit* res = onyx_ast_node_new(context->ast_alloc, sizeof(AstNumLit), Ast_Kind_NumLit); res->token = unop->token; res->flags |= Ast_Flag_Comptime; res->type_node = unop->type_node; @@ -413,41 +435,41 @@ AstTyped* ast_reduce_unaryop(bh_allocator a, AstUnaryOp* unop) { return (AstTyped *) res; } -AstTyped* ast_reduce(bh_allocator a, AstTyped* node) { +AstTyped* ast_reduce(Context *context, AstTyped* node) { assert(node->flags & Ast_Flag_Comptime); switch (node->kind) { - case Ast_Kind_Binary_Op: return (AstTyped *) ast_reduce_binop(a, (AstBinaryOp *) node); - case Ast_Kind_Unary_Op: return (AstTyped *) ast_reduce_unaryop(a, (AstUnaryOp *) node); + case Ast_Kind_Binary_Op: return (AstTyped *) ast_reduce_binop(context, (AstBinaryOp *) node); + case Ast_Kind_Unary_Op: return (AstTyped *) ast_reduce_unaryop(context, (AstUnaryOp *) node); case Ast_Kind_Enum_Value: return (AstTyped *) ((AstEnumValue *) node)->value; - case Ast_Kind_Alias: return (AstTyped *) ast_reduce(a, ((AstAlias *) node)->alias); + case Ast_Kind_Alias: return (AstTyped *) ast_reduce(context, ((AstAlias *) node)->alias); default: return node; } } -void promote_numlit_to_larger(AstNumLit* num) { +void promote_numlit_to_larger(Context *context, AstNumLit* num) { assert(num->type != NULL); if (type_is_integer(num->type) && num->type->Basic.size <= 4) { // NOTE: Int32, Int16, Int8 i64 val = (i64) num->value.i; num->value.l = val; - num->type = &basic_types[Basic_Kind_I64]; + num->type = context->types.basic[Basic_Kind_I64]; } else if (num->type->Basic.size <= 4) { // NOTE: Float32 f64 val = (f64) num->value.f; num->value.d = val; - num->type = &basic_types[Basic_Kind_F64]; + num->type = 
context->types.basic[Basic_Kind_F64]; } } // NOTE: Returns 1 if the conversion was successful. -b32 convert_numlit_to_type(AstNumLit* num, Type* to_type) { +b32 convert_numlit_to_type(Context *context, AstNumLit* num, Type* to_type, b32 permanent) { if (num->type == NULL) - num->type = type_build_from_ast(context.ast_alloc, num->type_node); + num->type = type_build_from_ast(context, num->type_node); assert(num->type); - if (types_are_compatible(num->type, to_type)) return 1; + if (types_are_compatible(context, num->type, to_type)) return 1; if (!type_is_numeric(to_type)) return 0; Type *type = to_type; @@ -467,25 +489,25 @@ b32 convert_numlit_to_type(AstNumLit* num, Type* to_type) { if (type->Basic.flags & Basic_Flag_Unsigned) { u64 value = (u64) num->value.l; if (type->Basic.size == 8) { - num->type = to_type; + if (permanent) num->type = to_type; return 1; } switch (type->Basic.size) { case 1: if (value <= 255) { - num->type = to_type; + if (permanent) num->type = to_type; return 1; } case 2: if (value <= 65535) { - num->type = to_type; + if (permanent) num->type = to_type; return 1; } case 4: if (value <= 4294967295) { - num->type = to_type; + if (permanent) num->type = to_type; return 1; } } - onyx_report_error(num->token->pos, Error_Critical, "Unsigned integer constant with value '%l' does not fit into %d-bits.", + if (permanent) ONYX_ERROR(num->token->pos, Error_Critical, "Unsigned integer constant with value '%l' does not fit into %d-bits.", num->value.l, type->Basic.size * 8); @@ -493,26 +515,26 @@ b32 convert_numlit_to_type(AstNumLit* num, Type* to_type) { i64 value = (i64) num->value.l; switch (type->Basic.size) { case 1: if (-128ll <= value && value <= 127ll) { - num->value.i = (i32) value; - num->type = to_type; + if (permanent) num->value.i = (i32) value; + if (permanent) num->type = to_type; return 1; } break; case 2: if (-32768ll <= value && value <= 32767ll) { - num->value.i = (i32) value; - num->type = to_type; + if (permanent) num->value.i = (i32) 
value; + if (permanent) num->type = to_type; return 1; } break; case 4: if (-2147483648ll <= value && value <= 2147483647ll) { - num->value.i = (i32) value; - num->type = to_type; + if (permanent) num->value.i = (i32) value; + if (permanent) num->type = to_type; return 1; } break; - case 8: { num->type = to_type; + case 8: { if (permanent) num->type = to_type; return 1; } break; } - onyx_report_error(num->token->pos, Error_Critical, "Integer constant with value '%l' does not fit into %d-bits.", + if (permanent) ONYX_ERROR(num->token->pos, Error_Critical, "Integer constant with value '%l' does not fit into %d-bits.", num->value.l, type->Basic.size * 8); } @@ -521,22 +543,22 @@ b32 convert_numlit_to_type(AstNumLit* num, Type* to_type) { else if (type->Basic.flags & Basic_Flag_Float) { if (type->Basic.size == 4) { if (bh_abs(num->value.l) >= (1 << 23)) { - onyx_report_error(num->token->pos, Error_Critical, "Integer '%l' does not fit in 32-bit float exactly.", num->value.l); + if (permanent) ONYX_ERROR(num->token->pos, Error_Critical, "Integer '%l' does not fit in 32-bit float exactly.", num->value.l); return 0; } - num->type = to_type; - num->value.f = (f32) num->value.l; + if (permanent) num->type = to_type; + if (permanent) num->value.f = (f32) num->value.l; return 1; } if (type->Basic.size == 8) { if (bh_abs(num->value.l) >= (1ll << 52)) { - onyx_report_error(num->token->pos, Error_Critical, "Integer '%l' does not fit in 64-bit float exactly.", num->value.l); + if (permanent) ONYX_ERROR(num->token->pos, Error_Critical, "Integer '%l' does not fit in 64-bit float exactly.", num->value.l); return 0; } - num->type = to_type; - num->value.d = (f64) num->value.l; + if (permanent) num->type = to_type; + if (permanent) num->value.d = (f64) num->value.l; return 1; } } @@ -546,10 +568,10 @@ b32 convert_numlit_to_type(AstNumLit* num, Type* to_type) { if ((type->Basic.flags & Basic_Flag_Float) == 0) return 0; if (type->Basic.kind == Basic_Kind_F32) { - num->value.f = (f32) 
num->value.d; + if (permanent) num->value.f = (f32) num->value.d; } - num->type = to_type; + if (permanent) num->type = to_type; return 1; } else if (num->type->Basic.kind == Basic_Kind_F32) { @@ -557,8 +579,8 @@ b32 convert_numlit_to_type(AstNumLit* num, Type* to_type) { if ((type->Basic.flags & Basic_Flag_Float) == 0) return 0; if (type->Basic.kind == Basic_Kind_F64) { - num->value.d = (f64) num->value.f; - num->type = to_type; + if (permanent) num->value.d = (f64) num->value.f; + if (permanent) num->type = to_type; return 1; } } @@ -566,10 +588,7 @@ b32 convert_numlit_to_type(AstNumLit* num, Type* to_type) { return 0; } -// TODO: This function should be able return a "yield" condition. There -// are a couple cases that need to yield in order to be correct, like -// polymorphic functions with a typeof for the return type. -TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { +TypeMatch unify_node_and_type_(Context *context, AstTyped** pnode, Type* type, b32 permanent) { AstTyped* node = *pnode; if (type == NULL) return TYPE_MATCH_FAILED; if (node == NULL) return TYPE_MATCH_FAILED; @@ -596,7 +615,7 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { node->type = type; - add_entities_for_node(NULL, (AstNode *) node, NULL, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) node, NULL, NULL); return TYPE_MATCH_SUCCESS; } @@ -622,9 +641,9 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { case Type_Kind_Slice: { Type* elem_type = type->Slice.elem; AstArrayLiteral* al = (AstArrayLiteral *) node; - array_type = type_make_array(context.ast_alloc, elem_type, bh_arr_length(al->values)); + array_type = type_make_array(context, elem_type, bh_arr_length(al->values)); - *pnode = (AstTyped *) make_cast(context.ast_alloc, node, type); + *pnode = (AstTyped *) make_cast(context, node, type); break; } @@ -639,7 +658,7 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, 
b32 permanent) { node->type = array_type; node->flags |= Ast_Flag_Array_Literal_Typed; - add_entities_for_node(NULL, (AstNode *) node, NULL, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) node, NULL, NULL); return TYPE_MATCH_SUCCESS; } @@ -649,12 +668,12 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { int index = 0; if ((index = shgeti(type->Union.variants, node->token->text)) != -1) { UnionVariant *uv = type->Union.variants[index].value; - if (uv->type != &basic_types[Basic_Kind_Void]) { + if (uv->type != context->types.basic[Basic_Kind_Void]) { if (permanent) { - onyx_report_error(node->token->pos, Error_Critical, + ONYX_ERROR(node->token->pos, Error_Critical, "Shorthand union literal syntax '.%s' is not all for this variant, because its type is not void; it is '%s'. Use the longer syntax, '.{ %s = value }'", node->token->text, - type_get_name(uv->type), + type_get_name(context, uv->type), node->token->text); } token_toggle_end(node->token); @@ -662,7 +681,7 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { } if (permanent) { - AstStructLiteral *sl = make_union_variant_of_void(context.ast_alloc, type, node->token, uv); + AstStructLiteral *sl = make_union_variant_of_void(context, type, node->token, uv); *pnode = (AstTyped *) sl; } @@ -672,22 +691,21 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { token_toggle_end(node->token); } - AstType* ast_type = type->ast_type; - AstNode* resolved = try_symbol_resolve_from_node((AstNode *) ast_type, node->token); + AstNode* resolved = try_symbol_resolve_from_type(context, type, node->token); if (resolved == NULL) { - if (context.cycle_detected) { + if (context->cycle_detected) { token_toggle_end(node->token); - char *closest = find_closest_symbol_in_node((AstNode *) ast_type, node->token->text); + char *closest = find_closest_symbol_in_node(context, (AstNode *) type->ast_type, node->token->text); 
token_toggle_end(node->token); if (closest) { - onyx_report_error(node->token->pos, Error_Critical, "'%b' does not exist in '%s'. Did you mean '%s'?", + ONYX_ERROR(node->token->pos, Error_Critical, "'%b' does not exist in '%s'. Did you mean '%s'?", node->token->text, node->token->length, - type_get_name(type), + type_get_name(context, type), closest); } else { - onyx_report_error(node->token->pos, Error_Critical, "'%b' does not exist in '%s'.", - node->token->text, node->token->length, type_get_name(type)); + ONYX_ERROR(node->token->pos, Error_Critical, "'%b' does not exist in '%s'.", + node->token->text, node->token->length, type_get_name(context, type)); } return TYPE_MATCH_FAILED; @@ -697,7 +715,7 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { } if (permanent) { - track_resolution_for_symbol_info((AstNode *) node, resolved); + track_resolution_for_symbol_info(context, (AstNode *) node, resolved); *pnode = (AstTyped *) resolved; } @@ -705,12 +723,12 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { } if (node->kind == Ast_Kind_Overloaded_Function) { - AstTyped* func = find_matching_overload_by_type(((AstOverloadedFunction *) node)->overloads, type); + AstTyped* func = find_matching_overload_by_type(context, ((AstOverloadedFunction *) node)->overloads, type); if (func == NULL) return TYPE_MATCH_FAILED; - if (func == (AstTyped *) &node_that_signals_a_yield) return TYPE_MATCH_YIELD; + if (func == (AstTyped *) &context->node_that_signals_a_yield) return TYPE_MATCH_YIELD; if (permanent) { - ensure_overload_returns_correct_type(func, (AstOverloadedFunction *) node); + ensure_overload_returns_correct_type(context, func, (AstOverloadedFunction *) node); *pnode = func; } @@ -718,9 +736,9 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { } if (node->kind == Ast_Kind_Polymorphic_Proc) { - AstFunction* func = polymorphic_proc_lookup((AstFunction *) node, PPLM_By_Function_Type, 
type, node->token); + AstFunction* func = polymorphic_proc_lookup(context, (AstFunction *) node, PPLM_By_Function_Type, type, node->token); if (func == NULL) return TYPE_MATCH_FAILED; - if (func == (AstFunction *) &node_that_signals_a_yield) return TYPE_MATCH_YIELD; + if (func == (AstFunction *) &context->node_that_signals_a_yield) return TYPE_MATCH_YIELD; *pnode = (AstTyped *) func; node = *pnode; @@ -732,35 +750,41 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { if (type->kind == Type_Kind_Function && (node->flags & Ast_Flag_Proc_Is_Null) != 0) return TYPE_MATCH_SUCCESS; // The normal case where everything works perfectly. - Type* node_type = get_expression_type(node); - if (types_are_compatible(node_type, type)) return TYPE_MATCH_SUCCESS; + Type* node_type = get_expression_type(context, node); + if (types_are_compatible(context, node_type, type)) return TYPE_MATCH_SUCCESS; - Type* any_type = type_build_from_ast(context.ast_alloc, builtin_any_type); - if (any_type == NULL) return TYPE_MATCH_YIELD; - i64 any_id = any_type->id; - if (node_type && node_type->id != any_id && type->id == any_id) return TYPE_MATCH_SUCCESS; + // 'any' matches any type. This is commented out because it was causing + // many issues with incorrect implementations in the rest of the compiler + // since this bypasses all normal type checking and forces the rest of the + // compiler to handle "mismatched" types. This should be properly fixed soon, + // but will remain commented out for now. 
+ // + // Type* any_type = type_build_from_ast(context, context->builtins.any_type); + // if (any_type == NULL) return TYPE_MATCH_YIELD; + // i64 any_id = any_type->id; + // if (node_type && node_type->id != any_id && type->id == any_id) return TYPE_MATCH_SUCCESS; + // // Here are some of the ways you can unify a node with a type if the type of the // node does not match the given type: // // If the nodes type is a function type and that function has an automatic return - // value placeholder, fill in that placeholder with the actual type. + // value placeholder, wait for the return type to be solved by the function first. // :AutoReturnType if (node_type && node_type->kind == Type_Kind_Function - && node_type->Function.return_type == &type_auto_return + && node_type->Function.return_type == context->types.auto_return && type->kind == Type_Kind_Function) { - node_type->Function.return_type = type->Function.return_type; - return TYPE_MATCH_SUCCESS; + return TYPE_MATCH_YIELD; } // If the node is an auto cast (~~) node, then check to see if the cast is legal // to the destination type, and if it is change the type to cast to. if (node_is_auto_cast((AstNode *) node)) { char* dummy; - Type* from_type = get_expression_type(((AstUnaryOp *) node)->expr); - if (!from_type || !cast_is_legal(from_type, type, &dummy)) { + Type* from_type = get_expression_type(context, ((AstUnaryOp *) node)->expr); + if (!from_type || !cast_is_legal(context, from_type, type, &dummy)) { return TYPE_MATCH_FAILED; } else { @@ -772,7 +796,7 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { // String literals implicitly become c-strings for convience. 
if (node->kind == Ast_Kind_StrLit && type->kind == Type_Kind_MultiPointer - && type->MultiPointer.elem == &basic_types[Basic_Kind_U8]) { + && type->MultiPointer.elem == context->types.basic[Basic_Kind_U8]) { if (permanent) { AstStrLit *strlit = (AstStrLit *) node; @@ -793,9 +817,9 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { (node_type->kind == Type_Kind_Array || node_type->kind == Type_Kind_DynArray || node_type->kind == Type_Kind_VarArgs)) { char* dummy; - b32 legal = cast_is_legal(node_type, type, &dummy); + b32 legal = cast_is_legal(context, node_type, type, &dummy); if (permanent && legal) { - *pnode = (AstTyped *) make_cast(context.ast_alloc, node, type); + *pnode = (AstTyped *) make_cast(context, node, type); } return legal ? TYPE_MATCH_SUCCESS : TYPE_MATCH_FAILED; @@ -818,7 +842,7 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { AstSwitchCase *casestmt = *pcasestmt; if (!casestmt->body_is_expr) continue; - switch (unify_node_and_type_(&casestmt->expr, type, permanent)) { + switch (unify_node_and_type_(context, &casestmt->expr, type, permanent)) { case TYPE_MATCH_SUCCESS: break; case TYPE_MATCH_FAILED: return TYPE_MATCH_FAILED; case TYPE_MATCH_YIELD: return TYPE_MATCH_YIELD; @@ -827,7 +851,7 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { } if (switchnode->default_case) { - switch (unify_node_and_type_((AstTyped **) &switchnode->default_case, type, permanent)) { + switch (unify_node_and_type_(context, (AstTyped **) &switchnode->default_case, type, permanent)) { case TYPE_MATCH_SUCCESS: break; case TYPE_MATCH_FAILED: return TYPE_MATCH_FAILED; case TYPE_MATCH_YIELD: return TYPE_MATCH_YIELD; @@ -842,11 +866,11 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { // If the destination type is an optional, and the node's type is a value of // the same underlying type, then we can construct an optional with a value // implicitly. 
This makes working with optionals barable. - if (type_constructed_from_poly(type, builtin_optional_type)) { - TypeMatch match = unify_node_and_type_(pnode, type->Union.poly_sln[0].type, permanent); + if (type_constructed_from_poly(type, context->builtins.optional_type)) { + TypeMatch match = unify_node_and_type_(context, pnode, type->Union.poly_sln[0].type, permanent); if (match == TYPE_MATCH_SUCCESS) { if (permanent) { - AstStructLiteral *opt_lit = make_optional_literal_some(context.ast_alloc, node, type); + AstStructLiteral *opt_lit = make_optional_literal_some(context, node, type); *(AstStructLiteral **) pnode = opt_lit; } @@ -860,7 +884,7 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { // If the node is a numeric literal, try to convert it to the destination type. if (node->kind == Ast_Kind_NumLit) { - if (convert_numlit_to_type((AstNumLit *) node, type)) return TYPE_MATCH_SUCCESS; + if (convert_numlit_to_type(context, (AstNumLit *) node, type, permanent)) return TYPE_MATCH_SUCCESS; return TYPE_MATCH_FAILED; } @@ -876,13 +900,13 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { if (expr_count != type->Compound.count) return TYPE_MATCH_FAILED; fori (i, 0, (i64) expr_count) { - TypeMatch tm = unify_node_and_type_(&compound->exprs[i], type->Compound.types[i], permanent); + TypeMatch tm = unify_node_and_type_(context, &compound->exprs[i], type->Compound.types[i], permanent); if (tm != TYPE_MATCH_SUCCESS) { return tm; } } - compound->type = type_build_compound_type(context.ast_alloc, compound); + compound->type = type_build_compound_type(context, compound); return TYPE_MATCH_SUCCESS; } @@ -890,8 +914,8 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { if (node->kind == Ast_Kind_If_Expression) { AstIfExpression* if_expr = (AstIfExpression *) node; - TypeMatch true_success = unify_node_and_type_(&if_expr->true_expr, type, permanent); - TypeMatch false_success = 
unify_node_and_type_(&if_expr->false_expr, type, permanent); + TypeMatch true_success = unify_node_and_type_(context, &if_expr->true_expr, type, permanent); + TypeMatch false_success = unify_node_and_type_(context, &if_expr->false_expr, type, permanent); if (true_success == TYPE_MATCH_SUCCESS && false_success == TYPE_MATCH_SUCCESS) { if (permanent) if_expr->type = type; @@ -907,18 +931,18 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { if (node->kind == Ast_Kind_Alias) { AstAlias* alias = (AstAlias *) node; - return unify_node_and_type_(&alias->alias, type, permanent); + return unify_node_and_type_(context, &alias->alias, type, permanent); } if (node->kind == Ast_Kind_Address_Of) { AstAddressOf *address_of = (AstAddressOf *) node; if (address_of->can_be_removed) { if (!permanent) { - return unify_node_and_type_(&address_of->expr, type, permanent); + return unify_node_and_type_(context, &address_of->expr, type, permanent); } else { *pnode = (AstTyped *) address_of->expr; - return unify_node_and_type_(pnode, type, permanent); + return unify_node_and_type_(context, pnode, type, permanent); } } } @@ -960,7 +984,7 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { i32 keep = 0; if (type->kind != Type_Kind_Compound) { - if (!types_are_compatible(node_type->Compound.types[0], type)) { + if (!types_are_compatible(context, node_type->Compound.types[0], type)) { return TYPE_MATCH_FAILED; } @@ -968,7 +992,7 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { } else { fori (i, 0, type->Compound.count) { - if (!types_are_compatible(node_type->Compound.types[i], type->Compound.types[i])) { + if (!types_are_compatible(context, node_type->Compound.types[i], type->Compound.types[i])) { return TYPE_MATCH_FAILED; } @@ -990,36 +1014,36 @@ TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent) { // code between them, but I think there enough minor differences that that // 
might not be possible. -Type* query_expression_type(AstTyped *node) { +Type* query_expression_type(Context *context, AstTyped *node) { if (node == NULL) return NULL; if (node->kind == Ast_Kind_Argument) { - return query_expression_type(((AstArgument *) node)->value); + return query_expression_type(context, ((AstArgument *) node)->value); } if (node->kind == Ast_Kind_If_Expression) { AstIfExpression* if_expr = (AstIfExpression *) node; - return query_expression_type(if_expr->true_expr); + return query_expression_type(context, if_expr->true_expr); } if (node->kind == Ast_Kind_Alias) { AstAlias* alias = (AstAlias *) node; - return query_expression_type(alias->alias); + return query_expression_type(context, alias->alias); } if (node_is_type((AstNode *) node)) { - return &basic_types[Basic_Kind_Type_Index]; + return context->types.basic[Basic_Kind_Type_Index]; } if (node->kind == Ast_Kind_Array_Literal && node->type == NULL) { AstArrayLiteral* al = (AstArrayLiteral *) node; - Type* elem_type = &basic_types[Basic_Kind_Void]; + Type* elem_type = context->types.basic[Basic_Kind_Void]; if (bh_arr_length(al->values) > 0) { - elem_type = query_expression_type(al->values[0]); + elem_type = query_expression_type(context, al->values[0]); } if (elem_type) { - return type_make_array(context.ast_alloc, elem_type, bh_arr_length(al->values)); + return type_make_array(context, elem_type, bh_arr_length(al->values)); } } @@ -1036,7 +1060,7 @@ Type* query_expression_type(AstTyped *node) { return NULL; } - return type_build_implicit_type_of_struct_literal(context.ast_alloc, sl, 1); + return type_build_implicit_type_of_struct_literal(context, sl, 1); } // If polymorphic procedures HAVE to have a type, most likely @@ -1044,33 +1068,33 @@ Type* query_expression_type(AstTyped *node) { // assigned a void type. This is cleared before the procedure // is solidified. 
if (node->kind == Ast_Kind_Polymorphic_Proc) { - return &basic_types[Basic_Kind_Void]; + return context->types.basic[Basic_Kind_Void]; } if (node->kind == Ast_Kind_Macro) { - return query_expression_type((AstTyped *) ((AstMacro *) node)->body); + return query_expression_type(context, (AstTyped *) ((AstMacro *) node)->body); } if (node->kind == Ast_Kind_Package) { - return type_build_from_ast(context.ast_alloc, node->type_node); + return type_build_from_ast(context, node->type_node); } if (node->type == NULL) - return type_build_from_ast(context.ast_alloc, node->type_node); + return type_build_from_ast(context, node->type_node); if (node->kind == Ast_Kind_NumLit && node->type->kind == Type_Kind_Basic) { if (node->type->Basic.kind == Basic_Kind_Int_Unsized) { b32 big = bh_abs(((AstNumLit *) node)->value.l) >= (1ll << 32); b32 unsign = ((AstNumLit *) node)->was_hex_literal; - if (((AstNumLit *) node)->was_char_literal) return &basic_types[Basic_Kind_U8]; - else if ( big && !unsign) return &basic_types[Basic_Kind_I64]; - else if ( big && unsign) return &basic_types[Basic_Kind_U64]; - else if (!big && !unsign) return &basic_types[Basic_Kind_I32]; - else if (!big && unsign) return &basic_types[Basic_Kind_U32]; + if (((AstNumLit *) node)->was_char_literal) return context->types.basic[Basic_Kind_U8]; + else if ( big && !unsign) return context->types.basic[Basic_Kind_I64]; + else if ( big && unsign) return context->types.basic[Basic_Kind_U64]; + else if (!big && !unsign) return context->types.basic[Basic_Kind_I32]; + else if (!big && unsign) return context->types.basic[Basic_Kind_U32]; } else if (node->type->Basic.kind == Basic_Kind_Float_Unsized) { - return &basic_types[Basic_Kind_F64]; + return context->types.basic[Basic_Kind_F64]; } } @@ -1078,53 +1102,53 @@ Type* query_expression_type(AstTyped *node) { } // See note above about query_expresion_type. 
-Type* resolve_expression_type(AstTyped* node) { +Type* resolve_expression_type(Context *context, AstTyped* node) { if (node == NULL) return NULL; if (node->kind == Ast_Kind_Compound) { bh_arr_each(AstTyped *, expr, ((AstCompound *) node)->exprs) { - resolve_expression_type(*expr); + resolve_expression_type(context, *expr); } - node->type = type_build_compound_type(context.ast_alloc, (AstCompound *) node); + node->type = type_build_compound_type(context, (AstCompound *) node); return node->type; } if (node->kind == Ast_Kind_Argument) { - node->type = resolve_expression_type(((AstArgument *) node)->value); + node->type = resolve_expression_type(context, ((AstArgument *) node)->value); } if (node->kind == Ast_Kind_If_Expression) { AstIfExpression* if_expr = (AstIfExpression *) node; - Type* ltype = resolve_expression_type(if_expr->true_expr); - unify_node_and_type(&if_expr->false_expr, ltype); + Type* ltype = resolve_expression_type(context, if_expr->true_expr); + unify_node_and_type(context, &if_expr->false_expr, ltype); if_expr->type = ltype; } if (node->kind == Ast_Kind_Alias) { AstAlias* alias = (AstAlias *) node; - alias->type = resolve_expression_type(alias->alias); + alias->type = resolve_expression_type(context, alias->alias); } if (node_is_type((AstNode *) node)) { - return &basic_types[Basic_Kind_Type_Index]; + return context->types.basic[Basic_Kind_Type_Index]; } if (node->kind == Ast_Kind_Array_Literal && node->type == NULL) { AstArrayLiteral* al = (AstArrayLiteral *) node; - Type* elem_type = &basic_types[Basic_Kind_Void]; + Type* elem_type = context->types.basic[Basic_Kind_Void]; if (bh_arr_length(al->values) > 0) { - elem_type = resolve_expression_type(al->values[0]); + elem_type = resolve_expression_type(context, al->values[0]); } if (elem_type) { - node->type = type_make_array(context.ast_alloc, elem_type, bh_arr_length(al->values)); + node->type = type_make_array(context, elem_type, bh_arr_length(al->values)); node->flags |= 
Ast_Flag_Array_Literal_Typed; if (node->entity == NULL) { - add_entities_for_node(NULL, (AstNode *) node, NULL, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) node, NULL, NULL); } } } @@ -1142,9 +1166,9 @@ Type* resolve_expression_type(AstTyped* node) { return NULL; } - sl->type = type_build_implicit_type_of_struct_literal(context.ast_alloc, sl, 0); + sl->type = type_build_implicit_type_of_struct_literal(context, sl, 0); if (sl->type) { - add_entities_for_node(NULL, (AstNode *) sl, NULL, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) sl, NULL, NULL); } } @@ -1153,44 +1177,44 @@ Type* resolve_expression_type(AstTyped* node) { // assigned a void type. This is cleared before the procedure // is solidified. if (node->kind == Ast_Kind_Polymorphic_Proc) { - node->type = &basic_types[Basic_Kind_Void]; + node->type = context->types.basic[Basic_Kind_Void]; } if (node->kind == Ast_Kind_Macro) { - return resolve_expression_type((AstTyped *) ((AstMacro *) node)->body); + return resolve_expression_type(context, (AstTyped *) ((AstMacro *) node)->body); } if (node->kind == Ast_Kind_Package) { - node->type_node = builtin_package_id_type; - node->type = type_build_from_ast(context.ast_alloc, node->type_node); + node->type_node = context->builtins.package_id_type; + node->type = type_build_from_ast(context, node->type_node); } if (node->type == NULL) - node->type = type_build_from_ast(context.ast_alloc, node->type_node); + node->type = type_build_from_ast(context, node->type_node); if (node->kind == Ast_Kind_NumLit && node->type->kind == Type_Kind_Basic) { if (node->type->Basic.kind == Basic_Kind_Int_Unsized) { b32 big = bh_abs(((AstNumLit *) node)->value.l) >= (1ll << 32); b32 unsign = ((AstNumLit *) node)->was_hex_literal; - if (((AstNumLit *) node)->was_char_literal) convert_numlit_to_type((AstNumLit *) node, &basic_types[Basic_Kind_U8]); - else if ( big && !unsign) convert_numlit_to_type((AstNumLit *) node, 
&basic_types[Basic_Kind_I64]); - else if ( big && unsign) convert_numlit_to_type((AstNumLit *) node, &basic_types[Basic_Kind_U64]); - else if (!big && !unsign) convert_numlit_to_type((AstNumLit *) node, &basic_types[Basic_Kind_I32]); - else if (!big && unsign) convert_numlit_to_type((AstNumLit *) node, &basic_types[Basic_Kind_U32]); + if (((AstNumLit *) node)->was_char_literal) convert_numlit_to_type(context, (AstNumLit *) node, context->types.basic[Basic_Kind_U8], 1); + else if ( big && !unsign) convert_numlit_to_type(context, (AstNumLit *) node, context->types.basic[Basic_Kind_I64], 1); + else if ( big && unsign) convert_numlit_to_type(context, (AstNumLit *) node, context->types.basic[Basic_Kind_U64], 1); + else if (!big && !unsign) convert_numlit_to_type(context, (AstNumLit *) node, context->types.basic[Basic_Kind_I32], 1); + else if (!big && unsign) convert_numlit_to_type(context, (AstNumLit *) node, context->types.basic[Basic_Kind_U32], 1); } else if (node->type->Basic.kind == Basic_Kind_Float_Unsized) { - convert_numlit_to_type((AstNumLit *) node, &basic_types[Basic_Kind_F64]); + convert_numlit_to_type(context, (AstNumLit *) node, context->types.basic[Basic_Kind_F64], 1); } } return node->type; } -i64 get_expression_integer_value(AstTyped* node, b32 *is_valid) { +i64 get_expression_integer_value(Context *context, AstTyped* node, b32 *is_valid) { if (!node) return 0; - resolve_expression_type(node); + resolve_expression_type(context, node); if (is_valid) *is_valid = 1; @@ -1203,7 +1227,7 @@ i64 get_expression_integer_value(AstTyped* node, b32 *is_valid) { } if (node->kind == Ast_Kind_Argument) { - return get_expression_integer_value(((AstArgument *) node)->value, is_valid); + return get_expression_integer_value(context, ((AstArgument *) node)->value, is_valid); } if (node->kind == Ast_Kind_Size_Of) { @@ -1215,19 +1239,19 @@ i64 get_expression_integer_value(AstTyped* node, b32 *is_valid) { } if (node->kind == Ast_Kind_Alias) { - return 
get_expression_integer_value(((AstAlias *) node)->alias, is_valid); + return get_expression_integer_value(context, ((AstAlias *) node)->alias, is_valid); } if (node->kind == Ast_Kind_Enum_Value) { - return get_expression_integer_value(((AstEnumValue *) node)->value, is_valid); + return get_expression_integer_value(context, ((AstEnumValue *) node)->value, is_valid); } if (node->kind == Ast_Kind_Unary_Op && type_is_integer(node->type)) { - return get_expression_integer_value(((AstUnaryOp *) node)->expr, is_valid); + return get_expression_integer_value(context, ((AstUnaryOp *) node)->expr, is_valid); } if (node_is_type((AstNode*) node)) { - Type* type = type_build_from_ast(context.ast_alloc, (AstType *) node); + Type* type = type_build_from_ast(context, (AstType *) node); if (type) return type->id; } @@ -1235,8 +1259,8 @@ i64 get_expression_integer_value(AstTyped* node, b32 *is_valid) { return 0; } -char *get_expression_string_value(AstTyped* node, b32 *out_is_valid) { - resolve_expression_type(node); +char *get_expression_string_value(Context *context, AstTyped* node, b32 *out_is_valid) { + resolve_expression_type(context, node); if (out_is_valid) *out_is_valid = 1; @@ -1246,7 +1270,7 @@ char *get_expression_string_value(AstTyped* node, b32 *out_is_valid) { // CLEANUP: Maybe this should allocate on the heap? // I guess if in all cases the memory is allocated on the heap, // then the caller can free the memory. 
- char* strdata = bh_alloc_array(global_heap_allocator, char, str->token->length + 1); + char* strdata = bh_alloc_array(context->gp_alloc, char, str->token->length + 1); i32 length = string_process_escape_seqs(strdata, str->token->text, str->token->length); strdata[length] = '\0'; @@ -1254,7 +1278,7 @@ char *get_expression_string_value(AstTyped* node, b32 *out_is_valid) { } if (node->kind == Ast_Kind_Alias) { - return get_expression_string_value(((AstAlias *) node)->alias, out_is_valid); + return get_expression_string_value(context, ((AstAlias *) node)->alias, out_is_valid); } if (out_is_valid) *out_is_valid = 0; @@ -1276,7 +1300,7 @@ static const b32 cast_legality[][12] = { /* TYP */ { 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1,} }; -b32 cast_is_legal(Type* from_, Type* to_, char** err_msg) { +b32 cast_is_legal(Context *context, Type* from_, Type* to_, char** err_msg) { Type* from = from_; Type* to = to_; @@ -1292,11 +1316,11 @@ b32 cast_is_legal(Type* from_, Type* to_, char** err_msg) { if (from_->id == to_->id) return 1; if (to->kind == Type_Kind_Distinct) { - if (types_are_compatible(to->Distinct.base_type, from)) { + if (types_are_compatible(context, to->Distinct.base_type, from)) { return 1; } - if (from->kind == Type_Kind_Distinct && types_are_compatible(to, from->Distinct.base_type)) { + if (from->kind == Type_Kind_Distinct && types_are_compatible(context, to, from->Distinct.base_type)) { return 1; } @@ -1305,11 +1329,11 @@ b32 cast_is_legal(Type* from_, Type* to_, char** err_msg) { } if (from->kind == Type_Kind_Distinct) { - if (types_are_compatible(from->Distinct.base_type, to)) { + if (types_are_compatible(context, from->Distinct.base_type, to)) { return 1; } - if (to->kind == Type_Kind_Distinct && types_are_compatible(from, to->Distinct.base_type)) { + if (to->kind == Type_Kind_Distinct && types_are_compatible(context, from, to->Distinct.base_type)) { return 1; } @@ -1334,7 +1358,7 @@ b32 cast_is_legal(Type* from_, Type* to_, char** err_msg) { // CLEANUP: These 
error messages should be a lot better and actually // provide the types of the things in question. if (to->kind == Type_Kind_Slice && from->kind == Type_Kind_Array) { - if (!types_are_compatible(to->Slice.elem, from->Array.elem)) { + if (!types_are_compatible(context, to->Slice.elem, from->Array.elem)) { *err_msg = "Array to slice cast is not valid here because the types are different."; return 0; } else { @@ -1343,7 +1367,7 @@ b32 cast_is_legal(Type* from_, Type* to_, char** err_msg) { } if (to->kind == Type_Kind_Slice && from->kind == Type_Kind_DynArray) { - //if (!types_are_compatible(to->Slice.elem, from->DynArray.elem)) { + //if (!types_are_compatible(context, to->Slice.elem, from->DynArray.elem)) { if (type_size_of(to->Slice.elem) != type_size_of(from->DynArray.elem)) { *err_msg = "Dynmaic array to slice cast is not valid here because the types are different sizes."; return 0; @@ -1353,7 +1377,7 @@ b32 cast_is_legal(Type* from_, Type* to_, char** err_msg) { } if (to->kind == Type_Kind_Slice && from->kind == Type_Kind_VarArgs) { - if (!types_are_compatible(to->Slice.elem, from->VarArgs.elem)) { + if (!types_are_compatible(context, to->Slice.elem, from->VarArgs.elem)) { *err_msg = "Variadic argument to slice cast is not valid here because the types are different."; return 0; } else { @@ -1364,7 +1388,7 @@ b32 cast_is_legal(Type* from_, Type* to_, char** err_msg) { if (from->kind == Type_Kind_Slice || to->kind == Type_Kind_Slice) { if ((from->kind != Type_Kind_Slice || to->kind != Type_Kind_Slice) || to->Slice.elem->kind != Type_Kind_Pointer || from->Slice.elem->kind != Type_Kind_Pointer - || !types_are_compatible(from->Slice.elem, to->Slice.elem)) { + || !types_are_compatible(context, from->Slice.elem, to->Slice.elem)) { *err_msg = "Cannot only cast between slice types when both are a slice of compatible pointers."; return 0; } else { @@ -1384,7 +1408,7 @@ b32 cast_is_legal(Type* from_, Type* to_, char** err_msg) { if (from->kind == Type_Kind_Function) { 
*err_msg = "Can only cast a function to a 'u32'."; - return to == &basic_types[Basic_Kind_U32]; + return to == context->types.basic[Basic_Kind_U32]; } if ( (type_is_simd(to) && !type_is_simd(from)) @@ -1438,11 +1462,11 @@ b32 cast_is_legal(Type* from_, Type* to_, char** err_msg) { if (fromidx != -1 && toidx != -1) { if (!cast_legality[fromidx][toidx]) { - *err_msg = bh_aprintf(global_heap_allocator, "Cast from '%s' to '%s' is not allowed.", type_get_name(from_), type_get_name(to_)); + *err_msg = bh_aprintf(context->gp_alloc, "Cast from '%s' to '%s' is not allowed.", type_get_name(context, from_), type_get_name(context, to_)); return 0; } } else { - *err_msg = bh_aprintf(global_heap_allocator, "Cast from '%s' to '%s' is not allowed.", type_get_name(from_), type_get_name(to_)); + *err_msg = bh_aprintf(context->gp_alloc, "Cast from '%s' to '%s' is not allowed.", type_get_name(context, from_), type_get_name(context, to_)); return 0; } @@ -1452,18 +1476,20 @@ b32 cast_is_legal(Type* from_, Type* to_, char** err_msg) { -TypeMatch implicit_cast_to_bool(AstTyped **pnode) { +TypeMatch implicit_cast_to_bool(Context *context, AstTyped **pnode) { AstTyped *node = *pnode; + if (!node->type) return TYPE_MATCH_YIELD; + if ((node->type->kind == Type_Kind_Basic && node->type->Basic.kind == Basic_Kind_Rawptr) || (node->type->kind == Type_Kind_Pointer) || (node->type->kind == Type_Kind_MultiPointer)) { - AstNumLit *zero = make_int_literal(context.ast_alloc, 0); - zero->type = &basic_types[Basic_Kind_Rawptr]; + AstNumLit *zero = make_int_literal(context, 0); + zero->type = context->types.basic[Basic_Kind_Rawptr]; - AstBinaryOp* cmp = make_binary_op(context.ast_alloc, Binary_Op_Not_Equal, node, (AstTyped *) zero); + AstBinaryOp* cmp = make_binary_op(context, Binary_Op_Not_Equal, node, (AstTyped *) zero); cmp->token = node->token; - cmp->type = &basic_types[Basic_Kind_Bool]; + cmp->type = context->types.basic[Basic_Kind_Bool]; *pnode = (AstTyped *) cmp; return TYPE_MATCH_SUCCESS; @@ 
-1473,71 +1499,130 @@ TypeMatch implicit_cast_to_bool(AstTyped **pnode) { node->type->kind == Type_Kind_DynArray || node->type->kind == Type_Kind_VarArgs) { StructMember smem; - assert(type_lookup_member(node->type, "count", &smem)); + assert(type_lookup_member(context, node->type, "count", &smem)); // These fields are filled out here in order to prevent // going through the type checker one more time. - AstFieldAccess *field = make_field_access(context.ast_alloc, node, "count"); + AstFieldAccess *field = make_field_access(context, node, "count"); field->offset = smem.offset; field->idx = smem.idx; field->type = smem.type; field->flags |= Ast_Flag_Has_Been_Checked; - AstNumLit *zero = make_int_literal(context.ast_alloc, 0); + AstNumLit *zero = make_int_literal(context, 0); zero->type = smem.type; - AstBinaryOp* cmp = make_binary_op(context.ast_alloc, Binary_Op_Not_Equal, (AstTyped *) field, (AstTyped *) zero); - cmp->type = &basic_types[Basic_Kind_Bool]; + AstBinaryOp* cmp = make_binary_op(context, Binary_Op_Not_Equal, (AstTyped *) field, (AstTyped *) zero); + cmp->type = context->types.basic[Basic_Kind_Bool]; *pnode = (AstTyped *) cmp; return TYPE_MATCH_SUCCESS; } - if (context.caches.implicit_cast_to_bool_cache.entries == NULL) { - bh_imap_init(&context.caches.implicit_cast_to_bool_cache, global_heap_allocator, 8); + if (context->caches.implicit_cast_to_bool_cache.entries == NULL) { + bh_imap_init(&context->caches.implicit_cast_to_bool_cache, context->gp_alloc, 8); } - if (!bh_imap_has(&context.caches.implicit_cast_to_bool_cache, (u64) node)) { - AstArgument *implicit_arg = make_argument(context.ast_alloc, node); + if (!bh_imap_has(&context->caches.implicit_cast_to_bool_cache, (u64) node)) { + AstArgument *implicit_arg = make_argument(context, node); - Arguments *args = bh_alloc_item(context.ast_alloc, Arguments); - bh_arr_new(context.ast_alloc, args->values, 1); + Arguments *args = bh_alloc_item(context->ast_alloc, Arguments); + bh_arr_new(context->ast_alloc, 
args->values, 1); bh_arr_push(args->values, (AstTyped *) implicit_arg); - bh_imap_put(&context.caches.implicit_cast_to_bool_cache, (u64) node, (u64) args); + bh_imap_put(&context->caches.implicit_cast_to_bool_cache, (u64) node, (u64) args); } - Arguments *args = (Arguments *) bh_imap_get(&context.caches.implicit_cast_to_bool_cache, (u64) node); - AstFunction *overload = (AstFunction *) find_matching_overload_by_arguments(builtin_implicit_bool_cast->overloads, args); + Arguments *args = (Arguments *) bh_imap_get(&context->caches.implicit_cast_to_bool_cache, (u64) node); + AstFunction *overload = (AstFunction *) find_matching_overload_by_arguments(context, context->builtins.implicit_bool_cast->overloads, args); if (overload == NULL) return TYPE_MATCH_FAILED; - if (overload == (AstFunction *) &node_that_signals_a_yield) return TYPE_MATCH_YIELD; + if (overload == (AstFunction *) &context->node_that_signals_a_yield) return TYPE_MATCH_YIELD; - AstCall *implicit_call = onyx_ast_node_new(context.ast_alloc, sizeof(AstCall), Ast_Kind_Call); + AstCall *implicit_call = onyx_ast_node_new(context->ast_alloc, sizeof(AstCall), Ast_Kind_Call); implicit_call->token = node->token; implicit_call->callee = (AstTyped *) overload; implicit_call->va_kind = VA_Kind_Not_VA; implicit_call->args.values = args->values; *(AstCall **) pnode = implicit_call; - bh_imap_delete(&context.caches.implicit_cast_to_bool_cache, (u64) node); + bh_imap_delete(&context->caches.implicit_cast_to_bool_cache, (u64) node); return TYPE_MATCH_YIELD; } -char* get_function_name(AstFunction* func) { - if (func->kind != Ast_Kind_Function) return ""; +static char *sanitize_name(bh_allocator a, char *name) { + if (!name) return name; + + char *sanitized = bh_strdup(a, name); + char *c = sanitized; + while (*c) { + if (!char_is_alphanum(*c)) { + *c = '_'; + } + c++; + } + return sanitized; +} + +char* get_function_name(Context *context, AstFunction* func) { + if (func->kind != Ast_Kind_Function) return "unnamed_proc"; if 
(func->name != NULL) return func->name; if (func->exported_name != NULL) { - return bh_aprintf(global_scratch_allocator, - "EXPORTED:%b", + return bh_aprintf(context->scratch_alloc, + "%b", func->exported_name->text, func->exported_name->length); } - return ""; + return "unnamed_proc"; +} + +char* get_function_assembly_name(Context *context, AstFunction* func) { + if (func->kind == Ast_Kind_Function) { + if (func->assembly_name != NULL) return func->assembly_name; + + if (func->exported_name != NULL) { + return bh_aprintf(context->scratch_alloc, + "%b", + func->exported_name->text, + func->exported_name->length); + } + } + + if (func->token) { + return bh_aprintf(context->ast_alloc, + "unnamed_at_%s_%d", + sanitize_name(context->scratch_alloc, (char *) func->token->pos.filename), + func->token->pos.line); + } + + return "unnamed"; +} + +char* generate_name_within_scope(Context *context, Scope *scope, OnyxToken* symbol) { + char name[512]; + memset(name, 0, 512); + + bh_arr(char *) names=NULL; + bh_arr_new(context->gp_alloc, names, 4); + + while (scope != NULL) { + bh_arr_push(names, scope->name); + scope = scope->parent; + } + + bh_arr_each(char *, n, names) { + if (*n == NULL) continue; + + strncat(name, *n, 511); + strncat(name, ".", 511); + } + bh_arr_free(names); + + return bh_aprintf(context->gp_alloc, "%s%b", name, symbol->text, symbol->length); } AstNode* strip_aliases(AstNode* n) { @@ -1548,64 +1633,63 @@ AstNode* strip_aliases(AstNode* n) { return n; } -AstNumLit* make_bool_literal(bh_allocator a, b32 b) { - AstNumLit* bl = onyx_ast_node_new(a, sizeof(AstNumLit), Ast_Kind_NumLit); +AstNumLit* make_bool_literal(Context *context, b32 b) { + AstNumLit* bl = onyx_ast_node_new(context->ast_alloc, sizeof(AstNumLit), Ast_Kind_NumLit); bl->flags |= Ast_Flag_Comptime; - bl->type_node = (AstType *) &basic_type_bool; - bl->type = &basic_types[Basic_Kind_Bool]; + bl->type_node = (AstType *) &context->basic_types.type_bool; + bl->type = 
context->types.basic[Basic_Kind_Bool]; bl->value.i = b ? 1 : 0; return bl; } -AstNumLit* make_int_literal(bh_allocator a, i64 i) { - AstNumLit* num = onyx_ast_node_new(a, sizeof(AstNumLit), Ast_Kind_NumLit); +AstNumLit* make_int_literal(Context *context, i64 i) { + AstNumLit* num = onyx_ast_node_new(context->ast_alloc, sizeof(AstNumLit), Ast_Kind_NumLit); num->flags |= Ast_Flag_Comptime; if (bh_abs(i) >= (1ll << 32)) - num->type_node = (AstType *) &basic_type_i64; + num->type_node = (AstType *) &context->basic_types.type_i64; else - num->type_node = (AstType *) &basic_type_i32; + num->type_node = (AstType *) &context->basic_types.type_i32; num->value.l = i; return num; } -AstNumLit* make_float_literal(bh_allocator a, f64 d) { +AstNumLit* make_float_literal(Context *context, f64 d) { // NOTE: Use convert_numlit_to_type to make this a concrete float - AstNumLit* num = onyx_ast_node_new(a, sizeof(AstNumLit), Ast_Kind_NumLit); + AstNumLit* num = onyx_ast_node_new(context->ast_alloc, sizeof(AstNumLit), Ast_Kind_NumLit); num->flags |= Ast_Flag_Comptime; - num->type_node = (AstType *) &basic_type_float_unsized; + num->type_node = (AstType *) &context->basic_types.type_float_unsized; num->value.d = d; return num; } -AstRangeLiteral* make_range_literal(bh_allocator a, AstTyped* low, AstTyped* high) { - AstRangeLiteral* rl = onyx_ast_node_new(a, sizeof(AstRangeLiteral), Ast_Kind_Range_Literal); - rl->type = builtin_range_type_type; +AstRangeLiteral* make_range_literal(Context *context, AstTyped* low, AstTyped* high) { + AstRangeLiteral* rl = onyx_ast_node_new(context->ast_alloc, sizeof(AstRangeLiteral), Ast_Kind_Range_Literal); rl->low = low; rl->high = high; return rl; } -AstStrLit* make_string_literal(bh_allocator a, OnyxToken *token) { - AstStrLit *str = onyx_ast_node_new(a, sizeof(AstStrLit), Ast_Kind_StrLit); +AstStrLit* make_string_literal(Context *context, OnyxToken *token) { + AstStrLit *str = onyx_ast_node_new(context->ast_alloc, sizeof(AstStrLit), Ast_Kind_StrLit); 
str->flags |= Ast_Flag_Comptime; - str->type_node = builtin_string_type; + str->type_node = context->builtins.string_type; str->token = token; return str; } -AstBinaryOp* make_binary_op(bh_allocator a, BinaryOp operation, AstTyped* left, AstTyped* right) { - AstBinaryOp* binop_node = onyx_ast_node_new(a, sizeof(AstBinaryOp), Ast_Kind_Binary_Op); +AstBinaryOp* make_binary_op(Context *context, BinaryOp operation, AstTyped* left, AstTyped* right) { + AstBinaryOp* binop_node = onyx_ast_node_new(context->ast_alloc, sizeof(AstBinaryOp), Ast_Kind_Binary_Op); binop_node->left = left; binop_node->right = right; binop_node->operation = operation; return binop_node; } -AstArgument* make_argument(bh_allocator a, AstTyped* value) { - AstArgument* arg = onyx_ast_node_new(a, sizeof(AstArgument), Ast_Kind_Argument); +AstArgument* make_argument(Context *context, AstTyped* value) { + AstArgument* arg = onyx_ast_node_new(context->ast_alloc, sizeof(AstArgument), Ast_Kind_Argument); if (value->token) arg->token = value->token; arg->value = value; arg->type = value->type; @@ -1614,8 +1698,8 @@ AstArgument* make_argument(bh_allocator a, AstTyped* value) { return arg; } -AstFieldAccess* make_field_access(bh_allocator a, AstTyped* node, char* field) { - AstFieldAccess* fa = onyx_ast_node_new(a, sizeof(AstFieldAccess), Ast_Kind_Field_Access); +AstFieldAccess* make_field_access(Context *context, AstTyped* node, char* field) { + AstFieldAccess* fa = onyx_ast_node_new(context->ast_alloc, sizeof(AstFieldAccess), Ast_Kind_Field_Access); if (node->token) fa->token = node->token; fa->field = field; fa->expr = node; @@ -1623,38 +1707,38 @@ AstFieldAccess* make_field_access(bh_allocator a, AstTyped* node, char* field) { return fa; } -AstAddressOf* make_address_of(bh_allocator a, AstTyped* node) { - AstAddressOf* ao = onyx_ast_node_new(a, sizeof(AstAddressOf), Ast_Kind_Address_Of); +AstAddressOf* make_address_of(Context *context, AstTyped* node) { + AstAddressOf* ao = 
onyx_ast_node_new(context->ast_alloc, sizeof(AstAddressOf), Ast_Kind_Address_Of); if (node->token) ao->token = node->token; ao->expr = node; return ao; } -AstLocal* make_local(bh_allocator a, OnyxToken* token, AstType* type_node) { - AstLocal* local = onyx_ast_node_new(a, sizeof(AstLocal), Ast_Kind_Local); +AstLocal* make_local(Context *context, OnyxToken* token, AstType* type_node) { + AstLocal* local = onyx_ast_node_new(context->ast_alloc, sizeof(AstLocal), Ast_Kind_Local); local->token = token; local->type_node = type_node; return local; } -AstLocal* make_local_with_type(bh_allocator a, OnyxToken* token, Type* type) { - AstLocal* local = onyx_ast_node_new(a, sizeof(AstLocal), Ast_Kind_Local); +AstLocal* make_local_with_type(Context *context, OnyxToken* token, Type* type) { + AstLocal* local = onyx_ast_node_new(context->ast_alloc, sizeof(AstLocal), Ast_Kind_Local); local->token = token; local->type = type; return local; } -AstNode* make_symbol(bh_allocator a, OnyxToken* sym) { - AstNode* symbol = onyx_ast_node_new(a, sizeof(AstTyped), Ast_Kind_Symbol); +AstNode* make_symbol(Context *context, OnyxToken* sym) { + AstNode* symbol = onyx_ast_node_new(context->ast_alloc, sizeof(AstTyped), Ast_Kind_Symbol); symbol->token = sym; return symbol; } -AstUnaryOp* make_cast(bh_allocator a, AstTyped* expr, Type* to) { - AstUnaryOp* cast = onyx_ast_node_new(a, sizeof(AstUnaryOp), Ast_Kind_Unary_Op); +AstUnaryOp* make_cast(Context *context, AstTyped* expr, Type* to) { + AstUnaryOp* cast = onyx_ast_node_new(context->ast_alloc, sizeof(AstUnaryOp), Ast_Kind_Unary_Op); cast->token = expr->token; cast->operation = Unary_Op_Cast; cast->expr = expr; @@ -1662,52 +1746,50 @@ AstUnaryOp* make_cast(bh_allocator a, AstTyped* expr, Type* to) { return cast; } -AstZeroValue* make_zero_value(bh_allocator a, OnyxToken* token, Type* type) { - AstZeroValue* zero_value = onyx_ast_node_new(a, sizeof(AstZeroValue), Ast_Kind_Zero_Value); +AstZeroValue* make_zero_value(Context *context, OnyxToken* 
token, Type* type) { + AstZeroValue* zero_value = onyx_ast_node_new(context->ast_alloc, sizeof(AstZeroValue), Ast_Kind_Zero_Value); zero_value->token = token; zero_value->flags |= Ast_Flag_Comptime; zero_value->type = type; return zero_value; } -AstStructLiteral* make_optional_literal_some(bh_allocator a, AstTyped *expr, Type *opt_type) { - AstStructLiteral *opt_lit = onyx_ast_node_new(a, sizeof(AstStructLiteral), Ast_Kind_Struct_Literal); +AstStructLiteral* make_optional_literal_some(Context *context, AstTyped *expr, Type *opt_type) { + AstStructLiteral *opt_lit = onyx_ast_node_new(context->ast_alloc, sizeof(AstStructLiteral), Ast_Kind_Struct_Literal); opt_lit->token = expr->token; - arguments_initialize(&opt_lit->args); - arguments_ensure_length(&opt_lit->args, 2); - opt_lit->args.values[0] = (AstTyped *) make_int_literal(a, 1); // 1 is Some - opt_lit->args.values[1] = expr; + bh_arr_new(context->ast_alloc, opt_lit->values_to_initialize, 2); + bh_arr_push(opt_lit->values_to_initialize, ((ValueWithOffset) { (AstTyped *) make_int_literal(context, 1), 0 })); // 1 is Some + bh_arr_push(opt_lit->values_to_initialize, ((ValueWithOffset) { expr, opt_type->Union.alignment })); opt_lit->type = opt_type; - opt_lit->args.values[0]->type = opt_type->Union.tag_type; + opt_lit->values_to_initialize[0].value->type = opt_type->Union.tag_type; opt_lit->flags |= Ast_Flag_Has_Been_Checked; return opt_lit; } -AstStructLiteral* make_union_variant_of_void(bh_allocator a, Type* union_type, OnyxToken* token, UnionVariant* variant) { - AstStructLiteral *lit = onyx_ast_node_new(a, sizeof(AstStructLiteral), Ast_Kind_Struct_Literal); +AstStructLiteral* make_union_variant_of_void(Context *context, Type* union_type, OnyxToken* token, UnionVariant* variant) { + AstStructLiteral *lit = onyx_ast_node_new(context->ast_alloc, sizeof(AstStructLiteral), Ast_Kind_Struct_Literal); lit->token = token; - assert(variant->type == &basic_types[Basic_Kind_Void]); + assert(variant->type == 
context->types.basic[Basic_Kind_Void]); - arguments_initialize(&lit->args); - arguments_ensure_length(&lit->args, 2); - lit->args.values[0] = (AstTyped *) make_int_literal(a, variant->tag_value); - lit->args.values[1] = (AstTyped *) make_zero_value(a, token, variant->type); + bh_arr_new(context->ast_alloc, lit->values_to_initialize, 2); + bh_arr_push(lit->values_to_initialize, ((ValueWithOffset) { (AstTyped *) make_int_literal(context, variant->tag_value), 0 })); + bh_arr_push(lit->values_to_initialize, ((ValueWithOffset) { (AstTyped *) make_zero_value(context, token, variant->type), union_type->Union.alignment })); lit->type = union_type; - lit->args.values[0]->type = union_type->Union.tag_type; + lit->values_to_initialize[0].value->type = union_type->Union.tag_type; lit->flags |= Ast_Flag_Has_Been_Checked; return lit; } -void arguments_initialize(Arguments* args) { - if (args->values == NULL) bh_arr_new(global_heap_allocator, args->values, 2); - if (args->named_values == NULL) bh_arr_new(global_heap_allocator, args->named_values, 2); +void arguments_initialize(Context *context, Arguments* args) { + if (args->values == NULL) bh_arr_new(context->gp_alloc, args->values, 2); + if (args->named_values == NULL) bh_arr_new(context->gp_alloc, args->named_values, 2); // CLEANUP: I'm not sure if I need to initialize these to NULL values, but it doesn't hurt. 
fori (i, 0, 2) { @@ -1718,7 +1800,7 @@ void arguments_initialize(Arguments* args) { args->used_argument_count = -1; } -void arguments_ensure_length(Arguments* args, u32 count) { +void arguments_ensure_length(Context *context, Arguments* args, u32 count) { // Make the array big enough bh_arr_grow(args->values, count); @@ -1729,7 +1811,7 @@ void arguments_ensure_length(Arguments* args, u32 count) { bh_arr_set_length(args->values, bh_max(count, (u32) bh_arr_length(args->values))); } -void arguments_copy(Arguments* dest, Arguments* src) { +void arguments_copy(Context *context, Arguments* dest, Arguments* src) { dest->used_argument_count = -1; dest->named_values = src->named_values; @@ -1740,25 +1822,25 @@ void arguments_copy(Arguments* dest, Arguments* src) { } // In clone, the named_values are not copied. This is used in find_matching_overload_by_arguments since it doesn't need them to be copied. -void arguments_clone(Arguments* dest, Arguments* src) { +void arguments_clone(Context *context, Arguments* dest, Arguments* src) { dest->used_argument_count = -1; dest->named_values = src->named_values; - dest->values = bh_arr_copy(global_heap_allocator, src->values); + dest->values = bh_arr_copy(context->gp_alloc, src->values); } -void arguments_deep_clone(bh_allocator a, Arguments* dest, Arguments* src) { +void arguments_deep_clone(Context *context, Arguments* dest, Arguments* src) { dest->used_argument_count = -1; dest->values = NULL; dest->named_values = NULL; - bh_arr_new(global_heap_allocator, dest->named_values, bh_arr_length(src->named_values)); - bh_arr_new(global_heap_allocator, dest->values, bh_arr_length(src->values)); + bh_arr_new(context->gp_alloc, dest->named_values, bh_arr_length(src->named_values)); + bh_arr_new(context->gp_alloc, dest->values, bh_arr_length(src->values)); bh_arr_each(AstNamedValue *, nv, src->named_values) - bh_arr_push(dest->named_values, (AstNamedValue *) ast_clone(a, *nv)); + bh_arr_push(dest->named_values, (AstNamedValue *) 
ast_clone(context, *nv)); bh_arr_each(AstTyped *, val, src->values) - bh_arr_push(dest->values, (AstTyped *) ast_clone(a, (AstNode *) *val)); + bh_arr_push(dest->values, (AstTyped *) ast_clone(context, (AstNode *) *val)); } void arguments_remove_baked(Arguments* args) { @@ -1794,9 +1876,9 @@ void arguments_clear_baked_flags(Arguments* args) { } // GROSS: Using void* to avoid having to cast everything. -const char* node_get_type_name(void* node) { +const char* node_get_type_name(Context *context, void* node) { if (((AstNode *) node)->kind == Ast_Kind_Argument) { - return node_get_type_name(((AstArgument *) node)->value); + return node_get_type_name(context, ((AstArgument *) node)->value); } if (((AstNode *) node)->kind == Ast_Kind_Polymorphic_Proc) { @@ -1808,28 +1890,28 @@ const char* node_get_type_name(void* node) { } if (((AstNode *) node)->kind == Ast_Kind_Alias) { - return node_get_type_name(((AstAlias *) node)->alias); + return node_get_type_name(context, ((AstAlias *) node)->alias); } - return type_get_name(((AstTyped *) node)->type); + return type_get_name(context, ((AstTyped *) node)->type); } -b32 static_if_resolution(AstIf* static_if) { +b32 static_if_resolution(Context *context, AstIf* static_if) { if (static_if->kind != Ast_Kind_Static_If) return 0; // assert(condition_value->kind == Ast_Kind_NumLit); // This should be right, right? 
- i64 value = get_expression_integer_value(static_if->cond, NULL); + i64 value = get_expression_integer_value(context, static_if->cond, NULL); return value != 0; } -AstPolyCallType* convert_call_to_polycall(AstCall* call) { +AstPolyCallType* convert_call_to_polycall(Context *context, AstCall* call) { // HACK HACK HACK - AstPolyCallType *pct = onyx_ast_node_new(context.ast_alloc, sizeof(AstPolyCallType), Ast_Kind_Poly_Call_Type); + AstPolyCallType *pct = onyx_ast_node_new(context->ast_alloc, sizeof(AstPolyCallType), Ast_Kind_Poly_Call_Type); pct->token = call->token; pct->next = call->next; pct->callee = (AstType *) call->callee; - pct->params = (AstNode **) bh_arr_copy(global_heap_allocator, call->args.values); + pct->params = (AstNode **) bh_arr_copy(context->gp_alloc, call->args.values); bh_arr_each(AstNode *, pp, pct->params) { if ((*pp)->kind == Ast_Kind_Argument) { *pp = (AstNode *) (*(AstArgument **) pp)->value; @@ -1839,10 +1921,36 @@ AstPolyCallType* convert_call_to_polycall(AstCall* call) { return pct; } +void insert_auto_dispose_call(Context *context, AstLocal *local) { + AstAddressOf *aof = make_address_of(context, (AstTyped *) local); + aof->token = local->token; + aof->can_be_removed = 1; + + AstCall *dispose_call = onyx_ast_node_new(context->ast_alloc, sizeof(AstCall), Ast_Kind_Call); + dispose_call->token = local->token; + dispose_call->callee = (AstTyped *) context->builtins.dispose_used_local; + + arguments_initialize(context, &dispose_call->args); + bh_arr_push(dispose_call->args.values, (AstTyped *) make_argument(context, (AstTyped *) aof)); + + AstDefer *defered = onyx_ast_node_new(context->ast_alloc, sizeof(AstDefer), Ast_Kind_Defer); + defered->token = local->token; + defered->stmt = (AstNode *) dispose_call; + + AstNode **insertion_point = &local->next; + if (local->type_node == NULL && (*insertion_point)->kind == Ast_Kind_Binary_Op) { + // Handle `use x := ...`. 
+ insertion_point = &(*insertion_point)->next; + } + + defered->next = *insertion_point; + *insertion_point = (AstNode *) defered; +} + -b32 resolve_intrinsic_interface_constraint(AstConstraint *constraint) { +b32 resolve_intrinsic_interface_constraint(Context *context, AstConstraint *constraint) { AstInterface *interface = constraint->interface; - Type* type = type_build_from_ast(context.ast_alloc, (AstType *) constraint->args[0]); + Type* type = type_build_from_ast(context, (AstType *) constraint->args[0]); if (!type) return 0; if (!strcmp(interface->name, "type_is_bool")) return type_is_bool(type); diff --git a/compiler/src/builtins.c b/compiler/src/builtins.c index 4f2024a3f..9b09684a1 100644 --- a/compiler/src/builtins.c +++ b/compiler/src/builtins.c @@ -18,116 +18,55 @@ static OnyxToken basic_type_f64_token = { Token_Type_Symbol, 3, "f64 " }; static OnyxToken basic_type_rawptr_token = { Token_Type_Symbol, 6, "rawptr " }; static OnyxToken basic_type_type_expr_token = { Token_Type_Symbol, 9, "type_expr " }; -AstBasicType basic_type_void = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_void_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_Void] }; -AstBasicType basic_type_bool = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_bool_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_Bool] }; -AstBasicType basic_type_i8 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_i8_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_I8] }; -AstBasicType basic_type_u8 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_u8_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_U8] }; -AstBasicType basic_type_i16 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_i16_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_I16] }; -AstBasicType basic_type_u16 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_u16_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_U16] }; -AstBasicType basic_type_i32 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, 
&basic_type_i32_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_I32] }; -AstBasicType basic_type_u32 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_u32_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_U32] }; -AstBasicType basic_type_i64 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_i64_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_I64] }; -AstBasicType basic_type_u64 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_u64_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_U64] }; -AstBasicType basic_type_f32 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_f32_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_F32] }; -AstBasicType basic_type_f64 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_f64_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_F64] }; -AstBasicType basic_type_rawptr = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_rawptr_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_Rawptr] }; -AstBasicType basic_type_type_expr = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_type_expr_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_Type_Index] }; - -// NOTE: Types used for numeric literals -AstBasicType basic_type_int_unsized = { Ast_Kind_Basic_Type, 0, NULL, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_Int_Unsized] }; -AstBasicType basic_type_float_unsized = { Ast_Kind_Basic_Type, 0, NULL, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_Float_Unsized] }; - -static OnyxToken simd_token = { Token_Type_Symbol, 0, "", { 0 } }; -AstBasicType basic_type_i8x16 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_I8X16] }; -AstBasicType basic_type_i16x8 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_I16X8] }; -AstBasicType basic_type_i32x4 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_I32X4] }; -AstBasicType basic_type_i64x2 = { 
Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_I64X2] }; -AstBasicType basic_type_f32x4 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_F32X4] }; -AstBasicType basic_type_f64x2 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_F64X2] }; -AstBasicType basic_type_v128 = { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, &basic_types[Basic_Kind_V128] }; - -// HACK -// :AutoReturnType -Type type_auto_return = { 0 }; -AstBasicType basic_type_auto_return = { Ast_Kind_Basic_Type, 0, &simd_token, NULL, NULL, 0, NULL, &type_auto_return }; - static OnyxToken builtin_heap_start_token = { Token_Type_Symbol, 12, "__heap_start ", { 0 } }; static OnyxToken builtin_stack_top_token = { Token_Type_Symbol, 11, "__stack_top ", { 0 } }; static OnyxToken builtin_tls_base_token = { Token_Type_Symbol, 10, "__tls_base ", { 0 } }; static OnyxToken builtin_tls_size_token = { Token_Type_Symbol, 10, "__tls_size ", { 0 } }; static OnyxToken builtin_closure_base_token = { Token_Type_Symbol, 14, "__closure_base ", { 0 } }; static OnyxToken builtin_stack_trace_token = { Token_Type_Symbol, 0, " ", { 0 } }; -AstGlobal builtin_heap_start = { Ast_Kind_Global, Ast_Flag_Const, &builtin_heap_start_token, NULL, NULL, (AstType *) &basic_type_rawptr, NULL }; -AstGlobal builtin_stack_top = { Ast_Kind_Global, 0, &builtin_stack_top_token, NULL, NULL, (AstType *) &basic_type_rawptr, NULL }; -AstGlobal builtin_tls_base = { Ast_Kind_Global, 0, &builtin_tls_base_token, NULL, NULL, (AstType *) &basic_type_rawptr, NULL }; -AstGlobal builtin_tls_size = { Ast_Kind_Global, 0, &builtin_tls_size_token, NULL, NULL, (AstType *) &basic_type_u32, NULL }; -AstGlobal builtin_closure_base = { Ast_Kind_Global, 0, &builtin_closure_base_token, NULL, NULL, (AstType *) &basic_type_rawptr, NULL }; -AstGlobal builtin_stack_trace = { Ast_Kind_Global, 0, 
&builtin_stack_trace_token, NULL, NULL, (AstType *) &basic_type_rawptr, NULL }; - -AstType *builtin_string_type; -AstType *builtin_cstring_type; -AstType *builtin_range_type; -Type *builtin_range_type_type; -AstType *builtin_vararg_type; -Type *builtin_vararg_type_type; -AstTyped *builtin_context_variable; -AstType *builtin_allocator_type; -AstType *builtin_iterator_type; -AstType *builtin_optional_type; -AstType *builtin_callsite_type; -AstType *builtin_any_type; -AstType *builtin_code_type; -AstType *builtin_link_options_type; -AstType *builtin_package_id_type; -AstType *builtin_stack_trace_type; - -AstTyped *type_table_node = NULL; -AstTyped *foreign_blocks_node = NULL; -AstType *foreign_block_type = NULL; -AstTyped *tagged_procedures_node = NULL; -AstTyped *tagged_globals_node = NULL; -AstFunction *builtin_initialize_data_segments = NULL; -AstFunction *builtin_run_init_procedures = NULL; -AstFunction *builtin_closure_block_allocate = NULL; -bh_arr(AstFunction *) init_procedures = NULL; -AstOverloadedFunction *builtin_implicit_bool_cast; + +static OnyxToken simd_token = { Token_Type_Symbol, 0, "", { 0 } }; + const BuiltinSymbol builtin_symbols[] = { - { NULL, "void", (AstNode *) &basic_type_void }, - { NULL, "bool", (AstNode *) &basic_type_bool }, - { NULL, "i8", (AstNode *) &basic_type_i8 }, - { NULL, "u8", (AstNode *) &basic_type_u8 }, - { NULL, "i16", (AstNode *) &basic_type_i16 }, - { NULL, "u16", (AstNode *) &basic_type_u16 }, - { NULL, "i32", (AstNode *) &basic_type_i32 }, - { NULL, "u32", (AstNode *) &basic_type_u32 }, - { NULL, "i64", (AstNode *) &basic_type_i64 }, - { NULL, "u64", (AstNode *) &basic_type_u64 }, - { NULL, "f32", (AstNode *) &basic_type_f32 }, - { NULL, "f64", (AstNode *) &basic_type_f64 }, - { NULL, "rawptr", (AstNode *) &basic_type_rawptr }, - { NULL, "type_expr", (AstNode *) &basic_type_type_expr }, - - { "simd", "i8x16", (AstNode *) &basic_type_i8x16 }, - { "simd", "i16x8", (AstNode *) &basic_type_i16x8 }, - { "simd", "i32x4", 
(AstNode *) &basic_type_i32x4 }, - { "simd", "i64x2", (AstNode *) &basic_type_i64x2 }, - { "simd", "f32x4", (AstNode *) &basic_type_f32x4 }, - { "simd", "f64x2", (AstNode *) &basic_type_f64x2 }, - { "simd", "v128", (AstNode *) &basic_type_v128 }, - - { "builtin", "__heap_start", (AstNode *) &builtin_heap_start }, - { "builtin", "__stack_top", (AstNode *) &builtin_stack_top }, - { "builtin", "__tls_base", (AstNode *) &builtin_tls_base }, - { "builtin", "__tls_size", (AstNode *) &builtin_tls_size }, - { "builtin", "__closure_base", (AstNode *) &builtin_closure_base }, - - { NULL, NULL, NULL }, + #define OFFSET(member) (isize) &((Context *) 0)->basic_types.member + { NULL, "void", OFFSET(type_void) }, + { NULL, "bool", OFFSET(type_bool) }, + { NULL, "i8", OFFSET(type_i8) }, + { NULL, "u8", OFFSET(type_u8) }, + { NULL, "i16", OFFSET(type_i16) }, + { NULL, "u16", OFFSET(type_u16) }, + { NULL, "i32", OFFSET(type_i32) }, + { NULL, "u32", OFFSET(type_u32) }, + { NULL, "i64", OFFSET(type_i64) }, + { NULL, "u64", OFFSET(type_u64) }, + { NULL, "f32", OFFSET(type_f32) }, + { NULL, "f64", OFFSET(type_f64) }, + { NULL, "rawptr", OFFSET(type_rawptr) }, + { NULL, "type_expr", OFFSET(type_type_expr) }, + + { "simd", "i8x16", OFFSET(type_i8x16) }, + { "simd", "i16x8", OFFSET(type_i16x8) }, + { "simd", "i32x4", OFFSET(type_i32x4) }, + { "simd", "i64x2", OFFSET(type_i64x2) }, + { "simd", "f32x4", OFFSET(type_f32x4) }, + { "simd", "f64x2", OFFSET(type_f64x2) }, + { "simd", "v128", OFFSET(type_v128 ) }, + #undef OFFSET + + #define OFFSET(member) (isize) &((Context *) 0)->builtins.member + { "builtin", "__heap_start", OFFSET(heap_start) }, + { "builtin", "__stack_top", OFFSET(stack_top) }, + { "builtin", "__tls_base", OFFSET(tls_base) }, + { "builtin", "__tls_size", OFFSET(tls_size) }, + { "builtin", "__closure_base", OFFSET(closure_base) }, + #undef OFFSET + + { NULL, NULL, 0 }, }; -IntrinsicTable intrinsic_table; -static IntrinsicMap builtin_intrinsics[] = { +const IntrinsicMap 
builtin_intrinsics[] = { { "unreachable", ONYX_INTRINSIC_UNREACHABLE }, { "memory_size", ONYX_INTRINSIC_MEMORY_SIZE }, @@ -395,245 +334,271 @@ static IntrinsicMap builtin_intrinsics[] = { { NULL, ONYX_INTRINSIC_UNDEFINED }, }; -bh_arr(OverloadOption) operator_overloads[Binary_Op_Count] = { 0 }; -bh_arr(OverloadOption) unary_operator_overloads[Unary_Op_Count] = { 0 }; - -void prepare_builtins() { - builtin_string_type = NULL; - builtin_cstring_type = NULL; - builtin_range_type = NULL; - builtin_range_type_type = NULL; - builtin_vararg_type = NULL; - builtin_vararg_type_type = NULL; - builtin_context_variable = NULL; - builtin_allocator_type = NULL; - builtin_iterator_type = NULL; - builtin_optional_type = NULL; - builtin_callsite_type = NULL; - builtin_any_type = NULL; - builtin_code_type = NULL; - builtin_link_options_type = NULL; - builtin_package_id_type = NULL; - - type_table_node = NULL; - foreign_blocks_node = NULL; - foreign_block_type = NULL; - tagged_procedures_node = NULL; - tagged_globals_node = NULL; - builtin_initialize_data_segments = NULL; - builtin_run_init_procedures = NULL; - init_procedures = NULL; - builtin_implicit_bool_cast = NULL; - - basic_type_void.scope = NULL; - basic_type_bool.scope = NULL; - basic_type_i8.scope = NULL; - basic_type_u8.scope = NULL; - basic_type_i16.scope = NULL; - basic_type_u16.scope = NULL; - basic_type_i32.scope = NULL; - basic_type_u32.scope = NULL; - basic_type_i64.scope = NULL; - basic_type_u64.scope = NULL; - basic_type_f32.scope = NULL; - basic_type_f64.scope = NULL; - basic_type_rawptr.scope = NULL; - basic_type_type_expr.scope = NULL; +void prepare_builtins(Context *context) { + // `types_init()` needs to be called first so the pointers in context->types.basic are valid + assert(context->types.basic[Basic_Kind_Void]); + + context->basic_types.type_void = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_void_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_Void] }); + 
context->basic_types.type_bool = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_bool_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_Bool] }); + context->basic_types.type_i8 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_i8_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_I8] }); + context->basic_types.type_u8 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_u8_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_U8] }); + context->basic_types.type_i16 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_i16_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_I16] }); + context->basic_types.type_u16 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_u16_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_U16] }); + context->basic_types.type_i32 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_i32_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_I32] }); + context->basic_types.type_u32 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_u32_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_U32] }); + context->basic_types.type_i64 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_i64_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_I64] }); + context->basic_types.type_u64 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_u64_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_U64] }); + context->basic_types.type_f32 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_f32_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_F32] }); + context->basic_types.type_f64 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_f64_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_F64] }); + context->basic_types.type_rawptr = ((AstBasicType) { 
Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_rawptr_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_Rawptr] }); + context->basic_types.type_type_expr = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &basic_type_type_expr_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_Type_Index] }); + + // NOTE: Types used for numeric literals + context->basic_types.type_int_unsized = ((AstBasicType) { Ast_Kind_Basic_Type, 0, NULL, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_Int_Unsized] }); + context->basic_types.type_float_unsized = ((AstBasicType) { Ast_Kind_Basic_Type, 0, NULL, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_Float_Unsized] }); + + context->basic_types.type_i8x16 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_I8X16] }); + context->basic_types.type_i16x8 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_I16X8] }); + context->basic_types.type_i32x4 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_I32X4] }); + context->basic_types.type_i64x2 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_I64X2] }); + context->basic_types.type_f32x4 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_F32X4] }); + context->basic_types.type_f64x2 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_F64X2] }); + context->basic_types.type_v128 = ((AstBasicType) { Ast_Kind_Basic_Type, Ast_Flag_Comptime, &simd_token, NULL, NULL, 0, NULL, context->types.basic[Basic_Kind_V128] }); + + // HACK + // :AutoReturnType + context->types.auto_return = bh_alloc_item(context->ast_alloc, Type); + 
context->basic_types.type_auto_return = ((AstBasicType) { Ast_Kind_Basic_Type, 0, &simd_token, NULL, NULL, 0, NULL, context->types.auto_return }); + + // Builtins + context->builtins.heap_start = ((AstGlobal) { Ast_Kind_Global, Ast_Flag_Const, &builtin_heap_start_token, NULL, NULL, (AstType *) &context->basic_types.type_rawptr, NULL }); + context->builtins.stack_top = ((AstGlobal) { Ast_Kind_Global, 0, &builtin_stack_top_token, NULL, NULL, (AstType *) &context->basic_types.type_rawptr, NULL }); + context->builtins.tls_base = ((AstGlobal) { Ast_Kind_Global, 0, &builtin_tls_base_token, NULL, NULL, (AstType *) &context->basic_types.type_rawptr, NULL }); + context->builtins.tls_size = ((AstGlobal) { Ast_Kind_Global, 0, &builtin_tls_size_token, NULL, NULL, (AstType *) &context->basic_types.type_u32, NULL }); + context->builtins.closure_base = ((AstGlobal) { Ast_Kind_Global, 0, &builtin_closure_base_token, NULL, NULL, (AstType *) &context->basic_types.type_rawptr, NULL }); + context->builtins.stack_trace = ((AstGlobal) { Ast_Kind_Global, 0, &builtin_stack_trace_token, NULL, NULL, (AstType *) &context->basic_types.type_rawptr, NULL }); + + context->node_that_signals_a_yield.kind = Ast_Kind_Function; } -void initialize_builtins(bh_allocator a) { +void initialize_builtins(Context *context) { + bh_allocator a = context->gp_alloc; + BuiltinSymbol* bsym = (BuiltinSymbol *) &builtin_symbols[0]; while (bsym->sym != NULL) { - if (bsym->package == NULL) - symbol_builtin_introduce(context.global_scope, bsym->sym, bsym->node); - else { - Package* p = package_lookup_or_create(bsym->package, context.global_scope, a, context.global_scope->created_at); + AstNode *node = (AstNode *) bh_pointer_add(context, bsym->offset); + + if (bsym->package == NULL) { + symbol_builtin_introduce(context, context->global_scope, bsym->sym, node); + + } else { + Package* p = package_lookup_or_create(context, bsym->package, context->global_scope, context->global_scope->created_at); assert(p); - 
symbol_builtin_introduce(p->scope, bsym->sym, bsym->node); + symbol_builtin_introduce(context, p->scope, bsym->sym, node); } + bsym++; } - Package* p = package_lookup_or_create("builtin", context.global_scope, a, context.global_scope->created_at); + Package* p = package_lookup_or_create(context, "builtin", context->global_scope, context->global_scope->created_at); - builtin_string_type = (AstType *) symbol_raw_resolve(p->scope, "str"); - if (builtin_string_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'str' struct not found in builtin package."); + context->builtins.string_type = (AstType *) symbol_raw_resolve(context, p->scope, "str"); + if (context->builtins.string_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'str' struct not found in builtin package."); return; } - builtin_cstring_type = (AstType *) symbol_raw_resolve(p->scope, "cstr"); - if (builtin_cstring_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'cstr' type not found in builtin package."); + context->builtins.cstring_type = (AstType *) symbol_raw_resolve(context, p->scope, "cstr"); + if (context->builtins.cstring_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'cstr' type not found in builtin package."); return; } - builtin_range_type = (AstType *) symbol_raw_resolve(p->scope, "range"); - if (builtin_range_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'range' struct not found in builtin package."); + context->builtins.range_type = (AstType *) symbol_raw_resolve(context, p->scope, "range"); + if (context->builtins.range_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'range' struct not found in builtin package."); return; } - builtin_vararg_type = (AstType *) symbol_raw_resolve(p->scope, "vararg"); - if (builtin_range_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'vararg' struct not found in builtin package."); + 
context->builtins.range64_type = (AstType *) symbol_raw_resolve(context, p->scope, "range64"); + if (context->builtins.range64_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'range64' struct not found in builtin package."); return; } - builtin_context_variable = (AstTyped *) symbol_raw_resolve(p->scope, "context"); - if (builtin_context_variable == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'context' variable not found in builtin package."); + context->builtins.vararg_type = (AstType *) symbol_raw_resolve(context, p->scope, "vararg"); + if (context->builtins.range_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'vararg' struct not found in builtin package."); return; } - builtin_allocator_type = (AstType *) symbol_raw_resolve(p->scope, "Allocator"); - if (builtin_allocator_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'Allocator' struct not found in builtin package."); + context->builtins.context_variable = (AstTyped *) symbol_raw_resolve(context, p->scope, "context"); + if (context->builtins.context_variable == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'context' variable not found in builtin package."); return; } - builtin_iterator_type = (AstType *) symbol_raw_resolve(p->scope, "Iterator"); - if (builtin_iterator_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'Iterator' struct not found in builtin package."); + context->builtins.allocator_type = (AstType *) symbol_raw_resolve(context, p->scope, "Allocator"); + if (context->builtins.allocator_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'Allocator' struct not found in builtin package."); return; } - builtin_optional_type = (AstType *) symbol_raw_resolve(p->scope, "Optional"); - if (builtin_optional_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'Optional' struct not found in builtin package."); + context->builtins.iterator_type = 
(AstType *) symbol_raw_resolve(context, p->scope, "Iterator"); + if (context->builtins.iterator_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'Iterator' struct not found in builtin package."); return; } - builtin_callsite_type = (AstType *) symbol_raw_resolve(p->scope, "CallSite"); - if (builtin_callsite_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'CallSite' struct not found in builtin package."); + context->builtins.optional_type = (AstType *) symbol_raw_resolve(context, p->scope, "Optional"); + if (context->builtins.optional_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'Optional' struct not found in builtin package."); return; } - builtin_any_type = (AstType *) symbol_raw_resolve(p->scope, "any"); - if (builtin_any_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'any' struct not found in builtin package."); + context->builtins.callsite_type = (AstType *) symbol_raw_resolve(context, p->scope, "CallSite"); + if (context->builtins.callsite_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'CallSite' struct not found in builtin package."); return; } - builtin_code_type = (AstType *) symbol_raw_resolve(p->scope, "Code"); - if (builtin_code_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'Code' struct not found in builtin package."); + context->builtins.any_type = (AstType *) symbol_raw_resolve(context, p->scope, "any"); + if (context->builtins.any_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'any' struct not found in builtin package."); return; } - builtin_initialize_data_segments = (AstFunction *) symbol_raw_resolve(p->scope, "__initialize_data_segments"); - if (builtin_initialize_data_segments == NULL || builtin_initialize_data_segments->kind != Ast_Kind_Function) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'__initialize_data_segments' procedure not found in builtin package."); + 
context->builtins.code_type = (AstType *) symbol_raw_resolve(context, p->scope, "Code"); + if (context->builtins.code_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'Code' struct not found in builtin package."); return; } - builtin_run_init_procedures = (AstFunction *) symbol_raw_resolve(p->scope, "__run_init_procedures"); - if (builtin_run_init_procedures == NULL || builtin_run_init_procedures->kind != Ast_Kind_Function) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'__run_init_procedures' procedure not found."); + context->builtins.array_type = (AstType *) symbol_raw_resolve(context, p->scope, "Array"); + if (context->builtins.array_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'Array' struct not found in builtin package."); return; } - builtin_implicit_bool_cast = (AstOverloadedFunction *) symbol_raw_resolve(p->scope, "__implicit_bool_cast"); - if (builtin_implicit_bool_cast == NULL || builtin_implicit_bool_cast->kind != Ast_Kind_Overloaded_Function) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'__implicit_bool_cast' #match procedure not found."); + context->builtins.slice_type = (AstType *) symbol_raw_resolve(context, p->scope, "Slice"); + if (context->builtins.slice_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'Slice' struct not found in builtin package."); return; } - builtin_closure_block_allocate = (AstFunction *) symbol_raw_resolve(p->scope, "__closure_block_allocate"); - if (builtin_closure_block_allocate == NULL || builtin_closure_block_allocate->kind != Ast_Kind_Function) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'__closure_block_allocate' procedure not found."); + context->builtins.initialize_data_segments = (AstFunction *) symbol_raw_resolve(context, p->scope, "__initialize_data_segments"); + if (context->builtins.initialize_data_segments == NULL || context->builtins.initialize_data_segments->kind != Ast_Kind_Function) { + 
ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'__initialize_data_segments' procedure not found in builtin package."); return; } + context->builtins.run_init_procedures = (AstFunction *) symbol_raw_resolve(context, p->scope, "__run_init_procedures"); + if (context->builtins.run_init_procedures == NULL || context->builtins.run_init_procedures->kind != Ast_Kind_Function) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'__run_init_procedures' procedure not found."); + return; + } - builtin_link_options_type = (AstType *) symbol_raw_resolve(p->scope, "Link_Options"); - if (builtin_link_options_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'Link_Options' type not found."); + context->builtins.implicit_bool_cast = (AstOverloadedFunction *) symbol_raw_resolve(context, p->scope, "__implicit_bool_cast"); + if (context->builtins.implicit_bool_cast == NULL || context->builtins.implicit_bool_cast->kind != Ast_Kind_Overloaded_Function) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'__implicit_bool_cast' #match procedure not found."); return; } - builtin_package_id_type = (AstType *) symbol_raw_resolve(p->scope, "package_id"); - if (builtin_package_id_type == NULL) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, "'package_id' type not found."); + context->builtins.dispose_used_local = (AstOverloadedFunction *) symbol_raw_resolve(context, p->scope, "__dispose_used_local"); + if (context->builtins.dispose_used_local == NULL || context->builtins.dispose_used_local->kind != Ast_Kind_Overloaded_Function) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'__dispose_used_local' #match procedure not found."); return; } - init_procedures = NULL; - bh_arr_new(global_heap_allocator, init_procedures, 4); + context->builtins.closure_block_allocate = (AstFunction *) symbol_raw_resolve(context, p->scope, "__closure_block_allocate"); + if (context->builtins.closure_block_allocate == NULL || context->builtins.closure_block_allocate->kind != 
Ast_Kind_Function) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'__closure_block_allocate' procedure not found."); + return; + } - fori (i, 0, Binary_Op_Count) { - operator_overloads[i] = NULL; - bh_arr_new(global_heap_allocator, operator_overloads[i], 4); + + context->builtins.link_options_type = (AstType *) symbol_raw_resolve(context, p->scope, "Link_Options"); + if (context->builtins.link_options_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'Link_Options' type not found."); + return; } - fori (i, 0, Unary_Op_Count) { - unary_operator_overloads[i] = NULL; - bh_arr_new(global_heap_allocator, unary_operator_overloads[i], 4); + context->builtins.package_id_type = (AstType *) symbol_raw_resolve(context, p->scope, "package_id"); + if (context->builtins.package_id_type == NULL) { + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "'package_id' type not found."); + return; } - IntrinsicMap* intrinsic = &builtin_intrinsics[0]; - intrinsic_table = NULL; - while (intrinsic->name != NULL) { - shput(intrinsic_table, intrinsic->name, intrinsic->intrinsic); - intrinsic++; + context->builtins.init_procedures = NULL; + bh_arr_new(context->gp_alloc, context->builtins.init_procedures, 4); + + fori (i, 0, Binary_Op_Count) { + context->operator_overloads[i] = NULL; + bh_arr_new(context->gp_alloc, context->operator_overloads[i], 4); + } + + fori (i, 0, Unary_Op_Count) { + context->unary_operator_overloads[i] = NULL; + bh_arr_new(context->gp_alloc, context->unary_operator_overloads[i], 4); } } -void initalize_special_globals() { - Package *p = package_lookup("runtime.info"); +void initalize_special_globals(Context *context) { + Package *p = package_lookup(context, "runtime.info"); if (p != NULL) { - type_table_node = (AstTyped *) symbol_raw_resolve(p->scope, "type_table"); - foreign_blocks_node = (AstTyped *) symbol_raw_resolve(p->scope, "foreign_blocks"); - foreign_block_type = (AstType *) symbol_raw_resolve(p->scope, "foreign_block"); - 
tagged_procedures_node = (AstTyped *) symbol_raw_resolve(p->scope, "tagged_procedures"); - tagged_globals_node = (AstTyped *) symbol_raw_resolve(p->scope, "tagged_globals"); - - if (context.options->stack_trace_enabled) { - builtin_stack_trace_type = (AstType *) symbol_raw_resolve(p->scope, "Stack_Trace"); + context->builtins.type_table_node = (AstTyped *) symbol_raw_resolve(context, p->scope, "type_table"); + context->builtins.foreign_blocks_node = (AstTyped *) symbol_raw_resolve(context, p->scope, "foreign_blocks"); + context->builtins.foreign_block_type = (AstType *) symbol_raw_resolve(context, p->scope, "foreign_block"); + context->builtins.tagged_procedures_node = (AstTyped *) symbol_raw_resolve(context, p->scope, "tagged_procedures"); + context->builtins.tagged_globals_node = (AstTyped *) symbol_raw_resolve(context, p->scope, "tagged_globals"); + + if (context->options->stack_trace_enabled) { + context->builtins.stack_trace_type = (AstType *) symbol_raw_resolve(context, p->scope, "Stack_Trace"); } } } -void introduce_build_options(bh_allocator a) { - Package* p = package_lookup_or_create("runtime", context.global_scope, a, context.global_scope->created_at); +void introduce_build_options(Context *context) { + bh_allocator a = context->ast_alloc; + + Package* p = package_lookup_or_create(context, "runtime", context->global_scope, context->global_scope->created_at); // HACK creating this for later - package_lookup_or_create("runtime.vars", p->scope, a, context.global_scope->created_at); + package_lookup_or_create(context, "runtime.vars", p->scope, context->global_scope->created_at); - AstType* Runtime_Type = (AstType *) symbol_raw_resolve(p->scope, "Runtime"); + AstType* Runtime_Type = (AstType *) symbol_raw_resolve(context, p->scope, "Runtime"); if (Runtime_Type == NULL) { - onyx_report_error((OnyxFilePos) {0}, Error_Critical, "'Runtime' type not found in package runtime."); + ONYX_ERROR((OnyxFilePos) {0}, Error_Critical, "'Runtime' type not found in package 
runtime."); return; } - AstNumLit* runtime_type = make_int_literal(a, context.options->runtime); + AstNumLit* runtime_type = make_int_literal(context, context->options->runtime); runtime_type->type_node = Runtime_Type; - add_entities_for_node(NULL, (AstNode *) runtime_type, NULL, NULL); - symbol_builtin_introduce(p->scope, "runtime", (AstNode *) runtime_type); + add_entities_for_node(&context->entities, NULL, (AstNode *) runtime_type, NULL, NULL); + symbol_builtin_introduce(context, p->scope, "runtime", (AstNode *) runtime_type); - AstNumLit* multi_threaded = make_int_literal(a, context.options->use_multi_threading); - multi_threaded->type_node = (AstType *) &basic_type_bool; - symbol_builtin_introduce(p->scope, "Multi_Threading_Enabled", (AstNode *) multi_threaded); + AstNumLit* multi_threaded = make_int_literal(context, context->options->use_multi_threading); + multi_threaded->type_node = (AstType *) &context->basic_types.type_bool; + symbol_builtin_introduce(context, p->scope, "Multi_Threading_Enabled", (AstNode *) multi_threaded); - AstNumLit* debug_mode = make_int_literal(a, context.options->debug_info_enabled); - debug_mode->type_node = (AstType *) &basic_type_bool; - symbol_builtin_introduce(p->scope, "Debug_Mode_Enabled", (AstNode *) debug_mode); + AstNumLit* debug_mode = make_int_literal(context, context->options->debug_info_enabled); + debug_mode->type_node = (AstType *) &context->basic_types.type_bool; + symbol_builtin_introduce(context, p->scope, "Debug_Mode_Enabled", (AstNode *) debug_mode); - AstNumLit* stack_trace = make_int_literal(a, context.options->stack_trace_enabled); - stack_trace->type_node = (AstType *) &basic_type_bool; - symbol_builtin_introduce(p->scope, "Stack_Trace_Enabled", (AstNode *) stack_trace); + AstNumLit* stack_trace = make_int_literal(context, context->options->stack_trace_enabled); + stack_trace->type_node = (AstType *) &context->basic_types.type_bool; + symbol_builtin_introduce(context, p->scope, "Stack_Trace_Enabled", 
(AstNode *) stack_trace); - AstNumLit* version_major = make_int_literal(a, VERSION_MAJOR); - version_major->type_node = (AstType *) &basic_type_i32; - AstNumLit* version_minor = make_int_literal(a, VERSION_MINOR); - version_minor->type_node = (AstType *) &basic_type_i32; - AstNumLit* version_patch = make_int_literal(a, VERSION_PATCH); - version_patch->type_node = (AstType *) &basic_type_i32; - symbol_builtin_introduce(p->scope, "onyx_version_major", (AstNode *) version_major); - symbol_builtin_introduce(p->scope, "onyx_version_minor", (AstNode *) version_minor); - symbol_builtin_introduce(p->scope, "onyx_version_patch", (AstNode *) version_patch); + AstNumLit* version_major = make_int_literal(context, VERSION_MAJOR); + version_major->type_node = (AstType *) &context->basic_types.type_i32; + AstNumLit* version_minor = make_int_literal(context, VERSION_MINOR); + version_minor->type_node = (AstType *) &context->basic_types.type_i32; + AstNumLit* version_patch = make_int_literal(context, VERSION_PATCH); + version_patch->type_node = (AstType *) &context->basic_types.type_i32; + symbol_builtin_introduce(context, p->scope, "onyx_version_major", (AstNode *) version_major); + symbol_builtin_introduce(context, p->scope, "onyx_version_minor", (AstNode *) version_minor); + symbol_builtin_introduce(context, p->scope, "onyx_version_patch", (AstNode *) version_patch); i32 os; @@ -647,16 +612,16 @@ void introduce_build_options(bh_allocator a) { os = 3; #endif - AstType* OS_Type = (AstType *) symbol_raw_resolve(p->scope, "OS"); + AstType* OS_Type = (AstType *) symbol_raw_resolve(context, p->scope, "OS"); if (OS_Type == NULL) { - onyx_report_error((OnyxFilePos) {0}, Error_Critical, "'OS' type not found in package runtime."); + ONYX_ERROR((OnyxFilePos) {0}, Error_Critical, "'OS' type not found in package runtime."); return; } - AstNumLit* os_type = make_int_literal(a, os); + AstNumLit* os_type = make_int_literal(context, os); os_type->type_node = OS_Type; - 
add_entities_for_node(NULL, (AstNode *) os_type, NULL, NULL); - symbol_builtin_introduce(p->scope, "compiler_os", (AstNode *) os_type); + add_entities_for_node(&context->entities, NULL, (AstNode *) os_type, NULL, NULL); + symbol_builtin_introduce(context, p->scope, "compiler_os", (AstNode *) os_type); i32 arch = 0; #if defined(__x86_64__) || defined(_M_X64) @@ -667,21 +632,21 @@ void introduce_build_options(bh_allocator a) { arch = 3; // AARCH64; #endif - AstType* Arch_Type = (AstType *) symbol_raw_resolve(p->scope, "Arch"); + AstType* Arch_Type = (AstType *) symbol_raw_resolve(context, p->scope, "Arch"); if (Arch_Type == NULL) { - onyx_report_error((OnyxFilePos) {0}, Error_Critical, "'Arch' type not found in package runtime."); + ONYX_ERROR((OnyxFilePos) {0}, Error_Critical, "'Arch' type not found in package runtime."); return; } - AstNumLit* arch_type = make_int_literal(a, arch); + AstNumLit* arch_type = make_int_literal(context, arch); arch_type->type_node = Arch_Type; - add_entities_for_node(NULL, (AstNode *) arch_type, NULL, NULL); - symbol_builtin_introduce(p->scope, "arch", (AstNode *) arch_type); + add_entities_for_node(&context->entities, NULL, (AstNode *) arch_type, NULL, NULL); + symbol_builtin_introduce(context, p->scope, "arch", (AstNode *) arch_type); - if (context.options->generate_foreign_info) { - AstNumLit* foreign_info = make_int_literal(a, 1); - foreign_info->type_node = (AstType *) &basic_type_bool; - symbol_builtin_introduce(p->scope, "Generated_Foreign_Info", (AstNode *) foreign_info); + if (context->options->generate_foreign_info) { + AstNumLit* foreign_info = make_int_literal(context, 1); + foreign_info->type_node = (AstType *) &context->basic_types.type_bool; + symbol_builtin_introduce(context, p->scope, "Generated_Foreign_Info", (AstNode *) foreign_info); } } diff --git a/compiler/src/checker.c b/compiler/src/checker.c index eff515c9a..9f4b2e3bb 100644 --- a/compiler/src/checker.c +++ b/compiler/src/checker.c @@ -1,148 +1,273 @@ +#ifndef 
BH_INTERNAL_ALLOCATOR + #define BH_INTERNAL_ALLOCATOR (context->gp_alloc) +#endif + #include "astnodes.h" #include "types.h" -#undef BH_INTERNAL_ALLOCATOR -#define BH_INTERNAL_ALLOCATOR (global_heap_allocator) -#define BH_DEBUG #include "parser.h" #include "utils.h" +#include "doc.h" // All of the `check` functions return a boolean that signals if an issue // was reached while processing the node. These error booleans propagate // up the call stack until they reach `check_entity`. #define CHECK(kind, ...) do { \ - CheckStatus cs = check_ ## kind (__VA_ARGS__); \ + CheckStatus cs = check_ ## kind (context, __VA_ARGS__); \ if (cs > Check_Errors_Start) return cs; \ } while (0) +#define CHECK_INVISIBLE(kind, node, ...) do { \ + (node)->flags |= Ast_Flag_Symbol_Invisible; \ + CheckStatus cs = check_ ## kind (context, __VA_ARGS__); \ + (node)->flags &= ~Ast_Flag_Symbol_Invisible; \ + if (cs > Check_Errors_Start) return cs; \ + } while (0) + #define YIELD(loc, msg) do { \ - if (context.cycle_detected) { \ - onyx_report_error(loc, Error_Waiting_On, msg); \ + if (context->cycle_detected) { \ + ONYX_ERROR(loc, Error_Waiting_On, msg); \ return Check_Error; \ } else { \ - return Check_Yield_Macro; \ + return Check_Yield; \ } \ } while (0) #define YIELD_(loc, msg, ...) do { \ - if (context.cycle_detected) { \ - onyx_report_error(loc, Error_Waiting_On, msg, __VA_ARGS__); \ + if (context->cycle_detected) { \ + ONYX_ERROR(loc, Error_Waiting_On, msg, __VA_ARGS__); \ return Check_Error; \ } else { \ - return Check_Yield_Macro; \ + return Check_Yield; \ } \ } while (0) #define YIELD_ERROR(loc, msg) do { \ - if (context.cycle_detected) { \ - onyx_report_error(loc, Error_Critical, msg); \ + if (context->cycle_detected) { \ + ONYX_ERROR(loc, Error_Critical, msg); \ + return Check_Error; \ + } else { \ + return Check_Yield; \ + } \ + } while (0) + +#define YIELD_ERROR_(loc, msg, ...) 
do { \ + if (context->cycle_detected) { \ + ONYX_ERROR(loc, Error_Critical, msg, __VA_ARGS__); \ return Check_Error; \ } else { \ - return Check_Yield_Macro; \ + return Check_Yield; \ } \ } while (0) #define ERROR(loc, msg) do { \ - onyx_report_error(loc, Error_Critical, msg); \ + ONYX_ERROR(loc, Error_Critical, msg); \ return Check_Error; \ } while (0) #define ERROR_(loc, msg, ...) do { \ - onyx_report_error(loc, Error_Critical, msg, __VA_ARGS__); \ + ONYX_ERROR(loc, Error_Critical, msg, __VA_ARGS__); \ return Check_Error; \ } while (0) #define TYPE_CHECK_(expr, type, type_name) \ TypeMatch type_name; \ - type_name = unify_node_and_type(expr, type); \ - if (type_name == TYPE_MATCH_YIELD) YIELD((*expr)->token->pos, "Waiting on type checking."); \ - if (type_name == TYPE_MATCH_SPECIAL) return Check_Return_To_Symres; \ + type_name = unify_node_and_type(context, expr, type); \ + if (type_name == TYPE_MATCH_YIELD || type_name == TYPE_MATCH_SPECIAL) YIELD((*expr)->token->pos, "Waiting on type checking."); \ if (type_name == TYPE_MATCH_FAILED) +#define TYPE_QUERY_(expr, type, type_name) \ + TypeMatch type_name; \ + type_name = unify_node_and_type_(context, expr, type, 0); \ + if (type_name == TYPE_MATCH_YIELD || type_name == TYPE_MATCH_SPECIAL) YIELD((*expr)->token->pos, "Waiting on type checking."); \ + if (type_name == TYPE_MATCH_SUCCESS) + #define CONCAT(a, b) a##_##b #define DEFER_LINE(a, line) CONCAT(a, line) #define TYPE_CHECK(expr, type) TYPE_CHECK_(expr, type, DEFER_LINE(tc, __LINE__)) +#define TYPE_QUERY(expr, type) TYPE_QUERY_(expr, type, DEFER_LINE(tc, __LINE__)) typedef enum CheckStatus { Check_Success, // The node was successfully checked with out errors Check_Complete, // The node is done processing Check_Errors_Start, - Check_Return_To_Symres, // Return this node for further symres processing - Check_Yield_Macro, + Check_Goto_Parse, + Check_Yield, Check_Failed, // The node is done processing and should be put in the state of Failed. 
Check_Error, // There was an error when checking the node } CheckStatus; -CheckStatus check_block(AstBlock* block); -CheckStatus check_statement_chain(AstNode** start); -CheckStatus check_statement(AstNode** pstmt); -CheckStatus check_return(AstReturn* retnode); -CheckStatus check_if(AstIfWhile* ifnode); -CheckStatus check_while(AstIfWhile* whilenode); -CheckStatus check_for(AstFor* fornode); -CheckStatus check_switch(AstSwitch* switchnode); -CheckStatus check_call(AstCall** pcall); -CheckStatus check_binaryop(AstBinaryOp** pbinop); -CheckStatus check_unaryop(AstUnaryOp** punop); -CheckStatus check_struct_literal(AstStructLiteral* sl); -CheckStatus check_array_literal(AstArrayLiteral* al); -CheckStatus check_range_literal(AstRangeLiteral** range); -CheckStatus check_compound(AstCompound* compound); -CheckStatus check_if_expression(AstIfExpression* if_expr); -CheckStatus check_expression(AstTyped** expr); -CheckStatus check_address_of(AstAddressOf** paof); -CheckStatus check_dereference(AstDereference* deref); -CheckStatus check_subscript(AstSubscript** paa); -CheckStatus check_field_access(AstFieldAccess** pfield); -CheckStatus check_method_call(AstBinaryOp** mcall); -CheckStatus check_size_of(AstSizeOf* so); -CheckStatus check_align_of(AstAlignOf* ao); -CheckStatus check_global(AstGlobal* global); -CheckStatus check_function(AstFunction* func); -CheckStatus check_overloaded_function(AstOverloadedFunction* func); -CheckStatus check_struct(AstStructType* s_node); -CheckStatus check_temp_function_header(AstFunction* func); -CheckStatus check_function_header(AstFunction* func); -CheckStatus check_memres_type(AstMemRes* memres); -CheckStatus check_memres(AstMemRes* memres); -CheckStatus check_type(AstType** ptype); -CheckStatus check_insert_directive(AstDirectiveInsert** pinsert, b32 expected_expression); -CheckStatus check_directive_solidify(AstDirectiveSolidify** psolid); -CheckStatus check_do_block(AstDoBlock** pdoblock); -CheckStatus check_constraint(AstConstraint 
*constraint); -CheckStatus check_constraint_context(ConstraintContext *cc, Scope *scope, OnyxFilePos pos); -CheckStatus check_polyquery(AstPolyQuery *query); -CheckStatus check_directive_first(AstDirectiveFirst *first); -CheckStatus check_directive_export_name(AstDirectiveExportName *ename); +#define CHECK_FUNC(name, ...) CheckStatus check_##name(Context *context, __VA_ARGS__) + +CHECK_FUNC(block, AstBlock* block); +CHECK_FUNC(statement_chain, AstNode** start); +CHECK_FUNC(statement, AstNode** pstmt); +CHECK_FUNC(return, AstReturn* retnode); +CHECK_FUNC(if, AstIfWhile* ifnode); +CHECK_FUNC(while, AstIfWhile* whilenode); +CHECK_FUNC(for, AstFor* fornode); +CHECK_FUNC(case, AstSwitchCase *casenode); +CHECK_FUNC(switch, AstSwitch* switchnode); +CHECK_FUNC(call, AstCall** pcall); +CHECK_FUNC(binaryop, AstBinaryOp** pbinop); +CHECK_FUNC(unaryop, AstUnaryOp** punop); +CHECK_FUNC(struct_literal, AstStructLiteral* sl); +CHECK_FUNC(array_literal, AstArrayLiteral* al); +CHECK_FUNC(range_literal, AstRangeLiteral** range); +CHECK_FUNC(compound, AstCompound* compound); +CHECK_FUNC(if_expression, AstIfExpression* if_expr); +CHECK_FUNC(expression, AstTyped** expr); +CHECK_FUNC(address_of, AstAddressOf** paof); +CHECK_FUNC(dereference, AstDereference* deref); +CHECK_FUNC(subscript, AstSubscript** paa); +CHECK_FUNC(field_access, AstFieldAccess** pfield); +CHECK_FUNC(method_call, AstBinaryOp** mcall); +CHECK_FUNC(size_of, AstSizeOf* so); +CHECK_FUNC(align_of, AstAlignOf* ao); +CHECK_FUNC(global, AstGlobal* global); +CHECK_FUNC(function, AstFunction* func); +CHECK_FUNC(overloaded_function, AstOverloadedFunction* func); +CHECK_FUNC(struct, AstStructType* s_node); +CHECK_FUNC(temp_function_header, AstFunction* func); +CHECK_FUNC(function_header, AstFunction* func); +CHECK_FUNC(memres_type, AstMemRes* memres); +CHECK_FUNC(memres, AstMemRes* memres); +CHECK_FUNC(type, AstType** ptype); +CHECK_FUNC(insert_directive, AstDirectiveInsert** pinsert, b32 expected_expression); 
+CHECK_FUNC(directive_solidify, AstDirectiveSolidify** psolid); +CHECK_FUNC(directive_defined, AstDirectiveDefined** pdefined); +CHECK_FUNC(do_block, AstDoBlock** pdoblock); +CHECK_FUNC(constraint, AstConstraint *constraint); +CHECK_FUNC(constraint_context, ConstraintContext *cc, Scope *scope, OnyxFilePos pos); +CHECK_FUNC(polyquery, AstPolyQuery *query); +CHECK_FUNC(directive_first, AstDirectiveFirst *first); +CHECK_FUNC(directive_export_name, AstDirectiveExportName *ename); +CHECK_FUNC(proc_expansion, AstProceduralExpansion **pexp, ProceduralMacroExpansionKind kind); +CHECK_FUNC(package, AstPackage* package); #define STATEMENT_LEVEL 1 #define EXPRESSION_LEVEL 2 -u32 current_checking_level=0; -static inline void fill_in_type(AstTyped* node) { +static void scope_enter(Context *context, Scope* new_scope) { + assert(new_scope); + context->checker.current_scope = new_scope; + bh_arr_push(context->checker.scope_stack, new_scope); +} + +static void scope_leave(Context *context) { + assert(bh_arr_length(context->checker.scope_stack) > 0); + bh_arr_pop(context->checker.scope_stack); + context->checker.current_scope = bh_arr_last(context->checker.scope_stack); +} + +static void clear_modes(Context *context) { + context->checker.mode = 0; +} + +static void enable_mode(Context *context, CheckerMode mode) { + context->checker.mode |= mode; +} + +static void disable_mode(Context *context, CheckerMode mode) { + context->checker.mode &= ~mode; +} + +static b32 mode_enabled(Context *context, CheckerMode mode) { + return (context->checker.mode & mode) != 0; +} + +static inline void fill_in_type(Context *context, AstTyped* node) { if (node->type == NULL) { - if (check_type(&node->type_node) > Check_Errors_Start) return; + if (check_type(context, &node->type_node) > Check_Errors_Start) return; + + node->type = type_build_from_ast(context, node->type_node); + } +} + +static void reset_statement_idx_on_all_blocks(Context *context, AstBlock *block) { + block->statement_idx = 0; + + 
AstNode *walker = block->body; + while (walker) { + if (walker->kind == Ast_Kind_Block) { + reset_statement_idx_on_all_blocks(context, (AstBlock *) walker); + } + + walker = walker->next; + } +} - node->type = type_build_from_ast(context.ast_alloc, node->type_node); +CHECK_FUNC(symbol, AstNode** symbol_node) { + if (mode_enabled(context, CM_Dont_Resolve_Symbols)) return Check_Yield; + + OnyxToken* token = (*symbol_node)->token; + AstNode* res = symbol_resolve(context, context->checker.current_scope, token); + + if (!res) { + if (context->cycle_detected) { + token_toggle_end(token); + char *closest = find_closest_symbol_in_scope_and_parents(context, context->checker.current_scope, token->text); + token_toggle_end(token); + + if (closest) ERROR_(token->pos, "Unable to resolve symbol '%b'. Did you mean '%s'?", token->text, token->length, closest); + else ERROR_(token->pos, "Unable to resolve symbol '%b'.", token->text, token->length); + + return Check_Error; + } else { + return Check_Yield; + } + + } else { + track_resolution_for_symbol_info(context, *symbol_node, res); + *symbol_node = res; + context->checker.resolved_a_symbol = 1; + } + + return Check_Success; +} + +CHECK_FUNC(local, AstLocal** local) { + CHECK(type, &(*local)->type_node); + + if ((*local)->token != NULL) + symbol_introduce(context, context->checker.current_scope, (*local)->token, (AstNode *) *local); + + if ((*local)->auto_dispose) { + insert_auto_dispose_call(context, *local); + (*local)->auto_dispose = 0; } + + return Check_Success; } -CheckStatus check_return(AstReturn* retnode) { +CHECK_FUNC(return, AstReturn* retnode) { Type ** expected_return_type; + bh_arr(AstLocal *) named_return_values; - if (retnode->count >= (u32) bh_arr_length(context.checker.expected_return_type_stack)) { + if (retnode->count >= (u32) bh_arr_length(context->checker.expected_return_type_stack)) { ERROR_(retnode->token->pos, "Too many repeated 'return's here. 
Expected a maximum of %d.", - bh_arr_length(context.checker.expected_return_type_stack)); + bh_arr_length(context->checker.expected_return_type_stack)); + } + + if (retnode->from_proc) { + expected_return_type = context->checker.expected_return_type_stack[0]; + named_return_values = context->checker.named_return_values_stack[0]; + } else { + i32 idx = bh_arr_length(context->checker.expected_return_type_stack) - retnode->count - 1; + expected_return_type = context->checker.expected_return_type_stack[idx]; + named_return_values = context->checker.named_return_values_stack[idx]; } - expected_return_type = context.checker.expected_return_type_stack[bh_arr_length(context.checker.expected_return_type_stack) - retnode->count - 1]; + + +retry_return_expr_check: if (retnode->expr) { CHECK(expression, &retnode->expr); - if (*expected_return_type == &type_auto_return) { - resolve_expression_type(retnode->expr); + if (*expected_return_type == context->types.auto_return) { + resolve_expression_type(context, retnode->expr); if (retnode->expr->type == NULL) YIELD_ERROR(retnode->token->pos, "Unable to determine the automatic return type here."); @@ -153,8 +278,8 @@ CheckStatus check_return(AstReturn* retnode) { TYPE_CHECK(&retnode->expr, *expected_return_type) { ERROR_(retnode->token->pos, "Expected to return a value of type '%s', returning value of type '%s'.", - type_get_name(*expected_return_type), - node_get_type_name(retnode->expr)); + type_get_name(context, *expected_return_type), + node_get_type_name(context, retnode->expr)); } // @@ -168,70 +293,115 @@ CheckStatus check_return(AstReturn* retnode) { } } else { - if (*expected_return_type == &type_auto_return) { - *expected_return_type = &basic_types[Basic_Kind_Void]; + if (*expected_return_type == context->types.auto_return) { + *expected_return_type = context->types.basic[Basic_Kind_Void]; return Check_Success; } - if ((*expected_return_type) != &basic_types[Basic_Kind_Void]) { - ERROR_(retnode->token->pos, - "Returning 
from non-void function without a value. Expected a value of type '%s'.", - type_get_name(*expected_return_type)); + if ((*expected_return_type) != context->types.basic[Basic_Kind_Void]) { + if (!named_return_values) { + ERROR_(retnode->token->pos, + "Returning from non-void function without a value. Expected a value of type '%s'.", + type_get_name(context, *expected_return_type)); + + } else { + if (bh_arr_length(named_return_values) == 1) { + retnode->expr = (AstTyped *) named_return_values[0]; + + } else { + AstCompound *implicit_compound = onyx_ast_node_new(context->ast_alloc, sizeof(AstCompound), Ast_Kind_Compound); + implicit_compound->token = retnode->token; + + bh_arr_new(context->ast_alloc, implicit_compound->exprs, bh_arr_length(named_return_values)); + bh_arr_each(AstLocal *, named_return, named_return_values) { + bh_arr_push(implicit_compound->exprs, (AstTyped *) *named_return); + } + + retnode->expr = (AstTyped *) implicit_compound; + } + + goto retry_return_expr_check; + } } } return Check_Success; } -CheckStatus check_if(AstIfWhile* ifnode) { - if (ifnode->initialization != NULL) CHECK(statement_chain, &ifnode->initialization); - +CHECK_FUNC(if, AstIfWhile* ifnode) { if (ifnode->kind == Ast_Kind_Static_If) { if ((ifnode->flags & Ast_Flag_Static_If_Resolved) == 0) { YIELD(ifnode->token->pos, "Waiting for static if to be resolved."); } - if (static_if_resolution(ifnode)) { + if (static_if_resolution(context, ifnode)) { if (ifnode->true_stmt != NULL) { CHECK(statement, (AstNode **) &ifnode->true_stmt); ifnode->true_stmt->rules = Block_Rule_Macro; + ifnode->flags |= ifnode->true_stmt->flags & Ast_Flag_Block_Returns; } } else { if (ifnode->false_stmt != NULL) { CHECK(statement, (AstNode **) &ifnode->false_stmt); ifnode->false_stmt->rules = Block_Rule_Macro; + ifnode->flags |= ifnode->false_stmt->flags & Ast_Flag_Block_Returns; } } } else { + if (ifnode->initialization != NULL) { + if (!ifnode->scope) { + ifnode->scope = scope_create(context, 
context->checker.current_scope, ifnode->token->pos); + } + + scope_enter(context, ifnode->scope); + CHECK(statement_chain, &ifnode->initialization); + } + CHECK(expression, &ifnode->cond); if (!type_is_bool(ifnode->cond->type)) { - TypeMatch implicit_cast = implicit_cast_to_bool(&ifnode->cond); + TypeMatch implicit_cast = implicit_cast_to_bool(context, &ifnode->cond); if (implicit_cast == TYPE_MATCH_YIELD) YIELD(ifnode->token->pos, "Waiting for implicit cast to bool to check."); if (implicit_cast == TYPE_MATCH_FAILED) { - ERROR_(ifnode->token->pos, "Expected expression of type 'bool' for condition, got '%s'", type_get_name(ifnode->cond->type)); + ERROR_(ifnode->token->pos, "Expected expression of type 'bool' for condition, got '%s'", type_get_name(context, ifnode->cond->type)); } } if (ifnode->true_stmt) CHECK(statement, (AstNode **) &ifnode->true_stmt); if (ifnode->false_stmt) CHECK(statement, (AstNode **) &ifnode->false_stmt); + + if (ifnode->true_stmt && ifnode->false_stmt) { + if ((ifnode->true_stmt->flags & Ast_Flag_Block_Returns) && (ifnode->false_stmt->flags & Ast_Flag_Block_Returns)) + ifnode->flags |= Ast_Flag_Block_Returns; + } + + if (ifnode->initialization != NULL) { + scope_leave(context); + } } return Check_Success; } -CheckStatus check_while(AstIfWhile* whilenode) { - if (whilenode->initialization != NULL) CHECK(statement_chain, &whilenode->initialization); +CHECK_FUNC(while, AstIfWhile* whilenode) { + if (whilenode->initialization != NULL) { + if (!whilenode->scope) { + whilenode->scope = scope_create(context, context->checker.current_scope, whilenode->token->pos); + } + + scope_enter(context, whilenode->scope); + CHECK(statement_chain, &whilenode->initialization); + } CHECK(expression, &whilenode->cond); if (!type_is_bool(whilenode->cond->type)) { - TypeMatch implicit_cast = implicit_cast_to_bool(&whilenode->cond); + TypeMatch implicit_cast = implicit_cast_to_bool(context, &whilenode->cond); if (implicit_cast == TYPE_MATCH_YIELD) 
YIELD(whilenode->token->pos, "Waiting for implicit cast to bool to check."); if (implicit_cast == TYPE_MATCH_FAILED) { - ERROR_(whilenode->token->pos, "Expected expression of type 'bool' for condition, got '%s'", type_get_name(whilenode->cond->type)); + ERROR_(whilenode->token->pos, "Expected expression of type 'bool' for condition, got '%s'", type_get_name(context, whilenode->cond->type)); } } @@ -244,15 +414,45 @@ CheckStatus check_while(AstIfWhile* whilenode) { CHECK(statement, (AstNode **) &whilenode->false_stmt); } + if (whilenode->initialization != NULL) { + scope_leave(context); + } + return Check_Success; } -CheckStatus check_for(AstFor* fornode) { +CHECK_FUNC(for, AstFor* fornode) { + if (!fornode->scope) { + fornode->scope = scope_create(context, context->checker.current_scope, fornode->token->pos); + } + + scope_enter(context, fornode->scope); + b32 old_inside_for_iterator; if (fornode->flags & Ast_Flag_Has_Been_Checked) goto fornode_expr_checked; CHECK(expression, &fornode->iter); - resolve_expression_type(fornode->iter); + resolve_expression_type(context, fornode->iter); + + // + // These locals have to be checked after the iterator value to avoid incorrect + // symbol resolutions. + // + // for a in x { + // for a in a { // <- + // } + // } + // + CHECK(local, &fornode->var); + if (fornode->index_var) { + fornode->index_var->flags |= Ast_Flag_Cannot_Take_Addr; + CHECK(local, &fornode->index_var); + fill_in_type(context, (AstTyped *) fornode->index_var); + + if (!type_is_integer(fornode->index_var->type)) { + ERROR_(fornode->index_var->token->pos, "Index for a for loop must be an integer type, but it is a '%s'.", type_get_name(context, fornode->index_var->type)); + } + } Type* iter_type = fornode->iter->type; if (iter_type == NULL) YIELD(fornode->token->pos, "Waiting for iteration expression type to be known."); @@ -263,46 +463,72 @@ CheckStatus check_for(AstFor* fornode) { } // @HACK This should be built elsewhere... 
- builtin_range_type_type = type_build_from_ast(context.ast_alloc, builtin_range_type); - if (builtin_range_type_type == NULL) YIELD(fornode->token->pos, "Waiting for 'range' structure to be built."); + context->builtins.range_type_type = type_build_from_ast(context, context->builtins.range_type); + if (context->builtins.range_type_type == NULL) YIELD(fornode->token->pos, "Waiting for 'range' structure to be built."); Type* given_type = NULL; fornode->loop_type = For_Loop_Invalid; - if (types_are_compatible(iter_type, &basic_types[Basic_Kind_I32])) { + if (types_are_compatible(context, iter_type, context->types.basic[Basic_Kind_I32])) { + if (fornode->by_pointer) { + ERROR(error_loc, "Cannot iterate by pointer over a range."); + } + + AstNumLit* low_0 = make_int_literal(context, 0); + AstRangeLiteral* rl = make_range_literal(context, (AstTyped *) low_0, fornode->iter); + CHECK(range_literal, &rl); + fornode->iter = (AstTyped *) rl; + + given_type = context->builtins.range_type_type->Struct.memarr[0]->type; + fornode->var->flags |= Ast_Flag_Cannot_Take_Addr; + fornode->loop_type = For_Loop_Range; + } + else if (types_are_compatible(context, iter_type, context->types.basic[Basic_Kind_I64])) { if (fornode->by_pointer) { ERROR(error_loc, "Cannot iterate by pointer over a range."); } - AstNumLit* low_0 = make_int_literal(context.ast_alloc, 0); - AstRangeLiteral* rl = make_range_literal(context.ast_alloc, (AstTyped *) low_0, fornode->iter); + AstNumLit* low_0 = make_int_literal(context, 0); + low_0->type = context->types.basic[Basic_Kind_I64]; + + AstRangeLiteral* rl = make_range_literal(context, (AstTyped *) low_0, fornode->iter); CHECK(range_literal, &rl); fornode->iter = (AstTyped *) rl; - given_type = builtin_range_type_type->Struct.memarr[0]->type; + given_type = context->builtins.range64_type_type->Struct.memarr[0]->type; fornode->var->flags |= Ast_Flag_Cannot_Take_Addr; fornode->loop_type = For_Loop_Range; } - else if (types_are_compatible(iter_type, 
builtin_range_type_type)) { + else if (types_are_compatible(context, iter_type, context->builtins.range_type_type)) { if (fornode->by_pointer) { ERROR(error_loc, "Cannot iterate by pointer over a range."); } // NOTE: Blindly copy the first range member's type which will // be the low value. - brendanfh 2020/09/04 - given_type = builtin_range_type_type->Struct.memarr[0]->type; + given_type = iter_type->Struct.memarr[0]->type; fornode->var->flags |= Ast_Flag_Cannot_Take_Addr; fornode->loop_type = For_Loop_Range; + } + else if (types_are_compatible(context, iter_type, context->builtins.range64_type_type)) { + if (fornode->by_pointer) { + ERROR(error_loc, "Cannot iterate by pointer over a range."); + } + // NOTE: Blindly copy the first range member's type which will + // be the low value. - brendanfh 2020/09/04 + given_type = iter_type->Struct.memarr[0]->type; + fornode->var->flags |= Ast_Flag_Cannot_Take_Addr; + fornode->loop_type = For_Loop_Range; } else if (iter_type->kind == Type_Kind_Array) { - if (fornode->by_pointer) given_type = type_make_pointer(context.ast_alloc, iter_type->Array.elem); + if (fornode->by_pointer) given_type = type_make_pointer(context, iter_type->Array.elem); else given_type = iter_type->Array.elem; fornode->loop_type = For_Loop_Array; } else if (iter_type->kind == Type_Kind_Slice) { - if (fornode->by_pointer) given_type = type_make_pointer(context.ast_alloc, iter_type->Slice.elem); + if (fornode->by_pointer) given_type = type_make_pointer(context, iter_type->Slice.elem); else given_type = iter_type->Slice.elem; fornode->loop_type = For_Loop_Slice; @@ -310,7 +536,7 @@ CheckStatus check_for(AstFor* fornode) { } else if (iter_type->kind == Type_Kind_VarArgs) { if (fornode->by_pointer) { - ERROR_(error_loc, "Cannot iterate by pointer over '%s'.", type_get_name(iter_type)); + ERROR_(error_loc, "Cannot iterate by pointer over '%s'.", type_get_name(context, iter_type)); } given_type = iter_type->VarArgs.elem; @@ -319,12 +545,12 @@ CheckStatus 
check_for(AstFor* fornode) { fornode->loop_type = For_Loop_Slice; } else if (iter_type->kind == Type_Kind_DynArray) { - if (fornode->by_pointer) given_type = type_make_pointer(context.ast_alloc, iter_type->DynArray.elem); + if (fornode->by_pointer) given_type = type_make_pointer(context, iter_type->DynArray.elem); else given_type = iter_type->DynArray.elem; fornode->loop_type = For_Loop_DynArr; } - else if (type_constructed_from_poly(iter_type, builtin_iterator_type)) { + else if (type_constructed_from_poly(iter_type, context->builtins.iterator_type)) { if (fornode->by_pointer) { ERROR(error_loc, "Cannot iterate by pointer over an iterator."); } @@ -332,71 +558,64 @@ CheckStatus check_for(AstFor* fornode) { // HACK: This assumes the Iterator type only has a single type argument. given_type = iter_type->Struct.poly_sln[0].type; fornode->loop_type = For_Loop_Iterator; + fornode->var->flags |= Ast_Flag_Address_Taken; } if (given_type == NULL) - ERROR_(error_loc, "Cannot iterate over a '%s'.", type_get_name(iter_type)); + ERROR_(error_loc, "Cannot iterate over a '%s'.", type_get_name(context, iter_type)); if (fornode->var->type_node) { - fill_in_type((AstTyped *) fornode->var); + fill_in_type(context, (AstTyped *) fornode->var); TYPE_CHECK((AstTyped **) &fornode->var, given_type) { - ERROR_(error_loc, "Mismatched type for loop variable. You specified '%s', but it should be '%s'.", type_get_name(fornode->var->type), type_get_name(given_type)); + ERROR_(error_loc, "Mismatched type for loop variable. 
You specified '%s', but it should be '%s'.", type_get_name(context, fornode->var->type), type_get_name(context, given_type)); } } else { fornode->var->type = given_type; } - if (fornode->index_var) { - fornode->index_var->flags |= Ast_Flag_Cannot_Take_Addr; - CHECK(expression, (AstTyped **) &fornode->index_var); - - if (!type_is_integer(fornode->index_var->type)) { - ERROR_(fornode->index_var->token->pos, "Index for a for loop must be an integer type, but it is a '%s'.", type_get_name(fornode->index_var->type)); - } - } - if (fornode->by_pointer) fornode->var->flags |= Ast_Flag_Cannot_Take_Addr; if (fornode->loop_type == For_Loop_Invalid) - ERROR_(error_loc, "Cannot iterate over a '%s'.", type_get_name(iter_type)); + ERROR_(error_loc, "Cannot iterate over a '%s'.", type_get_name(context, iter_type)); if (fornode->no_close && fornode->loop_type != For_Loop_Iterator) { - onyx_report_warning(error_loc, "Warning: #no_close here is meaningless as the iterable is not an iterator."); + ONYX_WARNING(error_loc, "Warning: #no_close here is meaningless as the iterable is not an iterator."); } fornode->flags |= Ast_Flag_Has_Been_Checked; fornode_expr_checked: - bh_arr_push(context.checker.for_node_stack, fornode); + bh_arr_push(context->checker.for_node_stack, fornode); - old_inside_for_iterator = context.checker.inside_for_iterator; - context.checker.inside_for_iterator = 0; + old_inside_for_iterator = context->checker.inside_for_iterator; + context->checker.inside_for_iterator = 0; iter_type = fornode->iter->type; - if (type_constructed_from_poly(iter_type, builtin_iterator_type)) { - context.checker.inside_for_iterator = 1; + if (type_constructed_from_poly(iter_type, context->builtins.iterator_type)) { + context->checker.inside_for_iterator = 1; } do { - CheckStatus cs = check_block(fornode->stmt); - context.checker.inside_for_iterator = old_inside_for_iterator; + CheckStatus cs = check_block(context, fornode->stmt); + context->checker.inside_for_iterator = 
old_inside_for_iterator; if (cs > Check_Errors_Start) return cs; } while(0); - bh_arr_pop(context.checker.for_node_stack); + bh_arr_pop(context->checker.for_node_stack); + scope_leave(context); return Check_Success; } -static b32 add_case_to_switch_statement(AstSwitch* switchnode, u64 case_value, AstSwitchCase* casestmt, OnyxFilePos pos) { +static b32 add_case_to_switch_statement(Context *context, AstSwitch* switchnode, u64 case_value, AstSwitchCase* casestmt, OnyxFilePos pos) { assert(switchnode->switch_kind == Switch_Kind_Integer || switchnode->switch_kind == Switch_Kind_Union); switchnode->min_case = bh_min(switchnode->min_case, case_value); switchnode->max_case = bh_max(switchnode->max_case, case_value); if (bh_imap_has(&switchnode->case_map, case_value)) { - onyx_report_error(pos, Error_Critical, "Multiple cases for values '%d'.", case_value); + ONYX_ERROR(pos, Error_Critical, "Multiple cases for values '%d'.", case_value); return 1; } @@ -404,20 +623,20 @@ static b32 add_case_to_switch_statement(AstSwitch* switchnode, u64 case_value, A return 0; } -static CheckStatus collect_switch_case_blocks(AstSwitch* switchnode, AstBlock* root) { +static CheckStatus collect_switch_case_blocks(Context *context, AstSwitch* switchnode, AstBlock* root) { AstNode *walker = root->body; while (walker != NULL) { switch (walker->kind) { case Ast_Kind_Block: - collect_switch_case_blocks(switchnode, (AstBlock *) walker); + collect_switch_case_blocks(context, switchnode, (AstBlock *) walker); break; case Ast_Kind_Switch_Case: { AstSwitchCase *case_node = (AstSwitchCase *) walker; if (case_node->is_default) { if (switchnode->default_case != NULL && switchnode->default_case != case_node->block) { - ERROR(case_node->token->pos, "Multiple #default cases given"); - ERROR(switchnode->default_case->token->pos, "Multiple #default cases given"); + ERROR(case_node->token->pos, "Multiple default cases given"); + ERROR(switchnode->default_case->token->pos, "Multiple default cases given"); return 
Check_Error; } @@ -435,10 +654,10 @@ static CheckStatus collect_switch_case_blocks(AstSwitch* switchnode, AstBlock* r AstIf* static_if = (AstIf *) walker; assert(static_if->flags & Ast_Flag_Static_If_Resolved); - if (static_if_resolution(static_if)) { - if (static_if->true_stmt) collect_switch_case_blocks(switchnode, static_if->true_stmt); + if (static_if_resolution(context, static_if)) { + if (static_if->true_stmt) collect_switch_case_blocks(context, switchnode, static_if->true_stmt); } else { - if (static_if->false_stmt) collect_switch_case_blocks(switchnode, static_if->false_stmt); + if (static_if->false_stmt) collect_switch_case_blocks(context, switchnode, static_if->false_stmt); } break; @@ -454,11 +673,59 @@ static CheckStatus collect_switch_case_blocks(AstSwitch* switchnode, AstBlock* r return Check_Success; } -CheckStatus check_switch(AstSwitch* switchnode) { - if (switchnode->initialization != NULL) CHECK(statement_chain, &switchnode->initialization); +CHECK_FUNC(case, AstSwitchCase *casenode) { + if (!casenode->is_default) { + bh_arr_each(AstTyped *, expr, casenode->values) { + CHECK(expression, expr); + } + } + + if (mode_enabled(context, CM_Dont_Check_Case_Bodies)) return Check_Success; + + if (casenode->capture) { + if (casenode->scope == NULL) { + casenode->scope = scope_create(context, context->checker.current_scope, casenode->token->pos); + symbol_introduce(context, casenode->scope, casenode->capture->token, (AstNode *) casenode->capture); + } + + scope_enter(context, casenode->scope); + } + + if (casenode->body_is_expr) { + CHECK(expression, &casenode->expr); + } else { + CHECK(block, casenode->block); + } + + if (casenode->capture) { + scope_leave(context); + } + + return Check_Success; +} + +CHECK_FUNC(switch, AstSwitch* switchnode) { + // + // Checking switches is quite complicated and tricky because of the feature-set Onyx + // supports. 
Switch bodies can contain arbitrary statements at parse-time, but must + // be expanded to a tree of block-nodes with case-nodes as the leaves. This complicates + // the checking, because case-bodies cannot be checked before they know the type of their + // captured variables (if there are any), but the case values must be checked before the + // switch node can do proper type checking. + // + + if (switchnode->initialization) { + if (switchnode->scope == NULL) { + switchnode->scope = scope_create(context, context->checker.current_scope, switchnode->token->pos); + } + + scope_enter(context, switchnode->scope); + + CHECK(statement_chain, &switchnode->initialization); + } CHECK(expression, &switchnode->expr); - Type* resolved_expr_type = resolve_expression_type(switchnode->expr); + Type* resolved_expr_type = resolve_expression_type(context, switchnode->expr); if (!(switchnode->flags & Ast_Flag_Has_Been_Checked)) { if (resolved_expr_type == NULL) YIELD(switchnode->token->pos, "Waiting for expression type to be known."); @@ -475,52 +742,57 @@ CheckStatus check_switch(AstSwitch* switchnode) { switch (switchnode->switch_kind) { case Switch_Kind_Integer: switchnode->min_case = 0xffffffffffffffff; - bh_imap_init(&switchnode->case_map, global_heap_allocator, 4); + bh_imap_init(&switchnode->case_map, context->gp_alloc, 4); break; case Switch_Kind_Use_Equals: - bh_arr_new(global_heap_allocator, switchnode->case_exprs, 4); + bh_arr_new(context->gp_alloc, switchnode->case_exprs, 4); break; case Switch_Kind_Union: switchnode->min_case = 1; - bh_imap_init(&switchnode->case_map, global_heap_allocator, 4); + bh_imap_init(&switchnode->case_map, context->gp_alloc, 4); u32 variants = type_union_get_variant_count(switchnode->expr->type); - switchnode->union_variants_handled = bh_alloc_array(context.ast_alloc, u8, variants); + switchnode->union_variants_handled = bh_alloc_array(context->ast_alloc, u8, variants); break; default: assert(0); } + + switchnode->flags |= 
Ast_Flag_Has_Been_Checked; } - switchnode->flags |= Ast_Flag_Has_Been_Checked; - // Should the case block code be checked here? - // Or should this just exist to resolve macros and expand #unquotes - // then the cases are consumed into the array or cases, THEN the blocks - // are actually checked? if (switchnode->cases == NULL) { + // + // Set CM_Dont_Check_Case_Bodies so the bodies of case nodes will be skipped. + // This avoids weird type-checking and symbol resolution issues when a case + // node comes from an #insert or macro expansion. They will be re-checked + // fully in the next check_block below. + // + enable_mode(context, CM_Dont_Check_Case_Bodies); CHECK(block, switchnode->case_block); + disable_mode(context, CM_Dont_Check_Case_Bodies); - bh_arr_new(global_heap_allocator, switchnode->cases, 4); - if (collect_switch_case_blocks(switchnode, switchnode->case_block) != Check_Success) { + bh_arr_new(context->gp_alloc, switchnode->cases, 4); + if (collect_switch_case_blocks(context, switchnode, switchnode->case_block) != Check_Success) { return Check_Error; } // This is important, otherwise if this block has to return to symbol resolution. - switchnode->case_block->statement_idx = 0; + reset_statement_idx_on_all_blocks(context, switchnode->case_block); } fori (i, switchnode->yield_return_index, bh_arr_length(switchnode->cases)) { AstSwitchCase *sc = switchnode->cases[i]; if (sc->capture && bh_arr_length(sc->values) != 1) { - ERROR(sc->token->pos, "Expected exactly one value in switch-case when using a capture, i.e. `case value: X { ... }`."); + ERROR(sc->token->pos, "Expected exactly one value in switch-case when using a capture, i.e. `case X as value { ... }`."); } if (sc->capture && switchnode->switch_kind != Switch_Kind_Union) { ERROR_(sc->capture->token->pos, "Captures in switch cases are only allowed when switching over a union type. 
Switching over '%s' here.", - type_get_name(switchnode->expr->type)); + type_get_name(context, switchnode->expr->type)); } if (sc->flags & Ast_Flag_Has_Been_Checked) goto check_switch_case_block; @@ -531,22 +803,21 @@ CheckStatus check_switch(AstSwitch* switchnode) { // Handle case 1 .. 10 if (switchnode->switch_kind == Switch_Kind_Integer && (*value)->kind == Ast_Kind_Range_Literal) { AstRangeLiteral* rl = (AstRangeLiteral *) (*value); - resolve_expression_type(rl->low); - resolve_expression_type(rl->high); + resolve_expression_type(context, rl->low); + resolve_expression_type(context, rl->high); if (rl->low->kind != Ast_Kind_NumLit || rl->high->kind != Ast_Kind_NumLit) { ERROR(rl->token->pos, "case statement expected compile time known range."); } - promote_numlit_to_larger((AstNumLit *) rl->low); - promote_numlit_to_larger((AstNumLit *) rl->high); + promote_numlit_to_larger(context, (AstNumLit *) rl->low); + promote_numlit_to_larger(context, (AstNumLit *) rl->high); i64 lower = ((AstNumLit *) rl->low)->value.l; i64 upper = ((AstNumLit *) rl->high)->value.l; - // NOTE: This is inclusive!!!! - fori (case_value, lower, upper + 1) { - if (add_case_to_switch_statement(switchnode, case_value, sc, rl->token->pos)) + fori (case_value, lower, upper) { + if (add_case_to_switch_statement(context, switchnode, case_value, sc, rl->token->pos)) return Check_Error; } @@ -564,19 +835,19 @@ CheckStatus check_switch(AstSwitch* switchnode) { if ((*value)->token) tkn = (*value)->token; ERROR_(tkn->pos, "'%b' is not a variant of '%s'.", - (*value)->token->text, (*value)->token->length, type_get_name(union_expr_type)); + (*value)->token->text, (*value)->token->length, type_get_name(context, union_expr_type)); } // We subtract one here because variant numbering starts at 1, instead of 0. // This is so a zeroed out block of memory does not have a valid variant. // This is going to change now... 
- i32 variant_number = get_expression_integer_value(*value, NULL); + i32 variant_number = get_expression_integer_value(context, *value, NULL); switchnode->union_variants_handled[variant_number] = 1; UnionVariant *union_variant = union_expr_type->Union.variants_ordered[variant_number]; if (sc->capture) { if (sc->capture_is_by_pointer) { - sc->capture->type = type_make_pointer(context.ast_alloc, union_variant->type); + sc->capture->type = type_make_pointer(context, union_variant->type); } else { sc->capture->type = union_variant->type; } @@ -588,7 +859,7 @@ CheckStatus check_switch(AstSwitch* switchnode) { if ((*value)->token) tkn = (*value)->token; ERROR_(tkn->pos, "Mismatched types in switch-case. Expected '%s', got '%s'.", - type_get_name(resolved_expr_type), type_get_name((*value)->type)); + type_get_name(context, resolved_expr_type), type_get_name(context, (*value)->type)); } } @@ -596,11 +867,11 @@ CheckStatus check_switch(AstSwitch* switchnode) { case Switch_Kind_Integer: case Switch_Kind_Union: { b32 is_valid; - i64 integer_value = get_expression_integer_value(*value, &is_valid); + i64 integer_value = get_expression_integer_value(context, *value, &is_valid); if (!is_valid) ERROR_((*value)->token->pos, "Case statement expected compile time known integer. 
Got '%s'.", onyx_ast_node_kind_string((*value)->kind)); - if (add_case_to_switch_statement(switchnode, integer_value, sc, sc->block->token->pos)) + if (add_case_to_switch_statement(context, switchnode, integer_value, sc, sc->block->token->pos)) return Check_Error; break; @@ -621,7 +892,7 @@ CheckStatus check_switch(AstSwitch* switchnode) { CaseToBlock ctb; ctb.casestmt = sc; ctb.original_value = *value; - ctb.comparison = make_binary_op(context.ast_alloc, Binary_Op_Equal, switchnode->expr, *value); + ctb.comparison = make_binary_op(context, Binary_Op_Equal, switchnode->expr, *value); ctb.comparison->token = (*value)->token; bh_arr_push(switchnode->case_exprs, ctb); @@ -636,7 +907,7 @@ CheckStatus check_switch(AstSwitch* switchnode) { check_switch_case_block: if (switchnode->is_expr) { if (!sc->body_is_expr) { - onyx_report_error(sc->token->pos, Error_Critical, "Inside a switch expression, all cases must return a value."); + ONYX_ERROR(sc->token->pos, Error_Critical, "Inside a switch expression, all cases must return a value."); ERROR(sc->token->pos, "Change the case statement to look like 'case X => expr'."); } } else { @@ -645,23 +916,30 @@ CheckStatus check_switch(AstSwitch* switchnode) { } } + switchnode->yield_return_index += 1; + } + + CHECK(block, switchnode->case_block); + + b32 all_cases_return = 1; + bh_arr_each(AstSwitchCase *, pcase, switchnode->cases) { + AstSwitchCase *sc = *pcase; + if (sc->body_is_expr) { - CHECK(expression, &sc->expr); if (switchnode->type == NULL) { - switchnode->type = resolve_expression_type(sc->expr); + switchnode->type = resolve_expression_type(context, sc->expr); + } else { TYPE_CHECK(&sc->expr, switchnode->type) { ERROR_(sc->token->pos, "Expected case expression to be of type '%s', got '%s'.", - type_get_name(switchnode->type), - type_get_name(sc->expr->type)); + type_get_name(context, switchnode->type), + type_get_name(context, sc->expr->type)); } } } else { - CHECK(block, sc->block); + all_cases_return = all_cases_return && 
(sc->block->flags & Ast_Flag_Block_Returns); } - - switchnode->yield_return_index += 1; } if (switchnode->default_case) { @@ -672,13 +950,15 @@ CheckStatus check_switch(AstSwitch* switchnode) { if (switchnode->type) { TYPE_CHECK(default_case, switchnode->type) { ERROR_((*default_case)->token->pos, "Expected case expression to be of type '%s', got '%s'.", - type_get_name(switchnode->type), - type_get_name((*default_case)->type)); + type_get_name(context, switchnode->type), + type_get_name(context, (*default_case)->type)); } } } else { CHECK(block, switchnode->default_case); + + all_cases_return = all_cases_return && (switchnode->default_case->flags & Ast_Flag_Block_Returns); } } else if (switchnode->switch_kind == Switch_Kind_Union) { @@ -712,10 +992,18 @@ CheckStatus check_switch(AstSwitch* switchnode) { } } + if (all_cases_return) { + switchnode->flags |= Ast_Flag_Block_Returns; + } + + if (switchnode->initialization) { + scope_leave(context); + } + return Check_Success; } -CheckStatus check_arguments(Arguments* args) { +CHECK_FUNC(arguments, Arguments* args) { bh_arr_each(AstTyped *, actual, args->values) CHECK(expression, actual); @@ -725,14 +1013,14 @@ CheckStatus check_arguments(Arguments* args) { return Check_Success; } -CheckStatus check_argument(AstArgument** parg) { +CHECK_FUNC(argument, AstArgument** parg) { CHECK(expression, &(*parg)->value); (*parg)->type = (*parg)->value->type; return Check_Success; } -static CheckStatus check_resolve_callee(AstCall* call, AstTyped** effective_callee) { +CHECK_FUNC(resolve_callee, AstCall* call, AstTyped** effective_callee) { if (call->kind == Ast_Kind_Intrinsic_Call) return Check_Success; AstTyped* callee = (AstTyped *) strip_aliases((AstNode *) call->callee); @@ -743,19 +1031,20 @@ static CheckStatus check_resolve_callee(AstCall* call, AstTyped** effective_call if (callee->kind == Ast_Kind_Overloaded_Function) { AstTyped* new_callee = find_matching_overload_by_arguments( + context, ((AstOverloadedFunction *) 
callee)->overloads, &call->args); if (new_callee == NULL) { - if (context.cycle_almost_detected < 2) { + if (context->cycle_almost_detected < 2) { YIELD(call->token->pos, "Waiting to know all options for overloaded function"); } - report_unable_to_match_overload(call, ((AstOverloadedFunction *) callee)->overloads); + report_unable_to_match_overload(context, call, ((AstOverloadedFunction *) callee)->overloads); return Check_Error; } - if (new_callee == (AstTyped *) &node_that_signals_a_yield) { + if (new_callee == (AstTyped *) &context->node_that_signals_a_yield) { YIELD(call->token->pos, "Waiting for overloaded function option to pass type-checking."); } @@ -768,9 +1057,9 @@ static CheckStatus check_resolve_callee(AstCall* call, AstTyped** effective_call calling_a_macro = 1; call->callee = callee; - AstTyped* new_callee = (AstTyped *) macro_resolve_header((AstMacro *) callee, &call->args, call->token, 1); + AstTyped* new_callee = (AstTyped *) macro_resolve_header(context, (AstMacro *) callee, &call->args, call->token, 1); if (new_callee == NULL) return Check_Error; - if (new_callee == (AstTyped *) &node_that_signals_a_yield) { + if (new_callee == (AstTyped *) &context->node_that_signals_a_yield) { YIELD(call->token->pos, "Waiting for macro header to pass type-checking."); } @@ -778,9 +1067,9 @@ static CheckStatus check_resolve_callee(AstCall* call, AstTyped** effective_call callee = new_callee; } else while (callee->kind == Ast_Kind_Polymorphic_Proc) { - AstTyped* new_callee = (AstTyped *) polymorphic_proc_lookup((AstFunction *) callee, PPLM_By_Arguments, &call->args, call->token); + AstTyped* new_callee = (AstTyped *) polymorphic_proc_lookup(context, (AstFunction *) callee, PPLM_By_Arguments, &call->args, call->token); if (new_callee == NULL) return Check_Error; - if (new_callee == (AstTyped *) &node_that_signals_a_yield) { + if (new_callee == (AstTyped *) &context->node_that_signals_a_yield) { YIELD(call->token->pos, "Waiting for polymorphic procedure header to 
pass type-checking."); } @@ -789,7 +1078,7 @@ static CheckStatus check_resolve_callee(AstCall* call, AstTyped** effective_call } // NOTE: Build callee's type - fill_in_type((AstTyped *) callee); + fill_in_type(context, (AstTyped *) callee); if (callee->type == NULL) { YIELD(call->token->pos, "Trying to resolve function type for callee."); } @@ -803,14 +1092,14 @@ static CheckStatus check_resolve_callee(AstCall* call, AstTyped** effective_call } if (need_to_check_overload_return_type) { - ensure_overload_returns_correct_type(callee, (AstOverloadedFunction *) original_callee); + ensure_overload_returns_correct_type(context, callee, (AstOverloadedFunction *) original_callee); } *effective_callee = callee; return Check_Success; } -CheckStatus check_call(AstCall** pcall) { +CHECK_FUNC(call, AstCall** pcall) { // All the things that need to be done when checking a call node. // 1. Ensure the callee is not a symbol // 2. Check the callee expression (since it could be a variable or a field access, etc) @@ -823,11 +1112,20 @@ CheckStatus check_call(AstCall** pcall) { // 9. 
Check types of formal and actual params against each other, handling varargs AstCall* call = *pcall; + if (call->placeholder_argument_position > 0) { + ONYX_ERROR(call->token->pos, Error_Critical, "This call contains an argument placeholder '_', but it was not piped into."); + return Check_Error; + } + if (call->kind == Ast_Kind_Call) { + u32 current_checking_level_store = context->checker.current_checking_level; + CHECK(expression, &call->callee); + context->checker.current_checking_level = current_checking_level_store; + AstNode* callee = strip_aliases((AstNode *) call->callee); if (callee->kind == Ast_Kind_Poly_Struct_Type || callee->kind == Ast_Kind_Poly_Union_Type) { - *pcall = (AstCall *) convert_call_to_polycall(call); + *pcall = (AstCall *) convert_call_to_polycall(context, call); CHECK(expression, (AstTyped **) pcall); return Check_Success; } @@ -835,10 +1133,9 @@ CheckStatus check_call(AstCall** pcall) { if (call->flags & Ast_Flag_Has_Been_Checked) return Check_Success; - u32 current_checking_level_store = current_checking_level; - CHECK(expression, &call->callee); + u32 current_checking_level_store = context->checker.current_checking_level; CHECK(arguments, &call->args); - current_checking_level = current_checking_level_store; + context->checker.current_checking_level = current_checking_level_store; AstFunction* callee=NULL; CHECK(resolve_callee, call, (AstTyped **) &callee); @@ -849,12 +1146,15 @@ CheckStatus check_call(AstCall** pcall) { } } - i32 arg_count = get_argument_buffer_size(&callee->type->Function, &call->args); - arguments_ensure_length(&call->args, arg_count); + i32 arg_count = get_argument_buffer_size(context, &callee->type->Function, &call->args); + arguments_ensure_length(context, &call->args, arg_count); char* err_msg = NULL; - fill_in_arguments(&call->args, (AstNode *) callee, &err_msg, 0); - if (err_msg != NULL) ERROR(call->token->pos, err_msg); + fill_in_arguments(context, &call->args, (AstNode *) callee, &err_msg, 0); + if (err_msg 
!= NULL) { + ONYX_ERROR(callee->token->pos, Error_Critical, "Here is the function being called."); + ERROR(call->token->pos, err_msg); + } bh_arr(AstArgument *) arg_arr = (bh_arr(AstArgument *)) call->args.values; bh_arr_each(AstArgument *, arg, arg_arr) { @@ -869,33 +1169,33 @@ CheckStatus check_call(AstCall** pcall) { AstTyped** arg_value = &(*arg)->value; if ((*arg_value)->kind == Ast_Kind_Call_Site) { - AstCallSite* callsite = (AstCallSite *) ast_clone(context.ast_alloc, *arg_value); + AstCallSite* callsite = (AstCallSite *) ast_clone(context, *arg_value); if (callsite->collapsed) continue; callsite->callsite_token = call->token; // HACK CLEANUP - OnyxToken* str_token = bh_alloc(context.ast_alloc, sizeof(OnyxToken)); - str_token->text = bh_strdup(global_heap_allocator, (char *) call->token->pos.filename); + OnyxToken* str_token = bh_alloc(context->ast_alloc, sizeof(OnyxToken)); + str_token->text = bh_strdup(context->gp_alloc, (char *) call->token->pos.filename); str_token->length = strlen(call->token->pos.filename); str_token->pos = call->token->pos; str_token->type = Token_Type_Literal_String; - AstStrLit* filename = bh_alloc_item(context.ast_alloc, AstStrLit); + AstStrLit* filename = bh_alloc_item(context->ast_alloc, AstStrLit); memset(filename, 0, sizeof(AstStrLit)); filename->kind = Ast_Kind_StrLit; filename->token = str_token; filename->data_id = 0; - filename->type_node = builtin_string_type; + filename->type_node = context->builtins.string_type; - add_entities_for_node(NULL, (AstNode *) filename, NULL, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) filename, NULL, NULL); callsite->filename = filename; - callsite->line = make_int_literal(context.ast_alloc, call->token->pos.line); - callsite->column = make_int_literal(context.ast_alloc, call->token->pos.column); + callsite->line = make_int_literal(context, call->token->pos.line); + callsite->column = make_int_literal(context, call->token->pos.column); - 
convert_numlit_to_type(callsite->line, &basic_types[Basic_Kind_U32]); - convert_numlit_to_type(callsite->column, &basic_types[Basic_Kind_U32]); + convert_numlit_to_type(context, callsite->line, context->types.basic[Basic_Kind_U32], 1); + convert_numlit_to_type(context, callsite->column, context->types.basic[Basic_Kind_U32], 1); callsite->collapsed = 1; *arg_value = (AstTyped *) callsite; @@ -911,135 +1211,142 @@ CheckStatus check_call(AstCall** pcall) { token_toggle_end(callee->intrinsic_name); char* intr_name = callee->intrinsic_name->text; - i32 index; - if ((index = shgeti(intrinsic_table, intr_name)) == -1) { - onyx_report_error(callee->token->pos, Error_Critical, "Intrinsic not supported, '%s'.", intr_name); + OnyxIntrinsic intrinsic = 0xffffffff; + const IntrinsicMap *im = &builtin_intrinsics[0]; + while (im->name) { + if (!strcmp(im->name, intr_name)) { + intrinsic = im->intrinsic; + break; + } + im++; + } + + if (intrinsic == 0xffffffff) { + ONYX_ERROR(callee->token->pos, Error_Critical, "Intrinsic not supported, '%s'.", intr_name); token_toggle_end(callee->intrinsic_name); return Check_Error; } - call->intrinsic = intrinsic_table[index].value; + call->intrinsic = intrinsic; token_toggle_end(callee->intrinsic_name); } call->va_kind = VA_Kind_Not_VA; call->type = callee->type->Function.return_type; - if (call->type == &type_auto_return && call->callee->kind != Ast_Kind_Macro) { + if (call->type == context->types.auto_return && call->callee->kind != Ast_Kind_Macro) { YIELD(call->token->pos, "Waiting for auto-return type to be solved."); } OnyxError error; - TypeMatch tm = check_arguments_against_type(&call->args, &callee->type->Function, &call->va_kind, - call->token, get_function_name(callee), &error); + TypeMatch tm = check_arguments_against_type(context, &call->args, &callee->type->Function, &call->va_kind, + call->token, get_function_name(context, callee), &error); if (tm == TYPE_MATCH_FAILED) { - onyx_submit_error(error); + onyx_submit_error(context, 
error); return Check_Error; } - if (tm == TYPE_MATCH_SPECIAL) { - return Check_Return_To_Symres; + if (tm == TYPE_MATCH_YIELD || tm == TYPE_MATCH_SPECIAL) { + YIELD(call->token->pos, "Waiting on argument type checking."); } - if (tm == TYPE_MATCH_YIELD) YIELD(call->token->pos, "Waiting on argument type checking."); - call->flags |= Ast_Flag_Has_Been_Checked; if (call->kind == Ast_Kind_Call && call->callee->kind == Ast_Kind_Macro) { - expand_macro(pcall, callee); - return Check_Return_To_Symres; + expand_macro(context, pcall, callee); + return Check_Yield; } if (callee->kind == Ast_Kind_Function && callee->deprecated_warning) { - onyx_report_warning(callee->token->pos, "Calling a deprecated function: %b", + ONYX_WARNING(callee->token->pos, "Calling a deprecated function: %b", callee->deprecated_warning->token->text, callee->deprecated_warning->token->length); - onyx_report_warning(call->token->pos, "Here is where the deprecated function was called."); + ONYX_WARNING(call->token->pos, "Here is where the deprecated function was called."); } return Check_Success; } -static void report_bad_binaryop(AstBinaryOp* binop) { - onyx_report_error(binop->token->pos, Error_Critical, "Binary operator '%s' not understood for arguments of type '%s' and '%s'.", +static void report_bad_binaryop(Context *context, AstBinaryOp* binop) { + ONYX_ERROR(binop->token->pos, Error_Critical, "Binary operator '%s' not understood for arguments of type '%s' and '%s'.", binaryop_string[binop->operation], - node_get_type_name(binop->left), - node_get_type_name(binop->right)); + node_get_type_name(context, binop->left), + node_get_type_name(context, binop->right)); } -static AstCall* binaryop_try_operator_overload(AstBinaryOp* binop, AstTyped* third_argument) { - if (bh_arr_length(operator_overloads[binop->operation]) == 0) return &context.checker.__op_maybe_overloaded; +static AstCall* binaryop_try_operator_overload(Context *context, AstBinaryOp* binop, AstTyped* third_argument) { + if 
(bh_arr_length(context->operator_overloads[binop->operation]) == 0) return &context->checker.__op_maybe_overloaded; if (binop->overload_args == NULL || binop->overload_args->values[1] == NULL) { if (binop->overload_args == NULL) { - binop->overload_args = bh_alloc_item(context.ast_alloc, Arguments); - bh_arr_new(context.ast_alloc, binop->overload_args->values, 3); + binop->overload_args = bh_alloc_item(context->ast_alloc, Arguments); + bh_arr_new(context->ast_alloc, binop->overload_args->values, 3); bh_arr_set_length(binop->overload_args->values, third_argument ? 3 : 2); } if (binop_is_assignment(binop->operation)) { - binop->overload_args->values[0] = (AstTyped *) make_address_of(context.ast_alloc, binop->left); + binop->overload_args->values[0] = (AstTyped *) make_address_of(context, binop->left); - u32 current_all_checks_are_final = context.checker.all_checks_are_final; - context.checker.all_checks_are_final = 0; - u32 current_checking_level_store = current_checking_level; - CheckStatus cs = check_address_of((AstAddressOf **) &binop->overload_args->values[0]); - current_checking_level = current_checking_level_store; - context.checker.all_checks_are_final = current_all_checks_are_final; + u32 current_all_checks_are_final = context->checker.all_checks_are_final; + context->checker.all_checks_are_final = 0; + u32 current_checking_level_store = context->checker.current_checking_level; + CheckStatus cs = check_address_of(context, (AstAddressOf **) &binop->overload_args->values[0]); + context->checker.current_checking_level = current_checking_level_store; + context->checker.all_checks_are_final = current_all_checks_are_final; - if (cs == Check_Yield_Macro) return (AstCall *) &node_that_signals_a_yield; + if (cs == Check_Yield) return (AstCall *) &context->node_that_signals_a_yield; if (cs == Check_Error) return NULL; - binop->overload_args->values[0] = (AstTyped *) make_argument(context.ast_alloc, binop->overload_args->values[0]); + binop->overload_args->values[0] = 
(AstTyped *) make_argument(context, binop->overload_args->values[0]); } else { - binop->overload_args->values[0] = (AstTyped *) make_argument(context.ast_alloc, binop->left); + binop->overload_args->values[0] = (AstTyped *) make_argument(context, binop->left); } - binop->overload_args->values[1] = (AstTyped *) make_argument(context.ast_alloc, binop->right); - if (third_argument != NULL) binop->overload_args->values[2] = (AstTyped *) make_argument(context.ast_alloc, third_argument); + binop->overload_args->values[1] = (AstTyped *) make_argument(context, binop->right); + if (third_argument != NULL) binop->overload_args->values[2] = (AstTyped *) make_argument(context, third_argument); } - AstTyped* overload = find_matching_overload_by_arguments(operator_overloads[binop->operation], binop->overload_args); - if (overload == NULL || overload == (AstTyped *) &node_that_signals_a_yield) return (AstCall *) overload; + AstTyped* overload = find_matching_overload_by_arguments(context, context->operator_overloads[binop->operation], binop->overload_args); + if (overload == NULL || overload == (AstTyped *) &context->node_that_signals_a_yield) return (AstCall *) overload; - AstCall* implicit_call = onyx_ast_node_new(context.ast_alloc, sizeof(AstCall), Ast_Kind_Call); + AstCall* implicit_call = onyx_ast_node_new(context->ast_alloc, sizeof(AstCall), Ast_Kind_Call); implicit_call->token = binop->token; implicit_call->callee = overload; implicit_call->va_kind = VA_Kind_Not_VA; - arguments_clone(&implicit_call->args, binop->overload_args); + arguments_clone(context, &implicit_call->args, binop->overload_args); return implicit_call; } -static AstCall* unaryop_try_operator_overload(AstUnaryOp* unop) { - if (bh_arr_length(unary_operator_overloads[unop->operation]) == 0) return &context.checker.__op_maybe_overloaded; +static AstCall* unaryop_try_operator_overload(Context *context, AstUnaryOp* unop) { + if (bh_arr_length(context->unary_operator_overloads[unop->operation]) == 0) return 
&context->checker.__op_maybe_overloaded; if (unop->overload_args == NULL || unop->overload_args->values[0] == NULL) { if (unop->overload_args == NULL) { - unop->overload_args = bh_alloc_item(context.ast_alloc, Arguments); - bh_arr_new(context.ast_alloc, unop->overload_args->values, 1); + unop->overload_args = bh_alloc_item(context->ast_alloc, Arguments); + bh_arr_new(context->ast_alloc, unop->overload_args->values, 1); bh_arr_set_length(unop->overload_args->values, 1); } - unop->overload_args->values[0] = (AstTyped *) make_argument(context.ast_alloc, unop->expr); + unop->overload_args->values[0] = (AstTyped *) make_argument(context, unop->expr); } - AstTyped* overload = find_matching_overload_by_arguments(unary_operator_overloads[unop->operation], unop->overload_args); - if (overload == NULL || overload == (AstTyped *) &node_that_signals_a_yield) return (AstCall *) overload; + AstTyped* overload = find_matching_overload_by_arguments(context, context->unary_operator_overloads[unop->operation], unop->overload_args); + if (overload == NULL || overload == (AstTyped *) &context->node_that_signals_a_yield) return (AstCall *) overload; - AstCall* implicit_call = onyx_ast_node_new(context.ast_alloc, sizeof(AstCall), Ast_Kind_Call); + AstCall* implicit_call = onyx_ast_node_new(context->ast_alloc, sizeof(AstCall), Ast_Kind_Call); implicit_call->token = unop->token; implicit_call->callee = overload; implicit_call->va_kind = VA_Kind_Not_VA; - arguments_clone(&implicit_call->args, unop->overload_args); + arguments_clone(context, &implicit_call->args, unop->overload_args); return implicit_call; } -static CheckStatus assign_type_or_check(AstTyped **node, Type *type, OnyxToken *report_loc) { +static CheckStatus assign_type_or_check(Context *context, AstTyped **node, Type *type, OnyxToken *report_loc) { if (node && (*node)->type == NULL) { (*node)->type = type; @@ -1047,8 +1354,8 @@ static CheckStatus assign_type_or_check(AstTyped **node, Type *type, OnyxToken * TYPE_CHECK(node, 
type) { ERROR_(report_loc->pos, "Cannot assign value of type '%s' to a '%s'.", - type_get_name(type), - node_get_type_name(*node)); + type_get_name(context, type), + node_get_type_name(context, *node)); return Check_Error; } } @@ -1056,14 +1363,14 @@ static CheckStatus assign_type_or_check(AstTyped **node, Type *type, OnyxToken * return Check_Success; } -#define TRY_ASSIGN_TYPE_OR_FAIL(node, type, report) do { \ - CheckStatus stat = assign_type_or_check((node), (type), (report)); \ +#define TRY_ASSIGN_TYPE_OR_FAIL(context, node, type, report) do { \ + CheckStatus stat = assign_type_or_check((context), (node), (type), (report)); \ if (stat != Check_Success) return stat; \ } while (0); -CheckStatus check_binaryop_assignment(AstBinaryOp** pbinop) { +CHECK_FUNC(binaryop_assignment, AstBinaryOp** pbinop) { AstBinaryOp* binop = *pbinop; - if (current_checking_level == EXPRESSION_LEVEL) + if (context->checker.current_checking_level == EXPRESSION_LEVEL) ERROR(binop->token->pos, "Assignment not valid in expression."); if (!is_lval((AstNode *) binop->left)) @@ -1091,9 +1398,9 @@ CheckStatus check_binaryop_assignment(AstBinaryOp** pbinop) { // as in some cases (especially with macros and polyprocs), the result is not "correct". The result // makes them appears as though they are runtime-known values, which they are not. Using the following // pattern does prevent this issue. 
- resolve_expression_type(binop->right); + resolve_expression_type(context, binop->right); - Type* right_type = get_expression_type(binop->right); + Type* right_type = get_expression_type(context, binop->right); if (right_type == NULL) { if (binop->right->entity == NULL || binop->right->entity->state > Entity_State_Check_Types) { ERROR(binop->token->pos, "Could not resolve type of right hand side to infer."); @@ -1122,22 +1429,22 @@ CheckStatus check_binaryop_assignment(AstBinaryOp** pbinop) { } if (store_expr_count == 1 && lhs->kind != Ast_Kind_Compound) { - TRY_ASSIGN_TYPE_OR_FAIL(&binop->left, right_type->Compound.types[0], binop->token); + TRY_ASSIGN_TYPE_OR_FAIL(context, &binop->left, right_type->Compound.types[0], binop->token); } else { fori (i, 0, store_expr_count) { - if (right_type->Compound.types[i] == &basic_types[Basic_Kind_Void]) { + if (right_type->Compound.types[i] == context->types.basic[Basic_Kind_Void]) { ERROR(lhs->exprs[i]->token->pos, "Due to inference, this variables type would be 'void', which is not allowed."); } - TRY_ASSIGN_TYPE_OR_FAIL(&lhs->exprs[i], right_type->Compound.types[i], binop->token); + TRY_ASSIGN_TYPE_OR_FAIL(context, &lhs->exprs[i], right_type->Compound.types[i], binop->token); } - lhs->type = type_build_compound_type(context.ast_alloc, lhs); + lhs->type = type_build_compound_type(context, lhs); } } else { - if (right_type == &basic_types[Basic_Kind_Void]) { + if (right_type == context->types.basic[Basic_Kind_Void]) { ERROR(binop->left->token->pos, "Due to inference, this variables type would be 'void', which is not allowed."); } @@ -1162,7 +1469,7 @@ CheckStatus check_binaryop_assignment(AstBinaryOp** pbinop) { else if (binop->operation == Binary_Op_Assign_Shr) operation = Binary_Op_Shr; else if (binop->operation == Binary_Op_Assign_Sar) operation = Binary_Op_Sar; - AstBinaryOp* new_right = make_binary_op(context.ast_alloc, operation, binop->left, binop->right); + AstBinaryOp* new_right = make_binary_op(context, operation, 
binop->left, binop->right); binop->right = (AstTyped *) new_right; new_right->token = binop->token; binop->operation = Binary_Op_Assign; @@ -1179,11 +1486,11 @@ CheckStatus check_binaryop_assignment(AstBinaryOp** pbinop) { TYPE_CHECK(&binop->right, binop->left->type) { ERROR_(binop->token->pos, "Cannot assign value of type '%s' to a '%s'.", - node_get_type_name(binop->right), - node_get_type_name(binop->left)); + node_get_type_name(context, binop->right), + node_get_type_name(context, binop->left)); } - binop->type = &basic_types[Basic_Kind_Void]; + binop->type = context->types.basic[Basic_Kind_Void]; return Check_Success; } @@ -1246,7 +1553,7 @@ static b32 binary_op_is_allowed(BinaryOp operation, Type* type) { return (binop_allowed[operation] & effective_flags) != 0; } -CheckStatus check_binaryop_compare(AstBinaryOp** pbinop) { +CHECK_FUNC(binaryop_compare, AstBinaryOp** pbinop) { AstBinaryOp* binop = *pbinop; // HACK: Since ^... to rawptr is a one way conversion, strip any pointers @@ -1257,10 +1564,10 @@ CheckStatus check_binaryop_compare(AstBinaryOp** pbinop) { if (ltype == NULL) YIELD(binop->token->pos, "Waiting for left-type to be known."); if (rtype == NULL) YIELD(binop->token->pos, "Waiting for right-type to be known."); - if (ltype->kind == Type_Kind_Pointer) ltype = &basic_types[Basic_Kind_Rawptr]; - if (rtype->kind == Type_Kind_Pointer) rtype = &basic_types[Basic_Kind_Rawptr]; + if (ltype->kind == Type_Kind_Pointer) ltype = context->types.basic[Basic_Kind_Rawptr]; + if (rtype->kind == Type_Kind_Pointer) rtype = context->types.basic[Basic_Kind_Rawptr]; - if (!types_are_compatible(ltype, rtype)) { + if (!types_are_compatible(context, ltype, rtype)) { b32 left_ac = node_is_auto_cast((AstNode *) binop->left); b32 right_ac = node_is_auto_cast((AstNode *) binop->right); if (left_ac && right_ac) ERROR(binop->token->pos, "Cannot have auto cast on both sides of binary operator."); @@ -1269,27 +1576,27 @@ CheckStatus check_binaryop_compare(AstBinaryOp** pbinop) { 
TYPE_CHECK(&binop->right, ltype) { ERROR_(binop->token->pos, "Cannot compare '%s' to '%s'.", - type_get_name(binop->left->type), - type_get_name(binop->right->type)); + type_get_name(context, binop->left->type), + type_get_name(context, binop->right->type)); } } } if (!binary_op_is_allowed(binop->operation, binop->left->type)) { - report_bad_binaryop(binop); + report_bad_binaryop(context, binop); return Check_Error; } - binop->type = &basic_types[Basic_Kind_Bool]; + binop->type = context->types.basic[Basic_Kind_Bool]; if (binop->flags & Ast_Flag_Comptime) { // NOTE: Not a binary op - *pbinop = (AstBinaryOp *) ast_reduce(context.ast_alloc, (AstTyped *) binop); + *pbinop = (AstBinaryOp *) ast_reduce(context, (AstTyped *) binop); } return Check_Success; } -CheckStatus check_binaryop_bool(AstBinaryOp** pbinop) { +CHECK_FUNC(binaryop_bool, AstBinaryOp** pbinop) { AstBinaryOp* binop = *pbinop; b32 left_is_bool = 0; @@ -1298,7 +1605,7 @@ CheckStatus check_binaryop_bool(AstBinaryOp** pbinop) { if (type_is_bool(binop->left->type)) { left_is_bool = 1; } else { - TypeMatch implicit_cast = implicit_cast_to_bool(&binop->left); + TypeMatch implicit_cast = implicit_cast_to_bool(context, &binop->left); if (implicit_cast == TYPE_MATCH_YIELD) YIELD(binop->token->pos, "Waiting for implicit cast to bool to check."); if (implicit_cast == TYPE_MATCH_SUCCESS) { left_is_bool = 1; @@ -1308,7 +1615,7 @@ CheckStatus check_binaryop_bool(AstBinaryOp** pbinop) { if (type_is_bool(binop->right->type)) { right_is_bool = 1; } else { - TypeMatch implicit_cast = implicit_cast_to_bool(&binop->right); + TypeMatch implicit_cast = implicit_cast_to_bool(context, &binop->right); if (implicit_cast == TYPE_MATCH_YIELD) YIELD(binop->token->pos, "Waiting for implicit cast to bool to check."); if (implicit_cast == TYPE_MATCH_SUCCESS) { right_is_bool = 1; @@ -1316,15 +1623,15 @@ CheckStatus check_binaryop_bool(AstBinaryOp** pbinop) { } if (!left_is_bool || !right_is_bool) { - report_bad_binaryop(binop); + 
report_bad_binaryop(context, binop); return Check_Error; } - binop->type = &basic_types[Basic_Kind_Bool]; + binop->type = context->types.basic[Basic_Kind_Bool]; if (binop->flags & Ast_Flag_Comptime) { // NOTE: Not a binary op - *pbinop = (AstBinaryOp *) ast_reduce(context.ast_alloc, (AstTyped *) binop); + *pbinop = (AstBinaryOp *) ast_reduce(context, (AstTyped *) binop); } return Check_Success; } @@ -1335,22 +1642,22 @@ static inline b32 type_is_not_basic_or_pointer(Type *t) { && (t->kind != Type_Kind_Pointer)); } -CheckStatus check_binaryop(AstBinaryOp** pbinop) { +CHECK_FUNC(binaryop, AstBinaryOp** pbinop) { AstBinaryOp* binop = *pbinop; if (binop->flags & Ast_Flag_Has_Been_Checked) return Check_Success; - if (binop->operation == Binary_Op_Assign && binop->left->kind == Ast_Kind_Subscript && bh_arr_length(operator_overloads[Binary_Op_Subscript_Equals]) > 0) { + if (binop->operation == Binary_Op_Assign && binop->left->kind == Ast_Kind_Subscript && bh_arr_length(context->operator_overloads[Binary_Op_Subscript_Equals]) > 0) { AstSubscript* sub = (AstSubscript *) binop->left; if (binop->potential_substitute == NULL) { - u32 current_checking_level_store = current_checking_level; + u32 current_checking_level_store = context->checker.current_checking_level; CHECK(expression, &sub->addr); CHECK(expression, &sub->expr); CHECK(expression, &binop->right); - current_checking_level = current_checking_level_store; + context->checker.current_checking_level = current_checking_level_store; - AstBinaryOp *op = onyx_ast_node_new(context.ast_alloc, sizeof(AstBinaryOp), Ast_Kind_Binary_Op); + AstBinaryOp *op = onyx_ast_node_new(context->ast_alloc, sizeof(AstBinaryOp), Ast_Kind_Binary_Op); op->token = binop->token; op->operation = Binary_Op_Subscript_Equals; op->left = ((AstSubscript *) binop->left)->addr; @@ -1359,8 +1666,8 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { binop->potential_substitute = op; } - AstCall* call = 
binaryop_try_operator_overload(binop->potential_substitute, binop->right); - if (call == (AstCall *) &node_that_signals_a_yield) YIELD(binop->token->pos, "Waiting on potential operator overload."); + AstCall* call = binaryop_try_operator_overload(context, binop->potential_substitute, binop->right); + if (call == (AstCall *) &context->node_that_signals_a_yield) YIELD(binop->token->pos, "Waiting on potential operator overload."); if (call != NULL) { call->next = binop->next; *(AstCall **) pbinop = call; @@ -1371,10 +1678,10 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { } - u32 current_checking_level_store = current_checking_level; + u32 current_checking_level_store = context->checker.current_checking_level; CHECK(expression, &binop->left); CHECK(expression, &binop->right); - current_checking_level = current_checking_level_store; + context->checker.current_checking_level = current_checking_level_store; // :UnaryFieldAccessIsGross if (binop->left->kind == Ast_Kind_Unary_Field_Access || binop->right->kind == Ast_Kind_Unary_Field_Access) { @@ -1382,7 +1689,7 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { TYPE_CHECK(&binop->right, binop->left->type) { // TODO: This should report a better error about the Unary_Field_Access not be able to be resolved given whatever type. // - brendanfh 2021/12/31 - report_bad_binaryop(binop); + report_bad_binaryop(context, binop); return Check_Error; } } @@ -1392,7 +1699,7 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { binop->flags |= Ast_Flag_Comptime; } - if (context.checker.expression_types_must_be_known) { + if (context->checker.expression_types_must_be_known) { if (binop->left->type == NULL || binop->right->type == NULL) { ERROR(binop->token->pos, "Internal compiler error: one of the operands types is unknown here."); } @@ -1400,26 +1707,25 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { // NOTE: Try operator overloading before checking everything else. 
if (type_is_not_basic_or_pointer(binop->left->type) || type_is_not_basic_or_pointer(binop->right->type)) { - u64 cache_key = 0; if (binop->left->type && binop->right->type) { - if (!context.checker.__binop_impossible_cache[binop->operation].hashes) { - bh_imap_init(&context.checker.__binop_impossible_cache[binop->operation], global_heap_allocator, 256); + if (!context->checker.__binop_impossible_cache[binop->operation].hashes) { + bh_imap_init(&context->checker.__binop_impossible_cache[binop->operation], context->gp_alloc, 256); } cache_key = ((u64) (binop->left->type->id) << 32ll) | (u64) binop->right->type->id; - if (bh_imap_has(&context.checker.__binop_impossible_cache[binop->operation], cache_key)) { + if (bh_imap_has(&context->checker.__binop_impossible_cache[binop->operation], cache_key)) { goto definitely_not_op_overload; } } - AstCall *implicit_call = binaryop_try_operator_overload(binop, NULL); + AstCall *implicit_call = binaryop_try_operator_overload(context, binop, NULL); - if (implicit_call == (AstCall *) &node_that_signals_a_yield) + if (implicit_call == (AstCall *) &context->node_that_signals_a_yield) YIELD(binop->token->pos, "Trying to resolve operator overload."); - if (implicit_call != NULL && implicit_call != &context.checker.__op_maybe_overloaded) { + if (implicit_call != NULL && implicit_call != &context->checker.__op_maybe_overloaded) { // NOTE: Not a binary op implicit_call->next = binop->next; *pbinop = (AstBinaryOp *) implicit_call; @@ -1428,14 +1734,14 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { return Check_Success; } - if (cache_key && implicit_call != &context.checker.__op_maybe_overloaded) { - bh_imap_put(&context.checker.__binop_impossible_cache[binop->operation], cache_key, 1); + if (cache_key && implicit_call != &context->checker.__op_maybe_overloaded) { + bh_imap_put(&context->checker.__binop_impossible_cache[binop->operation], cache_key, 1); } } definitely_not_op_overload: - if (binop_is_assignment(binop->operation)) 
return check_binaryop_assignment(pbinop); + if (binop_is_assignment(binop->operation)) return check_binaryop_assignment(context, pbinop); if (binop->left->type == NULL && binop->left->entity && binop->left->entity->state <= Entity_State_Check_Types) { YIELD(binop->left->token->pos, "Waiting for this type to be known"); @@ -1446,9 +1752,9 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { // NOTE: Comparision operators and boolean operators are handled separately. if (binop_is_compare(binop->operation)) - return check_binaryop_compare(pbinop); + return check_binaryop_compare(context, pbinop); if (binop->operation == Binary_Op_Bool_And || binop->operation == Binary_Op_Bool_Or) - return check_binaryop_bool(pbinop); + return check_binaryop_bool(context, pbinop); // NOTE: The left side cannot be rawptr. if (type_is_rawptr(binop->left->type)) { @@ -1459,14 +1765,14 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { if (type_is_multi_pointer(binop->left->type)) { if (binop->operation != Binary_Op_Add && binop->operation != Binary_Op_Minus) goto bad_binaryop; - resolve_expression_type(binop->right); + resolve_expression_type(context, binop->right); if (!type_is_integer(binop->right->type)) goto bad_binaryop; - AstNumLit* numlit = make_int_literal(context.ast_alloc, type_size_of(binop->left->type->MultiPointer.elem)); + AstNumLit* numlit = make_int_literal(context, type_size_of(binop->left->type->MultiPointer.elem)); numlit->token = binop->right->token; numlit->type = binop->right->type; - AstBinaryOp* binop_node = make_binary_op(context.ast_alloc, Binary_Op_Multiply, binop->right, (AstTyped *) numlit); + AstBinaryOp* binop_node = make_binary_op(context, Binary_Op_Multiply, binop->right, (AstTyped *) numlit); binop_node->token = binop->token; CHECK(binaryop, &binop_node); @@ -1475,7 +1781,7 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { binop->right->type = binop->left->type; } - if (!types_are_compatible(binop->left->type, binop->right->type)) { + if 
(!types_are_compatible(context, binop->left->type, binop->right->type)) { b32 left_ac = node_is_auto_cast((AstNode *) binop->left); b32 right_ac = node_is_auto_cast((AstNode *) binop->right); if (left_ac && right_ac) { @@ -1487,8 +1793,8 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { ERROR_(binop->token->pos, "Binary operation '%s' not understood for types '%s' and '%s'.", binaryop_string[binop->operation], - node_get_type_name(binop->left), - node_get_type_name(binop->right)); + node_get_type_name(context, binop->left), + node_get_type_name(context, binop->right)); } } } @@ -1498,53 +1804,55 @@ CheckStatus check_binaryop(AstBinaryOp** pbinop) { // NOTE: Enum flags with '&' result in a boolean value if (binop->type->kind == Type_Kind_Enum && binop->type->Enum.is_flags && binop->operation == Binary_Op_And) { - binop->type = &basic_types[Basic_Kind_Bool]; + binop->type = context->types.basic[Basic_Kind_Bool]; } - if (context.checker.all_checks_are_final) { + if (context->checker.all_checks_are_final) { binop->flags |= Ast_Flag_Has_Been_Checked; if (binop->flags & Ast_Flag_Comptime) { // NOTE: Not a binary op - *pbinop = (AstBinaryOp *) ast_reduce(context.ast_alloc, (AstTyped *) binop); + *pbinop = (AstBinaryOp *) ast_reduce(context, (AstTyped *) binop); } } return Check_Success; bad_binaryop: - report_bad_binaryop(binop); + report_bad_binaryop(context, binop); return Check_Error; } -CheckStatus check_unaryop(AstUnaryOp** punop) { +CHECK_FUNC(unaryop, AstUnaryOp** punop) { AstUnaryOp* unaryop = *punop; CHECK(expression, &unaryop->expr); if (unaryop->operation != Unary_Op_Negate) { - resolve_expression_type(unaryop->expr); + resolve_expression_type(context, unaryop->expr); } if (unaryop->operation == Unary_Op_Cast) { + CHECK(type, &unaryop->type_node); + char* err; if (unaryop->type == NULL) YIELD(unaryop->token->pos, "Trying to resolve destination type for cast."); - if (!cast_is_legal(unaryop->expr->type, unaryop->type, &err)) { + if (!cast_is_legal(context, 
unaryop->expr->type, unaryop->type, &err)) { ERROR_(unaryop->token->pos, "Cast Error: %s", err); } } if (unaryop->operation == Unary_Op_Not) { if (!type_is_bool(unaryop->expr->type)) { - TypeMatch implicit_cast = implicit_cast_to_bool(&unaryop->expr); + TypeMatch implicit_cast = implicit_cast_to_bool(context, &unaryop->expr); if (implicit_cast == TYPE_MATCH_YIELD) YIELD(unaryop->token->pos, "Waiting for implicit cast to bool to check."); if (implicit_cast == TYPE_MATCH_FAILED) { ERROR_(unaryop->token->pos, "Bool negation operator expected bool type, got '%s'.", - node_get_type_name(unaryop->expr)); + node_get_type_name(context, unaryop->expr)); } } } @@ -1553,7 +1861,7 @@ CheckStatus check_unaryop(AstUnaryOp** punop) { if (!type_is_integer(unaryop->expr->type)) { ERROR_(unaryop->token->pos, "Bitwise operator expected integer type, got '%s'.", - node_get_type_name(unaryop->expr)); + node_get_type_name(context, unaryop->expr)); } } @@ -1561,10 +1869,10 @@ CheckStatus check_unaryop(AstUnaryOp** punop) { unaryop->type = unaryop->expr->type; } - if (unaryop->operation == Unary_Op_Try) { - AstCall* call = unaryop_try_operator_overload(unaryop); - if (call == (AstCall *) &node_that_signals_a_yield) YIELD(unaryop->token->pos, "Waiting on potential operator overload."); - if (call != NULL && call != &context.checker.__op_maybe_overloaded) { + if (unaryop->operation == Unary_Op_Try || unaryop->operation == Unary_Op_Unwrap) { + AstCall* call = unaryop_try_operator_overload(context, unaryop); + if (call == (AstCall *) &context->node_that_signals_a_yield) YIELD(unaryop->token->pos, "Waiting on potential operator overload."); + if (call != NULL && call != &context->checker.__op_maybe_overloaded) { call->next = unaryop->next; *(AstCall **) punop = call; @@ -1572,20 +1880,40 @@ CheckStatus check_unaryop(AstUnaryOp** punop) { return Check_Success; } - ERROR_(unaryop->token->pos, "'%s' does not support '?' 
operator.", type_get_name(unaryop->expr->type)); + if (unaryop->operation == Unary_Op_Try) + ERROR_(unaryop->token->pos, "'%s' does not support '?' operator.", type_get_name(context, unaryop->expr->type)); + + if (unaryop->operation == Unary_Op_Unwrap) + ERROR_(unaryop->token->pos, "'%s' does not support '!' operator.", type_get_name(context, unaryop->expr->type)); } if (unaryop->expr->flags & Ast_Flag_Comptime) { unaryop->flags |= Ast_Flag_Comptime; // NOTE: Not a unary op - *punop = (AstUnaryOp *) ast_reduce(context.ast_alloc, (AstTyped *) unaryop); + *punop = (AstUnaryOp *) ast_reduce(context, (AstTyped *) unaryop); } return Check_Success; } -CheckStatus check_struct_literal(AstStructLiteral* sl) { +CHECK_FUNC(struct_literal, AstStructLiteral* sl) { + if (sl->stnode) { + CHECK(expression, &sl->stnode); + } + + sl->type_node = (AstType *) sl->stnode; + while (sl->type_node && sl->type_node->kind == Ast_Kind_Type_Alias) + sl->type_node = ((AstTypeAlias *) sl->type_node)->to; + + if (sl->extension_value) { + CHECK(expression, &sl->extension_value); + + // Use the type of the extension value if no type of the structure literal was given. + if (!sl->type && sl->extension_value->type) { + sl->type = sl->extension_value->type; + } + } if (sl->type == NULL) { // NOTE: This is used for automatically typed struct literals. If there is no provided @@ -1603,13 +1931,23 @@ CheckStatus check_struct_literal(AstStructLiteral* sl) { ERROR(sl->token->pos, "Type used for struct literal is not a type."); } - sl->type = type_build_from_ast(context.ast_alloc, (AstType *) sl->stnode); + sl->type = type_build_from_ast(context, (AstType *) sl->stnode); if (sl->type == NULL) YIELD(sl->token->pos, "Trying to resolve type of struct literal."); } + if (sl->values_to_initialize == NULL) { + bh_arr_new(context->gp_alloc, sl->values_to_initialize, 2); + } + + // + // Union literal are internally structure literals with a single named member. 
if (sl->type->kind == Type_Kind_Union) { if ((sl->flags & Ast_Flag_Has_Been_Checked) != 0) return Check_Success; + + if (sl->extension_value) { + ERROR_(sl->token->pos, "Cannot use field-update syntax on '%s' because it is a 'union'.", type_get_name(context, sl->type)); + } Type *union_type = sl->type; @@ -1617,18 +1955,25 @@ CheckStatus check_struct_literal(AstStructLiteral* sl) { // Produce an empty value of the first union type. UnionVariant *uv = union_type->Union.variants[0].value; - AstNumLit *tag_value = make_int_literal(context.ast_alloc, uv->tag_value); + AstNumLit *tag_value = make_int_literal(context, uv->tag_value); tag_value->type = union_type->Union.tag_type; - bh_arr_push(sl->args.values, (AstTyped *) tag_value); - bh_arr_push(sl->args.values, (AstTyped *) make_zero_value(context.ast_alloc, sl->token, uv->type)); + bh_arr_push(sl->values_to_initialize, ((ValueWithOffset) { + (AstTyped *) tag_value, + 0 + })); + + bh_arr_push(sl->values_to_initialize, ((ValueWithOffset) { + (AstTyped *) make_zero_value(context, sl->token, uv->type), + union_type->Union.alignment + })); sl->flags |= Ast_Flag_Has_Been_Checked; return Check_Success; } if (bh_arr_length(sl->args.values) != 0 || bh_arr_length(sl->args.named_values) != 1) { - ERROR_(sl->token->pos, "Expected exactly one named member when constructing an instance of a union type, '%s'.", type_get_name(sl->type)); + ERROR_(sl->token->pos, "Expected exactly one named member when constructing an instance of a union type, '%s'.", type_get_name(context, sl->type)); } AstNamedValue* value = sl->args.named_values[0]; @@ -1641,7 +1986,7 @@ CheckStatus check_struct_literal(AstStructLiteral* sl) { if (!matched_variant) { ERROR_(value->token->pos, "'%b' is not a variant of '%s'.", - value->token->text, value->token->length, type_get_name(union_type)); + value->token->text, value->token->length, type_get_name(context, union_type)); } CHECK(expression, &value->value); @@ -1649,27 +1994,80 @@ CheckStatus 
check_struct_literal(AstStructLiteral* sl) { TYPE_CHECK(&value->value, matched_variant->type) { ERROR_(value->token->pos, "Mismatched type in initialized type. Expected something of type '%s', got '%s'.", - type_get_name(matched_variant->type), - type_get_name(value->value->type)); + type_get_name(context, matched_variant->type), + type_get_name(context, value->value->type)); } - AstNumLit *tag_value = make_int_literal(context.ast_alloc, matched_variant->tag_value); + AstNumLit *tag_value = make_int_literal(context, matched_variant->tag_value); tag_value->type = union_type->Union.tag_type; - bh_arr_push(sl->args.values, (AstTyped *) tag_value); - bh_arr_push(sl->args.values, value->value); + bh_arr_push(sl->values_to_initialize, ((ValueWithOffset) { + (AstTyped *) tag_value, + 0 + })); + + bh_arr_push(sl->values_to_initialize, ((ValueWithOffset) { + value->value, + union_type->Union.alignment + })); sl->flags |= Ast_Flag_Has_Been_Checked; return Check_Success; } + if (sl->type->kind == Type_Kind_Array) { + if (bh_arr_length(sl->args.named_values) > 0) { + ERROR_(sl->token->pos, "Cannot specify named values when creating a '%s'.", type_get_name(context, sl->type)); + } + + u32 value_count = bh_arr_length(sl->args.values); + if (value_count == 0) { + AstZeroValue *zv = make_zero_value(context, sl->token, sl->type); + bh_arr_push(sl->values_to_initialize, ((ValueWithOffset) { (AstTyped *) zv, 0 })); + + sl->flags |= Ast_Flag_Has_Been_Checked; + return Check_Success; + } + + if (value_count != sl->type->Array.count) { + ERROR_(sl->token->pos, + "Expected exactly '%d' values when constructing a '%s', but got '%d' value%s.", + sl->type->Array.count, + type_get_name(context, sl->type), + value_count, + bh_num_plural(value_count)); + } + + Type* type_to_match = sl->type->Array.elem; + i32 offset = 0; + bh_arr_each(AstTyped *, pval, sl->args.values) { + CHECK(expression, pval); + + TYPE_CHECK(pval, type_to_match) { + ERROR_(sl->token->pos, + "Mismatched type. 
Expected something of type '%s', got '%s'.", + type_get_name(context, type_to_match), + type_get_name(context, (*pval)->type)); + } + + bh_arr_push(sl->values_to_initialize, ((ValueWithOffset) { *pval, offset })); + offset += type_size_of(type_to_match); + bh_align(offset, type_alignment_of(type_to_match)); + } + + return Check_Success; + } + + if (!type_is_structlike_strict(sl->type)) { + if ((sl->flags & Ast_Flag_Has_Been_Checked) != 0) return Check_Success; + // // If there are no given arguments to a structure literal, it is treated as a 'zero-value', // and can be used to create a completely zeroed value of any type. if (bh_arr_length(sl->args.values) == 0 && bh_arr_length(sl->args.named_values) == 0) { - AstZeroValue *zv = make_zero_value(context.ast_alloc, sl->token, sl->type); - bh_arr_push(sl->args.values, (AstTyped *) zv); + AstZeroValue *zv = make_zero_value(context, sl->token, sl->type); + bh_arr_push(sl->values_to_initialize, ((ValueWithOffset) { (AstTyped *) zv, 0 })); sl->flags |= Ast_Flag_Has_Been_Checked; return Check_Success; @@ -1686,18 +2084,13 @@ CheckStatus check_struct_literal(AstStructLiteral* sl) { TYPE_CHECK(&sl->args.values[0], type_to_match) { ERROR_(sl->token->pos, "Mismatched type in initialized type. 
Expected something of type '%s', got '%s'.", - type_get_name(type_to_match), - type_get_name(sl->args.values[0]->type)); + type_get_name(context, type_to_match), + type_get_name(context, sl->args.values[0]->type)); } - sl->flags |= Ast_Flag_Has_Been_Checked; - return Check_Success; - } + bh_arr_push(sl->values_to_initialize, ((ValueWithOffset) { sl->args.values[0], 0 })); - if ((sl->flags & Ast_Flag_Has_Been_Checked) != 0) { - assert(sl->args.values); - assert(sl->args.values[0]); - assert(sl->args.values[0]->kind == Ast_Kind_Zero_Value); + sl->flags |= Ast_Flag_Has_Been_Checked; return Check_Success; } @@ -1705,30 +2098,41 @@ CheckStatus check_struct_literal(AstStructLiteral* sl) { // Otherwise, it is not possible to construct the type if it is not a structure. ERROR_(sl->token->pos, "'%s' is not constructable using a struct literal.", - type_get_name(sl->type)); + type_get_name(context, sl->type)); + } + + bh_arr_clear(sl->values_to_initialize); + + if (sl->extension_value) { + TYPE_CHECK(&sl->extension_value, sl->type) { + ERROR_(sl->token->pos, "Expected base value for field-update to be of type '%s', but it was '%s' instead.", + type_get_name(context, sl->type), type_get_name(context, sl->extension_value->type)); + } + + bh_arr_push(sl->values_to_initialize, ((ValueWithOffset) { sl->extension_value, 0 })); } i32 mem_count = type_structlike_mem_count(sl->type); - arguments_ensure_length(&sl->args, mem_count); + arguments_ensure_length(context, &sl->args, mem_count); // :Idempotency if ((sl->flags & Ast_Flag_Has_Been_Checked) == 0) { char* err_msg = NULL; - if (!fill_in_arguments(&sl->args, (AstNode *) sl, &err_msg, 1)) { - onyx_report_error(sl->token->pos, Error_Critical, err_msg); - - bh_arr_each(AstTyped *, value, sl->args.values) { - if (*value == NULL) { - i32 member_idx = value - sl->args.values; // Pointer subtraction hack - StructMember smem; - type_lookup_member_by_idx(sl->type, member_idx, &smem); - - onyx_report_error(sl->token->pos, Error_Critical, - 
"Value not given for %d%s member, '%s', for type '%s'.", - member_idx + 1, bh_num_suffix(member_idx + 1), - smem.name, type_get_name(sl->type)); - } - } + if (!fill_in_arguments(context, &sl->args, (AstNode *) sl, &err_msg, 1)) { + ONYX_ERROR(sl->token->pos, Error_Critical, err_msg); + + // bh_arr_each(AstTyped *, value, sl->args.values) { + // if (*value == NULL) { + // i32 member_idx = value - sl->args.values; // Pointer subtraction hack + // StructMember smem; + // type_lookup_member_by_idx(sl->type, member_idx, &smem); + + // ONYX_ERROR(sl->token->pos, Error_Critical, + // "Value not given for %d%s member, '%s', for type '%s'.", + // member_idx + 1, bh_num_suffix(member_idx + 1), + // smem.name, type_get_name(context, sl->type)); + // } + // } return Check_Error; } @@ -1751,7 +2155,7 @@ CheckStatus check_struct_literal(AstStructLiteral* sl) { // NOTE: Not checking the return on this function because // this for loop is bounded by the number of members in the // type. - type_lookup_member_by_idx(sl->type, i, &smem); + type_lookup_member_by_idx(context, sl->type, i, &smem); Type* formal = smem.type; CHECK(expression, actual); @@ -1764,8 +2168,12 @@ CheckStatus check_struct_literal(AstStructLiteral* sl) { "Mismatched types for %d%s member named '%s', expected '%s', got '%s'.", i + 1, bh_num_suffix(i + 1), smem.name, - type_get_name(formal), - node_get_type_name(*actual)); + type_get_name(context, formal), + node_get_type_name(context, *actual)); + } + + if (!sl->extension_value || (*actual)->kind != Ast_Kind_Zero_Value) { + bh_arr_push(sl->values_to_initialize, ((ValueWithOffset) { *actual, smem.offset })); } sl->flags &= ((*actual)->flags & Ast_Flag_Comptime) | (sl->flags &~ Ast_Flag_Comptime); @@ -1775,7 +2183,15 @@ CheckStatus check_struct_literal(AstStructLiteral* sl) { return Check_Success; } -CheckStatus check_array_literal(AstArrayLiteral* al) { +CHECK_FUNC(array_literal, AstArrayLiteral* al) { + if (al->atnode) { + CHECK(expression, &al->atnode); + } + + 
al->type_node = (AstType *) al->atnode; + while (al->type_node && al->type_node->kind == Ast_Kind_Type_Alias) + al->type_node = ((AstTypeAlias *) al->type_node)->to; + bh_arr_each(AstTyped *, expr, al->values) { CHECK(expression, expr); } @@ -1789,11 +2205,11 @@ CheckStatus check_array_literal(AstArrayLiteral* al) { if (!node_is_type((AstNode *) al->atnode)) ERROR(al->token->pos, "Array type is not a type."); - al->type = type_build_from_ast(context.ast_alloc, (AstType *) al->atnode); + al->type = type_build_from_ast(context, (AstType *) al->atnode); if (al->type == NULL) YIELD(al->token->pos, "Trying to resolve type of array literal."); - al->type = type_make_array(context.ast_alloc, al->type, bh_arr_length(al->values)); + al->type = type_make_array(context, al->type, bh_arr_length(al->values)); if (al->type == NULL || al->type->kind != Type_Kind_Array) ERROR(al->token->pos, "Expected array type for array literal. This is a compiler bug."); @@ -1821,125 +2237,243 @@ CheckStatus check_array_literal(AstArrayLiteral* al) { TYPE_CHECK(expr, elem_type) { ERROR_((*expr)->token->pos, "Mismatched types for value in array. Expected something of type '%s', got '%s' instead.", - type_get_name(elem_type), - node_get_type_name(*expr)); + type_get_name(context, elem_type), + node_get_type_name(context, *expr)); } } return Check_Success; } -CheckStatus check_range_literal(AstRangeLiteral** prange) { +CHECK_FUNC(range_literal, AstRangeLiteral** prange) { AstRangeLiteral* range = *prange; if (range->flags & Ast_Flag_Has_Been_Checked) return Check_Success; CHECK(expression, &range->low); CHECK(expression, &range->high); - builtin_range_type_type = type_build_from_ast(context.ast_alloc, builtin_range_type); - if (builtin_range_type_type == NULL) YIELD(range->token->pos, "Waiting for 'range' structure to be built."); + // HACK HACK These should already be checked but they might not be!
+ CHECK(type, &context->builtins.range_type); + CHECK(type, &context->builtins.range64_type); + + context->builtins.range_type_type = type_build_from_ast(context, context->builtins.range_type); + context->builtins.range64_type_type = type_build_from_ast(context, context->builtins.range64_type); + if (context->builtins.range_type_type == NULL) YIELD(range->token->pos, "Waiting for 'range' structure to be built."); + if (context->builtins.range64_type_type == NULL) YIELD(range->token->pos, "Waiting for 'range64' structure to be built."); + + Type* expected_range_type = NULL; + TYPE_QUERY(&range->low, context->types.basic[Basic_Kind_I32]) { + TYPE_QUERY(&range->high, context->types.basic[Basic_Kind_I32]) { + expected_range_type = context->builtins.range_type_type; + } + } + + if (expected_range_type == NULL) { + TYPE_QUERY(&range->low, context->types.basic[Basic_Kind_I64]) { + TYPE_QUERY(&range->high, context->types.basic[Basic_Kind_I64]) { + expected_range_type = context->builtins.range64_type_type; + } + } + } + + if (expected_range_type == NULL) { + ERROR_(range->token->pos, "Range operator '..' 
not understood for types '%s' and '%s'.", + node_get_type_name(context, range->low), node_get_type_name(context, range->high)); + } - Type* expected_range_type = builtin_range_type_type; StructMember smem; - type_lookup_member(expected_range_type, "low", &smem); + type_lookup_member(context, expected_range_type, "low", &smem); TYPE_CHECK(&range->low, smem.type) { ERROR_(range->token->pos, - "Expected left side of range to be a 32-bit integer, got '%s'.", - node_get_type_name(range->low)); + "Expected left side of range to be a '%s', got '%s'.", + type_get_name(context, smem.type), node_get_type_name(context, range->low)); } - type_lookup_member(expected_range_type, "high", &smem); + type_lookup_member(context, expected_range_type, "high", &smem); TYPE_CHECK(&range->high, smem.type) { ERROR_(range->token->pos, - "Expected right side of range to be a 32-bit integer, got '%s'.", - node_get_type_name(range->high)); + "Expected right side of range to be a '%s', got '%s'.", + type_get_name(context, smem.type), node_get_type_name(context, range->high)); } if (range->step == NULL) { - type_lookup_member(expected_range_type, "step", &smem); + type_lookup_member(context, expected_range_type, "step", &smem); assert(smem.initial_value != NULL); CHECK(expression, smem.initial_value); range->step = *smem.initial_value; } + if (range->inclusive) { + AstTyped *one = (AstTyped *) make_int_literal(context, 1); + one->type = smem.type; + + range->high = (AstTyped *) make_binary_op(context, Binary_Op_Add, range->high, one); + + CHECK(binaryop, (AstBinaryOp **) &range->high); + } + range->flags |= Ast_Flag_Has_Been_Checked; + range->type = expected_range_type; return Check_Success; } -CheckStatus check_compound(AstCompound* compound) { +CHECK_FUNC(compound, AstCompound* compound) { bh_arr_each(AstTyped *, expr, compound->exprs) { CHECK(expression, expr); } - compound->type = type_build_compound_type(context.ast_alloc, compound); + compound->type = type_build_compound_type(context,
compound); return Check_Success; } -CheckStatus check_if_expression(AstIfExpression* if_expr) { +CHECK_FUNC(if_expression, AstIfExpression* if_expr) { CHECK(expression, &if_expr->cond); CHECK(expression, &if_expr->true_expr); CHECK(expression, &if_expr->false_expr); - TYPE_CHECK(&if_expr->cond, &basic_types[Basic_Kind_Bool]) { - TypeMatch implicit_cast = implicit_cast_to_bool(&if_expr->cond); + TYPE_CHECK(&if_expr->cond, context->types.basic[Basic_Kind_Bool]) { + TypeMatch implicit_cast = implicit_cast_to_bool(context, &if_expr->cond); if (implicit_cast == TYPE_MATCH_YIELD) YIELD(if_expr->token->pos, "Waiting for implicit cast to bool to check."); if (implicit_cast == TYPE_MATCH_FAILED) { ERROR_(if_expr->token->pos, "If-expression expected boolean for condition, got '%s'.", - type_get_name(if_expr->cond->type)); + type_get_name(context, if_expr->cond->type)); } } - resolve_expression_type((AstTyped *) if_expr); + resolve_expression_type(context, (AstTyped *) if_expr); - if (!types_are_compatible(if_expr->true_expr->type, if_expr->false_expr->type)) { + if (!types_are_compatible(context, if_expr->true_expr->type, if_expr->false_expr->type)) { ERROR_(if_expr->token->pos, "Mismatched types for if-expression, left side is '%s', and right side is '%s'.", - type_get_name(if_expr->true_expr->type), type_get_name(if_expr->false_expr->type)); + type_get_name(context, if_expr->true_expr->type), type_get_name(context, if_expr->false_expr->type)); } return Check_Success; } -CheckStatus check_do_block(AstDoBlock** pdoblock) { - AstDoBlock* doblock = *pdoblock; - if (doblock->flags & Ast_Flag_Has_Been_Checked) return Check_Success; +CHECK_FUNC(pipe, AstBinaryOp** ppipe) { + AstBinaryOp *pipe = *ppipe; - fill_in_type((AstTyped *) doblock); + // + // Handle x |> y()? or x |> y()! 
+ if (pipe->right->kind == Ast_Kind_Unary_Op) { + AstUnaryOp *the_try = (AstUnaryOp *) pipe->right; + if (the_try->operation == Unary_Op_Try || the_try->operation == Unary_Op_Unwrap) { + // Shuffle the tree! + AstBinaryOp *the_pipe = pipe; + + the_pipe->right = the_try->expr; + the_try->expr = (AstTyped *) the_pipe; + *ppipe = (AstBinaryOp *) the_try; + + CHECK(expression, (AstTyped **) ppipe); + return Check_Success; + } + } - bh_arr_push(context.checker.expected_return_type_stack, &doblock->type); + AstCall* base_call_node = (AstCall *) pipe->right; + AstCall* call_node = base_call_node; - doblock->block->rules = Block_Rule_Do_Block; - CHECK(block, doblock->block); + if (call_node->kind == Ast_Kind_Method_Call) { + call_node = (AstCall *) ((AstBinaryOp *) call_node)->right; + } - if (doblock->type == &type_auto_return) doblock->type = &basic_types[Basic_Kind_Void]; + // + // Handle x |> y === x |> y() === y(x) + if (call_node->kind != Ast_Kind_Call) { + AstCall *new_call = onyx_ast_node_new(context->ast_alloc, sizeof(AstCall), Ast_Kind_Call); + new_call->token = call_node->token; + new_call->callee = (AstTyped *) call_node; + arguments_initialize(context, &new_call->args); - bh_arr_pop(context.checker.expected_return_type_stack); + pipe->right = (AstTyped *) new_call; + base_call_node = new_call; + call_node = new_call; + } - doblock->flags |= Ast_Flag_Has_Been_Checked; - return Check_Success; -} + if (call_node->callee->kind == Ast_Kind_Unary_Field_Access) { + AstAlias *left_alias = onyx_ast_node_new(context->ast_alloc, sizeof(AstAlias), Ast_Kind_Alias); + left_alias->token = pipe->left->token; + left_alias->alias = pipe->left; + pipe->left = (AstTyped *) left_alias; -CheckStatus check_address_of(AstAddressOf** paof) { - AstAddressOf* aof = *paof; + AstFieldAccess *implicit_field_access = make_field_access(context, (AstTyped *) left_alias, NULL); + implicit_field_access->token = call_node->callee->token; - AstTyped* expr = (AstTyped *) strip_aliases((AstNode *) 
aof->expr); - if (expr->kind == Ast_Kind_Subscript && bh_arr_length(operator_overloads[Binary_Op_Ptr_Subscript]) > 0) { - if (aof->potential_substitute == NULL) { - CHECK(expression, &((AstSubscript *) expr)->addr); - CHECK(expression, &((AstSubscript *) expr)->expr); + call_node->callee = (AstTyped *) implicit_field_access; - AstBinaryOp *op = onyx_ast_node_new(context.ast_alloc, sizeof(AstBinaryOp), Ast_Kind_Binary_Op); - op->operation = Binary_Op_Ptr_Subscript; - op->left = ((AstSubscript *) expr)->addr; + AstAddressOf *address_of = make_address_of(context, pipe->left); + address_of->can_be_removed = 1; + pipe->left = (AstTyped *) address_of; + } + + if (!call_node || call_node->kind != Ast_Kind_Call) { + ERROR(pipe->token->pos, "Pipe operator expected call on right side."); + } + + // CLEANUP: Why is this here? + if (pipe->left == NULL) return Check_Error; + + if (call_node->placeholder_argument_position > 0) { + assert(call_node->placeholder_argument_position - 1 < bh_arr_length(call_node->args.values)); + call_node->args.values[call_node->placeholder_argument_position - 1] = (AstTyped *) make_argument(context, pipe->left); + call_node->placeholder_argument_position = 0; + + } else { + bh_arr_insertn(call_node->args.values, 0, 1); + call_node->args.values[0] = (AstTyped *) make_argument(context, pipe->left); + } + + base_call_node->next = pipe->next; + *ppipe = (AstBinaryOp *) base_call_node; + + CHECK(expression, (AstTyped **) ppipe); + return Check_Success; +} + +CHECK_FUNC(do_block, AstDoBlock** pdoblock) { + AstDoBlock* doblock = *pdoblock; + if (doblock->flags & Ast_Flag_Has_Been_Checked) return Check_Success; + + fill_in_type(context, (AstTyped *) doblock); + + bh_arr_push(context->checker.expected_return_type_stack, &doblock->type); + bh_arr_push(context->checker.named_return_values_stack, doblock->named_return_locals); + + doblock->block->rules = Block_Rule_Do_Block; + + CHECK(block, doblock->block); + + if (doblock->type == context->types.auto_return) 
doblock->type = context->types.basic[Basic_Kind_Void]; + + bh_arr_pop(context->checker.expected_return_type_stack); + bh_arr_pop(context->checker.named_return_values_stack); + + doblock->flags |= Ast_Flag_Has_Been_Checked; + return Check_Success; +} + +CHECK_FUNC(address_of, AstAddressOf** paof) { + AstAddressOf* aof = *paof; + + AstTyped* expr = (AstTyped *) strip_aliases((AstNode *) aof->expr); + if (expr->kind == Ast_Kind_Subscript && bh_arr_length(context->operator_overloads[Binary_Op_Ptr_Subscript]) > 0) { + if (aof->potential_substitute == NULL) { + CHECK(expression, &((AstSubscript *) expr)->addr); + CHECK(expression, &((AstSubscript *) expr)->expr); + + AstBinaryOp *op = onyx_ast_node_new(context->ast_alloc, sizeof(AstBinaryOp), Ast_Kind_Binary_Op); + op->operation = Binary_Op_Ptr_Subscript; + op->left = ((AstSubscript *) expr)->addr; op->right = ((AstSubscript *) expr)->expr; op->token = aof->token; aof->potential_substitute = op; } - AstCall* call = binaryop_try_operator_overload(aof->potential_substitute, NULL); - if (call == (AstCall *) &node_that_signals_a_yield) YIELD(aof->token->pos, "Waiting for operator overload to possibly resolve."); + AstCall* call = binaryop_try_operator_overload(context, aof->potential_substitute, NULL); + if (call == (AstCall *) &context->node_that_signals_a_yield) YIELD(aof->token->pos, "Waiting for operator overload to possibly resolve."); if (call != NULL) { call->next = aof->next; *(AstCall **) paof = call; @@ -1949,9 +2483,20 @@ CheckStatus check_address_of(AstAddressOf** paof) { } } + if (node_is_type((AstNode *) expr)) { + AstPointerType *pt = onyx_ast_node_new(context->ast_alloc, sizeof(AstPointerType), Ast_Kind_Pointer_Type); + pt->token = aof->token; + pt->elem = (AstType *) expr; + pt->next = aof->next; + *paof = (AstAddressOf *) pt; + CHECK(type, (AstType **) &pt); + return Check_Success; + } + CHECK(expression, &aof->expr); + if (node_is_addressable_literal((AstNode *) aof->expr)) { - 
resolve_expression_type(aof->expr); + resolve_expression_type(context, aof->expr); } if (aof->expr->type == NULL) { @@ -1960,7 +2505,7 @@ CheckStatus check_address_of(AstAddressOf** paof) { expr = (AstTyped *) strip_aliases((AstNode *) aof->expr); if (node_is_type((AstNode *) expr)) { - AstPointerType *pt = onyx_ast_node_new(context.ast_alloc, sizeof(AstPointerType), Ast_Kind_Pointer_Type); + AstPointerType *pt = onyx_ast_node_new(context->ast_alloc, sizeof(AstPointerType), Ast_Kind_Pointer_Type); pt->token = aof->token; pt->elem = (AstType *) expr; pt->next = aof->next; @@ -1981,7 +2526,7 @@ CheckStatus check_address_of(AstAddressOf** paof) { if (aof->can_be_removed) { *(AstTyped **) paof = aof->expr; - return Check_Yield_Macro; + return Check_Yield; } ERROR_(aof->token->pos, "Cannot take the address of something that is not an l-value. %s", onyx_ast_node_kind_string(expr->kind)); @@ -1989,7 +2534,7 @@ CheckStatus check_address_of(AstAddressOf** paof) { expr->flags |= Ast_Flag_Address_Taken; - aof->type = type_make_pointer(context.ast_alloc, expr->type); + aof->type = type_make_pointer(context, expr->type); if (expr->kind == Ast_Kind_Memres && !((AstMemRes *) expr)->threadlocal) { aof->flags |= Ast_Flag_Comptime; @@ -1998,13 +2543,13 @@ CheckStatus check_address_of(AstAddressOf** paof) { return Check_Success; } -CheckStatus check_dereference(AstDereference* deref) { +CHECK_FUNC(dereference, AstDereference* deref) { CHECK(expression, &deref->expr); if (!type_is_pointer(deref->expr->type)) ERROR(deref->token->pos, "Cannot dereference non-pointer value."); - if (deref->expr->type == basic_type_rawptr.basic_type) + if (deref->expr->type == context->types.basic[Basic_Kind_Rawptr]) ERROR(deref->token->pos, "Cannot dereference 'rawptr'. 
Cast to another pointer type first."); deref->type = deref->expr->type->Pointer.elem; @@ -2012,7 +2557,7 @@ CheckStatus check_dereference(AstDereference* deref) { return Check_Success; } -CheckStatus check_subscript(AstSubscript** psub) { +CHECK_FUNC(subscript, AstSubscript** psub) { AstSubscript* sub = *psub; CHECK(expression, &sub->addr); CHECK(expression, &sub->expr); @@ -2025,9 +2570,9 @@ CheckStatus check_subscript(AstSubscript** psub) { && !(type_is_array_accessible(sub->addr->type))) { // AstSubscript is the same as AstBinaryOp for the first sizeof(AstBinaryOp) bytes AstBinaryOp* binop = (AstBinaryOp *) sub; - AstCall *implicit_call = binaryop_try_operator_overload(binop, NULL); + AstCall *implicit_call = binaryop_try_operator_overload(context, binop, NULL); - if (implicit_call == (AstCall *) &node_that_signals_a_yield) + if (implicit_call == (AstCall *) &context->node_that_signals_a_yield) YIELD(sub->token->pos, "Trying to resolve operator overload."); if (implicit_call != NULL) { @@ -2041,16 +2586,16 @@ CheckStatus check_subscript(AstSubscript** psub) { } if (!type_is_array_accessible(sub->addr->type)) { - report_bad_binaryop((AstBinaryOp *) sub); + report_bad_binaryop(context, (AstBinaryOp *) sub); return Check_Error; } if (sub->addr->type->kind == Type_Kind_Slice || sub->addr->type->kind == Type_Kind_DynArray || sub->addr->type->kind == Type_Kind_VarArgs) { // If we are accessing on a slice or a dynamic array, implicitly add a field access for the data member StructMember smem; - type_lookup_member(sub->addr->type, "data", &smem); + type_lookup_member(context, sub->addr->type, "data", &smem); - AstFieldAccess* fa = make_field_access(context.ast_alloc, sub->addr, "data"); + AstFieldAccess* fa = make_field_access(context, sub->addr, "data"); fa->type = smem.type; fa->offset = smem.offset; fa->idx = smem.idx; @@ -2058,31 +2603,31 @@ CheckStatus check_subscript(AstSubscript** psub) { sub->addr = (AstTyped *) fa; } - if (types_are_compatible(sub->expr->type, 
builtin_range_type_type)) { + if (types_are_compatible(context, sub->expr->type, context->builtins.range_type_type)) { Type *of = type_get_contained_type(sub->addr->type); if (of == NULL) { // FIXME: Slice creation should be allowed for slice types and dynamic array types, like it // is below, but this code doesn't look at that. - report_bad_binaryop((AstBinaryOp *) sub); + report_bad_binaryop(context, (AstBinaryOp *) sub); ERROR(sub->token->pos, "Invalid type for left of slice creation."); } sub->kind = Ast_Kind_Slice; - sub->type = type_make_slice(context.ast_alloc, of); + sub->type = type_make_slice(context, of); sub->elem_size = type_size_of(of); return Check_Success; } - resolve_expression_type(sub->expr); + resolve_expression_type(context, sub->expr); if (!type_is_small_integer(sub->expr->type)) { - report_bad_binaryop((AstBinaryOp *) sub); - ERROR_(sub->token->pos, "Expected small integer type for index, got '%s'.", node_get_type_name(sub->expr)); + report_bad_binaryop(context, (AstBinaryOp *) sub); + ERROR_(sub->token->pos, "Expected small integer type for index, got '%s'.", node_get_type_name(context, sub->expr)); } sub->type = type_get_contained_type(sub->addr->type); if (sub->type == NULL) { - report_bad_binaryop((AstBinaryOp *) sub); + report_bad_binaryop(context, (AstBinaryOp *) sub); ERROR(sub->token->pos, "Invalid type for left of array access."); } @@ -2090,23 +2635,54 @@ CheckStatus check_subscript(AstSubscript** psub) { return Check_Success; } -CheckStatus check_field_access(AstFieldAccess** pfield) { +CHECK_FUNC(field_access, AstFieldAccess** pfield) { AstFieldAccess* field = *pfield; if (field->flags & Ast_Flag_Has_Been_Checked) return Check_Success; - CHECK(expression, &field->expr); - if (field->expr->type == NULL) { - YIELD(field->token->pos, "Trying to resolve type of source expression."); + if (field->token != NULL && field->field == NULL) { + token_toggle_end(field->token); + field->field = bh_strdup(context->ast_alloc, 
field->token->text); + token_toggle_end(field->token); } - if (field->expr->kind == Ast_Kind_Package) { - return Check_Return_To_Symres; + // + // Here we "pre-check" the expression to resolve any symbols, but maybe + // leave unknown types hanging around. This does not matter for any of + // the cases that exist below. If we are not looking at one of the cases + // below, we will "properly" check the expression type again. Not the + // fastest way of going about this, but I am aiming for correctness + // at the moment. + // + // - brendanfh 03 January 2025 + // + check_expression(context, &field->expr); + AstTyped *expr; + { + expr = (AstTyped *) strip_aliases((AstNode *) field->expr); + while (expr->kind == Ast_Kind_Type_Alias) { + expr = (AstTyped *)((AstTypeAlias *) expr)->to; + } + + if (expr->kind == Ast_Kind_Struct_Type || + expr->kind == Ast_Kind_Poly_Struct_Type || + expr->kind == Ast_Kind_Enum_Type || + expr->kind == Ast_Kind_Type_Raw_Alias || + expr->kind == Ast_Kind_Union_Type || + expr->kind == Ast_Kind_Poly_Union_Type || + expr->kind == Ast_Kind_Slice_Type || + expr->kind == Ast_Kind_DynArr_Type || + expr->kind == Ast_Kind_Distinct_Type || + expr->kind == Ast_Kind_Interface || + expr->kind == Ast_Kind_Compiler_Extension || + expr->kind == Ast_Kind_Package) { + goto try_resolve_from_node; + } } - if (field->token != NULL && field->field == NULL) { - token_toggle_end(field->token); - field->field = bh_strdup(context.ast_alloc, field->token->text); - token_toggle_end(field->token); + CHECK(expression, &field->expr); + + if (field->expr->type == NULL) { + YIELD(field->token->pos, "Trying to resolve type of source expression."); } if (!type_is_structlike(field->expr->type)) { @@ -2118,12 +2694,12 @@ CheckStatus check_field_access(AstFieldAccess** pfield) { } StructMember smem; - if (!type_lookup_member(field->expr->type, field->field, &smem)) { + if (!type_lookup_member(context, field->expr->type, field->field, &smem)) { if (field->expr->type->kind == 
Type_Kind_Array) { u32 field_count = field->expr->type->Array.count; if (!strcmp(field->field, "count")) { - *pfield = (AstFieldAccess *) make_int_literal(context.ast_alloc, field_count); + *pfield = (AstFieldAccess *) make_int_literal(context, field_count); return Check_Success; } @@ -2141,7 +2717,7 @@ CheckStatus check_field_access(AstFieldAccess** pfield) { else if (!strcmp(accessor, "w") || !strcmp(accessor, "a")) valid = field_count >= 4, index = 3; if (valid) { - *pfield = make_field_access(context.ast_alloc, field->expr, field->field); + *pfield = make_field_access(context, field->expr, field->field); (*pfield)->type = field->expr->type->Array.elem; (*pfield)->offset = index * type_size_of(field->expr->type->Array.elem); (*pfield)->idx = index; @@ -2159,16 +2735,16 @@ CheckStatus check_field_access(AstFieldAccess** pfield) { // HACK make a function for this. if (!field->type_node) { - AstPolyCallType* pctype = onyx_ast_node_new(context.ast_alloc, sizeof(AstPolyCallType), Ast_Kind_Poly_Call_Type); + AstPolyCallType* pctype = onyx_ast_node_new(context->ast_alloc, sizeof(AstPolyCallType), Ast_Kind_Poly_Call_Type); pctype->token = field->token; - pctype->callee = builtin_optional_type; - bh_arr_new(context.ast_alloc, pctype->params, 1); + pctype->callee = context->builtins.optional_type; + bh_arr_new(context->ast_alloc, pctype->params, 1); bh_arr_push(pctype->params, (AstNode *) uv->type->ast_type); field->type_node = (AstType *) pctype; } - field->type = type_build_from_ast(context.ast_alloc, field->type_node); + field->type = type_build_from_ast(context, field->type_node); if (!field->type) YIELD(field->token->pos, "Waiting for field access type to be constructed."); field->flags |= Ast_Flag_Has_Been_Checked; @@ -2189,9 +2765,9 @@ CheckStatus check_field_access(AstFieldAccess** pfield) { // thing will have to be reconsidered. 
AstTyped **dest = &field->expr; do { - assert(type_lookup_member_by_idx((*dest)->type, containing_member.use_through_pointer_index, &containing_member)); + assert(type_lookup_member_by_idx(context, (*dest)->type, containing_member.use_through_pointer_index, &containing_member)); - AstFieldAccess *new_access = onyx_ast_node_new(context.ast_alloc, sizeof(AstFieldAccess), Ast_Kind_Field_Access); + AstFieldAccess *new_access = onyx_ast_node_new(context->ast_alloc, sizeof(AstFieldAccess), Ast_Kind_Field_Access); new_access->token = field->token; new_access->offset = containing_member.offset; new_access->idx = containing_member.idx; @@ -2210,7 +2786,7 @@ CheckStatus check_field_access(AstFieldAccess** pfield) { field->type = smem.type; field->flags |= Ast_Flag_Has_Been_Checked; - track_resolution_for_symbol_info((AstNode *) field, (AstNode *) smem.member_node); + track_resolution_for_symbol_info(context, (AstNode *) field, (AstNode *) smem.member_node); return Check_Success; // Field access is the general term for "a.b". In the early stages of the language, @@ -2222,42 +2798,81 @@ CheckStatus check_field_access(AstFieldAccess** pfield) { // there might be an `#inject` that will add a symbol later. When a cycle is // detected however, it uses the levenschtein distance to find the closest symbol // to the attempted lookup. 
- AstNode *n; - AstType *type_node; - try_resolve_from_type: - n = try_symbol_raw_resolve_from_type(field->expr->type, field->field); + AstNode *n = NULL; + AstType *type_node = NULL; + try_resolve_from_type: type_node = field->expr->type->ast_type; - if (!n) n = try_symbol_raw_resolve_from_node((AstNode *) field->expr, field->field); - if (!n) n = try_symbol_raw_resolve_from_node((AstNode *) type_node, field->field); + n = try_symbol_raw_resolve_from_type(context, field->expr->type, field->field); + if (n) goto resolved; + + try_resolve_from_node: + type_node = NULL; + n = try_symbol_raw_resolve_from_node(context, (AstNode *) field->expr, field->field); + + resolved: if (n) { - track_resolution_for_symbol_info((AstNode *) *pfield, n); + track_resolution_for_symbol_info(context, (AstNode *) *pfield, n); *pfield = (AstFieldAccess *) n; + CHECK(expression, (AstTyped **) pfield); return Check_Success; } // // This has to be cycle_almost_detected, not cycle_detected, because interface - // constraints relay on Check_Error being returned, not Check_Yield_Macro. For + // constraints relay on Check_Error being returned, not Check_Yield. For // this reason, I have to produce an error at the last minute, BEFORE the loop // enters a cycle detected state, when there is no point of return. - if (!context.cycle_almost_detected && !context.cycle_detected) { + if (!context->cycle_almost_detected && !context->cycle_detected) { // Skipping the slightly expensive symbol lookup // below by not using YIELD_ERROR. 
- return Check_Yield_Macro; + return Check_Yield; + } + + if (expr->kind == Ast_Kind_Package) { + if (context->cycle_detected) { + char *closest = find_closest_symbol_in_node(context, (AstNode *) expr, field->field); + + AstPackage *package = (AstPackage *) strip_aliases((AstNode *) field->expr); + char *package_name = "unknown (compiler bug)"; + if (package && package->package) { + package_name = package->package->name; + } + + if (closest) { + ERROR_(field->token->pos, "'%b' was not found in package '%s'. Did you mean '%s'?", + field->token->text, + field->token->length, + package_name, + closest); + } else { + ERROR_(field->token->pos, "'%b' was not found in package '%s'. Perhaps it is defined in a file that was not loaded?", + field->token->text, + field->token->length, + package_name); + } + } + + return Check_Yield; + } + + if (context->cycle_detected || context->cycle_almost_detected >= 2) { + ERROR_(field->token->pos, "'%b' does not exist here. This is a bad error message.", + field->token->text, + field->token->length); } - char* type_name = (char *) node_get_type_name(field->expr); - if (field->expr->type == &basic_types[Basic_Kind_Type_Index]) { - Type *actual_type = type_build_from_ast(context.ast_alloc, (AstType *) field->expr); - type_name = (char *) type_get_name(actual_type); + char* type_name = (char *) node_get_type_name(context, field->expr); + if (field->expr->type == context->types.basic[Basic_Kind_Type_Index]) { + Type *actual_type = type_build_from_ast(context, (AstType *) field->expr); + type_name = (char *) type_get_name(context, actual_type); } if (!type_node) goto closest_not_found; - char* closest = find_closest_symbol_in_node((AstNode *) type_node, field->field); + char* closest = find_closest_symbol_in_node(context, (AstNode *) type_node, field->field); if (closest) { ERROR_(field->token->pos, "Field '%s' does not exist on '%s'. 
Did you mean '%s'?", field->field, type_name, closest); } @@ -2266,13 +2881,56 @@ CheckStatus check_field_access(AstFieldAccess** pfield) { ERROR_(field->token->pos, "Field '%s' does not exist on '%s'.", field->field, type_name); } -CheckStatus check_method_call(AstBinaryOp** pmcall) { +// CLEANUP: This is an experimental feature and might be removed in the future. +// I noticed a common pattern when writing in Onyx is something that looks like this: +// +// foo.member_function(&foo, ...) +// +// I decided it would be worth adding a bit of syntactic sugar for such as call. I +// decided to use the '->' operator for this purpose. The snippet below is the exact +// same as the snippet above (after the nodes have been processed by the function below) +// +// foo->member_function(...) +CHECK_FUNC(method_call, AstBinaryOp** pmcall) { AstBinaryOp* mcall = *pmcall; + CHECK(expression, &mcall->left); + + if ((mcall->flags & Ast_Flag_Has_Been_Symres) == 0) { + if (mcall->left == NULL) return Check_Error; + + if (mcall->right->kind != Ast_Kind_Call) { + ERROR(mcall->token->pos, "'->' expected procedure call on right side."); + } + + // + // This is a small hack that makes chaining method calls + // work. Because check_method_call replaces the method call + // and marks it as completed, if there are multiple references + // to the same method call node, one of them will be left dangling. + // To remedy this, an alias node an be placed around the method call + // so that when check_method_call replaces it, it is replaced + // within the alias, and all references are updated. 
+ if (mcall->left->kind == Ast_Kind_Method_Call) { + AstAlias *left_alias = onyx_ast_node_new(context->ast_alloc, sizeof(AstAlias), Ast_Kind_Alias); + left_alias->token = mcall->left->token; + left_alias->alias = mcall->left; + + mcall->left = (AstTyped *) left_alias; + } + + AstFieldAccess* implicit_field_access = make_field_access(context, mcall->left, NULL); + implicit_field_access->token = ((AstCall *) mcall->right)->callee->token; + ((AstCall *) mcall->right)->callee = (AstTyped *) implicit_field_access; + + mcall->flags |= Ast_Flag_Has_Been_Symres; + } + // :Idempotency if ((mcall->flags & Ast_Flag_Has_Been_Checked) == 0) { - CHECK(expression, &mcall->left); - if (mcall->left->type == NULL) YIELD(mcall->token->pos, "Trying to resolve type of left hand side."); + if (mcall->left->type == NULL) { + YIELD(mcall->token->pos, "Trying to resolve type of left hand side."); + } AstTyped* implicit_argument = mcall->left; AstCall* call_node = (AstCall *) mcall->right; @@ -2284,12 +2942,12 @@ CheckStatus check_method_call(AstBinaryOp** pmcall) { // This could be weird to think about semantically so some testing with real code // would be good. 
- brendanfh 2020/02/05 if (implicit_argument->type->kind != Type_Kind_Pointer) { - AstAddressOf *address_of = make_address_of(context.ast_alloc, implicit_argument); + AstAddressOf *address_of = make_address_of(context, implicit_argument); address_of->can_be_removed = 1; implicit_argument = (AstTyped *) address_of; } - AstArgument *new_arg = make_argument(context.ast_alloc, implicit_argument); + AstArgument *new_arg = make_argument(context, implicit_argument); new_arg->used_as_lval_of_method_call = 1; bh_arr_insertn(call_node->args.values, 0, 1); @@ -2299,70 +2957,117 @@ CheckStatus check_method_call(AstBinaryOp** pmcall) { mcall->flags |= Ast_Flag_Has_Been_Checked; } - CHECK(call, (AstCall **) &mcall->right); - + // + // This can happen now that method calls which expand via a macro are not replaced and + // instead are passed all the way to the code generator. + // if (mcall->right->kind != Ast_Kind_Call) { *pmcall = (AstBinaryOp *) mcall->right; + // CHECK(expression, (AstCall **) pmcall); + return Check_Yield; } else { + CHECK(call, (AstCall **) &mcall->right); mcall->type = mcall->right->type; } return Check_Success; } -CheckStatus check_size_of(AstSizeOf* so) { +CHECK_FUNC(size_of, AstSizeOf* so) { + CHECK(type, &so->type_node); CHECK(type, &so->so_ast_type); - so->so_type = type_build_from_ast(context.ast_alloc, so->so_ast_type); + so->so_type = type_build_from_ast(context, so->so_ast_type); if (so->so_type == NULL) YIELD(so->token->pos, "Trying to resolve type to take the size of."); + // HACK + if ( + (so->so_type->kind == Type_Kind_Struct && so->so_type->Struct.status == SPS_Start) || + (so->so_type->kind == Type_Kind_Union && so->so_type->Union.status == SPS_Start) + ) { + YIELD(so->token->pos, "Waiting until type has a size."); + } + so->size = type_size_of(so->so_type); so->flags |= Ast_Flag_Comptime; - return Check_Success; } -CheckStatus check_align_of(AstAlignOf* ao) { +CHECK_FUNC(align_of, AstAlignOf* ao) { + CHECK(type, &ao->type_node); CHECK(type, 
&ao->ao_ast_type); - ao->ao_type = type_build_from_ast(context.ast_alloc, ao->ao_ast_type); + ao->ao_type = type_build_from_ast(context, ao->ao_ast_type); if (ao->ao_type == NULL) YIELD(ao->token->pos, "Trying to resolve type to take the alignment of."); + // HACK + if ( + (ao->ao_type->kind == Type_Kind_Struct && ao->ao_type->Struct.status == SPS_Start) || + (ao->ao_type->kind == Type_Kind_Union && ao->ao_type->Union.status == SPS_Start) + ) { + YIELD(ao->token->pos, "Waiting until type has an alignment."); + } + ao->alignment = type_alignment_of(ao->ao_type); ao->flags |= Ast_Flag_Comptime; return Check_Success; } -CheckStatus check_expression(AstTyped** pexpr) { +CHECK_FUNC(expression, AstTyped** pexpr) { + if ((*pexpr)->kind == Ast_Kind_Symbol) { + CHECK(symbol, (AstNode **) pexpr); + + // HACK? + // I don't know how I never ran into this problem before, + // but when a symbol is resolved, there is never a "double + // check" that its type node is symbol resolved as well. + // This only proved to be an issue when using constraint + // sentinels, so I only added that case here. This should + // maybe be considered in the future because I think this + // lack of double checking could be causing other bugs. + if ((*pexpr)->kind == Ast_Kind_Constraint_Sentinel) { + CHECK(type, &(*pexpr)->type_node); + } + + CHECK(expression, (AstTyped **) pexpr); + return Check_Success; + } + AstTyped* expr = *pexpr; - if (expr->kind > Ast_Kind_Type_Start && expr->kind < Ast_Kind_Type_End) { + + if (node_is_type((AstNode *) expr)) { // This is to ensure that the type will exist when compiling. For example, a poly-call type // would have to wait for the entity to pass through, which the code generation does not know // about. CHECK(type, (AstType **) pexpr); - expr = *pexpr; + expr = (AstTyped *) strip_aliases((AstNode *) *pexpr); // Don't try to construct a polystruct ahead of time because you can't. 
if (expr->kind != Ast_Kind_Poly_Struct_Type && expr->kind != Ast_Kind_Poly_Union_Type) { - if (type_build_from_ast(context.ast_alloc, (AstType*) expr) == NULL) { + if (type_build_from_ast(context, (AstType*) expr) == NULL) { YIELD(expr->token->pos, "Trying to construct type."); } } else { - type_build_from_ast(context.ast_alloc, (AstType*) expr); + type_build_from_ast(context, (AstType*) expr); } - expr->type = &basic_types[Basic_Kind_Type_Index]; + expr->type = context->types.basic[Basic_Kind_Type_Index]; return Check_Success; } if (expr->kind == Ast_Kind_Polymorphic_Proc) { // polymorphic procedures do not need to be checked. Their concrete instantiations // will be checked when they are created. + + if (((AstFunction *) expr)->captures) { + ((AstFunction *) expr)->scope_to_lookup_captured_values = context->checker.current_scope; + } + return Check_Success; } @@ -2371,27 +3076,56 @@ CheckStatus check_expression(AstTyped** pexpr) { } if (expr->kind == Ast_Kind_Directive_Init) { - ERROR(expr->token->pos, "#init declarations are not allowed in normal expressions, only in #after clauses."); + if (!mode_enabled(context, CM_Allow_Init_Expressions)) { + ERROR(expr->token->pos, "#init declarations are not allowed in normal expressions, only in #after clauses."); + } + + return Check_Success; + } + + // We have to set the type_node of string literals outside of the parser, + // because the actual nodes for builtins could be NULL if we are parsing + // the builtin.onyx file. Setting them here resolves that. Maybe we could + // make the string-like types internal to the compiler so we wouldn't need + // this hack? 
+ // + // - brendanfh 01 January 2025 + // + if (expr->kind == Ast_Kind_StrLit) { + AstStrLit* str = (AstStrLit *) expr; + if (str->is_cstr) { + CHECK(type, &context->builtins.cstring_type); + str->type_node = context->builtins.cstring_type; + + } else { + CHECK(type, &context->builtins.string_type); + str->type_node = context->builtins.string_type; + } } - fill_in_type(expr); - current_checking_level = EXPRESSION_LEVEL; + fill_in_type(context, expr); + context->checker.current_checking_level = EXPRESSION_LEVEL; CheckStatus retval = Check_Success; switch (expr->kind) { - case Ast_Kind_Binary_Op: retval = check_binaryop((AstBinaryOp **) pexpr); break; - case Ast_Kind_Unary_Op: retval = check_unaryop((AstUnaryOp **) pexpr); break; + case Ast_Kind_Binary_Op: retval = check_binaryop(context, (AstBinaryOp **) pexpr); break; + case Ast_Kind_Unary_Op: retval = check_unaryop(context, (AstUnaryOp **) pexpr); break; + case Ast_Kind_Pipe: retval = check_pipe(context, (AstBinaryOp **) pexpr); break; case Ast_Kind_Intrinsic_Call: - case Ast_Kind_Call: retval = check_call((AstCall **) pexpr); break; - case Ast_Kind_Argument: retval = check_argument((AstArgument **) pexpr); break; - case Ast_Kind_Block: retval = check_block((AstBlock *) expr); break; + case Ast_Kind_Call: retval = check_call(context, (AstCall **) pexpr); break; + case Ast_Kind_Argument: retval = check_argument(context, (AstArgument **) pexpr); break; + case Ast_Kind_Block: retval = check_block(context, (AstBlock *) expr); break; - case Ast_Kind_Symbol: - YIELD_(expr->token->pos, "Waiting to resolve symbol, '%b'.", expr->token->text, expr->token->length); - break; + case Ast_Kind_Symbol: assert(0); break; case Ast_Kind_Param: + if (expr->flags & Ast_Flag_Param_Symbol_Dirty) { + assert(expr->token->type == Token_Type_Symbol); + *pexpr = (AstTyped *) make_symbol(context, expr->token); + CHECK(expression, pexpr); + } + if (expr->type == NULL) { YIELD(expr->token->pos, "Waiting on parameter type."); } @@ -2399,36 
+3133,45 @@ CheckStatus check_expression(AstTyped** pexpr) { case Ast_Kind_Local: break; - case Ast_Kind_Address_Of: retval = check_address_of((AstAddressOf **) pexpr); break; - case Ast_Kind_Dereference: retval = check_dereference((AstDereference *) expr); break; + case Ast_Kind_Address_Of: retval = check_address_of(context, (AstAddressOf **) pexpr); break; + case Ast_Kind_Dereference: retval = check_dereference(context, (AstDereference *) expr); break; case Ast_Kind_Slice: - case Ast_Kind_Subscript: retval = check_subscript((AstSubscript **) pexpr); break; - case Ast_Kind_Field_Access: retval = check_field_access((AstFieldAccess **) pexpr); break; - case Ast_Kind_Method_Call: retval = check_method_call((AstBinaryOp **) pexpr); break; - case Ast_Kind_Size_Of: retval = check_size_of((AstSizeOf *) expr); break; - case Ast_Kind_Align_Of: retval = check_align_of((AstAlignOf *) expr); break; - case Ast_Kind_Range_Literal: retval = check_range_literal((AstRangeLiteral **) pexpr); break; + case Ast_Kind_Subscript: retval = check_subscript(context, (AstSubscript **) pexpr); break; + case Ast_Kind_Field_Access: retval = check_field_access(context, (AstFieldAccess **) pexpr); break; + case Ast_Kind_Method_Call: retval = check_method_call(context, (AstBinaryOp **) pexpr); break; + case Ast_Kind_Size_Of: retval = check_size_of(context, (AstSizeOf *) expr); break; + case Ast_Kind_Align_Of: retval = check_align_of(context, (AstAlignOf *) expr); break; + case Ast_Kind_Range_Literal: retval = check_range_literal(context, (AstRangeLiteral **) pexpr); break; case Ast_Kind_Global: if (expr->type == NULL) { - onyx_report_error(expr->token->pos, Error_Critical, "Global with unknown type."); + ONYX_ERROR(expr->token->pos, Error_Critical, "Global with unknown type."); retval = Check_Error; } break; case Ast_Kind_NumLit: - assert(expr->type != NULL); + if (!expr->type) { + return Check_Yield; + } break; case Ast_Kind_Struct_Literal: - retval = check_struct_literal((AstStructLiteral *) 
expr); + retval = check_struct_literal(context, (AstStructLiteral *) expr); break; case Ast_Kind_Array_Literal: - retval = check_array_literal((AstArrayLiteral *) expr); + retval = check_array_literal(context, (AstArrayLiteral *) expr); break; case Ast_Kind_Function: + // We do not need to check the type, because fill_in_type should already have done that for us. + // CHECK(type, &(*expr)->type_node); + + if (((AstFunction *) expr)->captures) { + ((AstFunction *) expr)->scope_to_lookup_captured_values = context->checker.current_scope; + } + if (expr->type == NULL) YIELD(expr->token->pos, "Waiting for function type to be resolved."); @@ -2439,8 +3182,10 @@ CheckStatus check_expression(AstTyped** pexpr) { break; case Ast_Kind_Directive_Defined: - *pexpr = (AstTyped *) make_bool_literal(context.ast_alloc, ((AstDirectiveDefined *) expr)->is_defined); - fill_in_type(*pexpr); + CHECK(directive_defined, (AstDirectiveDefined **) pexpr); + + *pexpr = (AstTyped *) make_bool_literal(context, ((AstDirectiveDefined *) expr)->is_defined); + fill_in_type(context, *pexpr); break; case Ast_Kind_Compound: @@ -2449,35 +3194,54 @@ CheckStatus check_expression(AstTyped** pexpr) { case Ast_Kind_Call_Site: // NOTE: This has to be set here because if it were to be set in the parser, - // builtin_callsite_type wouldn't be known when parsing the builtin.onyx file. - expr->type_node = builtin_callsite_type; + // context->builtins.callsite_type wouldn't be known when parsing the builtin.onyx file. + expr->type_node = context->builtins.callsite_type; break; case Ast_Kind_If_Expression: CHECK(if_expression, (AstIfExpression *) expr); break; - case Ast_Kind_Alias: - CHECK(expression, &((AstAlias *) expr)->alias); + case Ast_Kind_Alias: { + AstAlias *alias = (AstAlias *) expr; + + // + // If an alias has an entity, do not force checking on it here. + // Wait for the alias to pass type-checking in the normal way. 
+ // Otherwise, there can be weird cases where symbols resolve + // incorrectly because they are being checked in the wrong scope. + // + if (alias->entity && context->checker.current_entity != alias->entity) { + if (alias->entity->state < Entity_State_Code_Gen) { + YIELD(expr->token->pos, "Waiting for alias to pass type checking."); + } + } else { + CHECK_INVISIBLE(expression, alias, &alias->alias); + } + expr->flags |= (((AstAlias *) expr)->alias->flags & Ast_Flag_Comptime); expr->type = ((AstAlias *) expr)->alias->type; break; + } case Ast_Kind_Directive_Insert: - retval = check_insert_directive((AstDirectiveInsert **) pexpr, 1); + retval = check_insert_directive(context, (AstDirectiveInsert **) pexpr, 1); break; case Ast_Kind_Code_Block: expr->flags |= Ast_Flag_Comptime; - fill_in_type(expr); + fill_in_type(context, expr); break; - case Ast_Kind_Do_Block: - retval = check_do_block((AstDoBlock **) pexpr); + case Ast_Kind_Do_Block: { + Scope* old_current_scope = context->checker.current_scope; + retval = check_do_block(context, (AstDoBlock **) pexpr); + context->checker.current_scope = old_current_scope; break; + } case Ast_Kind_Memres: - if (expr->type == NULL) YIELD(expr->token->pos, "Waiting to know globals type."); + if (expr->type == NULL || expr->type->kind == Type_Kind_Invalid) YIELD(expr->token->pos, "Waiting to know globals type."); break; case Ast_Kind_Directive_First: @@ -2489,12 +3253,13 @@ CheckStatus check_expression(AstTyped** pexpr) { break; case Ast_Kind_Directive_Export_Name: - retval = check_directive_export_name((AstDirectiveExportName *) expr); + retval = check_directive_export_name(context, (AstDirectiveExportName *) expr); break; - case Ast_Kind_StrLit: + case Ast_Kind_StrLit: { if (expr->type == NULL) YIELD(expr->token->pos, "Waiting to know string literals type. 
This is a weird one...") ; break; + } case Ast_Kind_Directive_This_Package: YIELD(expr->token->pos, "Waiting to resolve #this_package."); @@ -2511,7 +3276,7 @@ CheckStatus check_expression(AstTyped** pexpr) { cl->captured_value->flags |= Ast_Flag_Address_Taken; } - expr->type = type_make_pointer(context.ast_alloc, cl->captured_value->type); + expr->type = type_make_pointer(context, cl->captured_value->type); } else { expr->type = cl->captured_value->type; @@ -2527,21 +3292,30 @@ CheckStatus check_expression(AstTyped** pexpr) { break; } + case Ast_Kind_Package: + CHECK(package, (AstPackage *) expr); + break; + + case Ast_Kind_Procedural_Expansion: + CHECK(proc_expansion, (AstProceduralExpansion **) pexpr, PMEK_Expression); + break; + case Ast_Kind_Switch_Case: break; case Ast_Kind_File_Contents: break; case Ast_Kind_Overloaded_Function: break; case Ast_Kind_Enum_Value: break; case Ast_Kind_Polymorphic_Proc: break; - case Ast_Kind_Package: break; case Ast_Kind_Error: break; case Ast_Kind_Unary_Field_Access: break; case Ast_Kind_Foreign_Block: break; case Ast_Kind_Zero_Value: break; case Ast_Kind_Interface: break; + case Ast_Kind_Compiler_Extension: break; + case Ast_Kind_Procedural_Macro: break; default: retval = Check_Error; - onyx_report_error(expr->token->pos, Error_Critical, "UNEXPECTED INTERNAL COMPILER ERROR"); + ONYX_ERROR(expr->token->pos, Error_Critical, "UNEXPECTED INTERNAL COMPILER ERROR"); DEBUG_HERE; break; } @@ -2549,8 +3323,13 @@ CheckStatus check_expression(AstTyped** pexpr) { return retval; } -CheckStatus check_global(AstGlobal* global) { - fill_in_type((AstTyped *) global); +CHECK_FUNC(global_header, AstGlobal *global) { + CHECK(type, &global->type_node); + return Check_Success; +} + +CHECK_FUNC(global, AstGlobal* global) { + fill_in_type(context, (AstTyped *) global); if (global->type == NULL) { YIELD(global->token->pos, "Trying to resolve type for global."); @@ -2559,7 +3338,7 @@ CheckStatus check_global(AstGlobal* global) { return Check_Success; 
} -CheckStatus check_insert_directive(AstDirectiveInsert** pinsert, b32 expected_expression) { +CHECK_FUNC(insert_directive, AstDirectiveInsert** pinsert, b32 expected_expression) { AstDirectiveInsert* insert = *pinsert; if (insert->flags & Ast_Flag_Has_Been_Checked) return Check_Success; @@ -2577,11 +3356,11 @@ CheckStatus check_insert_directive(AstDirectiveInsert** pinsert, b32 expected_ex CHECK(expression, pexpr); } - Type* code_type = type_build_from_ast(context.ast_alloc, builtin_code_type); + Type* code_type = type_build_from_ast(context, context->builtins.code_type); TYPE_CHECK(&insert->code_expr, code_type) { ERROR_(insert->token->pos, "#unquote expected a value of type 'Code', got '%s'.", - type_get_name(insert->code_expr->type)); + type_get_name(context, insert->code_expr->type)); } AstCodeBlock* code_block = (AstCodeBlock *) insert->code_expr; @@ -2592,21 +3371,21 @@ CheckStatus check_insert_directive(AstDirectiveInsert** pinsert, b32 expected_ex } if (!code_block->is_expression && expected_expression) { - onyx_report_error(insert->token->pos, Error_Critical, "Expected a code block that is an expression here, but got a code block that is statements."); - onyx_report_error(code_block->token->pos, Error_Critical, "Try changing { expr } into ( expr ) here."); + ONYX_ERROR(insert->token->pos, Error_Critical, "Expected a code block that is an expression here, but got a code block that is statements."); + ONYX_ERROR(code_block->token->pos, Error_Critical, "Try changing { expr } into ( expr ) here."); return Check_Error; } u32 bound_symbol_count = bh_arr_length(code_block->binding_symbols); u32 bound_expr_count = bh_arr_length(insert->binding_exprs); if (bound_symbol_count > bound_expr_count) { - onyx_report_error(insert->token->pos, Error_Critical, + ONYX_ERROR(insert->token->pos, Error_Critical, "Expected at least %d argument%s to unquote code block, only got %d.", bound_symbol_count, bh_num_plural(bound_symbol_count), bound_expr_count); 
ERROR(code_block->token->pos, "Here is the code block being unquoted."); } - AstNode* cloned_block = ast_clone(context.ast_alloc, code_block->code); + AstNode* cloned_block = ast_clone(context, code_block->code); cloned_block->next = insert->next; if (bound_expr_count > 0) { @@ -2616,20 +3395,20 @@ CheckStatus check_insert_directive(AstDirectiveInsert** pinsert, b32 expected_ex scope = &((AstBlock *) cloned_block)->quoted_block_capture_scope; } else if (bound_symbol_count > 0) { - AstReturn* return_node = onyx_ast_node_new(context.ast_alloc, sizeof(AstReturn), Ast_Kind_Return); + AstReturn* return_node = onyx_ast_node_new(context->ast_alloc, sizeof(AstReturn), Ast_Kind_Return); return_node->token = cloned_block->token; return_node->expr = (AstTyped *) cloned_block; - AstBlock* body_block = onyx_ast_node_new(context.ast_alloc, sizeof(AstBlock), Ast_Kind_Block); + AstBlock* body_block = onyx_ast_node_new(context->ast_alloc, sizeof(AstBlock), Ast_Kind_Block); body_block->token = cloned_block->token; body_block->body = (AstNode *) return_node; body_block->rules = Block_Rule_Code_Block; scope = &((AstBlock *) body_block)->quoted_block_capture_scope; - AstDoBlock* doblock = (AstDoBlock *) onyx_ast_node_new(context.ast_alloc, sizeof(AstDoBlock), Ast_Kind_Do_Block); + AstDoBlock* doblock = (AstDoBlock *) onyx_ast_node_new(context->ast_alloc, sizeof(AstDoBlock), Ast_Kind_Do_Block); doblock->token = cloned_block->token; doblock->block = body_block; - doblock->type = &type_auto_return; + doblock->type = context->types.auto_return; doblock->next = cloned_block->next; cloned_block = (AstNode *) doblock; @@ -2637,10 +3416,10 @@ CheckStatus check_insert_directive(AstDirectiveInsert** pinsert, b32 expected_ex if (bound_symbol_count > 0) { assert(scope); - *scope = scope_create(context.ast_alloc, NULL, code_block->token->pos); + *scope = scope_create(context, NULL, code_block->token->pos); fori (i, 0, bound_symbol_count) { - symbol_introduce(*scope, code_block->binding_symbols[i], 
(AstNode *) insert->binding_exprs[i]); + symbol_introduce(context, *scope, code_block->binding_symbols[i], (AstNode *) insert->binding_exprs[i]); } } } @@ -2649,25 +3428,69 @@ CheckStatus check_insert_directive(AstDirectiveInsert** pinsert, b32 expected_ex insert->flags |= Ast_Flag_Has_Been_Checked; - return Check_Return_To_Symres; + return Check_Yield; +} + +CHECK_FUNC(directive_defined, AstDirectiveDefined** pdefined) { + AstDirectiveDefined* defined = *pdefined; + + b32 has_to_be_resolved = context->cycle_almost_detected >= 1; + + // We disable errors here so if we fail a symbol resolution, we don't generate any errors + // and instead can capture that as "not defined". + onyx_errors_disable(context); + context->checker.resolved_a_symbol = 0; + + CheckStatus ss = check_expression(context, &defined->expr); + if (has_to_be_resolved && ss != Check_Success && !context->checker.resolved_a_symbol) { + // The symbol definitely was not found and there is no chance that it could be found. + defined->is_defined = 0; + + onyx_errors_enable(context); + return Check_Success; + } + + if (ss == Check_Success) { + defined->is_defined = 1; + + onyx_errors_enable(context); + return Check_Success; + } + + onyx_errors_enable(context); + return Check_Yield; } -CheckStatus check_directive_solidify(AstDirectiveSolidify** psolid) { +CHECK_FUNC(directive_solidify, AstDirectiveSolidify** psolid) { AstDirectiveSolidify* solid = *psolid; + CHECK(expression, (AstTyped **) &solid->poly_proc); + + if (solid->poly_proc && solid->poly_proc->kind == Ast_Kind_Directive_Solidify) { + AstFunction* potentially_resolved_proc = (AstFunction *) ((AstDirectiveSolidify *) solid->poly_proc)->resolved_proc; + if (!potentially_resolved_proc) return Check_Yield; + + solid->poly_proc = potentially_resolved_proc; + } + + if (!solid->poly_proc || solid->poly_proc->kind != Ast_Kind_Polymorphic_Proc) { + ERROR(solid->token->pos, "Expected polymorphic procedure in #solidify directive."); + } + 
bh_arr_each(AstPolySolution, sln, solid->known_polyvars) { + // HACK: This assumes that 'ast_type' and 'value' are at the same offset. CHECK(expression, &sln->value); if (node_is_type((AstNode *) sln->value)) { - sln->type = type_build_from_ast(context.ast_alloc, sln->ast_type); + sln->type = type_build_from_ast(context, sln->ast_type); sln->kind = PSK_Type; } else { sln->kind = PSK_Value; } } - solid->resolved_proc = polymorphic_proc_try_solidify(solid->poly_proc, solid->known_polyvars, solid->token); - if (solid->resolved_proc == (AstNode *) &node_that_signals_a_yield) { + solid->resolved_proc = polymorphic_proc_try_solidify(context, solid->poly_proc, solid->known_polyvars, solid->token); + if (solid->resolved_proc == (AstNode *) &context->node_that_signals_a_yield) { solid->resolved_proc = NULL; YIELD(solid->token->pos, "Waiting for partially solidified procedure."); } @@ -2678,20 +3501,20 @@ CheckStatus check_directive_solidify(AstDirectiveSolidify** psolid) { return Check_Success; } -CheckStatus check_remove_directive(AstDirectiveRemove *remove) { - if (!context.checker.inside_for_iterator) { +CHECK_FUNC(remove_directive, AstDirectiveRemove *remove) { + if (!context->checker.inside_for_iterator) { ERROR(remove->token->pos, "#remove is only allowed in the body of a for-loop over an iterator."); } return Check_Success; } -CheckStatus check_directive_first(AstDirectiveFirst *first) { - if (bh_arr_length(context.checker.for_node_stack) == 0) { +CHECK_FUNC(directive_first, AstDirectiveFirst *first) { + if (bh_arr_length(context->checker.for_node_stack) == 0) { ERROR(first->token->pos, "#first is only allowed in the body of a for-loop."); } - first->for_node = bh_arr_last(context.checker.for_node_stack); + first->for_node = bh_arr_last(context->checker.for_node_stack); assert(first->for_node); first->for_node->has_first = 1; @@ -2699,7 +3522,9 @@ CheckStatus check_directive_first(AstDirectiveFirst *first) { return Check_Success; } -CheckStatus 
check_directive_export_name(AstDirectiveExportName *ename) { +CHECK_FUNC(directive_export_name, AstDirectiveExportName *ename) { + CHECK(expression, (AstTyped **) &ename->func); + if (ename->func->kind != Ast_Kind_Function) { ERROR(ename->token->pos, "#export_name can only be used on functions."); } @@ -2713,61 +3538,71 @@ CheckStatus check_directive_export_name(AstDirectiveExportName *ename) { // to make string literals, tokens, exports, etc... if (ename->func->exported_name == NULL) { if (ename->created_export_entity) { - return Check_Yield_Macro; + return Check_Yield; } // In this case, we know the function is not exported. assert(ename->func->is_exported == 0); - char *random_name = bh_alloc_array(context.ast_alloc, char, 16); + char *random_name = bh_alloc_array(context->ast_alloc, char, 16); random_name[15] = 0; fori (i, 0, 15) random_name[i] = (rand() % 26) + 'a'; - OnyxToken *name_token = bh_alloc_item(context.ast_alloc, OnyxToken); + OnyxToken *name_token = bh_alloc_item(context->ast_alloc, OnyxToken); memset(name_token, 0, sizeof(*name_token)); name_token->type = Token_Type_Literal_String; name_token->length = 15; name_token->text = random_name; - AstStrLit* name = bh_alloc_item(context.ast_alloc, AstStrLit); + AstStrLit* name = bh_alloc_item(context->ast_alloc, AstStrLit); memset(name, 0, sizeof(AstStrLit)); name->kind = Ast_Kind_StrLit; name->token = name_token; - name->type_node = builtin_string_type; + name->type_node = context->builtins.string_type; - add_entities_for_node(NULL, (AstNode *) name, NULL, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) name, NULL, NULL); ename->name = name; - AstDirectiveExport *export = onyx_ast_node_new(context.ast_alloc, sizeof(AstDirectiveExport), Ast_Kind_Directive_Export); + AstDirectiveExport *export = onyx_ast_node_new(context->ast_alloc, sizeof(AstDirectiveExport), Ast_Kind_Directive_Export); export->token = ename->token; export->export_name_expr = (AstTyped *) name; export->export = 
(AstTyped *) ename->func; - add_entities_for_node(NULL, (AstNode *) export, NULL, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) export, NULL, NULL); ename->created_export_entity = 1; - return Check_Yield_Macro; + return Check_Yield; } else { - AstStrLit* name = bh_alloc_item(context.ast_alloc, AstStrLit); + AstStrLit* name = bh_alloc_item(context->ast_alloc, AstStrLit); memset(name, 0, sizeof(AstStrLit)); name->kind = Ast_Kind_StrLit; name->token = ename->func->exported_name; - name->type_node = builtin_string_type; + name->type_node = context->builtins.string_type; - add_entities_for_node(NULL, (AstNode *) name, NULL, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) name, NULL, NULL); ename->name = name; } return Check_Success; } -CheckStatus check_capture_block(AstCaptureBlock *block) { +CHECK_FUNC(capture_block, AstCaptureBlock *block, Scope *captured_scope) { // // Reserve 8 bytes at the beginning of the closure block for the size of the closure. block->total_size_in_bytes = 8; bh_arr_each(AstCaptureLocal *, capture, block->captures) { + OnyxToken *token = (*capture)->token; + AstTyped *resolved = (AstTyped *) symbol_resolve(context, captured_scope, token); + + if (!resolved) { + // Should this do a yield? In there any case that that would make sense? 
+ ERROR_(token->pos, "'%b' is not found in the enclosing scope.", token->text, token->length); + } + + (*capture)->captured_value = resolved; + CHECK(expression, (AstTyped **) capture); if (!(*capture)->type) YIELD((*capture)->token->pos, "Waiting to resolve captures type."); @@ -2775,55 +3610,70 @@ CheckStatus check_capture_block(AstCaptureBlock *block) { block->total_size_in_bytes += type_size_of((*capture)->type); } + bh_arr_each(AstCaptureLocal *, capture, block->captures) { + symbol_introduce(context, context->checker.current_scope, (*capture)->token, (AstNode *) *capture); + } + return Check_Success; } -CheckStatus check_statement(AstNode** pstmt) { +CHECK_FUNC(statement, AstNode** pstmt) { AstNode* stmt = *pstmt; - current_checking_level = STATEMENT_LEVEL; + context->checker.current_checking_level = STATEMENT_LEVEL; switch (stmt->kind) { - case Ast_Kind_Jump: return Check_Success; - - case Ast_Kind_Return: return check_return((AstReturn *) stmt); - case Ast_Kind_If: return check_if((AstIfWhile *) stmt); - case Ast_Kind_Static_If: return check_if((AstIfWhile *) stmt); - case Ast_Kind_While: return check_while((AstIfWhile *) stmt); - case Ast_Kind_For: return check_for((AstFor *) stmt); - case Ast_Kind_Switch: return check_switch((AstSwitch *) stmt); - case Ast_Kind_Block: return check_block((AstBlock *) stmt); - case Ast_Kind_Defer: return check_statement(&((AstDefer *) stmt)->stmt); - case Ast_Kind_Directive_Remove: return check_remove_directive((AstDirectiveRemove *) stmt); - case Ast_Kind_Directive_Insert: return check_insert_directive((AstDirectiveInsert **) pstmt, 0); - case Ast_Kind_Call: { + case Ast_Kind_Jump: return Check_Success; + + case Ast_Kind_Return: return check_return(context, (AstReturn *) stmt); + case Ast_Kind_If: return check_if(context, (AstIfWhile *) stmt); + case Ast_Kind_Static_If: return check_if(context, (AstIfWhile *) stmt); + case Ast_Kind_While: return check_while(context, (AstIfWhile *) stmt); + case Ast_Kind_For: return 
check_for(context, (AstFor *) stmt); + case Ast_Kind_Switch: return check_switch(context, (AstSwitch *) stmt); + case Ast_Kind_Switch_Case: return check_case(context, (AstSwitchCase *) stmt); + case Ast_Kind_Block: return check_block(context, (AstBlock *) stmt); + case Ast_Kind_Defer: return check_statement(context, &((AstDefer *) stmt)->stmt); + case Ast_Kind_Argument: return check_expression(context, (AstTyped **) &((AstArgument *) stmt)->value); + case Ast_Kind_Directive_Remove: return check_remove_directive(context, (AstDirectiveRemove *) stmt); + case Ast_Kind_Directive_Insert: return check_insert_directive(context, (AstDirectiveInsert **) pstmt, 0); + case Ast_Kind_Procedural_Expansion: return check_proc_expansion(context, (AstProceduralExpansion **) pstmt, PMEK_Statement); + + + // + // Call and Binary op nodes need to be treated differently here, because they use the current_checking_level + // to determine if an assignment is legal. + // + case Ast_Kind_Call: CHECK(call, (AstCall **) pstmt); (*pstmt)->flags |= Ast_Flag_Expr_Ignored; - return Check_Success; - } + break; case Ast_Kind_Binary_Op: CHECK(binaryop, (AstBinaryOp **) pstmt); (*pstmt)->flags |= Ast_Flag_Expr_Ignored; return Check_Success; + // NOTE: Local variable declarations used to be removed after the symbol // resolution phase because long long ago, all locals needed to be known // in a block in order to efficiently allocate enough space and registers // for them all. Now with LocalAllocator, this is no longer necessary. // Therefore, locals stay in the tree and need to be passed along. 
case Ast_Kind_Local: { + CHECK(local, (AstLocal **) pstmt); + AstTyped* typed_stmt = (AstTyped *) stmt; - fill_in_type(typed_stmt); + fill_in_type(context, typed_stmt); if (typed_stmt->type_node != NULL && typed_stmt->type == NULL) { CHECK(type, &typed_stmt->type_node); if (!node_is_type((AstNode *) typed_stmt->type_node)) { - if (typed_stmt->type_node->type == &basic_types[Basic_Kind_Type_Index]) { - onyx_report_error(stmt->token->pos, Error_Critical, "The type of this local variable is a runtime-known type, not a compile-time known type."); + if (typed_stmt->type_node->type == context->types.basic[Basic_Kind_Type_Index]) { + ONYX_ERROR(stmt->token->pos, Error_Critical, "The type of this local variable is a runtime-known type, not a compile-time known type."); if (typed_stmt->type_node->kind == Ast_Kind_Param) { - onyx_report_error(stmt->token->pos, Error_Critical, "Try adding a '$' to the parameter name to make this a compile-time known type."); + ONYX_ERROR(stmt->token->pos, Error_Critical, "Try adding a '$' to the parameter name to make this a compile-time known type."); } return Check_Error; @@ -2842,84 +3692,255 @@ CheckStatus check_statement(AstNode** pstmt) { } } - if (typed_stmt->type != NULL && typed_stmt->type == &basic_types[Basic_Kind_Void]) { + if (typed_stmt->type != NULL && typed_stmt->type == context->types.basic[Basic_Kind_Void]) { ERROR(stmt->token->pos, "This local variable has a type of 'void', which is not allowed."); } + // + // Investigate: Why is something return a "node" when it should be returning a type? + // Where is this value coming from? Likely in types.c... 
+ // + if (typed_stmt->type == (Type *) &context->node_that_signals_failure) { + ERROR(stmt->token->pos, "Invalid type for this local variable."); + } + return Check_Success; } + // + // I'm 99.99% sure this node can never appear here, but the code for it + // was there in the past so I am adding an assert false just in case it + // is actually possible through some mechanism I am unaware of. + // + case Ast_Kind_Import: assert(0); break; + default: CHECK(expression, (AstTyped **) pstmt); (*pstmt)->flags |= Ast_Flag_Expr_Ignored; return Check_Success; } + + return Check_Success; } -CheckStatus check_statement_chain(AstNode** start) { - while (*start) { - CHECK(statement, start); - start = &(*start)->next; +CHECK_FUNC(statement_chain, AstNode** walker) { + while (*walker) { + CHECK(statement, walker); + walker = &(*walker)->next; } return Check_Success; } -CheckStatus check_block(AstBlock* block) { +CHECK_FUNC(block, AstBlock* block) { // This used to use statement_chain, but since block optimize which statements need to be rechecked, // it has to be its own thing. 
+ if (block->rules & Block_Rule_New_Scope) { + if (block->scope == NULL) + block->scope = scope_create(context, context->checker.current_scope, block->token->pos); + + scope_enter(context, block->scope); + } + + if (block->binding_scope != NULL) + scope_include(context, context->checker.current_scope, block->binding_scope, block->token->pos); + + if (block->quoted_block_capture_scope != NULL) + scope_include(context, context->checker.current_scope, block->quoted_block_capture_scope, block->token->pos); + + if (!block->body) { + if (block->rules & Block_Rule_New_Scope) { + scope_leave(context); + } + + return Check_Success; + } + + AstNode *last = block->body; AstNode** start = &block->body; fori (i, 0, block->statement_idx) { + last = *start; start = &(*start)->next; } while (*start) { - CheckStatus cs = check_statement(start); + if ((*start)->kind == Ast_Kind_Return) { + block->flags |= Ast_Flag_Block_Returns; + } + + CheckStatus cs = check_statement(context, start); switch (cs) { case Check_Success: + last = *start; start = &(*start)->next; block->statement_idx++; break; - case Check_Return_To_Symres: - block->statement_idx = 0; + case Check_Failed: + case Check_Error: + if (block->macro_generated_from) { + ONYX_ERROR( + block->macro_generated_from->pos, + Error_Critical, + "Error in 'macro' that was generated from here." 
+ ); + } + return cs; default: return cs; } + } + if (last && last->flags & Ast_Flag_Block_Returns) { + block->flags |= Ast_Flag_Block_Returns; } + if (block->rules & Block_Rule_New_Scope) + scope_leave(context); + return Check_Success; } -CheckStatus check_function(AstFunction* func) { +CHECK_FUNC(polyproc, AstFunction* pp) { + pp->flags |= Ast_Flag_Comptime; + pp->parent_scope_of_poly_proc = context->checker.current_scope; + + bh_arr_each(AstPolyParam, p, pp->poly_params) { + if (p->kind != PSK_Value) continue; + + AstParam *param = &pp->params[p->idx]; + if (param->default_value != NULL) { + CHECK(expression, ¶m->default_value); + } + } + + return Check_Complete; +} + +CHECK_FUNC(function, AstFunction* func) { + if (func->kind == Ast_Kind_Polymorphic_Proc) return Check_Complete; + if (func->flags & Ast_Flag_Function_Is_Lambda_Inside_PolyProc) return Check_Complete; + if (func->flags & Ast_Flag_Has_Been_Checked) return Check_Success; - if (func->entity_header && func->entity_header->state < Entity_State_Code_Gen) + if (!func->ready_for_body_to_be_checked || !func->type) { YIELD(func->token->pos, "Waiting for procedure header to pass type-checking"); + } + + bh_arr_clear(context->checker.expected_return_type_stack); + bh_arr_clear(context->checker.named_return_values_stack); + bh_arr_push(context->checker.expected_return_type_stack, &func->type->Function.return_type); + bh_arr_push(context->checker.named_return_values_stack, func->named_return_locals); + + context->checker.inside_for_iterator = 0; + if (context->checker.for_node_stack) bh_arr_clear(context->checker.for_node_stack); + + assert(func->scope); + + scope_enter(context, func->scope); - bh_arr_clear(context.checker.expected_return_type_stack); - bh_arr_push(context.checker.expected_return_type_stack, &func->type->Function.return_type); + if ((func->flags & Ast_Flag_Has_Been_Symres) == 0) { + bh_arr_each(AstParam, param, func->params) { + // CLEANUP: Currently, in order to 'use' parameters, the type must 
be completely + // resolved and built. This is excessive because all that should need to be known + // is the names of the members, since all that happens is implicit field accesses + // are placed in the scope. So instead, there should be a way to just query all the + // member names in the structure, without needing to know their type. This would be + // easy if it were not for 'use' statements in structs. It is made even more complicated + // by this situtation: + // + // Foo :: struct (T: type_expr) { + // use t : T; + // + // something_else := 5 + 6 * 8; + // } + // + // The 'use t : T' member requires completely knowing the type of T, to know which + // members should be brought in. At the moment, that requires completely building the + // type of Foo($T). + if (param->is_used && !param->use_processed) { + fill_in_type(context, (AstTyped *) param->local); + if (!param->local->type) { + YIELD(param->local->token->pos, "Waiting for parameter type to be known."); + } + + if (type_is_struct(param->local->type)) { + Type* st; + if (param->local->type->kind == Type_Kind_Struct) { + st = param->local->type; + } else { + st = param->local->type->Pointer.elem; + } + + if (st->Struct.status != SPS_Uses_Done) return Check_Yield; + + fori (i, 0, shlen(st->Struct.members)) { + StructMember* value = st->Struct.members[i].value; + AstFieldAccess* fa = make_field_access(context, (AstTyped *) param->local, value->name); + symbol_raw_introduce(context, context->checker.current_scope, value->name, param->local->token->pos, (AstNode *) fa); + } + + param->use_processed = 1; + + } else if (param->local->type != NULL) { + ONYX_ERROR(param->local->token->pos, Error_Critical, "Can only 'use' structures or pointers to structures."); + + } else { + // :ExplicitTyping + ERROR_(param->local->token->pos, "Cannot deduce type of parameter '%b'; Try adding it explicitly.", + param->local->token->text, + param->local->token->length); + } + } + } + + func->flags |= Ast_Flag_Has_Been_Symres; + 
} + + if (func->named_return_locals) { + bh_arr_each(AstLocal *, named_return, func->named_return_locals) { + CHECK(local, named_return); + } + + if (!func->named_return_locals_added) { + func->named_return_locals_added = 1; + + AstNode **prev = &func->body->body; + bh_arr_each(AstLocal *, named_return, func->named_return_locals) { + (*named_return)->next = *prev; + *prev = (AstNode *) *named_return; + } + } + } - context.checker.inside_for_iterator = 0; - if (context.checker.for_node_stack) bh_arr_clear(context.checker.for_node_stack); if (func->body) { CheckStatus status = Check_Success; if (func->captures) { - status = check_capture_block(func->captures); + status = check_capture_block(context, func->captures, func->scope_to_lookup_captured_values); } if (status == Check_Success && func->stack_trace_local) { - status = check_expression((AstTyped **) &func->stack_trace_local); + status = check_expression(context, (AstTyped **) &func->stack_trace_local); } if (status == Check_Success) { - status = check_block(func->body); + status = check_block(context, func->body); + } + + if (status == Check_Success && + !(func->body->flags & Ast_Flag_Block_Returns) && + *bh_arr_last(context->checker.expected_return_type_stack) != context->types.basic[Basic_Kind_Void] && + *bh_arr_last(context->checker.expected_return_type_stack) != context->types.auto_return && + !func->is_intrinsic && + !func->is_foreign + ) { + status = Check_Error; + ONYX_ERROR(func->token->pos, Error_Critical, "Not all code paths return a value."); } - if (status == Check_Error && func->generated_from && context.cycle_detected == 0) + if (status == Check_Error && func->generated_from && context->cycle_detected == 0) ERROR(func->generated_from->pos, "Error in polymorphic procedure generated from this location."); if (status != Check_Success) { @@ -2927,12 +3948,14 @@ CheckStatus check_function(AstFunction* func) { } } - if (*bh_arr_last(context.checker.expected_return_type_stack) == &type_auto_return) { - 
*bh_arr_last(context.checker.expected_return_type_stack) = &basic_types[Basic_Kind_Void]; + if (*bh_arr_last(context->checker.expected_return_type_stack) == context->types.auto_return) { + *bh_arr_last(context->checker.expected_return_type_stack) = context->types.basic[Basic_Kind_Void]; } func->flags |= Ast_Flag_Has_Been_Checked; + scope_leave(context); + if (bh_arr_length(func->tags) > 0 || (func->flags & Ast_Flag_Proc_Is_Null) != 0) { func->flags |= Ast_Flag_Has_Been_Scheduled_For_Emit; return Check_Success; @@ -2941,11 +3964,19 @@ CheckStatus check_function(AstFunction* func) { return Check_Complete; } -CheckStatus check_overloaded_function(AstOverloadedFunction* ofunc) { - b32 done = 1; +CHECK_FUNC(overloaded_function, AstOverloadedFunction* ofunc) { + bh_arr_each(OverloadOption, overload, ofunc->overloads) { + CHECK(expression, &overload->option); + } - bh_imap all_overloads; - bh_imap_init(&all_overloads, global_heap_allocator, 4); + if (ofunc->expected_return_node) { + CHECK(type, &ofunc->expected_return_node); + } + + b32 done = 1; + + bh_imap all_overloads; + bh_imap_init(&all_overloads, context->gp_alloc, 4); build_all_overload_options(ofunc->overloads, &all_overloads); bh_arr_each(bh__imap_entry, entry, all_overloads.entries) { @@ -2955,7 +3986,7 @@ CheckStatus check_overloaded_function(AstOverloadedFunction* ofunc) { if ( node->kind != Ast_Kind_Function && node->kind != Ast_Kind_Polymorphic_Proc && node->kind != Ast_Kind_Macro) { - onyx_report_error(node->token->pos, Error_Critical, "Overload option not procedure or macro. Got '%s'", + ONYX_ERROR(node->token->pos, Error_Critical, "Overload option not procedure or macro. Got '%s'", onyx_ast_node_kind_string(node->kind)); bh_imap_free(&all_overloads); @@ -2990,10 +4021,10 @@ CheckStatus check_overloaded_function(AstOverloadedFunction* ofunc) { // type_build_from_ast() will never return a polymorphic structure type // because that is never valid in the type system. 
However, we can by-pass // this and look it up directly using type_lookup_by_id. - type_build_from_ast(context.ast_alloc, expected_return_node); + type_build_from_ast(context, expected_return_node); if (expected_return_node->type_id) { - ofunc->expected_return_type = type_lookup_by_id(expected_return_node->type_id); + ofunc->expected_return_type = type_lookup_by_id(context, expected_return_node->type_id); // Return early here because the following code does not work with a // polymorphic expected return type. @@ -3002,7 +4033,7 @@ CheckStatus check_overloaded_function(AstOverloadedFunction* ofunc) { } } - ofunc->expected_return_type = type_build_from_ast(context.ast_alloc, expected_return_node); + ofunc->expected_return_type = type_build_from_ast(context, expected_return_node); if (!ofunc->expected_return_type) YIELD(ofunc->token->pos, "Waiting to construct expected return type."); bh_arr_each(bh__imap_entry, entry, all_overloads.entries) { @@ -3015,10 +4046,10 @@ CheckStatus check_overloaded_function(AstOverloadedFunction* ofunc) { if (!func->type->Function.return_type) continue; Type *return_type = func->type->Function.return_type; - if (return_type == &type_auto_return) continue; + if (return_type == context->types.auto_return) continue; - if (!types_are_compatible(return_type, ofunc->expected_return_type)) { - report_incorrect_overload_expected_type(return_type, ofunc->expected_return_type, func->token, ofunc->token); + if (!types_are_compatible(context, return_type, ofunc->expected_return_type)) { + report_incorrect_overload_expected_type(context, return_type, ofunc->expected_return_type, func->token, ofunc->token); bh_imap_free(&all_overloads); return Check_Error; } @@ -3031,14 +4062,118 @@ CheckStatus check_overloaded_function(AstOverloadedFunction* ofunc) { return Check_Success; } -CheckStatus check_meta_tags(bh_arr(AstTyped *) tags) { +CHECK_FUNC(package, AstPackage* package) { + if (package->package == NULL) { + if (!package->package_name) { + 
ERROR(package->token->pos, "Internal compiler error: Expected package to have a name"); + } + + package->package = package_lookup(context, package->package_name); + } + + if (package->package) { + package_mark_as_used(context, package->package); + return Check_Success; + + } else { + YIELD_ERROR_(package->token->pos, "Package '%s' not found in included source files.", package->package_name); + } +} + +CHECK_FUNC(enum, AstEnumType* enum_node) { + if (!enum_node->backing_type) { + CHECK(type, (AstType **) &enum_node->backing); + + enum_node->backing_type = type_build_from_ast(context, enum_node->backing); + if (enum_node->backing_type == NULL) { + YIELD(enum_node->token->pos, "Unable to construct the backing type of this enum."); + } + } + + if (enum_node->scope == NULL) { + enum_node->scope = scope_create(context, context->checker.current_scope, enum_node->token->pos); + + symbol_raw_introduce(context, enum_node->scope, "__backing_type", enum_node->token->pos, (AstNode *) enum_node->backing); + + type_build_from_ast(context, (AstType *) enum_node); + } + + scope_enter(context, enum_node->scope); + + u64 next_assign_value = enum_node->is_flags ? 
1 : 0; + bh_arr_each(AstEnumValue *, pvalue, enum_node->values) { + AstEnumValue *value = *pvalue; + if (value->flags & Ast_Flag_Has_Been_Checked) continue; + + value->type = enum_node->etcache; + value->flags |= Ast_Flag_Comptime; + + if (value->value != NULL) { + CHECK(expression, &value->value); + + if (value->value->kind == Ast_Kind_Enum_Value) { + value->value = ((AstEnumValue *) value->value)->value; + value->value->type = enum_node->etcache; + } + + if (value->value->kind == Ast_Kind_NumLit) { + AstNumLit *n_value = (AstNumLit *) value->value; + resolve_expression_type(context, (AstTyped *) n_value); + + if (type_is_small_integer(n_value->type)) { + next_assign_value = n_value->value.i; + } else if (type_is_integer(n_value->type)) { + next_assign_value = n_value->value.l; + } else { + ERROR_(value->token->pos, "expected numeric integer literal for enum initialization, got '%s'", type_get_name(context, n_value->type)); + } + + n_value->type = enum_node->etcache; + + } else { + if (value->entity == NULL) { + add_entities_for_node(&context->entities, NULL, (AstNode *) value, enum_node->scope, NULL); + } + + YIELD(value->token->pos, "Expected compile time known value for enum initialization."); + } + + } else { + AstNumLit* num = make_int_literal(context, next_assign_value); + num->type = enum_node->etcache; + + value->value = (AstTyped *) num; + } + + symbol_introduce(context, enum_node->scope, value->token, (AstNode *) value); + + value->flags |= Ast_Flag_Comptime | Ast_Flag_Has_Been_Checked; + + if (enum_node->is_flags) { + next_assign_value <<= 1; + } else { + next_assign_value++; + } + } + + scope_leave(context); + + // HACK this ensure that you can only lookup symbols in an Enum that are actually defined in the enum. + // However, during the symbol resolution of the values in an enum, they need to be able to see the + // enclosing scope. 
+ enum_node->scope->parent = NULL; + + return Check_Success; +} + +CHECK_FUNC(meta_tags, bh_arr(AstTyped *) tags) { if (tags) { bh_arr_each(AstTyped *, meta, tags) { CHECK(expression, meta); - resolve_expression_type(*meta); + resolve_expression_type(context, *meta); if (((*meta)->flags & Ast_Flag_Comptime) == 0) { - onyx_report_error((*meta)->token->pos, Error_Critical, "#tag expressions are expected to be compile-time known."); + ONYX_ERROR((*meta)->token->pos, Error_Critical, "#tag expressions are expected to be compile-time known."); return Check_Error; } } @@ -3047,10 +4182,15 @@ CheckStatus check_meta_tags(bh_arr(AstTyped *) tags) { return Check_Success; } -CheckStatus check_struct(AstStructType* s_node) { +CHECK_FUNC(struct, AstStructType* s_node) { if (s_node->entity_defaults && s_node->entity_defaults->state < Entity_State_Check_Types) YIELD(s_node->token->pos, "Waiting for struct member defaults to pass symbol resolution."); + s_node->flags |= Ast_Flag_Comptime; + + assert(s_node->scope); + scope_enter(context, s_node->scope); + if (s_node->min_size_) CHECK(expression, &s_node->min_size_); if (s_node->min_alignment_) CHECK(expression, &s_node->min_alignment_); @@ -3058,7 +4198,9 @@ CheckStatus check_struct(AstStructType* s_node) { assert(s_node->polymorphic_arguments); fori (i, 0, (i64) bh_arr_length(s_node->polymorphic_argument_types)) { - Type *arg_type = type_build_from_ast(context.ast_alloc, s_node->polymorphic_argument_types[i]); + CHECK(type, &s_node->polymorphic_argument_types[i]); + + Type *arg_type = type_build_from_ast(context, s_node->polymorphic_argument_types[i]); if (arg_type == NULL) YIELD(s_node->polymorphic_argument_types[i]->token->pos, "Waiting to build type for polymorph argument."); // @@ -3069,11 +4211,14 @@ CheckStatus check_struct(AstStructType* s_node) { if (i >= bh_arr_length(s_node->polymorphic_arguments) || !s_node->polymorphic_arguments[i].value) continue; + if (s_node->polymorphic_arguments[i].value) { + CHECK(expression, 
&s_node->polymorphic_arguments[i].value); + } TYPE_CHECK(&s_node->polymorphic_arguments[i].value, arg_type) { ERROR_(s_node->polymorphic_arguments[i].value->token->pos, "Expected value of type '%s', got '%s'.", - type_get_name(arg_type), - type_get_name(s_node->polymorphic_arguments[i].value->type)); + type_get_name(context, arg_type), + type_get_name(context, s_node->polymorphic_arguments[i].value->type)); } } } @@ -3085,33 +4230,44 @@ CheckStatus check_struct(AstStructType* s_node) { if (s_node->polymorphic_error_loc.filename) { pos = s_node->polymorphic_error_loc; } + CHECK(constraint_context, &s_node->constraints, s_node->scope, pos); } bh_arr_each(AstStructMember *, smem, s_node->members) { - if ((*smem)->type_node != NULL) { - CHECK(type, &(*smem)->type_node); + AstStructMember *member = *smem; + if (member->initial_value) { + CHECK(expression, &member->initial_value); } + } + + bh_arr_each(AstStructMember *, smem, s_node->members) { + AstStructMember *member = *smem; + track_declaration_for_symbol_info(context, member->token->pos, (AstNode *) member); - if ((*smem)->type_node == NULL && (*smem)->initial_value != NULL) { - CHECK(expression, &(*smem)->initial_value); + if (member->type_node) { + CHECK(type, &member->type_node); + } - fill_in_type((*smem)->initial_value); - if ((*smem)->initial_value->type == NULL) - YIELD((*smem)->initial_value->token->pos, "Trying to resolve type for initial value for member."); + if (member->type_node == NULL && member->initial_value != NULL) { + CHECK(expression, &member->initial_value); - resolve_expression_type((*smem)->initial_value); - if ((*smem)->type == NULL) (*smem)->type = (*smem)->initial_value->type; + fill_in_type(context, member->initial_value); + if (member->initial_value->type == NULL) + YIELD(member->initial_value->token->pos, "Trying to resolve type for initial value for member."); - if ((*smem)->type == NULL) { - ERROR((*smem)->initial_value->token->pos, "Unable to deduce type of initial value. 
This is probably a compiler bug."); + resolve_expression_type(context, member->initial_value); + if (member->type == NULL) member->type = member->initial_value->type; + + if (member->type == NULL) { + ERROR(member->initial_value->token->pos, "Unable to deduce type of initial value. This is probably a compiler bug."); } } } // NOTE: fills in the pending_type. s_node->ready_to_build_type = 1; - type_build_from_ast(context.ast_alloc, (AstType *) s_node); + type_build_from_ast(context, (AstType *) s_node); if (s_node->pending_type == NULL || !s_node->pending_type_is_valid) YIELD(s_node->token->pos, "Waiting for type to be constructed."); @@ -3121,7 +4277,7 @@ CheckStatus check_struct(AstStructType* s_node) { } if ((*smem)->used && !(*smem)->use_processed) { - if (!type_struct_member_apply_use(context.ast_alloc, s_node->pending_type, *smem)) { + if (!type_struct_member_apply_use(context, s_node->pending_type, *smem)) { YIELD((*smem)->token->pos, "Waiting for use to be applied."); } @@ -3132,15 +4288,20 @@ CheckStatus check_struct(AstStructType* s_node) { s_node->stcache = s_node->pending_type; s_node->stcache->Struct.status = SPS_Uses_Done; + scope_leave(context); return Check_Success; } -CheckStatus check_struct_defaults(AstStructType* s_node) { +CHECK_FUNC(struct_defaults, AstStructType* s_node) { if (s_node->entity_type && s_node->entity_type->state < Entity_State_Code_Gen) YIELD(s_node->token->pos, "Waiting for struct type to be constructed before checking defaulted members."); if (s_node->entity_type && s_node->entity_type->state == Entity_State_Failed) return Check_Failed; + if (s_node->scope) { + scope_enter(context, s_node->scope); + } + CHECK(meta_tags, s_node->meta_tags); bh_arr_each(StructMember *, smem, s_node->stcache->Struct.memarr) { @@ -3150,32 +4311,52 @@ CheckStatus check_struct_defaults(AstStructType* s_node) { TYPE_CHECK((*smem)->initial_value, (*smem)->type) { ERROR_((*(*smem)->initial_value)->token->pos, "Mismatched type for initial value, expected 
'%s', got '%s'.", - type_get_name((*smem)->type), - type_get_name((*(*smem)->initial_value)->type)); + type_get_name(context, (*smem)->type), + type_get_name(context, (*(*smem)->initial_value)->type)); } - resolve_expression_type(*(*smem)->initial_value); + resolve_expression_type(context, *(*smem)->initial_value); } CHECK(meta_tags, (*smem)->meta_tags); } + if (s_node->scope) { + scope_leave(context); + } + return Check_Success; } -CheckStatus check_union(AstUnionType *u_node) { - CHECK(type, &u_node->tag_backing_type); +CHECK_FUNC(union, AstUnionType *u_node) { + u_node->flags |= Ast_Flag_Comptime; - Type *tag_type = type_build_from_ast(context.ast_alloc, u_node->tag_backing_type); + if (!u_node->tag_backing_type) { + int n = (31 - bh_clz(bh_arr_length(u_node->variants) - 1)) >> 3; + if (n == 0) u_node->tag_backing_type = (AstType *) &context->basic_types.type_u8; + else if (n == 1) u_node->tag_backing_type = (AstType *) &context->basic_types.type_u16; + else if (n <= 3) u_node->tag_backing_type = (AstType *) &context->basic_types.type_u32; + else { + ERROR(u_node->token->pos, "Too many union variants. 
How did you even do this...?"); + } + } + + CHECK(type, &u_node->tag_backing_type); + Type *tag_type = type_build_from_ast(context, u_node->tag_backing_type); if (!type_is_integer(tag_type)) { - ERROR_(u_node->token->pos, "Union tag types must be an integer, got '%s'.", type_get_name(tag_type)); + ERROR_(u_node->token->pos, "Union tag types must be an integer, got '%s'.", type_get_name(context, tag_type)); } + assert(u_node->scope); + scope_enter(context, u_node->scope); + if (u_node->polymorphic_argument_types) { assert(u_node->polymorphic_arguments); fori (i, 0, (i64) bh_arr_length(u_node->polymorphic_argument_types)) { - Type *arg_type = type_build_from_ast(context.ast_alloc, u_node->polymorphic_argument_types[i]); + CHECK(type, &u_node->polymorphic_argument_types[i]); + + Type *arg_type = type_build_from_ast(context, u_node->polymorphic_argument_types[i]); if (arg_type == NULL) YIELD(u_node->polymorphic_argument_types[i]->token->pos, "Waiting to build type for polymorph argument."); // @@ -3186,16 +4367,21 @@ CheckStatus check_union(AstUnionType *u_node) { if (i >= bh_arr_length(u_node->polymorphic_arguments) || !u_node->polymorphic_arguments[i].value) continue; + CHECK(expression, &u_node->polymorphic_arguments[i].value); TYPE_CHECK(&u_node->polymorphic_arguments[i].value, arg_type) { ERROR_(u_node->polymorphic_arguments[i].value->token->pos, "Expected value of type %s, got %s.", - type_get_name(arg_type), - type_get_name(u_node->polymorphic_arguments[i].value->type)); + type_get_name(context, arg_type), + type_get_name(context, u_node->polymorphic_arguments[i].value->type)); } } } if (u_node->constraints.constraints) { + // bh_arr_each(AstConstraint *, constraint, u_node->constraints.constraints) { + // CHECK(constraint, *constraint); + // } + u_node->constraints.produce_errors = (u_node->flags & Ast_Flag_Header_Check_No_Error) == 0; OnyxFilePos pos = u_node->token->pos; @@ -3207,26 +4393,36 @@ CheckStatus check_union(AstUnionType *u_node) { CHECK(meta_tags, 
u_node->meta_tags); - bh_arr_each(AstUnionVariant *, variant, u_node->variants) { - CHECK(type, &(* variant)->type_node); - CHECK(meta_tags, (* variant)->meta_tags); + bh_arr_each(AstUnionVariant *, pvariant, u_node->variants) { + AstUnionVariant *variant = *pvariant; + track_declaration_for_symbol_info(context, variant->token->pos, (AstNode *) variant); + + assert(variant->type_node); + + CHECK(type, &variant->type_node); + if (variant->explicit_tag_value) { + CHECK(expression, &variant->explicit_tag_value); + } + + CHECK(meta_tags, variant->meta_tags); } - type_build_from_ast(context.ast_alloc, (AstType *) u_node); + type_build_from_ast(context, (AstType *) u_node); if (u_node->pending_type == NULL || !u_node->pending_type_is_valid) YIELD(u_node->token->pos, "Waiting for type to be constructed."); + scope_leave(context); u_node->utcache = u_node->pending_type; return Check_Success; } -CheckStatus check_temp_function_header(AstFunction* func) { +CHECK_FUNC(temp_function_header, AstFunction* func) { if (func->flags & Ast_Flag_Header_Check_No_Error) { - onyx_errors_disable(); + onyx_errors_disable(context); } - CheckStatus cs = check_function_header(func); - onyx_errors_enable(); + CheckStatus cs = check_function_header(context, func); + onyx_errors_enable(context); if (cs == Check_Error) return Check_Failed; if (cs != Check_Success) return cs; @@ -3234,24 +4430,93 @@ CheckStatus check_temp_function_header(AstFunction* func) { return Check_Complete; } -CheckStatus check_function_header(AstFunction* func) { - //if (func->entity_body && func->entity_body->state < Entity_State_Check_Types) - // YIELD(func->token->pos, "Waiting for function body to complete symbol resolution to check header."); +CHECK_FUNC(function_header, AstFunction* func) { + func->flags |= Ast_Flag_Comptime; + + if (!(func->flags & Ast_Flag_Function_Is_Lambda) && func->captures) { + ONYX_ERROR(func->captures->token->pos, Error_Critical, "This procedure cannot capture values as it is not defined in 
an expression."); + return Check_Error; + } + + if (func->captures && !func->scope_to_lookup_captured_values) { + if (func->flags & Ast_Flag_Function_Is_Lambda_Inside_PolyProc) return Check_Complete; + + return Check_Yield; + } b32 expect_default_param = 0; b32 has_had_varargs = 0; + if (func->scope == NULL) { + func->scope = scope_create(context, context->checker.current_scope, func->token->pos); + } + if (func->constraints.constraints != NULL && func->constraints.constraints_met == 0) { + // bh_arr_each(AstConstraint *, constraint, func->constraints.constraints) { + // CHECK(constraint, *constraint); + // } + func->constraints.produce_errors = (func->flags & Ast_Flag_Header_Check_No_Error) == 0; OnyxToken *tkn = func->token; if (func->generated_from) tkn = func->generated_from; CHECK(constraint_context, &func->constraints, func->scope, tkn->pos); + } + + scope_enter(context, func->scope); + + if (!mode_enabled(context, CM_Dont_Resolve_Symbols)) { + if (func->captures) { + CHECK(capture_block, func->captures, func->scope_to_lookup_captured_values); + } + + bh_arr_each(AstParam, param, func->params) { + symbol_introduce(context, context->checker.current_scope, param->local->token, (AstNode *) param->local); + } - // All constraints have been met. Return to symbol resolution to finish - // looking up all symbols in the function. - return Check_Return_To_Symres; + // + // We have to pre-check the type nodes of the parameters. 
+ bh_arr_each(AstParam, param, func->params) { + if (param->local->type_node != NULL) { + param->local->type_node->flags |= (func->flags & Ast_Flag_Header_Check_No_Error); + param->local->flags |= Ast_Flag_Symbol_Invisible; + check_type(context, ¶m->local->type_node); + param->local->flags &= ~Ast_Flag_Symbol_Invisible; + } + } + + if (potentially_convert_function_to_polyproc(context, func)) { + return Check_Complete; + } + } + + if (func->nodes_that_need_entities_after_clone && bh_arr_length(func->nodes_that_need_entities_after_clone) > 0 && func->entity) { + bh_arr_each(AstNode *, node, func->nodes_that_need_entities_after_clone) { + // This makes a lot of assumptions about how these nodes are being processed, + // and I don't want to start using this with other nodes without considering + // what the ramifications of that is. + assert((*node)->kind == Ast_Kind_Static_If || (*node)->kind == Ast_Kind_File_Contents + || (*node)->kind == Ast_Kind_Function || (*node)->kind == Ast_Kind_Polymorphic_Proc); + + // Need to use current_scope->parent because current_scope is the function body scope. 
+ Scope *scope = context->checker.current_scope->parent; + + if ((*node)->kind == Ast_Kind_Static_If) { + AstIf *static_if = (AstIf *) *node; + assert(static_if->defined_in_scope); + scope = static_if->defined_in_scope; + + if (func->poly_scope) { + scope = scope_create(context, scope, static_if->token->pos); + scope_include(context, scope, func->poly_scope, static_if->token->pos); + } + } + + add_entities_for_node(&context->entities, NULL, *node, scope, func->entity->package); + } + + bh_arr_set_length(func->nodes_that_need_entities_after_clone, 0); } bh_arr_each(AstParam, param, func->params) { @@ -3267,17 +4532,12 @@ CheckStatus check_function_header(AstFunction* func) { "Can only have one param that is of variable argument type."); } - if (has_had_varargs && param->vararg_kind != VA_Kind_Not_VA) { - ERROR(local->token->pos, - "Variable arguments must be last in parameter list"); - } - if (param->vararg_kind == VA_Kind_Untyped) { // HACK - if (builtin_vararg_type_type == NULL) - builtin_vararg_type_type = type_build_from_ast(context.ast_alloc, builtin_vararg_type); + if (context->builtins.vararg_type_type == NULL) + context->builtins.vararg_type_type = type_build_from_ast(context, context->builtins.vararg_type); - local->type = builtin_vararg_type_type; + local->type = context->builtins.vararg_type_type; } if (param->default_value != NULL) { @@ -3288,7 +4548,7 @@ CheckStatus check_function_header(AstFunction* func) { CHECK(expression, ¶m->default_value); if (local->type_node == NULL && local->type == NULL) { - local->type = resolve_expression_type(param->default_value); + local->type = resolve_expression_type(context, param->default_value); } expect_default_param = 1; @@ -3298,15 +4558,16 @@ CheckStatus check_function_header(AstFunction* func) { // If the function has the no_error flag, then the type node should have it set too. // This allows for polymorphic structures with constraints to fail gracefully. 
local->type_node->flags |= (func->flags & Ast_Flag_Header_Check_No_Error); - CHECK(type, &local->type_node); + CHECK_INVISIBLE(type, local, &local->type_node); } - fill_in_type((AstTyped *) local); + fill_in_type(context, (AstTyped *) local); if (local->type == NULL) { YIELD(local->token->pos, "Waiting for parameter type to be known."); } - if (local->type == (Type *) &node_that_signals_failure) { + if (local->type == (Type *) &context->node_that_signals_failure) { + ONYX_ERROR(local->token->pos, Error_Critical, "BAD TYPE"); return Check_Failed; } @@ -3319,9 +4580,16 @@ CheckStatus check_function_header(AstFunction* func) { if (local->type->kind != Type_Kind_Array && type_size_of(local->type) == 0) { ERROR(local->token->pos, "Function parameters cannot have 'void' as their type."); } + + if (local->type->kind == Type_Kind_Array && type_size_of(local->type) >= 128) { + ONYX_WARNING(local->token->pos, "Since arrays are passed by value, this array parameter would copy %d bytes per function call. 
Unless this is what you want, you should make this parameter a slice instead ('[] %s').", + type_size_of(local->type), + type_get_name(context, local->type->Array.elem) + ); + } } - if (func->return_type != NULL) CHECK(type, &func->return_type); + CHECK(type, &func->return_type); if (func->deprecated_warning) { CHECK(expression, (AstTyped **) &func->deprecated_warning); @@ -3338,14 +4606,36 @@ CheckStatus check_function_header(AstFunction* func) { } } - func->type = type_build_function_type(context.ast_alloc, func); - if (func->type == NULL) YIELD(func->token->pos, "Waiting for function type to be constructed"); + func->ready_for_body_to_be_checked = 1; + + func->type = type_build_function_type(context, func); + if (func->type == NULL) { + YIELD(func->token->pos, "Waiting for function type to be constructed"); + } if (func->foreign.import_name) { CHECK(expression, &func->foreign.module_name); CHECK(expression, &func->foreign.import_name); } + if (context->options->stack_trace_enabled) { + if (!func->stack_trace_local) { + OnyxToken *stack_trace_token = bh_alloc_item(context->ast_alloc, OnyxToken); + stack_trace_token->type = Token_Type_Symbol; + stack_trace_token->length = 13; + stack_trace_token->text = bh_strdup(context->ast_alloc, "__stack_trace "); + stack_trace_token->pos = func->token->pos; + + assert(context->builtins.stack_trace_type); + func->stack_trace_local = make_local(context, stack_trace_token, context->builtins.stack_trace_type); + func->stack_trace_local->flags |= Ast_Flag_Decl_Followed_By_Init; + } + + CHECK(local, &func->stack_trace_local); + } + + scope_leave(context); + if (bh_arr_length(func->tags) > 0 || (func->flags & Ast_Flag_Proc_Is_Null) != 0) { func->flags |= Ast_Flag_Has_Been_Scheduled_For_Emit; return Check_Success; @@ -3354,9 +4644,9 @@ CheckStatus check_function_header(AstFunction* func) { return Check_Complete; } -CheckStatus check_memres_type(AstMemRes* memres) { +CHECK_FUNC(memres_type, AstMemRes* memres) { CHECK(type, 
&memres->type_node); - fill_in_type((AstTyped *) memres); + fill_in_type(context, (AstTyped *) memres); if (memres->type_node && !memres->type) YIELD(memres->token->pos, "Waiting for global type to be constructed."); if (bh_arr_length(memres->tags) > 0) { @@ -3367,14 +4657,13 @@ CheckStatus check_memres_type(AstMemRes* memres) { return Check_Complete; } -CheckStatus check_memres(AstMemRes* memres) { +CHECK_FUNC(memres, AstMemRes* memres) { assert(memres->type_entity); if (memres->type_entity->state < Entity_State_Code_Gen) YIELD(memres->token->pos, "Waiting for global to pass type construction."); if (memres->initial_value != NULL) { if (memres->threadlocal) { - onyx_report_error(memres->token->pos, Error_Critical, "'#thread_local' variables cannot have an initializer at the moment."); - return Check_Error; + ERROR(memres->token->pos, "'#thread_local' variables cannot have an initializer at the moment."); } CHECK(expression, &memres->initial_value); @@ -3384,12 +4673,12 @@ CheckStatus check_memres(AstMemRes* memres) { TYPE_CHECK(&memres->initial_value, memres_type) { ERROR_(memres->token->pos, "Cannot assign value of type '%s' to a '%s'.", - node_get_type_name(memres->initial_value), - type_get_name(memres_type)); + node_get_type_name(context, memres->initial_value), + type_get_name(context, memres_type)); } } else { - resolve_expression_type(memres->initial_value); + resolve_expression_type(context, memres->initial_value); if (memres->initial_value->type == NULL && memres->initial_value->entity != NULL && memres->initial_value->entity->state <= Entity_State_Check_Types) { YIELD(memres->token->pos, "Waiting for global type to be constructed."); } @@ -3417,35 +4706,29 @@ CheckStatus check_memres(AstMemRes* memres) { return Check_Complete; } -CheckStatus check_type(AstType** ptype) { +CHECK_FUNC(type, AstType** ptype) { if (ptype == NULL || *ptype == NULL) return Check_Success; AstType* type = *ptype; AstType* original_type = type; while (type->kind == 
Ast_Kind_Type_Alias) type = ((AstTypeAlias *) type)->to; + + if (type->kind == Ast_Kind_Symbol) { + CHECK(symbol, (AstNode **) ptype); + type = *ptype; + original_type = type; + } if (type->flags & Ast_Flag_Has_Been_Checked) return Check_Success; switch (type->kind) { - case Ast_Kind_Poly_Call_Type: { - AstPolyCallType* pc_node = (AstPolyCallType *) type; - - bh_arr_each(AstNode *, param, pc_node->params) { - if (!node_is_type(*param)) { - CHECK(expression, (AstTyped **) param); - resolve_expression_type((AstTyped *) *param); - fill_in_type((AstTyped *) *param); - } - } - - break; - } + case Ast_Kind_Basic_Type: break; case Ast_Kind_Typeof: { AstTypeOf *type_of = (AstTypeOf *) type; CHECK(expression, (AstTyped **) &type_of->expr); - resolve_expression_type(type_of->expr); + resolve_expression_type(context, type_of->expr); if (type_of->expr->type == NULL) { YIELD(type_of->token->pos, "Trying to check type for type-of expression."); @@ -3455,21 +4738,48 @@ CheckStatus check_type(AstType** ptype) { break; } - case Ast_Kind_Pointer_Type: ((AstPointerType *) type)->elem->flags |= type->flags & Ast_Flag_Header_Check_No_Error; CHECK(type, &((AstPointerType *) type)->elem); break; - case Ast_Kind_Slice_Type: ((AstSliceType *) type)->elem->flags |= type->flags & Ast_Flag_Header_Check_No_Error; CHECK(type, &((AstSliceType *) type)->elem); break; - case Ast_Kind_DynArr_Type: ((AstDynArrType *) type)->elem->flags |= type->flags & Ast_Flag_Header_Check_No_Error; CHECK(type, &((AstDynArrType *) type)->elem); break; - case Ast_Kind_VarArg_Type: ((AstVarArgType *) type)->elem->flags |= type->flags & Ast_Flag_Header_Check_No_Error; CHECK(type, &((AstVarArgType *) type)->elem); break; + case Ast_Kind_Pointer_Type: + ((AstPointerType *) type)->elem->flags |= type->flags & Ast_Flag_Header_Check_No_Error; + CHECK(type, &((AstPointerType *) type)->elem); + break; + + case Ast_Kind_Slice_Type: + ((AstSliceType *) type)->elem->flags |= type->flags & Ast_Flag_Header_Check_No_Error; + 
CHECK(type, &((AstSliceType *) type)->elem); + break; + + case Ast_Kind_DynArr_Type: + ((AstDynArrType *) type)->elem->flags |= type->flags & Ast_Flag_Header_Check_No_Error; + CHECK(type, &((AstDynArrType *) type)->elem); + break; + + case Ast_Kind_VarArg_Type: + ((AstVarArgType *) type)->elem->flags |= type->flags & Ast_Flag_Header_Check_No_Error; + CHECK(type, &((AstVarArgType *) type)->elem); + break; + + case Ast_Kind_Multi_Pointer_Type: + ((AstMultiPointerType *) type)->elem->flags |= type->flags & Ast_Flag_Header_Check_No_Error; + CHECK(type, &((AstMultiPointerType *) type)->elem); + break; case Ast_Kind_Function_Type: { AstFunctionType* ftype = (AstFunctionType *) type; - CHECK(type, &ftype->return_type); + // + // We have to check the parameter types here before the return type, + // because when doing a nested polymorph lookup, the parameter types + // need to be constructable in order to create the polymorph variant + // and return type can be whatever (since it is replaced with void). + // if (ftype->param_count > 0) { fori (i, 0, (i64) ftype->param_count) { CHECK(type, &ftype->params[i]); } } + + CHECK(type, &ftype->return_type); break; } @@ -3482,9 +4792,11 @@ CheckStatus check_type(AstType** ptype) { case Ast_Kind_Array_Type: { AstArrayType* atype = (AstArrayType *) type; + CHECK(type, &atype->elem); + if (atype->count_expr) { CHECK(expression, &atype->count_expr); - resolve_expression_type(atype->count_expr); + resolve_expression_type(context, atype->count_expr); } break; @@ -3501,10 +4813,77 @@ CheckStatus check_type(AstType** ptype) { break; } + // + // We do not recurse down to check structs, unions and enums at this point, + // as they should be checked separately using their entity. check_entity + // will automatically directive Entity_Type_Type_Alias to check_struct/union + // so we don't have to do it here. 
+ // + // case Ast_Kind_Struct_Type: CHECK(struct, (AstStructType *) type)); break; + // case Ast_Kind_Union_Type: CHECK(union, (AstUnionType *) type)); break; + + case Ast_Kind_Enum_Type: break; + + case Ast_Kind_Poly_Struct_Type: { + AstPolyStructType* pst_node = (AstPolyStructType *) type; + assert(pst_node->scope); + + if (type == context->builtins.array_type) { + assert(((AstPolyStructType *) context->builtins.slice_type)->scope); + pst_node->scope->parent = ((AstPolyStructType *) context->builtins.slice_type)->scope; + } + break; + } + + case Ast_Kind_Poly_Union_Type: { + AstPolyUnionType* put_node = (AstPolyUnionType *) type; + assert(put_node->scope); + break; + } + + case Ast_Kind_Poly_Call_Type: { + AstPolyCallType* pc_node = (AstPolyCallType *) type; + + CHECK(type, &pc_node->callee); + + bh_arr_each(AstNode *, param, pc_node->params) { + if (node_is_type(*param)) { + CHECK(type, (AstType **) param); + } else { + CHECK(expression, (AstTyped **) param); + resolve_expression_type(context, (AstTyped *) *param); + fill_in_type(context, (AstTyped *) *param); + } + } + + break; + } + + case Ast_Kind_Type_Alias: { + AstTypeAlias *type_alias = (AstTypeAlias *) type; + CHECK(type, &type_alias->to); + break; + } + + case Ast_Kind_Alias: { + AstAlias* alias = (AstAlias *) type; + CHECK_INVISIBLE(type, alias, (AstType **) &alias->alias); + + break; + } + + case Ast_Kind_Distinct_Type: { + AstDistinctType *distinct = (AstDistinctType *) type; + CHECK(type, &distinct->base_type); + break; + } + default: break; } type = original_type; + + // CLEANUP: Should Type_Alias nodes just be made comptime at creation? Since they will always be set to comptime here? 
type->flags |= Ast_Flag_Comptime; while (type->kind == Ast_Kind_Type_Alias) { type->flags |= Ast_Flag_Comptime; @@ -3515,11 +4894,14 @@ CheckStatus check_type(AstType** ptype) { return Check_Success; } -CheckStatus check_static_if(AstIf* static_if) { - context.checker.expression_types_must_be_known = 1; - CheckStatus result = check_expression(&static_if->cond); - context.checker.expression_types_must_be_known = 0; - if (result == Check_Yield_Macro) return Check_Yield_Macro; +CHECK_FUNC(static_if, AstIf* static_if) { + if (static_if->flags & Ast_Flag_Dead) return Check_Complete; + + context->checker.expression_types_must_be_known = 1; + CheckStatus result = check_expression(context, &static_if->cond); + context->checker.expression_types_must_be_known = 0; + + if (result == Check_Yield) return Check_Yield; if (result > Check_Errors_Start || !(static_if->cond->flags & Ast_Flag_Comptime)) { ERROR(static_if->token->pos, "Expected this condition to be compile time known."); @@ -3531,9 +4913,9 @@ CheckStatus check_static_if(AstIf* static_if) { static_if->flags |= Ast_Flag_Static_If_Resolved; - b32 resolution = static_if_resolution(static_if); + b32 resolution = static_if_resolution(context, static_if); - if (context.options->print_static_if_results) + if (context->options->print_static_if_results) bh_printf("Static if statement at %s:%d:%d resulted in %s\n", static_if->token->pos.filename, static_if->token->pos.line, @@ -3542,32 +4924,138 @@ CheckStatus check_static_if(AstIf* static_if) { if (resolution) { bh_arr_each(Entity *, ent, static_if->true_entities) { - entity_heap_insert_existing(&context.entities, *ent); + entity_heap_insert_existing(&context->entities, *ent); } } else { bh_arr_each(Entity *, ent, static_if->false_entities) { - entity_heap_insert_existing(&context.entities, *ent); + entity_heap_insert_existing(&context->entities, *ent); } } return Check_Complete; } -CheckStatus check_process_directive(AstNode* directive) { +CHECK_FUNC(process_directive, 
AstNode* directive) { + if (directive->kind == Ast_Kind_Directive_Add_Overload) { + AstDirectiveAddOverload *add_overload = (AstDirectiveAddOverload *) directive; + + CHECK(expression, (AstTyped **) &add_overload->overloaded_function); + if (add_overload->overloaded_function == NULL) { + // NOTE: Error message will already be generated + return Check_Error; + } + + AstOverloadedFunction *ofunc = (AstOverloadedFunction *) strip_aliases((AstNode *) add_overload->overloaded_function); + if (ofunc->kind != Ast_Kind_Overloaded_Function) { + YIELD_ERROR_(add_overload->token->pos, "#overload directive expects a matched procedure, got '%s'.", + onyx_ast_node_kind_string(ofunc->kind)); + } + + if (ofunc->locked) { + ONYX_ERROR(add_overload->token->pos, Error_Critical, "Cannot add match option here as the original #match was declared as #locked."); + ONYX_ERROR(ofunc->token->pos, Error_Critical, "Here is the original #match."); + return Check_Error; + } + + if (ofunc->only_local_functions) { + if (!token_same_file(add_overload->token, ofunc->token)) { + ONYX_ERROR(add_overload->token->pos, Error_Critical, "Cannot add match option here as this option is not within the same file as the original #match declared with #local."); + ONYX_ERROR(ofunc->token->pos, Error_Critical, "Here is the original #match."); + return Check_Error; + } + } + + AstKind kind = add_overload->overload->kind; + if (kind != Ast_Kind_Function && kind != Ast_Kind_Polymorphic_Proc && kind != Ast_Kind_Overloaded_Function && kind != Ast_Kind_Macro) { + // This check could be converted to something like `is_node_function_like()`? 
+ CHECK(expression, (AstTyped **) &add_overload->overload); + } + + add_overload->overload->flags &= ~Ast_Flag_Function_Is_Lambda; + + add_overload_option(&ofunc->overloads, add_overload->order, add_overload->overload); + return Check_Success; + } + + if (directive->kind == Ast_Kind_Directive_Operator) { + AstDirectiveOperator *operator = (AstDirectiveOperator *) directive; + CHECK(expression, &operator->overload); + if (!operator->overload) { + return Check_Error; + } + + AstFunction* overload = get_function_from_node((AstNode *) operator->overload); + if (overload == NULL) { + ERROR(operator->token->pos, "This cannot be used as an operator overload."); + } + + overload->flags &= ~Ast_Flag_Function_Is_Lambda; + + // First try unary operator overloading + // CLEANUP This is not written well at all... + if (operator->operator == Binary_Op_Count) { + if (bh_arr_length(overload->params) != 1) { + ERROR(operator->token->pos, "Expected exactly 1 argument for unary operator overload."); + } + + UnaryOp unop = Unary_Op_Count; + if (operator->operator_token->type == (TokenType) '?') { + unop = Unary_Op_Try; + } + + if (operator->operator_token->type == (TokenType) '!') { + unop = Unary_Op_Unwrap; + } + + if (unop == Unary_Op_Count) { + ERROR(operator->token->pos, "Unknown operator."); + } + + add_overload_option(&context->unary_operator_overloads[unop], operator->order, operator->overload); + return Check_Success; + } + + if (operator->operator != Binary_Op_Subscript_Equals && bh_arr_length(overload->params) != 2) { + ERROR(operator->token->pos, "Expected exactly 2 arguments for binary operator overload."); + } + + add_overload_option(&context->operator_overloads[operator->operator], operator->order, operator->overload); + return Check_Success; + } + if (directive->kind == Ast_Kind_Directive_Export) { AstDirectiveExport *export = (AstDirectiveExport *) directive; + CHECK(expression, &export->export); + CHECK(expression, &export->export_name_expr); + AstTyped *exported = 
export->export; + + if (exported->kind == Ast_Kind_Polymorphic_Proc) { + ERROR(export->token->pos, "Cannot export a polymorphic function."); + } + + if (exported->kind == Ast_Kind_Function) { + AstFunction *func = (AstFunction *) export->export; + func->is_exported = 1; + + if (func->is_foreign) { + ERROR(export->token->pos, "Cannot export a foreign function."); + } + + if (func->is_intrinsic) { + ERROR(export->token->pos, "Cannot export an intrinsic function."); + } + } + if (exported->entity && exported->entity->state <= Entity_State_Check_Types) YIELD(directive->token->pos, "Waiting for exported type to be known."); if (exported->kind != Ast_Kind_Function) { - onyx_report_error(export->token->pos, Error_Critical, "Cannot export something that is not a procedure."); + ONYX_ERROR(export->token->pos, Error_Critical, "Cannot export something that is not a procedure."); ERROR(exported->token->pos, "Here is the thing being exported that is not a procedure."); } - CHECK(expression, &export->export_name_expr); - if (export->export_name_expr->kind != Ast_Kind_StrLit) { ERROR_(export->token->pos, "Expected export name to be a string literal, got '%s'.", onyx_ast_node_kind_string(export->export_name_expr->kind)); } @@ -3578,6 +5066,8 @@ CheckStatus check_process_directive(AstNode* directive) { if (exported_func->exported_name == NULL) { exported_func->exported_name = export->export_name; } + + return Check_Success; } if (directive->kind == Ast_Kind_Directive_Init) { @@ -3600,6 +5090,10 @@ CheckStatus check_process_directive(AstNode* directive) { if (init->dependencies) { i32 i = 0; bh_arr_each(AstDirectiveInit *, dependency, init->dependencies) { + enable_mode(context, CM_Allow_Init_Expressions); + CHECK(expression, (AstTyped **) dependency); + disable_mode(context, CM_Allow_Init_Expressions); + AstTyped *d = (AstTyped *) strip_aliases((AstNode *) *dependency); if (d->kind != Ast_Kind_Directive_Init) { ERROR_(init->token->pos, "All dependencies of an #init must be another 
#init. The %d%s dependency was not.", i + 1, bh_num_suffix(i + 1)); @@ -3614,12 +5108,13 @@ CheckStatus check_process_directive(AstNode* directive) { } } - bh_arr_push(init_procedures, (AstFunction *) init->init_proc); + bh_arr_push(context->builtins.init_procedures, (AstFunction *) init->init_proc); return Check_Complete; } if (directive->kind == Ast_Kind_Directive_Library) { AstDirectiveLibrary *library = (AstDirectiveLibrary *) directive; + CHECK(expression, &library->library_symbol); if (library->library_symbol->kind != Ast_Kind_StrLit) { ERROR_(library->token->pos, "#library directive expected compile-time known string for library name. Got '%s'.", @@ -3627,55 +5122,133 @@ CheckStatus check_process_directive(AstNode* directive) { } AstStrLit *symbol = (AstStrLit *) library->library_symbol; - char* temp_name = bh_alloc_array(global_scratch_allocator, char, symbol->token->length); + char* temp_name = bh_alloc_array(context->scratch_alloc, char, symbol->token->length); i32 temp_name_len = string_process_escape_seqs(temp_name, symbol->token->text, symbol->token->length); - library->library_name = bh_strdup(global_heap_allocator, temp_name); + library->library_name = bh_strdup(context->gp_alloc, temp_name); return Check_Success; } if (directive->kind == Ast_Kind_Injection) { AstInjection *inject = (AstInjection *) directive; - if (!node_is_type((AstNode *) inject->dest)) { - CHECK(expression, &inject->dest); + + if (inject->dest == NULL) { + if (inject->full_loc == NULL) return Check_Error; + + AstTyped *full_loc = (AstTyped *) strip_aliases((AstNode *) inject->full_loc); + + if (full_loc->kind != Ast_Kind_Field_Access) { + ERROR(inject->token->pos, "#inject expects a dot expression (a.b) for the injection point."); + return Check_Error; + } + + AstFieldAccess *acc = (AstFieldAccess *) full_loc; + inject->dest = acc->expr; + inject->symbol = acc->token; } - Scope *scope = get_scope_from_node_or_create((AstNode *) inject->dest); + // + // We do not "properly" handle 
the check status of this function here, because + we actually don't care if it is completely done type checking. We only care + if we can get a scope from it. We are effectively just using this call as a + means to resolve the symbols in the destination + // + check_expression(context, &inject->dest); + + Scope *scope = get_scope_from_node_or_create(context, (AstNode *) inject->dest); if (scope == NULL) { YIELD_ERROR(inject->token->pos, "Cannot #inject here."); } - AstBinding *binding = onyx_ast_node_new(context.ast_alloc, sizeof(AstBinding), Ast_Kind_Binding); - binding->token = inject->symbol; - binding->node = (AstNode *) inject->to_inject; - binding->documentation = inject->documentation; + inject->binding->token = inject->symbol; + + if (inject->binding->kind == Ast_Kind_Function || inject->binding->kind == Ast_Kind_Polymorphic_Proc) { + AstFunction *func = (void *) inject->binding; + func->name = generate_name_within_scope(context, scope, inject->symbol); + } Package *pac = NULL; if (inject->dest->kind == Ast_Kind_Package) { pac = ((AstPackage *) inject->dest)->package; } else { - pac = context.checker.current_entity->package; + pac = context->checker.current_entity->package; } - add_entities_for_node(NULL, (AstNode *) binding, scope, pac); + add_entities_for_node(&context->entities, NULL, (AstNode *) inject->binding, scope, pac); return Check_Complete; } + if (directive->kind == Ast_Kind_Directive_This_Package) { + AstPackage *package = (AstPackage *) directive; + package->kind = Ast_Kind_Package; + package->package = context->checker.current_entity->package; + return Check_Complete; + } + + if (directive->kind == Ast_Kind_Directive_Wasm_Section) { + AstDirectiveWasmSection *section = (AstDirectiveWasmSection *) directive; + + CHECK(expression, &section->section_name); + CHECK(expression, &section->section_contents); + + if (section->section_name->kind != Ast_Kind_StrLit) ERROR(section->token->pos, "Expect section name to be a compile-time known string."); 
+ if (section->section_contents->kind != Ast_Kind_StrLit) ERROR(section->token->pos, "Expect section contents to be a compile-time known string."); + + AstStrLit *symbol = (AstStrLit *) section->section_name; + char* temp_str = bh_alloc_array(context->scratch_alloc, char, symbol->token->length); + string_process_escape_seqs(temp_str, symbol->token->text, symbol->token->length); + section->name = bh_strdup(context->gp_alloc, temp_str); + + symbol = (AstStrLit *) section->section_contents; + temp_str = bh_alloc_array(context->scratch_alloc, char, symbol->token->length + 1); + u32 content_length = string_process_escape_seqs(temp_str, symbol->token->text, symbol->token->length); + + if (section->from_file) { + const char *containing_filename = section->token->pos.filename; + char *parent_folder = bh_path_get_parent(containing_filename, context->scratch_alloc); + + char *path = bh_strdup( + context->scratch_alloc, + bh_lookup_file(temp_str, parent_folder, NULL, NULL, NULL, context->scratch_alloc) + ); + + if (!bh_file_exists(path)) { + ERROR_(section->token->pos, "Failed to open file '%s' for custom section.", path); + } + + bh_file_contents contents = bh_file_read_contents(context->gp_alloc, path); + section->contents = contents.data; + section->length = contents.length; + } else { + section->contents = bh_strdup(context->gp_alloc, temp_str); + section->length = content_length; + } + + return Check_Success; + } + + assert("Bad directive in check_process_directive" && 0); + return Check_Success; } -CheckStatus check_macro(AstMacro* macro) { +CHECK_FUNC(macro, AstMacro* macro) { + macro->flags |= Ast_Flag_Comptime; + if (macro->body->kind == Ast_Kind_Function) { CHECK(function_header, (AstFunction *) macro->body); } + else if (macro->body->kind == Ast_Kind_Polymorphic_Proc) { + CHECK(polyproc, (AstFunction *) macro->body); + } return Check_Success; } -CheckStatus check_interface(AstInterface *interface) { +CHECK_FUNC(interface, AstInterface *interface) { 
bh_arr_each(InterfaceParam, param, interface->params) { CHECK(type, &param->value_type); - param->type = type_build_from_ast(context.ast_alloc, param->value_type); + param->type = type_build_from_ast(context, param->value_type); if (!param->type) { YIELD(param->value_type->token->pos, "Waiting for interface parameter's type to be constructed."); } @@ -3684,7 +5257,7 @@ CheckStatus check_interface(AstInterface *interface) { return Check_Success; } -CheckStatus check_interface_constraint(AstConstraint *constraint) { +CHECK_FUNC(interface_constraint, AstConstraint *constraint) { if (constraint->interface->kind != Ast_Kind_Interface) { // CLEANUP: This error message might not look totally right in some cases. ERROR_(constraint->token->pos, "'%b' is not an interface. It is a '%s'.", @@ -3694,7 +5267,7 @@ CheckStatus check_interface_constraint(AstConstraint *constraint) { // #intrinsic interfaces if (constraint->interface->is_intrinsic) { - b32 success = resolve_intrinsic_interface_constraint(constraint); + b32 success = resolve_intrinsic_interface_constraint(context, constraint); if (success) { *constraint->report_status = Constraint_Check_Status_Success; return Check_Complete; @@ -3704,11 +5277,11 @@ CheckStatus check_interface_constraint(AstConstraint *constraint) { } } - bh_arr_new(global_heap_allocator, constraint->exprs, bh_arr_length(constraint->interface->exprs)); + bh_arr_new(context->gp_alloc, constraint->exprs, bh_arr_length(constraint->interface->exprs)); bh_arr_each(InterfaceConstraint, ic, constraint->interface->exprs) { InterfaceConstraint new_ic = {0}; - new_ic.expr = (AstTyped *) ast_clone(context.ast_alloc, (AstNode *) ic->expr); - new_ic.expected_type_expr = (AstType *) ast_clone(context.ast_alloc, (AstNode *) ic->expected_type_expr); + new_ic.expr = (AstTyped *) ast_clone(context, (AstNode *) ic->expr); + new_ic.expected_type_expr = (AstType *) ast_clone(context, (AstNode *) ic->expected_type_expr); new_ic.invert_condition = ic->invert_condition; 
bh_arr_push(constraint->exprs, new_ic); } @@ -3717,7 +5290,9 @@ CheckStatus check_interface_constraint(AstConstraint *constraint) { assert(constraint->interface->scope); assert(constraint->interface->scope->parent == constraint->interface->entity->scope); - constraint->scope = scope_create(context.ast_alloc, constraint->interface->scope, constraint->token->pos); + if (constraint->scope == NULL) { + constraint->scope = scope_create(context, constraint->interface->scope, constraint->token->pos); + } if (bh_arr_length(constraint->args) != bh_arr_length(constraint->interface->params)) { ERROR_(constraint->token->pos, "Wrong number of arguments given to interface. Expected %d, got %d.", @@ -3732,43 +5307,43 @@ CheckStatus check_interface_constraint(AstConstraint *constraint) { CHECK(expression, arg); TYPE_CHECK(arg, ip->type) { - ERROR_((*arg)->token->pos, "Mismatched type in interface construction. Expected something of type '%s', but got something of type '%s'.", type_get_name(ip->type), type_get_name((*arg)->type)); + ERROR_((*arg)->token->pos, "Mismatched type in interface construction. 
Expected something of type '%s', but got something of type '%s'.", type_get_name(context, ip->type), type_get_name(context, (*arg)->type)); } - AstAlias *type_alias = onyx_ast_node_new(context.ast_alloc, sizeof(AstAlias), Ast_Kind_Alias); + AstAlias *type_alias = onyx_ast_node_new(context->ast_alloc, sizeof(AstAlias), Ast_Kind_Alias); type_alias->token = ip->value_token; type_alias->alias = *arg; - symbol_introduce(constraint->scope, ip->value_token, (AstNode *) type_alias); + symbol_introduce(context, constraint->scope, ip->value_token, (AstNode *) type_alias); } fori (i, 0, bh_arr_length(constraint->interface->sentinels)) { InterfaceSentinel *is = &constraint->interface->sentinels[i]; - AstTyped *sentinel = onyx_ast_node_new(context.ast_alloc, sizeof(AstTyped), Ast_Kind_Constraint_Sentinel); + AstTyped *sentinel = onyx_ast_node_new(context->ast_alloc, sizeof(AstTyped), Ast_Kind_Constraint_Sentinel); sentinel->token = is->name; - sentinel->type_node = (AstType *) ast_clone(context.ast_alloc, (AstNode *) is->type); + sentinel->type_node = (AstType *) ast_clone(context, (AstNode *) is->type); - symbol_introduce(constraint->scope, is->name, (AstNode *) sentinel); + symbol_introduce(context, constraint->scope, is->name, (AstNode *) sentinel); } assert(constraint->entity); constraint->entity->scope = constraint->scope; constraint->phase = Constraint_Phase_Checking_Expressions; - return Check_Return_To_Symres; + return Check_Yield; } -CheckStatus check_expression_constraint(AstConstraint *constraint) { - onyx_errors_enable(); +CHECK_FUNC(expression_constraint, AstConstraint *constraint) { + onyx_errors_enable(context); AstTyped* expr = constraint->const_expr; - context.checker.expression_types_must_be_known = 1; - CheckStatus result = check_expression(&expr); - context.checker.expression_types_must_be_known = 0; + context->checker.expression_types_must_be_known = 1; + CheckStatus result = check_expression(context, &expr); + 
context->checker.expression_types_must_be_known = 0; - if (result == Check_Yield_Macro) return Check_Yield_Macro; + if (result == Check_Yield) return Check_Yield; if (result > Check_Errors_Start || !(expr->flags & Ast_Flag_Comptime)) { ERROR(expr->token->pos, "Where clauses must be a constant expressions."); @@ -3778,42 +5353,47 @@ CheckStatus check_expression_constraint(AstConstraint *constraint) { ERROR(expr->token->pos, "Where clauses must result in a boolean."); } - b32 value = (b32)get_expression_integer_value(expr, NULL); + b32 value = (b32)get_expression_integer_value(context, expr, NULL); if (!value) { *constraint->report_status = Constraint_Check_Status_Failed; return Check_Failed; } - expr = (AstTyped *)make_bool_literal(context.ast_alloc, 1); + expr = (AstTyped *) make_bool_literal(context, 1); *constraint->report_status = Constraint_Check_Status_Success; return Check_Complete; } -CheckStatus check_constraint(AstConstraint *constraint) { +CHECK_FUNC(constraint, AstConstraint *constraint) { switch (constraint->phase) { + // nocheckin + case Constraint_Phase_Waiting_To_Be_Queued: return Check_Success; + case Constraint_Phase_Cloning_Expressions: { - if (constraint->interface->kind == Ast_Kind_Symbol) { - return Check_Return_To_Symres; + CHECK(expression, (AstTyped **) &constraint->interface); + + bh_arr_each(AstTyped *, arg, constraint->args) { + CHECK(expression, arg); } if (constraint->flags & Ast_Flag_Constraint_Is_Expression) { - return check_expression_constraint(constraint); + return check_expression_constraint(context, constraint); } else { - return check_interface_constraint(constraint); + return check_interface_constraint(context, constraint); } } case Constraint_Phase_Checking_Expressions: { - onyx_errors_disable(); + onyx_errors_disable(context); fori (i, constraint->expr_idx, bh_arr_length(constraint->exprs)) { InterfaceConstraint* ic = &constraint->exprs[i]; - CheckStatus cs = check_expression(&ic->expr); - if (cs == Check_Return_To_Symres || 
cs == Check_Yield_Macro) { - onyx_errors_enable(); + CheckStatus cs = check_expression(context, &ic->expr); + if (cs == Check_Yield) { + onyx_errors_enable(context); return cs; } @@ -3826,13 +5406,13 @@ CheckStatus check_constraint(AstConstraint *constraint) { } if (ic->expected_type_expr) { - cs = check_type(&ic->expected_type_expr); - if (cs == Check_Return_To_Symres || cs == Check_Yield_Macro) { - onyx_errors_enable(); + cs = check_type(context, &ic->expected_type_expr); + if (cs == Check_Yield) { + onyx_errors_enable(context); return cs; } - ic->expected_type = type_build_from_ast(context.ast_alloc, ic->expected_type_expr); + ic->expected_type = type_build_from_ast(context, ic->expected_type_expr); if (ic->expected_type == NULL) { // // To make interfaces easier to use, I wanted to have @@ -3851,18 +5431,18 @@ CheckStatus check_constraint(AstConstraint *constraint) { // the usual code and looks up the type directly, from // then it will be used in the TYPE_CHECK below. if (ic->expected_type_expr->type_id) { - ic->expected_type = type_lookup_by_id(ic->expected_type_expr->type_id); + ic->expected_type = type_lookup_by_id(context, ic->expected_type_expr->type_id); } else { - onyx_errors_enable(); + onyx_errors_enable(context); YIELD_ERROR(ic->expected_type_expr->token->pos, "Waiting on expected type expression to be resolved."); } } TYPE_CHECK(&ic->expr, ic->expected_type) { if (!ic->invert_condition) { - ic->error_msg = bh_aprintf(global_heap_allocator, "Expected expression to be of type %s, got expression of type %s.", - type_get_name(ic->expected_type), type_get_name(ic->expr->type)); + ic->error_msg = bh_aprintf(context->gp_alloc, "Expected expression to be of type %s, got expression of type %s.", + type_get_name(context, ic->expected_type), type_get_name(context, ic->expr->type)); goto constraint_error; } } @@ -3872,13 +5452,13 @@ CheckStatus check_constraint(AstConstraint *constraint) { continue; constraint_error: - onyx_errors_enable(); + 
onyx_errors_enable(context); *constraint->report_status = Constraint_Check_Status_Failed; return Check_Failed; } // HACK HACK HACK - onyx_errors_enable(); + onyx_errors_enable(context); *constraint->report_status = Constraint_Check_Status_Success; return Check_Complete; } @@ -3886,11 +5466,11 @@ CheckStatus check_constraint(AstConstraint *constraint) { default: break; } - onyx_errors_enable(); + onyx_errors_enable(context); return Check_Success; } -CheckStatus check_constraint_context(ConstraintContext *cc, Scope *scope, OnyxFilePos pos) { +CHECK_FUNC(constraint_context, ConstraintContext *cc, Scope *scope, OnyxFilePos pos) { if (cc->constraint_checks) { if (cc->constraints_met == 1) return Check_Success; @@ -3912,7 +5492,7 @@ CheckStatus check_constraint_context(ConstraintContext *cc, Scope *scope, OnyxFi token_toggle_end(symbol); strncat(constraint_map, " is of type '", 511); - strncat(constraint_map, type_get_name(type_build_from_ast(context.ast_alloc, (AstType *) constraint->args[i])), 511); + strncat(constraint_map, type_get_name(context, type_build_from_ast(context, (AstType *) constraint->args[i])), 511); strncat(constraint_map, "'", 511); } @@ -3926,21 +5506,21 @@ CheckStatus check_constraint_context(ConstraintContext *cc, Scope *scope, OnyxFi } if (constraint->flags & Ast_Flag_Constraint_Is_Expression) { - onyx_report_error(error_pos, Error_Critical, "Where clause did not evaluate to true."); + ONYX_ERROR(error_pos, Error_Critical, "Where clause did not evaluate to true."); } else { - onyx_report_error(error_pos, Error_Critical, "Failed to satisfy constraint where %s.", constraint_map); + ONYX_ERROR(error_pos, Error_Critical, "Failed to satisfy constraint where %s.", constraint_map); } if (error_msg) { - onyx_report_error(error_pos, Error_Critical, error_msg); + ONYX_ERROR(error_pos, Error_Critical, error_msg); } if (!(constraint->flags & Ast_Flag_Constraint_Is_Expression)) { - onyx_report_error(constraint->token->pos, Error_Critical, "Here is where the 
interface was used."); + ONYX_ERROR(constraint->token->pos, Error_Critical, "Here is where the interface was used."); } - onyx_report_error(pos, Error_Critical, "Here is the code that caused this constraint to be checked."); + ONYX_ERROR(pos, Error_Critical, "Here is the code that caused this constraint to be checked."); return Check_Error; @@ -3961,7 +5541,7 @@ CheckStatus check_constraint_context(ConstraintContext *cc, Scope *scope, OnyxFi } else { u32 count = bh_arr_length(cc->constraints); - ConstraintCheckStatus *ccs = bh_alloc_array(context.ast_alloc, ConstraintCheckStatus, count); + ConstraintCheckStatus *ccs = bh_alloc_array(context->ast_alloc, ConstraintCheckStatus, count); cc->constraint_checks = ccs; @@ -3970,21 +5550,54 @@ CheckStatus check_constraint_context(ConstraintContext *cc, Scope *scope, OnyxFi cc->constraints[i]->report_status = &ccs[i]; cc->constraints[i]->phase = Constraint_Phase_Cloning_Expressions; - add_entities_for_node(NULL, (AstNode *) cc->constraints[i], scope, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) cc->constraints[i], scope, NULL); } - return Check_Yield_Macro; + return Check_Yield; } } -CheckStatus check_polyquery(AstPolyQuery *query) { +CHECK_FUNC(polyquery, AstPolyQuery *query) { if (query->function_header->scope == NULL) - query->function_header->scope = scope_create(context.ast_alloc, query->proc->parent_scope_of_poly_proc, query->token->pos); + query->function_header->scope = scope_create(context, query->proc->parent_scope_of_poly_proc, query->token->pos); - CheckStatus header_check = check_temp_function_header(query->function_header); - if (header_check == Check_Return_To_Symres) return Check_Return_To_Symres; + enable_mode(context, CM_Dont_Resolve_Symbols); + check_temp_function_header(context, query->function_header); + disable_mode(context, CM_Dont_Resolve_Symbols); + + scope_enter(context, query->function_header->scope); + + { + u32 idx = 0; + bh_arr_each(AstParam, param, 
query->function_header->params) { + bh_arr_each(AstPolyParam, pp, query->proc->poly_params) { + if (pp->kind == PPK_Baked_Value && pp->idx == idx) goto skip_introducing_symbol; + } + + symbol_introduce(context, context->checker.current_scope, param->local->token, (AstNode *) param->local); + + skip_introducing_symbol: + idx++; + } + } b32 solved_something = 0; + bh_arr_each(AstParam, param, query->function_header->params) { + if (param->local->type_node != NULL) { + context->checker.resolved_a_symbol = 0; + + onyx_errors_disable(context); + param->local->flags |= Ast_Flag_Symbol_Invisible; + check_type(context, &param->local->type_node); + param->local->flags &= ~Ast_Flag_Symbol_Invisible; + onyx_errors_enable(context); + + if (context->checker.resolved_a_symbol) { + solved_something = 1; + } + } + } + i32 solved_count = 0; OnyxError err_msg = { 0 }; @@ -4004,27 +5617,23 @@ CheckStatus check_polyquery(AstPolyQuery *query) { } } - TypeMatch result = find_polymorphic_sln(&sln, param, query->function_header, query->pp_lookup, query->given, &err_msg); + TypeMatch result = find_polymorphic_sln(context, &sln, param, query->function_header, query->pp_lookup, query->given, &err_msg); switch (result) { case TYPE_MATCH_SUCCESS: goto poly_var_solved; case TYPE_MATCH_SPECIAL: - if (solved_something || query->successful_symres) { - return Check_Return_To_Symres; - } else { - return Check_Yield_Macro; - } + return Check_Yield; case TYPE_MATCH_YIELD: case TYPE_MATCH_FAILED: { - if (query->successful_symres || solved_something) continue; + if (solved_something) continue; - if (query->error_on_fail || context.cycle_detected) { - onyx_report_error(query->token->pos, Error_Critical, "Error solving for polymorphic variable '%b'.", param->poly_sym->token->text, param->poly_sym->token->length); - if (err_msg.text != NULL) onyx_submit_error(err_msg); - if (query->error_loc) onyx_report_error(query->error_loc->pos, Error_Critical, "Here is where the call is located."); // :ErrorMessage + if 
(query->error_on_fail || context->cycle_detected) { + ONYX_ERROR(query->token->pos, Error_Critical, "Error solving for polymorphic variable '%b'.", param->poly_sym->token->text, param->poly_sym->token->length); + if (err_msg.text != NULL) onyx_submit_error(context, err_msg); + if (query->error_loc) ONYX_ERROR(query->error_loc->pos, Error_Critical, "Here is where the call is located."); // :ErrorMessage } return Check_Failed; @@ -4034,15 +5643,17 @@ CheckStatus check_polyquery(AstPolyQuery *query) { poly_var_solved: solved_something = 1; bh_arr_push(query->slns, sln); - insert_poly_sln_into_scope(query->function_header->scope, &sln); + insert_poly_sln_into_scope(context, query->function_header->scope, &sln); poly_query_done: solved_count += 1; } + scope_leave(context); + if (solved_count != bh_arr_length(query->proc->poly_params)) { - if (solved_something || query->successful_symres) { - return Check_Return_To_Symres; + if (solved_something) { + return Check_Yield; } else { return Check_Failed; } @@ -4051,75 +5662,356 @@ CheckStatus check_polyquery(AstPolyQuery *query) { return Check_Complete; } -CheckStatus check_arbitrary_job(EntityJobData *job) { - TypeMatch result = job->func(job->job_data); +CHECK_FUNC(arbitrary_job, EntityJobData *job) { + TypeMatch result = job->func(context, job->job_data); switch (result) { case TYPE_MATCH_SUCCESS: return Check_Complete; case TYPE_MATCH_FAILED: return Check_Error; - case TYPE_MATCH_YIELD: return Check_Yield_Macro; - case TYPE_MATCH_SPECIAL: return Check_Yield_Macro; + case TYPE_MATCH_YIELD: return Check_Yield; + case TYPE_MATCH_SPECIAL: return Check_Yield; } return Check_Error; } -void check_entity(Entity* ent) { +CHECK_FUNC(js_node, AstJsNode *js) { + if (js->order_expr) { + CHECK(expression, &js->order_expr); + + TYPE_CHECK(&js->order_expr, context->types.basic[Basic_Kind_I32]) { + ERROR_(js->token->pos, "Expected an expression of type 'i32' for '#order', but got a '%s' instead.", type_get_name(context, 
js->order_expr->type)); + } + + b32 valid = 0; + i64 value = get_expression_integer_value(context, js->order_expr, &valid); + assert(valid); + + js->order = (u32) value; + } + + if (js->code) { + CHECK(expression, &js->code); + if (js->code->kind != Ast_Kind_StrLit) { + ERROR(js->token->pos, "Expected the provided code to be a string-literal, but it was not."); + } + } + + if (js->filepath) { + CHECK(expression, &js->filepath); + if (js->filepath->kind != Ast_Kind_StrLit) { + ERROR(js->token->pos, "Expected the provided file path to be a string-literal, but it was not."); + } + } + + return Check_Success; +} + +CHECK_FUNC(file_contents, AstFileContents* fc) { + CHECK(expression, &fc->filename_expr); + + if (fc->filename_expr->kind != Ast_Kind_StrLit) { + ERROR(fc->token->pos, "Expected given expression to be a compile-time stirng literal."); + } + + if (context->options->no_file_contents) { + ERROR(fc->token->pos, "#file_contents is disabled for this compilation."); + } + + return Check_Complete; +} + +CHECK_FUNC(foreign_block, AstForeignBlock *fb) { + if (fb->scope == NULL) + fb->scope = scope_create(context, context->checker.current_scope, fb->token->pos); + + CHECK(expression, &fb->module_name); + + if (fb->module_name->kind != Ast_Kind_StrLit) { + ERROR(fb->token->pos, "Expected module name to be a compile-time string literal."); + } + + bh_arr_each(Entity *, pent, fb->captured_entities) { + Entity *ent = *pent; + if (ent->type == Entity_Type_Function_Header) { + if (ent->function->body->next != NULL) { + ERROR(ent->function->token->pos, "Procedures declared in a #foreign block should not have bodies."); + } + + ent->function->foreign.import_name = (AstTyped *) make_string_literal(context, ent->function->intrinsic_name); + ent->function->foreign.module_name = fb->module_name; + ent->function->is_foreign = 1; + ent->function->is_foreign_dyncall = fb->uses_dyncall; + ent->function->entity = NULL; + ent->function->entity_header = NULL; + ent->function->entity_body 
= NULL; + + add_entities_for_node(&context->entities, NULL, (AstNode *) ent->function, ent->scope, ent->package); + continue; + } + + if (ent->type == Entity_Type_Binding) { + AstBinding* new_binding = onyx_ast_node_new(context->ast_alloc, sizeof(AstBinding), Ast_Kind_Binding); + new_binding->token = ent->binding->token; + new_binding->node = ent->binding->node; + + Entity e; + memset(&e, 0, sizeof(e)); + e.type = Entity_Type_Binding; + e.state = Entity_State_Introduce_Symbols; + e.binding = new_binding; + e.scope = fb->scope; + e.package = ent->package; + + entity_heap_insert(&context->entities, e); + } + + if (ent->type != Entity_Type_Function) { + entity_heap_insert_existing(&context->entities, ent); + } + } + + if (context->options->generate_foreign_info) { + // When generating foreign info, we have to pass this on to codegen + // so it can build the static data that goes in the binary. + return Check_Success; + + } else { + return Check_Complete; + } +} + +CHECK_FUNC(include, AstInclude* include) { + if (include->name != NULL) return Check_Goto_Parse; + + CHECK(expression, &include->name_node); + + if (include->name_node->kind != Ast_Kind_StrLit) { + ERROR_(include->token->pos, "Expected compile-time known string literal here. Got '%s'.", onyx_ast_node_kind_string(include->name_node->kind)); + } + + OnyxToken* str_token = include->name_node->token; + if (str_token != NULL) { + token_toggle_end(str_token); + include->name = bh_strdup(context->ast_alloc, str_token->text); + string_process_escape_seqs(include->name, include->name, strlen(include->name)); + token_toggle_end(str_token); + } + + return Check_Goto_Parse; +} + +CHECK_FUNC(import, AstImport* import) { + AstPackage* package = import->imported_package; + CHECK(package, package); + + if (import->import_package_itself) { + OnyxToken *name = bh_arr_last(package->path); + name = import->qualified_package_name ? 
import->qualified_package_name : name; + + symbol_introduce(context, context->checker.current_entity->scope, name, (AstNode *) package); + } + + if (import->specified_imports) { + package_track_use_package(context, package->package, import->entity); + + Scope *import_scope = package->package->scope; + if (import_scope == context->checker.current_scope) return Check_Complete; + + // use X { * } + if (import->only == NULL) { + OnyxFilePos pos = import->token->pos; + scope_include(context, context->checker.current_scope, import_scope, pos); + return Check_Complete; + } + + + // use X { a, b, c } + bh_arr_each(QualifiedImport, qi, import->only) { + AstNode* imported = symbol_resolve(context, import_scope, qi->symbol_name); + if (imported == NULL) { + YIELD_(qi->symbol_name->pos, + "The symbol '%b' was not found package '%s'.", + qi->symbol_name->text, qi->symbol_name->length, package->package->name); + } + + symbol_introduce(context, context->checker.current_scope, qi->as_name, imported); + } + } + + return Check_Complete; +} + +CHECK_FUNC(compiler_extension, AstCompilerExtension *ext) { + if (context->options->no_compiler_extensions) { + ERROR(ext->token->pos, "Compiler extensions are disabled in this compilation."); + } + + token_toggle_end(ext->name); + TypeMatch status = compiler_extension_start(context, ext->name->text, ext->token->pos.filename, context->checker.current_entity, &ext->extension_id); + token_toggle_end(ext->name); + + if (status == TYPE_MATCH_FAILED) { + ERROR(ext->token->pos, "Failed to initialize this compiler extension."); + } + + if (status == TYPE_MATCH_YIELD) { + return Check_Yield; + } + + return Check_Complete; +} + +CHECK_FUNC(proc_expansion, AstProceduralExpansion **pexp, ProceduralMacroExpansionKind exp_kind) { + AstProceduralExpansion *exp = *pexp; + CHECK(expression, &exp->proc_macro); + + exp->proc_macro = (AstTyped *) strip_aliases((AstNode *) exp->proc_macro); + + if (exp->proc_macro->kind != Ast_Kind_Procedural_Macro) { + 
YIELD_ERROR_(exp->token->pos, "Procedural macro expansion expected a procedural macro before the '!', but got '%s' instead.", + onyx_ast_node_kind_string(exp->proc_macro->kind)); + } + + AstProceduralMacro *proc_macro = (AstProceduralMacro *) exp->proc_macro; + + token_toggle_end(proc_macro->token); + // HACK store this differently so a copy is not necessary here. + char *macro_name = bh_strdup(context->scratch_alloc, proc_macro->token->text); + token_toggle_end(proc_macro->token); + + AstNode *expansion = NULL; + + TypeMatch expansion_state = compiler_extension_expand_macro( + context, + proc_macro->extension->extension_id, + exp_kind, + macro_name, + exp->expansion_body, + context->checker.current_entity, + &expansion, + &exp->expansion_id, + context->cycle_almost_detected > 0); + + if (expansion_state == TYPE_MATCH_FAILED) { + ERROR(exp->token->pos, "Procedural macro expansion failed. See other errors generated by procedural macro."); + } + + if (expansion_state == TYPE_MATCH_YIELD) { + return Check_Yield; + } + + if (expansion == NULL) { + if (exp_kind == PMEK_Expression) { + ERROR(exp->token->pos, "Expected this procedural macro to expand to an expression, but it expanded to nothing."); + } + + if (exp_kind == PMEK_Statement) { + *(AstNode **) pexp = exp->next; + CHECK(expression, (AstTyped **) pexp); + return Check_Success; + } + + // Top-level expansions do not turn into nodes, but instead will become other entities + // that will get queued separately. + return Check_Complete; + } + + // Stitch the expansion into the tree. 
+ AstNode *last_expanded_node = expansion; + while (last_expanded_node->next != NULL) last_expanded_node = last_expanded_node->next; + last_expanded_node->next = (*pexp)->next; + + *pexp = (AstProceduralExpansion *) expansion; + switch (exp_kind) { + case PMEK_Expression: CHECK(expression, (AstTyped **) pexp); break; + case PMEK_Statement: CHECK(statement, (AstNode **) pexp); break; + case PMEK_Top_Level: return Check_Complete; + } + + return Check_Success; +} + +void check_entity(Context *context, Entity* ent) { CheckStatus cs = Check_Success; - context.checker.current_entity = ent; - context.checker.all_checks_are_final = 1; + + context->checker.current_entity = ent; + context->checker.all_checks_are_final = 1; + + bh_arr_clear(context->checker.scope_stack); + clear_modes(context); + + if (ent->scope) scope_enter(context, ent->scope); switch (ent->type) { + case Entity_Type_Binding: { + symbol_introduce(context, context->checker.current_scope, ent->binding->token, ent->binding->node); + track_documentation_for_symbol_info(context, ent->binding->node, ent->binding); + + onyx_docs_submit(context->doc_info, ent->binding); + + package_reinsert_use_packages(context, ent->package); + + cs = Check_Complete; + break; + } + + case Entity_Type_Load_Path: + case Entity_Type_Load_File: cs = check_include(context, ent->include); break; + case Entity_Type_Foreign_Function_Header: - case Entity_Type_Function_Header: cs = check_function_header(ent->function); break; - case Entity_Type_Temp_Function_Header: cs = check_temp_function_header(ent->function); break; - case Entity_Type_Function: cs = check_function(ent->function); break; - case Entity_Type_Overloaded_Function: cs = check_overloaded_function(ent->overloaded_function); break; - case Entity_Type_Global: cs = check_global(ent->global); break; - case Entity_Type_Struct_Member_Default: cs = check_struct_defaults((AstStructType *) ent->type_alias); break; - case Entity_Type_Memory_Reservation_Type: cs = 
check_memres_type(ent->mem_res); break; - case Entity_Type_Memory_Reservation: cs = check_memres(ent->mem_res); break; - case Entity_Type_Static_If: cs = check_static_if(ent->static_if); break; - case Entity_Type_Macro: cs = check_macro(ent->macro); break; - case Entity_Type_Constraint_Check: cs = check_constraint(ent->constraint); break; - case Entity_Type_Polymorph_Query: cs = check_polyquery(ent->poly_query); break; - case Entity_Type_Enum_Value: cs = check_expression(&ent->enum_value->value); break; - case Entity_Type_Process_Directive: cs = check_process_directive((AstNode *) ent->expr); break; - case Entity_Type_Interface: cs = check_interface(ent->interface); break; + case Entity_Type_Function_Header: cs = check_function_header(context, ent->function); break; + case Entity_Type_Temp_Function_Header: cs = check_temp_function_header(context, ent->function); break; + case Entity_Type_Function: cs = check_function(context, ent->function); break; + case Entity_Type_Overloaded_Function: cs = check_overloaded_function(context, ent->overloaded_function); break; + case Entity_Type_Global_Header: cs = check_global_header(context, ent->global); break; + case Entity_Type_Global: cs = check_global(context, ent->global); break; + case Entity_Type_Struct_Member_Default: cs = check_struct_defaults(context, (AstStructType *) ent->type_alias); break; + case Entity_Type_Memory_Reservation_Type: cs = check_memres_type(context, ent->mem_res); break; + case Entity_Type_Memory_Reservation: cs = check_memres(context, ent->mem_res); break; + case Entity_Type_Static_If: cs = check_static_if(context, ent->static_if); break; + case Entity_Type_Macro: cs = check_macro(context, ent->macro); break; + case Entity_Type_Constraint_Check: cs = check_constraint(context, ent->constraint); break; + case Entity_Type_Polymorphic_Proc: cs = check_polyproc(context, ent->poly_proc); break; + case Entity_Type_Polymorph_Query: cs = check_polyquery(context, ent->poly_query); break; + case 
Entity_Type_Enum: cs = check_enum(context, ent->enum_type); break; + case Entity_Type_Enum_Value: cs = check_expression(context, &ent->enum_value->value); break; + case Entity_Type_Process_Directive: cs = check_process_directive(context, (AstNode *) ent->expr); break; + case Entity_Type_Interface: cs = check_interface(context, ent->interface); break; case Entity_Type_String_Literal: case Entity_Type_Expression: - cs = check_expression(&ent->expr); - resolve_expression_type(ent->expr); + cs = check_expression(context, &ent->expr); + resolve_expression_type(context, ent->expr); if (cs == Check_Success) cs = Check_Complete; break; case Entity_Type_Type_Alias: if (ent->type_alias->kind == Ast_Kind_Struct_Type) - cs = check_struct((AstStructType *) ent->type_alias); + cs = check_struct(context, (AstStructType *) ent->type_alias); else if (ent->type_alias->kind == Ast_Kind_Union_Type) - cs = check_union((AstUnionType *) ent->type_alias); + cs = check_union(context, (AstUnionType *) ent->type_alias); else - cs = check_type(&ent->type_alias); + cs = check_type(context, &ent->type_alias); break; - case Entity_Type_File_Contents: - if (context.options->no_file_contents) { - onyx_report_error(ent->expr->token->pos, Error_Critical, "#file_contents is disabled for this compilation."); - } - cs = Check_Complete; - break; - - case Entity_Type_Job: cs = check_arbitrary_job(ent->job_data); break; + case Entity_Type_File_Contents: cs = check_file_contents(context, ent->file_contents); break; + case Entity_Type_Job: cs = check_arbitrary_job(context, ent->job_data); break; + case Entity_Type_JS: cs = check_js_node(context, ent->js); break; + case Entity_Type_Foreign_Block: cs = check_foreign_block(context, ent->foreign_block); break; + case Entity_Type_Import: cs = check_import(context, ent->import); break; + + case Entity_Type_Compiler_Extension: cs = check_compiler_extension(context, ent->compiler_extension); break; + case Entity_Type_Procedural_Expansion: cs = 
check_proc_expansion(context, &ent->proc_expansion, PMEK_Top_Level); break; default: break; } switch (cs) { - case Check_Yield_Macro: ent->macro_attempts++; break; + case Check_Yield: ent->macro_attempts++; break; case Check_Success: ent->state = Entity_State_Code_Gen; goto clear_attempts; case Check_Complete: ent->state = Entity_State_Finalized; goto clear_attempts; - case Check_Return_To_Symres: ent->state = Entity_State_Resolve_Symbols; goto clear_attempts; + case Check_Goto_Parse: ent->state = Entity_State_Parse; goto clear_attempts; case Check_Failed: ent->state = Entity_State_Failed; goto clear_attempts; clear_attempts: @@ -4128,4 +6020,7 @@ void check_entity(Entity* ent) { default: break; } + + context->checker.current_scope = NULL; + context->checker.current_entity = NULL; } diff --git a/compiler/src/clone.c b/compiler/src/clone.c index c457bb35e..17b29552f 100644 --- a/compiler/src/clone.c +++ b/compiler/src/clone.c @@ -2,13 +2,10 @@ #include "parser.h" #include "utils.h" -// Weird flags that shouldn't be used too often because they complicate things -static b32 dont_copy_structs = 0; - -static inline b32 should_clone(AstNode* node) { +static inline b32 should_clone(Context *context, AstNode* node) { if (node->flags & Ast_Flag_No_Clone) return 0; - if (dont_copy_structs) { + if (context->cloner.dont_copy_structs) { if (node->kind == Ast_Kind_Struct_Type) return 0; if (node->kind == Ast_Kind_Function) return 0; if (node->kind == Ast_Kind_Polymorphic_Proc) return 0; @@ -122,6 +119,7 @@ static inline i32 ast_kind_to_size(AstNode* node) { case Ast_Kind_Capture_Local: return sizeof(AstCaptureLocal); case Ast_Kind_Union_Type: return sizeof(AstUnionType); case Ast_Kind_Union_Variant: return sizeof(AstUnionVariant); + case Ast_Kind_Procedural_Expansion: return sizeof(AstProceduralExpansion); default: break; } @@ -129,28 +127,26 @@ static inline i32 ast_kind_to_size(AstNode* node) { return 0; } -static bh_arr(AstNode *) captured_entities=NULL; - -AstNode* 
ast_clone_with_captured_entities(bh_allocator a, void* n, bh_arr(AstNode *)* ents) { - captured_entities = *ents; +AstNode* ast_clone_with_captured_entities(Context *context, void* n, bh_arr(AstNode *)* ents) { + context->cloner.captured_entities = *ents; - AstNode* cloned = ast_clone(a, n); + AstNode* cloned = ast_clone(context, n); - *ents = captured_entities; - captured_entities = NULL; + *ents = context->cloner.captured_entities; + context->cloner.captured_entities = NULL; return cloned; } -AstNode* ast_clone_list(bh_allocator a, void* n) { +AstNode* ast_clone_list(Context *context, void* n) { AstNode* node = (AstNode *) n; if (node == NULL) return NULL; - AstNode* root = ast_clone(a, node); + AstNode* root = ast_clone(context, node); AstNode* curr = root->next; AstNode** insertion = &root->next; while (curr != NULL) { - curr = ast_clone(a, curr); + curr = ast_clone(context, curr); *insertion = curr; insertion = &curr->next; curr = curr->next; @@ -160,30 +156,31 @@ AstNode* ast_clone_list(bh_allocator a, void* n) { } #define E(ent) do { \ - assert(captured_entities); \ + assert(context->cloner.captured_entities); \ ent->entity = NULL; \ - bh_arr_push(captured_entities, (AstNode *) ent); \ + bh_arr_push(context->cloner.captured_entities, (AstNode *) ent); \ } while (0); -#define C(nt, mname) ((nt *) nn)->mname = (void *) ast_clone(a, ((nt *) node)->mname); +#define C(nt, mname) ((nt *) nn)->mname = (void *) ast_clone(context, ((nt *) node)->mname); // NOTE: Using void* to avoid a lot of unnecessary casting -AstNode* ast_clone(bh_allocator a, void* n) { +AstNode* ast_clone(Context *context, void* n) { AstNode* node = (AstNode *) n; if (node == NULL) return NULL; - if (!should_clone(node)) return node; + if (!should_clone(context, node)) return node; - static int clone_depth = 0; - clone_depth++; + context->cloner.clone_depth++; i32 node_size = ast_kind_to_size(node); // bh_printf("Cloning %s with size %d\n", onyx_ast_node_kind_string(node->kind), node_size); - 
AstNode* nn = onyx_ast_node_new(a, node_size, node->kind); + AstNode* nn = onyx_ast_node_new(context->ast_alloc, node_size, node->kind); memmove(nn, node, node_size); + nn->flags &= ~(Ast_Flag_Has_Been_Checked | Ast_Flag_Has_Been_Symres); + switch ((u16) node->kind) { case Ast_Kind_Binary_Op: case Ast_Kind_Pipe: @@ -204,7 +201,7 @@ AstNode* ast_clone(bh_allocator a, void* n) { case Ast_Kind_Call: C(AstCall, callee); - arguments_deep_clone(a, &((AstCall *) nn)->args, &((AstCall *) node)->args); + arguments_deep_clone(context, &((AstCall *) nn)->args, &((AstCall *) node)->args); break; case Ast_Kind_Argument: @@ -241,9 +238,9 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstStructLiteral* st = (AstStructLiteral *) node; AstStructLiteral* dt = (AstStructLiteral *) nn; - dt->stnode = (AstTyped *) ast_clone(a, st->stnode); + dt->stnode = (AstTyped *) ast_clone(context, st->stnode); - arguments_deep_clone(a, &dt->args, &st->args); + arguments_deep_clone(context, &dt->args, &st->args); break; } @@ -251,12 +248,12 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstArrayLiteral* st = (AstArrayLiteral *) node; AstArrayLiteral* dt = (AstArrayLiteral *) nn; - dt->atnode = (AstTyped *) ast_clone(a, st->atnode); + dt->atnode = (AstTyped *) ast_clone(context, st->atnode); dt->values = NULL; - bh_arr_new(global_heap_allocator, dt->values, bh_arr_length(st->values)); + bh_arr_new(context->gp_alloc, dt->values, bh_arr_length(st->values)); bh_arr_each(AstTyped *, val, st->values) - bh_arr_push(dt->values, (AstTyped *) ast_clone(a, *val)); + bh_arr_push(dt->values, (AstTyped *) ast_clone(context, *val)); break; } @@ -272,7 +269,7 @@ AstNode* ast_clone(bh_allocator a, void* n) { break; case Ast_Kind_Block: - ((AstBlock *) nn)->body = ast_clone_list(a, ((AstBlock *) node)->body); + ((AstBlock *) nn)->body = ast_clone_list(context, ((AstBlock *) node)->body); ((AstBlock *) nn)->quoted_block_capture_scope = NULL; break; @@ -288,7 +285,7 @@ AstNode* ast_clone(bh_allocator a, void* n) { 
case Ast_Kind_If: case Ast_Kind_While: - ((AstIfWhile *) nn)->initialization = ast_clone_list(a, ((AstIfWhile *) node)->initialization); + ((AstIfWhile *) nn)->initialization = ast_clone_list(context, ((AstIfWhile *) node)->initialization); //fallthrough case Ast_Kind_Static_If: @@ -316,9 +313,9 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstSwitchCase *sw = (AstSwitchCase *) node; dw->values = NULL; - bh_arr_new(global_heap_allocator, dw->values, bh_arr_length(sw->values)); + bh_arr_new(context->gp_alloc, dw->values, bh_arr_length(sw->values)); bh_arr_each(AstTyped *, value, sw->values) - bh_arr_push(dw->values, (AstTyped *) ast_clone(a, *value)); + bh_arr_push(dw->values, (AstTyped *) ast_clone(context, *value)); break; } @@ -327,7 +324,7 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstSwitch* dw = (AstSwitch *) nn; AstSwitch* sw = (AstSwitch *) node; - dw->initialization = ast_clone_list(a, sw->initialization); + dw->initialization = ast_clone_list(context, sw->initialization); C(AstSwitch, expr); @@ -370,24 +367,24 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstStructType* ss = (AstStructType *) node; ds->members = NULL; - bh_arr_new(global_heap_allocator, ds->members, bh_arr_length(ss->members)); + bh_arr_new(context->gp_alloc, ds->members, bh_arr_length(ss->members)); bh_arr_each(AstStructMember *, smem, ss->members) { - bh_arr_push(ds->members, (AstStructMember *) ast_clone(a, *smem)); + bh_arr_push(ds->members, (AstStructMember *) ast_clone(context, *smem)); } ds->meta_tags = NULL; - bh_arr_new(global_heap_allocator, ds->meta_tags, bh_arr_length(ss->meta_tags)); + bh_arr_new(context->gp_alloc, ds->meta_tags, bh_arr_length(ss->meta_tags)); bh_arr_each(AstTyped *, tag, ss->meta_tags) { - bh_arr_push(ds->meta_tags, (AstTyped *) ast_clone(a, *tag)); + bh_arr_push(ds->meta_tags, (AstTyped *) ast_clone(context, *tag)); } if (ss->constraints.constraints) { memset(&ds->constraints, 0, sizeof(ConstraintContext)); - bh_arr_new(global_heap_allocator, 
ds->constraints.constraints, bh_arr_length(ss->constraints.constraints)); + bh_arr_new(context->gp_alloc, ds->constraints.constraints, bh_arr_length(ss->constraints.constraints)); bh_arr_each(AstConstraint *, constraint, ss->constraints.constraints) { - bh_arr_push(ds->constraints.constraints, (AstConstraint *) ast_clone(a, (AstNode *) *constraint)); + bh_arr_push(ds->constraints.constraints, (AstConstraint *) ast_clone(context, (AstNode *) *constraint)); } } @@ -403,9 +400,9 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstStructMember *ss = (AstStructMember *) node; ds->meta_tags = NULL; - bh_arr_new(global_heap_allocator, ds->meta_tags, bh_arr_length(ss->meta_tags)); + bh_arr_new(context->gp_alloc, ds->meta_tags, bh_arr_length(ss->meta_tags)); bh_arr_each(AstTyped *, tag, ss->meta_tags) { - bh_arr_push(ds->meta_tags, (AstTyped *) ast_clone(a, *tag)); + bh_arr_push(ds->meta_tags, (AstTyped *) ast_clone(context, *tag)); } break; @@ -416,24 +413,24 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstUnionType* su = (AstUnionType *) node; du->variants = NULL; - bh_arr_new(global_heap_allocator, du->variants, bh_arr_length(su->variants)); + bh_arr_new(context->gp_alloc, du->variants, bh_arr_length(su->variants)); bh_arr_each(AstUnionVariant *, uv, su->variants) { - bh_arr_push(du->variants, (AstUnionVariant *) ast_clone(a, *uv)); + bh_arr_push(du->variants, (AstUnionVariant *) ast_clone(context, *uv)); } du->meta_tags = NULL; - bh_arr_new(global_heap_allocator, du->meta_tags, bh_arr_length(su->meta_tags)); + bh_arr_new(context->gp_alloc, du->meta_tags, bh_arr_length(su->meta_tags)); bh_arr_each(AstTyped *, tag, su->meta_tags) { - bh_arr_push(du->meta_tags, (AstTyped *) ast_clone(a, *tag)); + bh_arr_push(du->meta_tags, (AstTyped *) ast_clone(context, *tag)); } if (su->constraints.constraints) { memset(&du->constraints, 0, sizeof(ConstraintContext)); - bh_arr_new(global_heap_allocator, du->constraints.constraints, bh_arr_length(su->constraints.constraints)); + 
bh_arr_new(context->gp_alloc, du->constraints.constraints, bh_arr_length(su->constraints.constraints)); bh_arr_each(AstConstraint *, constraint, su->constraints.constraints) { - bh_arr_push(du->constraints.constraints, (AstConstraint *) ast_clone(a, (AstNode *) *constraint)); + bh_arr_push(du->constraints.constraints, (AstConstraint *) ast_clone(context, (AstNode *) *constraint)); } } @@ -448,9 +445,9 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstUnionVariant *su = (AstUnionVariant *) node; du->meta_tags = NULL; - bh_arr_new(global_heap_allocator, du->meta_tags, bh_arr_length(su->meta_tags)); + bh_arr_new(context->gp_alloc, du->meta_tags, bh_arr_length(su->meta_tags)); bh_arr_each(AstTyped *, tag, su->meta_tags) { - bh_arr_push(du->meta_tags, (AstTyped *) ast_clone(a, *tag)); + bh_arr_push(du->meta_tags, (AstTyped *) ast_clone(context, *tag)); } break; @@ -460,12 +457,12 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstPolyCallType* pcd = (AstPolyCallType *) nn; AstPolyCallType* pcs = (AstPolyCallType *) node; - pcd->callee = (AstType *) ast_clone(a, pcs->callee); + pcd->callee = (AstType *) ast_clone(context, pcs->callee); pcd->params = NULL; - bh_arr_new(global_heap_allocator, pcd->params, bh_arr_length(pcs->params)); + bh_arr_new(context->gp_alloc, pcd->params, bh_arr_length(pcs->params)); bh_arr_each(AstNode *, param, pcs->params) { - bh_arr_push(pcd->params, ast_clone(a, *param)); + bh_arr_push(pcd->params, ast_clone(context, *param)); } break; @@ -476,10 +473,10 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstCompoundType* cs = (AstCompoundType *) node; cd->types = NULL; - bh_arr_new(global_heap_allocator, cd->types, bh_arr_length(cs->types)); + bh_arr_new(context->gp_alloc, cd->types, bh_arr_length(cs->types)); bh_arr_each(AstType *, type, cs->types) { - bh_arr_push(cd->types, (AstType *) ast_clone(a, (AstNode *) *type)); + bh_arr_push(cd->types, (AstType *) ast_clone(context, (AstNode *) *type)); } break; } @@ -488,7 +485,7 @@ AstNode* 
ast_clone(bh_allocator a, void* n) { C(AstFunctionType, return_type); ((AstFunctionType *) nn)->param_count = ((AstFunctionType *) node)->param_count; fori (i, 0, (i64) ((AstFunctionType *) nn)->param_count) { - ((AstFunctionType *) nn)->params[i] = (AstType *) ast_clone(a, ((AstFunctionType *) node)->params[i]); + ((AstFunctionType *) nn)->params[i] = (AstType *) ast_clone(context, ((AstFunctionType *) node)->params[i]); } break; @@ -504,7 +501,7 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstFunction* sf = (AstFunction *) node; // Check if we are cloning a function inside of a function. - if (clone_depth > 1) { + if (context->cloner.clone_depth > 1) { // If we are, and the inner function has a scope, this means that // the inner function does not capture anything, and is not polymorphic. // Therefore, it should be treated as a normal function and not cloned @@ -514,7 +511,7 @@ AstNode* ast_clone(bh_allocator a, void* n) { // either polymorphic and/or it has captures. In either case, we have // to clone the function internally below. 
if (df->scope != NULL) { - clone_depth--; + context->cloner.clone_depth--; return node; } } @@ -526,49 +523,57 @@ AstNode* ast_clone(bh_allocator a, void* n) { assert(df->scope == NULL); df->nodes_that_need_entities_after_clone = NULL; - bh_arr_new(global_heap_allocator, df->nodes_that_need_entities_after_clone, 1); + bh_arr_new(context->gp_alloc, df->nodes_that_need_entities_after_clone, 1); - bh_arr(AstNode *) old_captured_entities = captured_entities; - captured_entities = df->nodes_that_need_entities_after_clone; + bh_arr(AstNode *) old_captured_entities = context->cloner.captured_entities; + context->cloner.captured_entities = df->nodes_that_need_entities_after_clone; - df->return_type = (AstType *) ast_clone(a, sf->return_type); - df->body = (AstBlock *) ast_clone(a, sf->body); - df->captures = (AstCaptureBlock *) ast_clone(a, sf->captures); + df->return_type = (AstType *) ast_clone(context, sf->return_type); + df->body = (AstBlock *) ast_clone(context, sf->body); + df->captures = (AstCaptureBlock *) ast_clone(context, sf->captures); - df->nodes_that_need_entities_after_clone = captured_entities; - captured_entities = old_captured_entities; + df->nodes_that_need_entities_after_clone = context->cloner.captured_entities; + context->cloner.captured_entities = old_captured_entities; df->params = NULL; - bh_arr_new(context.ast_alloc, df->params, bh_arr_length(sf->params)); + bh_arr_new(context->ast_alloc, df->params, bh_arr_length(sf->params)); bh_arr_each(AstParam, param, sf->params) { AstParam new_param = { 0 }; - dont_copy_structs = 1; - new_param.local = (AstLocal *) ast_clone(a, param->local); + context->cloner.dont_copy_structs = 1; + new_param.local = (AstLocal *) ast_clone(context, param->local); new_param.local->flags &= ~Ast_Flag_Param_Symbol_Dirty; - new_param.default_value = (AstTyped *) ast_clone(a, param->default_value); + new_param.default_value = (AstTyped *) ast_clone(context, param->default_value); new_param.use_processed = 0; - dont_copy_structs 
= 0; + context->cloner.dont_copy_structs = 0; new_param.vararg_kind = param->vararg_kind; new_param.is_used = param->is_used; bh_arr_push(df->params, new_param); } + df->named_return_locals = NULL; + if (sf->named_return_locals) { + bh_arr_new(context->ast_alloc, df->named_return_locals, bh_arr_length(sf->named_return_locals)); + bh_arr_each(AstLocal *, named_return, sf->named_return_locals) { + bh_arr_push(df->named_return_locals, (AstLocal *) ast_clone(context, (AstNode *) *named_return)); + } + } + if (sf->constraints.constraints) { memset(&df->constraints, 0, sizeof(ConstraintContext)); - bh_arr_new(context.ast_alloc, df->constraints.constraints, bh_arr_length(sf->constraints.constraints)); + bh_arr_new(context->ast_alloc, df->constraints.constraints, bh_arr_length(sf->constraints.constraints)); bh_arr_each(AstConstraint *, constraint, sf->constraints.constraints) { - bh_arr_push(df->constraints.constraints, (AstConstraint *) ast_clone(a, (AstNode *) *constraint)); + bh_arr_push(df->constraints.constraints, (AstConstraint *) ast_clone(context, (AstNode *) *constraint)); } } if (sf->tags) { - bh_arr_new(context.ast_alloc, df->tags, bh_arr_length(sf->tags)); + bh_arr_new(context->ast_alloc, df->tags, bh_arr_length(sf->tags)); bh_arr_each(AstTyped *, pexpr, sf->tags) { - bh_arr_push(df->tags, (AstTyped *) ast_clone(a, (AstNode *) *pexpr)); + bh_arr_push(df->tags, (AstTyped *) ast_clone(context, (AstNode *) *pexpr)); } } @@ -576,7 +581,7 @@ AstNode* ast_clone(bh_allocator a, void* n) { df->scope_to_lookup_captured_values = NULL; } - if (clone_depth > 1 && captured_entities) { + if (context->cloner.clone_depth > 1 && context->cloner.captured_entities) { sf->flags |= Ast_Flag_Function_Is_Lambda_Inside_PolyProc; df->flags &= ~Ast_Flag_Function_Is_Lambda_Inside_PolyProc; E(df); @@ -592,10 +597,10 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstConstraint* sc = (AstConstraint *) node; dc->args = NULL; - bh_arr_new(global_heap_allocator, dc->args, 
bh_arr_length(sc->args)); + bh_arr_new(context->gp_alloc, dc->args, bh_arr_length(sc->args)); bh_arr_each(AstTyped *, arg, sc->args) { - bh_arr_push(dc->args, (AstTyped *) ast_clone(a, (AstNode *) *arg)); + bh_arr_push(dc->args, (AstTyped *) ast_clone(context, (AstNode *) *arg)); } dc->phase = Constraint_Phase_Waiting_To_Be_Queued; @@ -606,17 +611,17 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstDirectiveSolidify* dd = (AstDirectiveSolidify *) nn; AstDirectiveSolidify* sd = (AstDirectiveSolidify *) node; - dd->poly_proc = (AstFunction *) ast_clone(a, (AstNode *) sd->poly_proc); + dd->poly_proc = (AstFunction *) ast_clone(context, (AstNode *) sd->poly_proc); dd->resolved_proc = NULL; dd->known_polyvars = NULL; - bh_arr_new(global_heap_allocator, dd->known_polyvars, bh_arr_length(sd->known_polyvars)); + bh_arr_new(context->gp_alloc, dd->known_polyvars, bh_arr_length(sd->known_polyvars)); bh_arr_each(AstPolySolution, sln, sd->known_polyvars) { AstPolySolution new_sln; new_sln.kind = sln->kind; - new_sln.poly_sym = (AstNode *) ast_clone(a, (AstNode *) sln->poly_sym); - new_sln.ast_type = (AstType *) ast_clone(a, (AstNode *) sln->ast_type); + new_sln.poly_sym = (AstNode *) ast_clone(context, (AstNode *) sln->poly_sym); + new_sln.ast_type = (AstType *) ast_clone(context, (AstNode *) sln->ast_type); bh_arr_push(dd->known_polyvars, new_sln); } @@ -628,10 +633,10 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstCompound* cs = (AstCompound *) node; cd->exprs = NULL; - bh_arr_new(global_heap_allocator, cd->exprs, bh_arr_length(cs->exprs)); + bh_arr_new(context->gp_alloc, cd->exprs, bh_arr_length(cs->exprs)); bh_arr_each(AstTyped *, expr, cs->exprs) { - bh_arr_push(cd->exprs, (AstTyped *) ast_clone(a, (AstNode *) *expr)); + bh_arr_push(cd->exprs, (AstTyped *) ast_clone(context, (AstNode *) *expr)); } break; } @@ -652,10 +657,10 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstDirectiveInsert* id = (AstDirectiveInsert *) nn; AstDirectiveInsert* is = 
(AstDirectiveInsert *) node; id->binding_exprs = NULL; - bh_arr_new(global_heap_allocator, id->binding_exprs, bh_arr_length(is->binding_exprs)); + bh_arr_new(context->gp_alloc, id->binding_exprs, bh_arr_length(is->binding_exprs)); bh_arr_each(AstTyped *, expr, is->binding_exprs) { - bh_arr_push(id->binding_exprs, (AstTyped *) ast_clone(a, (AstNode *) *expr)); + bh_arr_push(id->binding_exprs, (AstTyped *) ast_clone(context, (AstNode *) *expr)); } break; @@ -688,10 +693,10 @@ AstNode* ast_clone(bh_allocator a, void* n) { AstCaptureBlock* cs = (AstCaptureBlock *) node; cd->captures = NULL; - bh_arr_new(global_heap_allocator, cd->captures, bh_arr_length(cs->captures)); + bh_arr_new(context->gp_alloc, cd->captures, bh_arr_length(cs->captures)); bh_arr_each(AstCaptureLocal *, expr, cs->captures) { - bh_arr_push(cd->captures, (AstCaptureLocal *) ast_clone(a, (AstNode *) *expr)); + bh_arr_push(cd->captures, (AstCaptureLocal *) ast_clone(context, (AstNode *) *expr)); } break; } @@ -699,38 +704,42 @@ AstNode* ast_clone(bh_allocator a, void* n) { case Ast_Kind_Capture_Local: C(AstCaptureLocal, type_node); break; + + case Ast_Kind_Procedural_Expansion: + C(AstProceduralExpansion, proc_macro); + break; } - clone_depth--; + context->cloner.clone_depth--; return nn; } #undef C -AstFunction* clone_function_header(bh_allocator a, AstFunction* func) { +AstFunction* clone_function_header(Context *context, AstFunction* func) { if (func->kind != Ast_Kind_Function && func->kind != Ast_Kind_Polymorphic_Proc) return NULL; if (func->is_foreign) return func; - AstFunction* new_func = onyx_ast_node_new(a, sizeof(AstFunction), func->kind); + AstFunction* new_func = onyx_ast_node_new(context->ast_alloc, sizeof(AstFunction), func->kind); memmove(new_func, func, sizeof(AstFunction)); assert(new_func->scope == NULL); convert_polyproc_to_function(new_func); - new_func->return_type = (AstType *) ast_clone(a, func->return_type); + new_func->return_type = (AstType *) ast_clone(context, 
func->return_type); new_func->params = NULL; - bh_arr_new(global_heap_allocator, new_func->params, bh_arr_length(func->params)); + bh_arr_new(context->gp_alloc, new_func->params, bh_arr_length(func->params)); bh_arr_each(AstParam, param, func->params) { AstParam new_param; - dont_copy_structs = 1; - new_param.local = (AstLocal *) ast_clone(a, param->local); + context->cloner.dont_copy_structs = 1; + new_param.local = (AstLocal *) ast_clone(context, param->local); new_param.local->flags &= ~Ast_Flag_Param_Symbol_Dirty; - new_param.default_value = (AstTyped *) ast_clone(a, param->default_value); + new_param.default_value = (AstTyped *) ast_clone(context, param->default_value); new_param.use_processed = 0; - dont_copy_structs = 0; + context->cloner.dont_copy_structs = 0; new_param.vararg_kind = param->vararg_kind; new_param.is_used = param->is_used; @@ -739,10 +748,10 @@ AstFunction* clone_function_header(bh_allocator a, AstFunction* func) { if (func->constraints.constraints) { memset(&new_func->constraints, 0, sizeof(ConstraintContext)); - bh_arr_new(global_heap_allocator, new_func->constraints.constraints, bh_arr_length(func->constraints.constraints)); + bh_arr_new(context->gp_alloc, new_func->constraints.constraints, bh_arr_length(func->constraints.constraints)); bh_arr_each(AstConstraint *, constraint, func->constraints.constraints) { - bh_arr_push(new_func->constraints.constraints, (AstConstraint *) ast_clone(a, (AstNode *) *constraint)); + bh_arr_push(new_func->constraints.constraints, (AstConstraint *) ast_clone(context, (AstNode *) *constraint)); } } @@ -751,16 +760,16 @@ AstFunction* clone_function_header(bh_allocator a, AstFunction* func) { // Clones a function body from a given function. It is assumed that `dest` is // a function from `clone_function_header`. 
-void clone_function_body(bh_allocator a, AstFunction* dest, AstFunction* source) { +void clone_function_body(Context *context, AstFunction* dest, AstFunction* source) { if (dest->kind != Ast_Kind_Function) return; if (source->kind != Ast_Kind_Polymorphic_Proc && source->kind != Ast_Kind_Function) return; dest->nodes_that_need_entities_after_clone = NULL; - bh_arr_new(global_heap_allocator, dest->nodes_that_need_entities_after_clone, 1); - captured_entities = dest->nodes_that_need_entities_after_clone; + bh_arr_new(context->gp_alloc, dest->nodes_that_need_entities_after_clone, 1); + context->cloner.captured_entities = dest->nodes_that_need_entities_after_clone; - dest->body = (AstBlock *) ast_clone(a, source->body); + dest->body = (AstBlock *) ast_clone(context, source->body); - dest->nodes_that_need_entities_after_clone = captured_entities; - captured_entities = NULL; + dest->nodes_that_need_entities_after_clone = context->cloner.captured_entities; + context->cloner.captured_entities = NULL; } diff --git a/compiler/src/doc.c b/compiler/src/doc.c index 84f4539bc..abd912214 100644 --- a/compiler/src/doc.c +++ b/compiler/src/doc.c @@ -2,55 +2,6 @@ #include "utils.h" #include "types.h" -static i32 sort_tags(const void* a, const void* b) { - AstNode *n1 = *(AstNode **) a; - AstNode *n2 = *(AstNode **) b; - - i32 diff; - if ((diff = strncmp(n1->token->text, n2->token->text, n2->token->length))) { - return diff; - } - - return n1->token->length - n2->token->length; -} - - -void onyx_docs_emit_tags(char *dest) { - bh_file tags_file; - if (bh_file_create(&tags_file, dest) != BH_FILE_ERROR_NONE) { - bh_printf("Cannot create '%s'.\n", dest); - return; - } - - bh_fprintf(&tags_file, "!_TAG_FILE_FORMAT\t2\n"); - bh_fprintf(&tags_file, "!_TAG_FILE_SORTED\t1\n"); - bh_fprintf(&tags_file, "!_TAG_OUTPUT_FILESEP\tslash\n"); - bh_fprintf(&tags_file, "!_TAG_OUTPUT_MODE\tu-ctags\n"); - bh_fprintf(&tags_file, "!_TAG_PROGRAM_AUTHOR\tOnyx Compiler\n"); - bh_fprintf(&tags_file, 
"!_TAG_PROGRAM_NAME\tOnyx Compiler\n"); - bh_fprintf(&tags_file, "!_TAG_PROGRAM_URL\thttps://github.com/onyx-lang/onyx\n"); - bh_fprintf(&tags_file, "!_TAG_PROGRAM_VERSION\t0.1.0\n"); - - qsort(context.tag_locations, bh_arr_length(context.tag_locations), sizeof(AstNode *), sort_tags); - - bh_arr_each(AstNode *, pnode, context.tag_locations) { - AstBinding *node = (AstBinding *) *pnode; - assert(node->kind == Ast_Kind_Binding); - - i32 line_len = 0; - char *c = node->token->pos.line_start; - while (*c++ != '\n') line_len++; - - bh_fprintf(&tags_file, "%b\t%s\t/^%b$/\n", - node->token->text, node->token->length, - node->token->pos.filename, - node->token->pos.line_start, line_len); - - } - - bh_file_close(&tags_file); -} - static i32 sort_symbol_resolutions(const SymbolResolution *a, const SymbolResolution *b) { if (a->file_id != b->file_id) { return a->file_id > b->file_id ? 1 : -1; @@ -63,14 +14,9 @@ static i32 sort_symbol_resolutions(const SymbolResolution *a, const SymbolResolu return a->column > b->column ? 
1 : -1; } -void onyx_docs_emit_symbol_info(const char *dest) { - bh_file sym_file; - if (bh_file_create(&sym_file, dest) != BH_FILE_ERROR_NONE) { - bh_printf("Cannot create '%s'.\n", dest); - return; - } - - SymbolInfoTable *syminfo = context.symbol_info; +void onyx_docs_emit_symbol_info(Context *context, bh_buffer *out_buffer) { + SymbolInfoTable *syminfo = context->symbol_info; + if (!syminfo) return; qsort(syminfo->symbols_resolutions, bh_arr_length(syminfo->symbols_resolutions), @@ -78,7 +24,7 @@ void onyx_docs_emit_symbol_info(const char *dest) { (int (*)(const void *, const void*)) sort_symbol_resolutions); bh_buffer file_section; - bh_buffer_init(&file_section, global_heap_allocator, 2048); + bh_buffer_init(&file_section, context->gp_alloc, 2048); fori (i, 0, shlen(syminfo->files)) { char *filename = syminfo->files[i].key; u32 file_id = syminfo->files[i].value; @@ -88,10 +34,10 @@ void onyx_docs_emit_symbol_info(const char *dest) { } bh_buffer sym_def_section; - bh_buffer_init(&sym_def_section, global_heap_allocator, 2048); + bh_buffer_init(&sym_def_section, context->gp_alloc, 2048); bh_buffer docs_section; - bh_buffer_init(&docs_section, global_heap_allocator, 4096); + bh_buffer_init(&docs_section, context->gp_alloc, 4096); bh_arr_each(SymbolInfo, sym, syminfo->symbols) { bh_buffer_write_u32(&sym_def_section, sym->id); @@ -99,12 +45,12 @@ void onyx_docs_emit_symbol_info(const char *dest) { bh_buffer_write_u32(&sym_def_section, sym->line); bh_buffer_write_u32(&sym_def_section, sym->column); - if (context.options->generate_lsp_info_file) { - if (sym->documentation) { + if (context->options->generate_lsp_info_file) { + if (sym->documentation_length > 0) { bh_buffer_write_u32(&sym_def_section, docs_section.length); - bh_buffer_write_u32(&sym_def_section, sym->documentation->length); + bh_buffer_write_u32(&sym_def_section, sym->documentation_length); - bh_buffer_append(&docs_section, sym->documentation->text, sym->documentation->length); + 
bh_buffer_append(&docs_section, sym->documentation, sym->documentation_length); } else { bh_buffer_write_u32(&sym_def_section, 0); bh_buffer_write_u32(&sym_def_section, 0); @@ -113,7 +59,7 @@ void onyx_docs_emit_symbol_info(const char *dest) { } bh_buffer sym_res_section; - bh_buffer_init(&sym_res_section, global_heap_allocator, 2048); + bh_buffer_init(&sym_res_section, context->gp_alloc, 2048); bh_arr_each(SymbolResolution, sym, syminfo->symbols_resolutions) { bh_buffer_write_u32(&sym_res_section, sym->file_id); bh_buffer_write_u32(&sym_res_section, sym->line); @@ -123,11 +69,11 @@ void onyx_docs_emit_symbol_info(const char *dest) { } bh_buffer header_section; - bh_buffer_init(&header_section, global_heap_allocator, 16); + bh_buffer_init(&header_section, context->gp_alloc, 16); bh_buffer_append(&header_section, "OSYM", 4); u32 header_size = 32; - if (context.options->generate_lsp_info_file) { + if (context->options->generate_lsp_info_file) { bh_buffer_write_u32(&header_section, 2); header_size = 40; } else { @@ -141,28 +87,26 @@ void onyx_docs_emit_symbol_info(const char *dest) { bh_buffer_write_u32(&header_section, header_size + file_section.length + sym_def_section.length); bh_buffer_write_u32(&header_section, bh_arr_length(syminfo->symbols_resolutions)); - if (context.options->generate_lsp_info_file) { + if (context->options->generate_lsp_info_file) { bh_buffer_write_u32(&header_section, header_size + file_section.length + sym_def_section.length + sym_res_section.length); bh_buffer_write_u32(&header_section, docs_section.length); } - bh_file_write(&sym_file, header_section.data, header_section.length); - bh_file_write(&sym_file, file_section.data, file_section.length); - bh_file_write(&sym_file, sym_def_section.data, sym_def_section.length); - bh_file_write(&sym_file, sym_res_section.data, sym_res_section.length); + bh_buffer_init(out_buffer, context->gp_alloc, header_section.length + file_section.length + sym_def_section.length + sym_res_section.length); + 
bh_buffer_append(out_buffer, header_section.data, header_section.length); + bh_buffer_append(out_buffer, file_section.data, file_section.length); + bh_buffer_append(out_buffer, sym_def_section.data, sym_def_section.length); + bh_buffer_append(out_buffer, sym_res_section.data, sym_res_section.length); - if (context.options->generate_lsp_info_file) { - bh_file_write(&sym_file, docs_section.data, docs_section.length); + if (context->options->generate_lsp_info_file) { + bh_buffer_append(out_buffer, docs_section.data, docs_section.length); } - bh_file_close(&sym_file); - bh_buffer_free(&header_section); bh_buffer_free(&file_section); bh_buffer_free(&sym_def_section); bh_buffer_free(&sym_res_section); - bh_arr_free(syminfo->symbols); bh_arr_free(syminfo->symbols_resolutions); shfree(syminfo->files); @@ -176,6 +120,7 @@ void onyx_docs_emit_symbol_info(const char *dest) { // void onyx_docs_submit(OnyxDocInfo *docs, AstBinding *binding) { + if (!docs) return; if (!binding->entity || !binding->entity->package) return; AstNode *node = binding->node; @@ -264,18 +209,18 @@ static void write_string(bh_buffer *buffer, i32 len, char *data) { bh_buffer_append(buffer, data, len); } -static void write_location(bh_buffer *buffer, OnyxFilePos location) { - if (shgeti(context.doc_info->file_ids, location.filename) == -1) { - shput(context.doc_info->file_ids, location.filename, context.doc_info->next_file_id); - context.doc_info->next_file_id++; +static void write_location(Context *context, bh_buffer *buffer, OnyxFilePos location) { + if (shgeti(context->doc_info->file_ids, location.filename) == -1) { + shput(context->doc_info->file_ids, location.filename, context->doc_info->next_file_id); + context->doc_info->next_file_id++; } - bh_buffer_write_u32(buffer, context.doc_info->file_ids[shgeti(context.doc_info->file_ids, location.filename)].value); + bh_buffer_write_u32(buffer, context->doc_info->file_ids[shgeti(context->doc_info->file_ids, location.filename)].value); 
bh_buffer_write_u32(buffer, location.line); bh_buffer_write_u32(buffer, location.column); } -static void write_type_node(bh_buffer *buffer, void *vnode) { +static void write_type_node(Context *context, bh_buffer *buffer, void *vnode) { AstNode *node = vnode; if (!node) goto unknown_case; @@ -283,7 +228,7 @@ static void write_type_node(bh_buffer *buffer, void *vnode) { switch (node->kind) { case Ast_Kind_Basic_Type: - if (((AstBasicType *) node)->basic_type == &type_auto_return) { + if (((AstBasicType *) node)->basic_type == context->types.auto_return) { bh_buffer_write_string(buffer, "#auto"); } else { bh_buffer_write_string(buffer, (char *) ((AstBasicType *) node)->basic_type->Basic.name); @@ -292,27 +237,32 @@ static void write_type_node(bh_buffer *buffer, void *vnode) { case Ast_Kind_Address_Of: bh_buffer_write_string(buffer, "&"); - write_type_node(buffer, ((AstAddressOf *) node)->expr); + write_type_node(context, buffer, ((AstAddressOf *) node)->expr); return; case Ast_Kind_Pointer_Type: bh_buffer_write_string(buffer, "&"); - write_type_node(buffer, ((AstPointerType *) node)->elem); + write_type_node(context, buffer, ((AstPointerType *) node)->elem); + return; + + case Ast_Kind_Multi_Pointer_Type: + bh_buffer_write_string(buffer, "[&] "); + write_type_node(context, buffer, ((AstPointerType *) node)->elem); return; case Ast_Kind_Slice_Type: bh_buffer_write_string(buffer, "[] "); - write_type_node(buffer, ((AstSliceType *) node)->elem); + write_type_node(context, buffer, ((AstSliceType *) node)->elem); return; case Ast_Kind_VarArg_Type: bh_buffer_write_string(buffer, ".."); - write_type_node(buffer, ((AstVarArgType *) node)->elem); + write_type_node(context, buffer, ((AstVarArgType *) node)->elem); return; case Ast_Kind_DynArr_Type: bh_buffer_write_string(buffer, "[..] 
"); - write_type_node(buffer, ((AstDynArrType *) node)->elem); + write_type_node(context, buffer, ((AstDynArrType *) node)->elem); return; case Ast_Kind_Struct_Type: @@ -332,13 +282,13 @@ static void write_type_node(bh_buffer *buffer, void *vnode) { return; case Ast_Kind_Poly_Call_Type: - if (((AstPolyCallType *) node)->callee == (AstType *) builtin_optional_type) { + if (((AstPolyCallType *) node)->callee == (AstType *) context->builtins.optional_type) { bh_buffer_write_string(buffer, "? "); - write_type_node(buffer, ((AstPolyCallType *) node)->params[0]); + write_type_node(context, buffer, ((AstPolyCallType *) node)->params[0]); return; } - write_type_node(buffer, ((AstPolyCallType *) node)->callee); + write_type_node(context, buffer, ((AstPolyCallType *) node)->callee); if (node->flags & Ast_Flag_Poly_Call_From_Auto) return; bh_buffer_write_byte(buffer, '('); @@ -348,7 +298,7 @@ static void write_type_node(bh_buffer *buffer, void *vnode) { bh_buffer_write_string(buffer, ", "); } - write_type_node(buffer, *param); + write_type_node(context, buffer, *param); } bh_buffer_write_byte(buffer, ')'); @@ -362,7 +312,7 @@ static void write_type_node(bh_buffer *buffer, void *vnode) { bh_buffer_write_string(buffer, ", "); } - write_type_node(buffer, *type); + write_type_node(context, buffer, *type); } bh_buffer_write_byte(buffer, ')'); @@ -376,32 +326,33 @@ static void write_type_node(bh_buffer *buffer, void *vnode) { bh_buffer_write_string(buffer, ", "); } - write_type_node(buffer, ((AstFunctionType *) node)->params[i]); + write_type_node(context, buffer, ((AstFunctionType *) node)->params[i]); } bh_buffer_write_string(buffer, ") -> "); - write_type_node(buffer, ((AstFunctionType *) node)->return_type); + write_type_node(context, buffer, ((AstFunctionType *) node)->return_type); return; case Ast_Kind_Field_Access: - write_type_node(buffer, ((AstFieldAccess *) node)->expr); + write_type_node(context, buffer, ((AstFieldAccess *) node)->expr); bh_buffer_write_byte(buffer, 
'.'); bh_buffer_append(buffer, node->token->text, node->token->length); return; case Ast_Kind_Typeof: bh_buffer_write_string(buffer, (char *) type_get_name( - type_build_from_ast(context.ast_alloc, (AstType *) node) + context, + type_build_from_ast(context, (AstType *) node) )); return; case Ast_Kind_Alias: - write_type_node(buffer, ((AstAlias *) node)->alias); + write_type_node(context, buffer, ((AstAlias *) node)->alias); return; case Ast_Kind_Type_Alias: - write_type_node(buffer, ((AstTypeAlias *) node)->to); + write_type_node(context, buffer, ((AstTypeAlias *) node)->to); return; case Ast_Kind_Symbol: @@ -420,14 +371,22 @@ static void write_type_node(bh_buffer *buffer, void *vnode) { } static void write_doc_notes(bh_buffer *buffer, AstBinding *binding) { - if (!binding || !binding->documentation) { - write_cstring(buffer, ""); - } else { - write_string(buffer, binding->documentation->length, binding->documentation->text); + if (binding) { + if (binding->documentation_token_old) { + write_string(buffer, binding->documentation_token_old->length, binding->documentation_token_old->text); + return; + } + + if (binding->documentation_string) { + write_string(buffer, strlen(binding->documentation_string), (char *) binding->documentation_string); + return; + } } + + write_cstring(buffer, ""); } -static void write_entity_header(bh_buffer *buffer, AstBinding *binding, OnyxFilePos location) { +static void write_entity_header(Context *context, bh_buffer *buffer, AstBinding *binding, OnyxFilePos location) { if (!binding) { bh_buffer_write_u32(buffer, 0); bh_buffer_write_u32(buffer, 1); @@ -448,17 +407,17 @@ static void write_entity_header(bh_buffer *buffer, AstBinding *binding, OnyxFile } // Location - write_location(buffer, location); + write_location(context, buffer, location); // Notes write_doc_notes(buffer, binding); } -static b32 write_doc_procedure(bh_buffer *buffer, AstBinding *binding, AstNode *proc); +static b32 write_doc_procedure(Context *context, bh_buffer 
*buffer, AstBinding *binding, AstNode *proc); -static void write_doc_constraints(bh_buffer *buffer, ConstraintContext *constraints, bh_arr(AstPolyParam) poly_params) { +static void write_doc_constraints(Context *context, bh_buffer *buffer, ConstraintContext *constraints, bh_arr(AstPolyParam) poly_params) { bh_buffer tmp_buffer; - bh_buffer_init(&tmp_buffer, global_scratch_allocator, 256); + bh_buffer_init(&tmp_buffer, context->scratch_alloc, 256); u32 constraint_count_patch = buffer->length; bh_buffer_write_u32(buffer, 0); @@ -468,7 +427,7 @@ static void write_doc_constraints(bh_buffer *buffer, ConstraintContext *constrai AstConstraint *constraint = *pconstraint; bh_buffer_clear(&tmp_buffer); - write_type_node(&tmp_buffer, constraint->interface); + write_type_node(context, &tmp_buffer, constraint->interface); bh_buffer_write_string(&tmp_buffer, "("); bh_arr_each(AstTyped *, ptype_arg, constraint->args) { @@ -477,7 +436,7 @@ static void write_doc_constraints(bh_buffer *buffer, ConstraintContext *constrai } AstTyped *type_arg = *ptype_arg; - write_type_node(&tmp_buffer, type_arg); + write_type_node(context, &tmp_buffer, type_arg); } bh_buffer_write_string(&tmp_buffer, ")"); @@ -491,11 +450,11 @@ static void write_doc_constraints(bh_buffer *buffer, ConstraintContext *constrai if (!poly_param->implicit_interface) continue; bh_buffer_clear(&tmp_buffer); - write_type_node(&tmp_buffer, poly_param->implicit_interface); + write_type_node(context, &tmp_buffer, poly_param->implicit_interface); bh_buffer_write_string(&tmp_buffer, "("); poly_param->poly_sym->flags &= ~Ast_Flag_Symbol_Is_PolyVar; - write_type_node(&tmp_buffer, poly_param->poly_sym); + write_type_node(context, &tmp_buffer, poly_param->poly_sym); poly_param->poly_sym->flags |= Ast_Flag_Symbol_Is_PolyVar; bh_buffer_write_string(&tmp_buffer, ")"); @@ -510,7 +469,7 @@ static void write_doc_constraints(bh_buffer *buffer, ConstraintContext *constrai bh_buffer_free(&tmp_buffer); } -static void 
write_doc_methods(bh_buffer *buffer, Scope *method_scope) { +static void write_doc_methods(Context *context, bh_buffer *buffer, Scope *method_scope) { u32 count_patch = buffer->length; bh_buffer_write_u32(buffer, 0); @@ -548,7 +507,7 @@ static void write_doc_methods(bh_buffer *buffer, Scope *method_scope) { binding->token = &tmp_name_token; method_count++; - write_doc_procedure(buffer, binding, (AstNode *) node); + write_doc_procedure(context, buffer, binding, (AstNode *) node); binding->token = old_token; } @@ -556,13 +515,13 @@ static void write_doc_methods(bh_buffer *buffer, Scope *method_scope) { *((u32 *) bh_pointer_add(buffer->data, count_patch)) = method_count; } -static b32 write_doc_function(bh_buffer *buffer, AstBinding *binding, AstNode *proc) { +static b32 write_doc_function(Context *context, bh_buffer *buffer, AstBinding *binding, AstNode *proc) { AstFunction *func = (void *) proc; if (func->kind == Ast_Kind_Macro) { func = (void *) ((AstMacro *) proc)->body; } - write_entity_header(buffer, binding, func->token->pos); + write_entity_header(context, buffer, binding, func->token->pos); // Flags bh_buffer_write_u32(buffer, proc->kind == Ast_Kind_Macro ? 
Doc_Procedure_Flag_Macro : 0); @@ -571,12 +530,12 @@ static b32 write_doc_function(bh_buffer *buffer, AstBinding *binding, AstNode *p bh_buffer_write_u32(buffer, bh_arr_length(func->params)); bh_arr_each(AstParam, param, func->params) { write_string(buffer, param->local->token->length, param->local->token->text); - write_cstring(buffer, type_get_name(param->local->type)); + write_cstring(buffer, type_get_name(context, param->local->type)); write_cstring(buffer, ""); } // Return type - write_cstring(buffer, type_get_name(func->type->Function.return_type)); + write_cstring(buffer, type_get_name(context, func->type->Function.return_type)); // Overload procs bh_buffer_write_u32(buffer, 0); @@ -587,14 +546,14 @@ static b32 write_doc_function(bh_buffer *buffer, AstBinding *binding, AstNode *p return 1; } -static b32 write_doc_overloaded_function(bh_buffer *buffer, AstBinding *binding, AstNode *proc) { +static b32 write_doc_overloaded_function(Context *context, bh_buffer *buffer, AstBinding *binding, AstNode *proc) { AstOverloadedFunction *ofunc = (void *) proc; bh_imap all_overloads; - bh_imap_init(&all_overloads, global_heap_allocator, bh_arr_length(ofunc->overloads) * 2); + bh_imap_init(&all_overloads, context->gp_alloc, bh_arr_length(ofunc->overloads) * 2); build_all_overload_options(ofunc->overloads, &all_overloads); - write_entity_header(buffer, binding, ofunc->token->pos); + write_entity_header(context, buffer, binding, ofunc->token->pos); // Flags bh_buffer_write_u32(buffer, Doc_Procedure_Flag_Overloaded); @@ -610,7 +569,7 @@ static b32 write_doc_overloaded_function(bh_buffer *buffer, AstBinding *binding, bh_arr_each(bh__imap_entry, entry, all_overloads.entries) { AstNode* node = strip_aliases((AstNode *) entry->key); - if (write_doc_procedure(buffer, NULL, node)) { + if (write_doc_procedure(context, buffer, NULL, node)) { proc_count += 1; } } @@ -624,13 +583,13 @@ static b32 write_doc_overloaded_function(bh_buffer *buffer, AstBinding *binding, return 1; } -static 
b32 write_doc_polymorphic_proc(bh_buffer *buffer, AstBinding *binding, AstNode *proc) { +static b32 write_doc_polymorphic_proc(Context *context, bh_buffer *buffer, AstBinding *binding, AstNode *proc) { AstFunction *func = (void *) proc; if (func->kind == Ast_Kind_Macro) { func = (void *) ((AstMacro *) proc)->body; } - write_entity_header(buffer, binding, func->token->pos); + write_entity_header(context, buffer, binding, func->token->pos); // Flags bh_buffer_write_u32(buffer, proc->kind == Ast_Kind_Macro ? Doc_Procedure_Flag_Macro : 0); @@ -638,7 +597,7 @@ static b32 write_doc_polymorphic_proc(bh_buffer *buffer, AstBinding *binding, As // Parameter types bh_buffer param_type_buf; - bh_buffer_init(&param_type_buf, global_scratch_allocator, 256); + bh_buffer_init(&param_type_buf, context->scratch_alloc, 256); bh_buffer_write_u32(buffer, bh_arr_length(func->params)); bh_arr_each(AstParam, param, func->params) { @@ -649,7 +608,7 @@ static b32 write_doc_polymorphic_proc(bh_buffer *buffer, AstBinding *binding, As else write_string(buffer, param->local->token->length, param->local->token->text); - write_type_node(&param_type_buf, param->local->type_node); + write_type_node(context, &param_type_buf, param->local->type_node); write_string(buffer, param_type_buf.length, (char *) param_type_buf.data); write_cstring(buffer, ""); } @@ -657,7 +616,7 @@ static b32 write_doc_polymorphic_proc(bh_buffer *buffer, AstBinding *binding, As // Return type bh_buffer_clear(&param_type_buf); - write_type_node(&param_type_buf, func->return_type); + write_type_node(context, &param_type_buf, func->return_type); write_string(buffer, param_type_buf.length, (char *) param_type_buf.data); bh_buffer_free(&param_type_buf); @@ -665,40 +624,40 @@ static b32 write_doc_polymorphic_proc(bh_buffer *buffer, AstBinding *binding, As bh_buffer_write_u32(buffer, 0); // Constraints - write_doc_constraints(buffer, &func->constraints, func->poly_params); + write_doc_constraints(context, buffer, &func->constraints, func->poly_params); return 1; }
-static b32 write_doc_procedure(bh_buffer *buffer, AstBinding *binding, AstNode *proc) { +static b32 write_doc_procedure(Context *context, bh_buffer *buffer, AstBinding *binding, AstNode *proc) { if (proc->kind == Ast_Kind_Function) { - return write_doc_function(buffer, binding, proc); + return write_doc_function(context, buffer, binding, proc); } else if (proc->kind == Ast_Kind_Macro) { AstMacro *macro = (void *) proc; if (macro->body->kind == Ast_Kind_Function) - return write_doc_function(buffer, binding, proc); + return write_doc_function(context, buffer, binding, proc); else - return write_doc_polymorphic_proc(buffer, binding, proc); + return write_doc_polymorphic_proc(context, buffer, binding, proc); } else if (proc->kind == Ast_Kind_Overloaded_Function) { - return write_doc_overloaded_function(buffer, binding, proc); + return write_doc_overloaded_function(context, buffer, binding, proc); } else if (proc->kind == Ast_Kind_Polymorphic_Proc) { - return write_doc_polymorphic_proc(buffer, binding, proc); + return write_doc_polymorphic_proc(context, buffer, binding, proc); } return 0; } -static b32 write_doc_structure(bh_buffer *buffer, AstBinding *binding, AstNode *node) { +static b32 write_doc_structure(Context *context, bh_buffer *buffer, AstBinding *binding, AstNode *node) { Scope *method_scope = NULL; if (node->kind == Ast_Kind_Struct_Type) { AstStructType *struct_node = (void *) node; - method_scope = get_scope_from_node((AstNode *) struct_node); + method_scope = get_scope_from_node(context, (AstNode *) struct_node); - write_entity_header(buffer, binding, node->token->pos); + write_entity_header(context, buffer, binding, node->token->pos); Type *struct_type = struct_node->stcache; assert(struct_type); @@ -708,7 +667,7 @@ static b32 write_doc_structure(bh_buffer *buffer, AstBinding *binding, AstNode * StructMember* mem = *pmem; write_cstring(buffer, mem->name); - write_cstring(buffer, type_get_name(mem->type)); + write_cstring(buffer, type_get_name(context, 
mem->type)); write_cstring(buffer, ""); bh_buffer_write_u32(buffer, 0); @@ -718,25 +677,25 @@ static b32 write_doc_structure(bh_buffer *buffer, AstBinding *binding, AstNode * bh_buffer_write_u32(buffer, 0); // Constraints - write_doc_constraints(buffer, &struct_node->constraints, NULL); + write_doc_constraints(context, buffer, &struct_node->constraints, NULL); } else if (node->kind == Ast_Kind_Poly_Struct_Type) { AstPolyStructType *poly_struct_node = (void *) node; - method_scope = get_scope_from_node((AstNode *) poly_struct_node); + method_scope = get_scope_from_node(context, (AstNode *) poly_struct_node); AstStructType *struct_node = poly_struct_node->base_struct; - write_entity_header(buffer, binding, node->token->pos); + write_entity_header(context, buffer, binding, node->token->pos); bh_buffer type_buf; - bh_buffer_init(&type_buf, global_scratch_allocator, 256); + bh_buffer_init(&type_buf, context->scratch_alloc, 256); bh_buffer_write_u32(buffer, bh_arr_length(struct_node->members)); bh_arr_each(AstStructMember *, psmem, struct_node->members) { AstStructMember *smem = *psmem; bh_buffer_clear(&type_buf); - write_type_node(&type_buf, smem->type_node); + write_type_node(context, &type_buf, smem->type_node); write_string(buffer, smem->token->length, smem->token->text); write_string(buffer, type_buf.length, (char *) type_buf.data); @@ -749,7 +708,7 @@ static b32 write_doc_structure(bh_buffer *buffer, AstBinding *binding, AstNode * bh_buffer_write_u32(buffer, bh_arr_length(poly_struct_node->poly_params)); bh_arr_each(AstPolyStructParam, param, poly_struct_node->poly_params) { bh_buffer_clear(&type_buf); - write_type_node(&type_buf, param->type_node); + write_type_node(context, &type_buf, param->type_node); write_string(buffer, param->token->length, param->token->text); write_string(buffer, type_buf.length, (char *) type_buf.data); @@ -757,24 +716,24 @@ static b32 write_doc_structure(bh_buffer *buffer, AstBinding *binding, AstNode * } // Constraints - 
write_doc_constraints(buffer, &struct_node->constraints, NULL); + write_doc_constraints(context, buffer, &struct_node->constraints, NULL); bh_buffer_free(&type_buf); } - write_doc_methods(buffer, method_scope); + write_doc_methods(context, buffer, method_scope); return 1; } -static b32 write_doc_union_type(bh_buffer *buffer, AstBinding *binding, AstNode *node) { +static b32 write_doc_union_type(Context *context, bh_buffer *buffer, AstBinding *binding, AstNode *node) { Scope *method_scope = NULL; if (node->kind == Ast_Kind_Union_Type) { AstUnionType *union_node = (void *) node; - method_scope = get_scope_from_node((AstNode *) union_node); + method_scope = get_scope_from_node(context, (AstNode *) union_node); - write_entity_header(buffer, binding, node->token->pos); + write_entity_header(context, buffer, binding, node->token->pos); Type *union_type = union_node->utcache; assert(union_type); @@ -784,32 +743,32 @@ static b32 write_doc_union_type(bh_buffer *buffer, AstBinding *binding, AstNode UnionVariant* uv = *puv; write_cstring(buffer, uv->name); - write_cstring(buffer, type_get_name(uv->type)); + write_cstring(buffer, type_get_name(context, uv->type)); } // Polymorph parameters bh_buffer_write_u32(buffer, 0); // Constraints - write_doc_constraints(buffer, &union_node->constraints, NULL); + write_doc_constraints(context, buffer, &union_node->constraints, NULL); } else if (node->kind == Ast_Kind_Poly_Union_Type) { AstPolyUnionType *poly_union_node = (void *) node; - method_scope = get_scope_from_node((AstNode *) poly_union_node); + method_scope = get_scope_from_node(context, (AstNode *) poly_union_node); AstUnionType *union_node = poly_union_node->base_union; - write_entity_header(buffer, binding, node->token->pos); + write_entity_header(context, buffer, binding, node->token->pos); bh_buffer type_buf; - bh_buffer_init(&type_buf, global_scratch_allocator, 256); + bh_buffer_init(&type_buf, context->scratch_alloc, 256); bh_buffer_write_u32(buffer, 
bh_arr_length(union_node->variants)); bh_arr_each(AstUnionVariant*, puv, union_node->variants) { AstUnionVariant* uv = *puv; bh_buffer_clear(&type_buf); - write_type_node(&type_buf, uv->type_node); + write_type_node(context, &type_buf, uv->type_node); write_string(buffer, uv->token->length, uv->token->text); write_string(buffer, type_buf.length, (char *) type_buf.data); @@ -819,7 +778,7 @@ static b32 write_doc_union_type(bh_buffer *buffer, AstBinding *binding, AstNode bh_buffer_write_u32(buffer, bh_arr_length(poly_union_node->poly_params)); bh_arr_each(AstPolyStructParam, param, poly_union_node->poly_params) { bh_buffer_clear(&type_buf); - write_type_node(&type_buf, param->type_node); + write_type_node(context, &type_buf, param->type_node); write_string(buffer, param->token->length, param->token->text); write_string(buffer, type_buf.length, (char *) type_buf.data); @@ -827,20 +786,20 @@ static b32 write_doc_union_type(bh_buffer *buffer, AstBinding *binding, AstNode } // Constraints - write_doc_constraints(buffer, &union_node->constraints, NULL); + write_doc_constraints(context, buffer, &union_node->constraints, NULL); bh_buffer_free(&type_buf); } - write_doc_methods(buffer, method_scope); + write_doc_methods(context, buffer, method_scope); return 1; } -static b32 write_doc_enum(bh_buffer *buffer, AstBinding *binding, AstNode *node) { +static b32 write_doc_enum(Context *context, bh_buffer *buffer, AstBinding *binding, AstNode *node) { AstEnumType *enum_node = (void *) node; - write_entity_header(buffer, binding, node->token->pos); + write_entity_header(context, buffer, binding, node->token->pos); bh_buffer_write_u32(buffer, bh_arr_length(enum_node->values)); bh_arr_each(AstEnumValue *, pvalue, enum_node->values) { @@ -858,24 +817,24 @@ static b32 write_doc_enum(bh_buffer *buffer, AstBinding *binding, AstNode *node) return 1; } -static b32 write_doc_distinct_type(bh_buffer *buffer, AstBinding *binding, AstNode *node) { +static b32 write_doc_distinct_type(Context 
*context, bh_buffer *buffer, AstBinding *binding, AstNode *node) { AstDistinctType *distinct_node = (void *) node; - write_entity_header(buffer, binding, node->token->pos); + write_entity_header(context, buffer, binding, node->token->pos); bh_buffer type_buf; - bh_buffer_init(&type_buf, global_scratch_allocator, 256); - write_type_node(&type_buf, distinct_node->base_type); + bh_buffer_init(&type_buf, context->scratch_alloc, 256); + write_type_node(context, &type_buf, distinct_node->base_type); write_string(buffer, type_buf.length, (char *) type_buf.data); bh_buffer_free(&type_buf); - write_doc_methods(buffer, distinct_node->scope); + write_doc_methods(context, buffer, distinct_node->scope); return 1; } -static void write_doc_entity_array(bh_buffer *buffer, bh_arr(AstBinding *) arr, - b32 (*write_doc)(bh_buffer *buffer, AstBinding *, AstNode*), +static void write_doc_entity_array(Context *context, bh_buffer *buffer, bh_arr(AstBinding *) arr, + b32 (*write_doc)(Context *context, bh_buffer *buffer, AstBinding *, AstNode*), u32 offset_write_location) { *((u32 *) bh_pointer_add(buffer->data, offset_write_location)) = buffer->length; @@ -884,7 +843,7 @@ static void write_doc_entity_array(bh_buffer *buffer, bh_arr(AstBinding *) arr, u32 count = 0; bh_arr_each(AstBinding *, pbind, arr) { - if (write_doc(buffer, *pbind, (*pbind)->node)) { + if (write_doc(context, buffer, *pbind, (*pbind)->node)) { count++; } } @@ -892,21 +851,20 @@ static void write_doc_entity_array(bh_buffer *buffer, bh_arr(AstBinding *) arr, *((u32 *) bh_pointer_add(buffer->data, count_patch)) = count; } -void onyx_docs_emit_odoc(const char *dest) { - bh_file doc_file; - if (bh_file_create(&doc_file, dest) != BH_FILE_ERROR_NONE) { - bh_printf("Cannot create '%s'.\n", dest); +void onyx_docs_generate_odoc(Context *context, bh_buffer *out_buffer) { + if (!context->doc_info) { + out_buffer->data = NULL; + out_buffer->length = 0; return; } - bh_buffer doc_buffer; - bh_buffer_init(&doc_buffer, 
global_heap_allocator, 16 * 1024); + bh_buffer_init(&doc_buffer, context->gp_alloc, 16 * 1024); bh_buffer_append(&doc_buffer, Doc_Magic_Bytes, 4); bh_buffer_write_u32(&doc_buffer, 1); - const char *program_name = context.options->target_file; + const char *program_name = "out.wasm"; write_cstring(&doc_buffer, program_name); bh_buffer_write_u32(&doc_buffer, bh_time_curr() / 1000); @@ -925,7 +883,7 @@ void onyx_docs_emit_odoc(const char *dest) { // *((u32 *) bh_pointer_add(doc_buffer.data, offset_table_index + 0)) = doc_buffer.length; - Table(Package *) packages = (void *) context.packages; + Table(Package *) packages = (void *) context->packages; bh_buffer_write_u32(&doc_buffer, shlenu(packages)); fori (i, 0, shlen(packages)) { char *package_qualified_name = packages[i].key; @@ -945,41 +903,44 @@ void onyx_docs_emit_odoc(const char *dest) { bh_buffer_write_u32(&doc_buffer, (u32) p->sub_packages[j] - 1); } - bh_buffer_write_u32(&doc_buffer, bh_arr_length(p->doc_strings)); - bh_arr_each(OnyxToken *, ptkn, p->doc_strings) { + bh_buffer_write_u32(&doc_buffer, bh_arr_length(p->doc_strings) + bh_arr_length(p->doc_string_tokens)); + bh_arr_each(OnyxToken *, ptkn, p->doc_string_tokens) { OnyxToken *tkn = *ptkn; write_string(&doc_buffer, tkn->length, tkn->text); } + bh_arr_each(const char *, pstr, p->doc_strings) { + write_cstring(&doc_buffer, *pstr); + } } // // Procedure Info // - write_doc_entity_array(&doc_buffer, context.doc_info->procedures, write_doc_procedure, offset_table_index + 4); + write_doc_entity_array(context, &doc_buffer, context->doc_info->procedures, write_doc_procedure, offset_table_index + 4); // // Structure Info // - write_doc_entity_array(&doc_buffer, context.doc_info->structures, write_doc_structure, offset_table_index + 8); + write_doc_entity_array(context, &doc_buffer, context->doc_info->structures, write_doc_structure, offset_table_index + 8); // // Enum Info // - write_doc_entity_array(&doc_buffer, context.doc_info->enumerations, write_doc_enum, 
offset_table_index + 12); + write_doc_entity_array(context, &doc_buffer, context->doc_info->enumerations, write_doc_enum, offset_table_index + 12); // // Distinct Types Info // - write_doc_entity_array(&doc_buffer, context.doc_info->distinct_types, write_doc_distinct_type, offset_table_index + 16); + write_doc_entity_array(context, &doc_buffer, context->doc_info->distinct_types, write_doc_distinct_type, offset_table_index + 16); // // Union Info // - write_doc_entity_array(&doc_buffer, context.doc_info->unions, write_doc_union_type, offset_table_index + 20); + write_doc_entity_array(context, &doc_buffer, context->doc_info->unions, write_doc_union_type, offset_table_index + 20); // @@ -987,17 +948,15 @@ void onyx_docs_emit_odoc(const char *dest) { // *((u32 *) bh_pointer_add(doc_buffer.data, offset_table_index + 24)) = doc_buffer.length; - bh_buffer_write_u32(&doc_buffer, shlenu(context.doc_info->file_ids)); - fori (i, 0, shlen(context.doc_info->file_ids)) { - const char *key = context.doc_info->file_ids[i].key; + bh_buffer_write_u32(&doc_buffer, shlenu(context->doc_info->file_ids)); + fori (i, 0, shlen(context->doc_info->file_ids)) { + const char *key = context->doc_info->file_ids[i].key; bh_buffer_write_u32(&doc_buffer, 0); write_cstring(&doc_buffer, key); } - - bh_file_write(&doc_file, doc_buffer.data, doc_buffer.length); - bh_file_close(&doc_file); + *out_buffer = doc_buffer; } diff --git a/compiler/src/entities.c b/compiler/src/entities.c index 79396279d..858d24580 100644 --- a/compiler/src/entities.c +++ b/compiler/src/entities.c @@ -70,8 +70,13 @@ static void eh_shift_down(EntityHeap* entities, i32 index) { } } -void entity_heap_init(EntityHeap* entities) { - bh_arena_init(&entities->entity_arena, global_heap_allocator, 32 * 1024); +void entity_heap_init(bh_allocator a, EntityHeap* entities) { + memset(entities, 0, sizeof(*entities)); + entities->allocator = a; + + bh_arena_init(&entities->entity_arena, a, 32 * 1024); + bh_arr_new(a, entities->entities, 128); 
+ bh_arr_new(a, entities->quick_unsorted_entities, 128); } // Allocates the entity in the entity heap. Don't quite feel this is necessary... @@ -79,7 +84,7 @@ Entity* entity_heap_register(EntityHeap* entities, Entity e) { bh_allocator alloc = bh_arena_allocator(&entities->entity_arena); Entity* entity = bh_alloc_item(alloc, Entity); *entity = e; - entity->id = context.next_entity_id++; + entity->id = entities->next_id++; entity->macro_attempts = 0; entity->micro_attempts = 0; entity->entered_in_queue = 0; @@ -90,11 +95,6 @@ Entity* entity_heap_register(EntityHeap* entities, Entity e) { void entity_heap_insert_existing(EntityHeap* entities, Entity* e) { if (e->entered_in_queue) return; - if (entities->entities == NULL) { - bh_arr_new(global_heap_allocator, entities->entities, 128); - bh_arr_new(global_heap_allocator, entities->quick_unsorted_entities, 128); - } - if (e->state <= Entity_State_Introduce_Symbols) { bh_arr_push(entities->quick_unsorted_entities, e); } else { @@ -172,8 +172,8 @@ void entity_change_state(EntityHeap* entities, Entity *ent, EntityState new_stat ent->state = new_state; } -void entity_heap_add_job(EntityHeap *entities, TypeMatch (*func)(void *), void *job_data) { - EntityJobData *job = bh_alloc(global_heap_allocator, sizeof(*job)); +void entity_heap_add_job(EntityHeap *entities, TypeMatch (*func)(Context *, void *), void *job_data) { + EntityJobData *job = bh_alloc(entities->allocator, sizeof(*job)); job->func = func; job->job_data = job_data; @@ -186,7 +186,7 @@ void entity_heap_add_job(EntityHeap *entities, TypeMatch (*func)(void *), void * } // NOTE(Brendan Hansen): Uses the entity heap in the context structure -void add_entities_for_node(bh_arr(Entity *) *target_arr, AstNode* node, Scope* scope, Package* package) { +void add_entities_for_node(EntityHeap *entities, bh_arr(Entity *) *target_arr, AstNode* node, Scope* scope, Package* package) { #define ENTITY_INSERT(_ent) \ entity = entity_heap_register(entities, _ent); \ if (target_arr) { \ 
@@ -199,12 +199,11 @@ void add_entities_for_node(bh_arr(Entity *) *target_arr, AstNode* node, Scope* s if (node->entity != NULL) return; - EntityHeap* entities = &context.entities; Entity* entity; Entity ent; ent.id = entities->next_id++; - ent.state = Entity_State_Resolve_Symbols; + ent.state = Entity_State_Check_Types; ent.package = package; ent.scope = scope; @@ -363,7 +362,6 @@ void add_entities_for_node(bh_arr(Entity *) *target_arr, AstNode* node, Scope* s } case Ast_Kind_Static_If: { - ent.state = Entity_State_Resolve_Symbols; ent.type = Entity_Type_Static_If; ent.static_if = (AstIf *) node; ENTITY_INSERT(ent); @@ -384,6 +382,7 @@ void add_entities_for_node(bh_arr(Entity *) *target_arr, AstNode* node, Scope* s case Ast_Kind_Directive_Init: case Ast_Kind_Directive_Library: case Ast_Kind_Directive_This_Package: + case Ast_Kind_Directive_Wasm_Section: case Ast_Kind_Injection: { ent.type = Entity_Type_Process_Directive; ent.expr = (AstTyped *) node; @@ -394,7 +393,6 @@ void add_entities_for_node(bh_arr(Entity *) *target_arr, AstNode* node, Scope* s case Ast_Kind_Interface: { ent.type = Entity_Type_Interface; ent.interface = (AstInterface *) node; - ent.state = Entity_State_Resolve_Symbols; ENTITY_INSERT(ent); break; } @@ -402,7 +400,6 @@ void add_entities_for_node(bh_arr(Entity *) *target_arr, AstNode* node, Scope* s case Ast_Kind_Constraint: { ent.type = Entity_Type_Constraint_Check; ent.constraint = (AstConstraint *) node; - ent.state = Entity_State_Resolve_Symbols; ENTITY_INSERT(ent); break; } @@ -410,7 +407,6 @@ void add_entities_for_node(bh_arr(Entity *) *target_arr, AstNode* node, Scope* s case Ast_Kind_Foreign_Block: { ent.type = Entity_Type_Foreign_Block; ent.foreign_block = (AstForeignBlock *) node; - ent.state = Entity_State_Resolve_Symbols; ENTITY_INSERT(ent); break; } @@ -418,7 +414,27 @@ void add_entities_for_node(bh_arr(Entity *) *target_arr, AstNode* node, Scope* s case Ast_Kind_Import: { ent.type = Entity_Type_Import; ent.import = (AstImport *) 
node; - ent.state = Entity_State_Resolve_Symbols; + ENTITY_INSERT(ent); + break; + } + + case Ast_Kind_Js_Code: { + ent.type = Entity_Type_JS; + ent.js = (AstJsNode *) node; + ENTITY_INSERT(ent); + break; + } + + case Ast_Kind_Compiler_Extension: { + ent.type = Entity_Type_Compiler_Extension; + ent.compiler_extension = (AstCompilerExtension *) node; + ENTITY_INSERT(ent); + break; + } + + case Ast_Kind_Procedural_Expansion: { + ent.type = Entity_Type_Procedural_Expansion; + ent.proc_expansion = (AstProceduralExpansion *) node; ENTITY_INSERT(ent); break; } diff --git a/compiler/src/errors.c b/compiler/src/errors.c index 951d8879e..8984ce772 100644 --- a/compiler/src/errors.c +++ b/compiler/src/errors.c @@ -1,158 +1,13 @@ #include "errors.h" #include "utils.h" -void onyx_errors_init(bh_arr(bh_file_contents)* files) { - context.errors.file_contents = files; +void onyx_errors_init(Context *context, bh_arr(bh_file_contents)* files) { + context->errors.file_contents = files; - bh_arena_init(&context.errors.msg_arena, global_heap_allocator, 16 * 1024); - context.errors.msg_alloc = bh_arena_allocator(&context.errors.msg_arena); + bh_arena_init(&context->errors.msg_arena, context->gp_alloc, 16 * 1024); + context->errors.msg_alloc = bh_arena_allocator(&context->errors.msg_arena); - bh_arr_new(global_heap_allocator, context.errors.errors, 4); -} - -static void print_error_text(char *text) { - if (context.options->no_colors) { - bh_printf("%s", text); - return; - } - - char *ch = text; - b32 in_color = 0; - - while (*ch != '\0') { - if (*ch == '\'') { - in_color = !in_color; - if (in_color) bh_printf("\033[92m"); - else bh_printf("\033[0m"); - } else { - bh_printf("%c", *ch); - } - - ch++; - } -} - -static void print_underline(OnyxError *err, i32 len, i32 first_non_whitespace, b32 colored_printing) { - char* pointer_str = bh_alloc_array(global_scratch_allocator, char, len); - memset(pointer_str, ' ', len); - memcpy(pointer_str - 1, err->pos.line_start, first_non_whitespace); - 
memset(pointer_str + first_non_whitespace - 1, ' ', err->pos.column - first_non_whitespace); - memset(pointer_str + err->pos.column - 1, '~', err->pos.length - 1); - pointer_str[err->pos.column - 2] = '^'; - pointer_str[err->pos.column + err->pos.length - 1] = 0; - - if (colored_printing) bh_printf("\033[91m"); - bh_printf("%s\n", pointer_str); - if (colored_printing) bh_printf("\033[0m\n"); -} - -static void print_detailed_message_v1(OnyxError *err, bh_file_contents* fc, b32 colored_printing) { - bh_printf("(%s:%l,%l) %s\n", err->pos.filename, err->pos.line, err->pos.column, err->text); - - i32 linelength = 0; - i32 first_char = 0; - char* walker = err->pos.line_start; - while (*walker == ' ' || *walker == '\t') first_char++, linelength++, walker++; - while (*walker != '\n') linelength++, walker++; - - if (colored_printing) bh_printf("\033[90m"); - i32 numlen = bh_printf(" %d | ", err->pos.line); - if (colored_printing) bh_printf("\033[94m"); - bh_printf("%b\n", err->pos.line_start, linelength); - - fori (i, 0, numlen) bh_printf(" "); - print_underline(err, linelength, first_char, colored_printing); -} - -static void print_detailed_message_v2(OnyxError* err, bh_file_contents* fc, b32 colored_printing) { - if (colored_printing) { - switch (err->rank) { - case Error_Warning: - bh_printf("\033[93mwarning\033[0m: "); - print_error_text(err->text); - bh_printf("\n\033[90m at: %s:%l,%l\033[0m\n", err->pos.filename, err->pos.line, err->pos.column); - break; - - default: - bh_printf("\033[91merror\033[0m: "); - print_error_text(err->text); - bh_printf("\n\033[90m at: %s:%l,%l\033[0m\n", err->pos.filename, err->pos.line, err->pos.column); - break; - } - } else { - switch (err->rank) { - case Error_Warning: - bh_printf("warning: "); - print_error_text(err->text); - bh_printf("\n at: %s:%l,%l\n", err->pos.filename, err->pos.line, err->pos.column); - break; - - default: - bh_printf("error: "); - print_error_text(err->text); - bh_printf("\n at: %s:%l,%l\n", err->pos.filename, 
err->pos.line, err->pos.column); - break; - } - } - - i32 linelength = 0; - i32 first_char = 0; - char* walker = err->pos.line_start; - while (*walker == ' ' || *walker == '\t') first_char++, linelength++, walker++; - while (*walker != '\n') linelength++, walker++; - - char numbuf[32]; - i32 numlen = bh_snprintf(numbuf, 31, " %d | ", err->pos.line); - - if (colored_printing) bh_printf("\033[90m"); - fori (i, 0, numlen - 3) bh_printf(" "); - bh_printf("|\n%s", numbuf); - if (colored_printing) bh_printf("\033[94m"); - - bh_printf("%b\n", err->pos.line_start, linelength); - - if (colored_printing) bh_printf("\033[90m"); - fori (i, 0, numlen - 3) bh_printf(" "); - bh_printf("| "); - if (colored_printing) bh_printf("\033[94m"); - - print_underline(err, linelength, first_char, colored_printing); -} - -static void print_detailed_message(OnyxError* err, bh_file_contents* fc) { - b32 colored_printing = 0; - #if defined(_BH_LINUX) || defined(_BH_DARWIN) - colored_printing = !context.options->no_colors; - #endif - - if (!err->pos.filename) { - // This makes the assumption that if a file is not specified for an error, - // the error must have come from the command line. 
- - if (colored_printing) { - bh_printf("\033[91merror\033[0m: "); - bh_printf("%s\n", err->text); - bh_printf("\033[90m at: command line argument\033[0m\n"); - } else { - bh_printf("error: "); - bh_printf("%s\n", err->text); - bh_printf(" at: command line argument\n"); - } - - return; - } - - char *error_format = context.options->error_format; - - if (!strcmp(error_format, "v2")) { - print_detailed_message_v2(err, fc, colored_printing); - } - else if (!strcmp(error_format, "v1")) { - print_detailed_message_v1(err, fc, colored_printing); - } - else { - bh_printf("Unknown error format: '%s'.\n", error_format); - } + bh_arr_new(context->gp_alloc, context->errors.errors, 4); } static i32 errors_sort(const void* v1, const void* v2) { @@ -161,75 +16,46 @@ static i32 errors_sort(const void* v1, const void* v2) { return e2->rank - e1->rank; } -void onyx_errors_print() { - // NOTE: If the format of the error messages is ever changed, - // update onyx_compile.vim and onyx.sublime-build to match - // the new format. This way editor error highlighting is still - // supported. 
- // - // - brendanfh 2020/09/03 - - qsort(context.errors.errors, bh_arr_length(context.errors.errors), sizeof(OnyxError), errors_sort); - - OnyxErrorRank last_rank = context.errors.errors[0].rank; - bh_arr_each(OnyxError, err, context.errors.errors) { - if (!context.options->show_all_errors && last_rank != err->rank) break; - - bh_file_contents file_contents = { 0 }; - if (err->pos.filename) { - bh_arr_each(bh_file_contents, fc, *context.errors.file_contents) { - if (!strcmp(fc->filename, err->pos.filename)) { - file_contents = *fc; - break; - } - } - } - - print_detailed_message(err, &file_contents); - - last_rank = err->rank; - } -} - -void onyx_errors_enable() { - context.errors_enabled = 1; +void onyx_errors_enable(Context *context) { + context->errors_enabled = 1; } -void onyx_errors_disable() { - if (context.cycle_detected) { - context.errors_enabled = 1; +void onyx_errors_disable(Context *context) { + if (context->cycle_detected) { + context->errors_enabled = 1; return; } - context.errors_enabled = 0; + context->errors_enabled = 0; } -b32 onyx_errors_are_enabled() { - return context.errors_enabled; +b32 onyx_errors_are_enabled(Context *context) { + return context->errors_enabled; } -b32 onyx_has_errors() { - bh_arr_each(OnyxError, err, context.errors.errors) { +b32 onyx_has_errors(Context *context) { + bh_arr_each(OnyxError, err, context->errors.errors) { if (err->rank >= Error_Waiting_On) return 1; } return 0; } -void onyx_clear_errors() { - if (context.cycle_detected) return; +void onyx_clear_errors(Context *context) { + if (context->cycle_detected) return; - bh_arr_set_length(context.errors.errors, 0); + bh_arr_set_length(context->errors.errors, 0); } -void onyx_submit_error(OnyxError error) { - if (!context.errors_enabled) return; +void onyx_submit_error(Context *context, OnyxError error) { + if (!context->errors_enabled) return; - bh_arr_push(context.errors.errors, error); + bh_arr_push(context->errors.errors, error); + qsort(context->errors.errors, 
bh_arr_length(context->errors.errors), sizeof(OnyxError), errors_sort); } -void onyx_report_error(OnyxFilePos pos, OnyxErrorRank rank, char * format, ...) { - if (!context.errors_enabled) return; +void onyx_report_error(Context *context, OnyxFilePos pos, OnyxErrorRank rank, char * format, ...) { + if (!context->errors_enabled) return; va_list vargs; va_start(vargs, format); @@ -239,28 +65,15 @@ void onyx_report_error(OnyxFilePos pos, OnyxErrorRank rank, char * format, ...) OnyxError err = { .pos = pos, .rank = rank, - .text = bh_strdup(context.errors.msg_alloc, msg), + .text = bh_strdup(context->errors.msg_alloc, msg), }; - bh_arr_push(context.errors.errors, err); -} - -void onyx_submit_warning(OnyxError error) { - if (!context.errors_enabled) return; - - bh_file_contents file_contents = { 0 }; - bh_arr_each(bh_file_contents, fc, *context.errors.file_contents) { - if (!strcmp(fc->filename, error.pos.filename)) { - file_contents = *fc; - break; - } - } - - print_detailed_message(&error, &file_contents); + bh_arr_push(context->errors.errors, err); + qsort(context->errors.errors, bh_arr_length(context->errors.errors), sizeof(OnyxError), errors_sort); } -void onyx_report_warning(OnyxFilePos pos, char* format, ...) { - if (!context.errors_enabled) return; +void onyx_report_warning(Context *context, OnyxFilePos pos, char* format, ...) { + if (!context->errors_enabled) return; va_list vargs; va_start(vargs, format); @@ -270,8 +83,9 @@ void onyx_report_warning(OnyxFilePos pos, char* format, ...) 
{ OnyxError err = { .pos = pos, .rank = Error_Warning, - .text = bh_strdup(context.errors.msg_alloc, msg), + .text = bh_strdup(context->errors.msg_alloc, msg), }; - bh_arr_push(context.errors.errors, err); + bh_arr_push(context->errors.errors, err); + qsort(context->errors.errors, bh_arr_length(context->errors.errors), sizeof(OnyxError), errors_sort); } diff --git a/compiler/src/extensions.c b/compiler/src/extensions.c new file mode 100644 index 000000000..cf067268f --- /dev/null +++ b/compiler/src/extensions.c @@ -0,0 +1,496 @@ + +#include "astnodes.h" +#include "parser.h" +#include "utils.h" + +#define MSG_HOST_INIT 0 +#define MSG_HOST_TERMINATE 1 +#define MSG_HOST_EXPAND_MACRO 2 +#define MSG_HOST_HOOK 3 + +#define MSG_EXT_INIT 0 +#define MSG_EXT_ERROR_REPORT 1 +#define MSG_EXT_EXPANSION 2 +#define MSG_EXT_INJECT_CODE 3 +#define MSG_EXT_ACKNOWLEDGE_HOOK 4 + +#define HOOK_STALLED 1 + + +#if defined(_BH_LINUX) || defined(_BH_DARWIN) +#include +#include +#include + +static void extension_send(CompilerExtension *ext, void *data, i32 len) { + if (!ext->alive) return; + + i32 wrote = 0; + while (wrote < len) { + i32 w = write(ext->send_file, bh_pointer_add(data, wrote), len - wrote); + if (w > 0) wrote += w; + else { + ext->alive = 0; + return; + } + } +} + +static b32 extension_poll_recv(CompilerExtension *ext) { + struct pollfd fd; + fd.events = POLL_IN; + fd.fd = ext->recv_file; + + poll(&fd, 1, 0); + + return (fd.revents & POLL_IN) != 0; +} + +static i32 extension_recv(CompilerExtension *ext, void *buf, i32 maxlen) { + if (!ext->alive) return 0; + + i32 bytes_read = read(ext->recv_file, buf, maxlen); + if (bytes_read < 0) { + ext->alive = 0; + return 0; + } + return bytes_read; +} + +static void extension_kill(CompilerExtension *ext) { + ext->alive = 0; + kill(ext->pid, SIGKILL); + int status; + waitpid(ext->pid, &status, 0); +} + +static b32 extension_spawn(CompilerExtension *ext, const char *path) { + i32 ext_to_comp[2]; + i32 comp_to_ext[2]; + + if 
(pipe(ext_to_comp) || pipe(comp_to_ext)) { + return 0; + } + + u32 pid; + switch (pid = fork()) { + case -1: + close(ext_to_comp[0]); + close(ext_to_comp[1]); + close(comp_to_ext[0]); + close(comp_to_ext[1]); + return 0; + + case 0: + close(ext_to_comp[0]); + close(comp_to_ext[1]); + + dup2(comp_to_ext[0], 0); + dup2(ext_to_comp[1], 1); + + execlp("onyx", "onyx", "run", "--no-compiler-extensions", "--no-file-contents", path, NULL); + exit(1); + break; + + default: + close(ext_to_comp[1]); + close(comp_to_ext[0]); + break; + } + + ext->pid = pid; + ext->send_file = comp_to_ext[1]; + ext->recv_file = ext_to_comp[0]; + return 1; +} + +#endif + +#ifdef _BH_WINDOWS + +static void extension_send(CompilerExtension *ext, void *data, i32 len) { +} + +static i32 extension_recv(CompilerExtension *ext, void *buf, i32 maxlen) { + return 0; +} + +static b32 extension_poll_recv(CompilerExtension *ext) { + return 0; +} + +static void extension_kill(CompilerExtension *ext) { +} + +static b32 extension_spawn(CompilerExtension *ext, const char *path) { + printf("Compiler extensions are currently not supported on Windows. Sorry! 
:(\n"); + return 0; +} + +#endif + + + +static void extension_send_int(CompilerExtension *ext, int v) { + int value = v; + extension_send(ext, &value, sizeof(value)); +} + +static void extension_send_bytes(CompilerExtension *ext, void *data, i32 len) { + extension_send_int(ext, len); + extension_send(ext, data, len); +} + +static void extension_send_str(CompilerExtension *ext, const char *msg) { + extension_send_bytes(ext, (void *) msg, strlen(msg)); +} + +static i32 extension_recv_int(CompilerExtension *ext) { + int i; + if (extension_recv(ext, &i, sizeof(i)) < (i32) sizeof(int)) return 0; + return i; +} + +// static char *extension_recv_bytes(CompilerExtension *ext, i32 len) { +// bh_allocator a = bh_arena_allocator(&ext->arena); +// char *buf = bh_alloc(a, len); +// +// i32 bytes_read = 0; +// while (bytes_read < len) { +// if (!ext->alive) break; +// +// bytes_read += extension_recv(ext, buf + bytes_read, len); +// } +// +// return buf; +// } + +static char *extension_recv_str(CompilerExtension *ext, i32 *out_len) { + i32 len = extension_recv_int(ext); + if (out_len) *out_len = len; + + bh_allocator a = bh_arena_allocator(&ext->arena); + char *buf = bh_alloc(a, len + 1); + if (!buf) return NULL; + + i32 bytes_read = 0; + while (bytes_read < len) { + if (!ext->alive) break; + + bytes_read += extension_recv(ext, buf + bytes_read, len); + } + + buf[bytes_read] = '\0'; + return buf; +} + + + +TypeMatch compiler_extension_start(Context *context, const char *name, const char *containing_filename, Entity *ent, i32 *out_extension_id) { + if (*out_extension_id == 0) { + char* parent_folder = bh_path_get_parent(containing_filename, context->scratch_alloc); + + char *path = bh_strdup( + context->scratch_alloc, + bh_lookup_file((char *) name, parent_folder, NULL, NULL, NULL, context->scratch_alloc) + ); + + if (!bh_file_exists(path)) { + return TYPE_MATCH_FAILED; + } + + CompilerExtension ext; + ext.state = COMP_EXT_STATE_SPAWNING; + bh_arena_init(&ext.arena, 
context->gp_alloc, 1 * 1024 * 1024); // 1MB + ext.entity = ent; + + if (!extension_spawn(&ext, path)) { + return TYPE_MATCH_FAILED; + } + + ext.alive = 1; + ext.id = bh_arr_length(context->extensions) + 1; + *out_extension_id = ext.id; + bh_arr_push(context->extensions, ext); + + // Init message is 5 ints as defined in `core/onyx/compiler_extension` + i32 init_msg[5]; + init_msg[0] = MSG_HOST_INIT; + init_msg[1] = VERSION_MAJOR; + init_msg[2] = VERSION_MINOR; + init_msg[3] = VERSION_PATCH; + init_msg[4] = 1; + + extension_send(&ext, init_msg, sizeof(init_msg)); + + return TYPE_MATCH_YIELD; + + } else { + CompilerExtension *ext = &context->extensions[*out_extension_id - 1]; + + ext->state = COMP_EXT_STATE_INITIATING; + + b32 compiler_extension_negotiate_capabilities(Context *context, CompilerExtension *ext); + b32 negotiated = compiler_extension_negotiate_capabilities(context, ext); + if (!negotiated) { + return TYPE_MATCH_FAILED; + } + + ext->state = COMP_EXT_STATE_READY; + return TYPE_MATCH_SUCCESS; + } +} + +b32 compiler_extension_negotiate_capabilities(Context *context, CompilerExtension *ext) { + if (extension_recv_int(ext) != MSG_EXT_INIT) { + extension_kill(ext); + return 0; + } + + int extension_protocol_version = extension_recv_int(ext); + char *extension_name = extension_recv_str(ext, NULL); + + ext->name = bh_strdup(context->ast_alloc, extension_name); + + // handle "hooks" + if (extension_protocol_version >= 2) { + int hook_count = extension_recv_int(ext); + fori (i, 0, hook_count) { + int hook = extension_recv_int(ext); + + if (hook == 1) ext->supports_stalled_hook = 1; + } + } + + { + CompilerEvent *e = compiler_event_add(context, 1); + compiler_event_add_field_str(context, e, "message", + bh_aprintf(context->scratch_alloc, "Extension '%s' spawned with protocol version %d.", + ext->name, extension_protocol_version + ) + ); + } + + bh_arena_clear(&ext->arena); + return 1; +} + + +static AstNode * parse_code(Context *context, ProceduralMacroExpansionKind 
kind, char *code, i32 code_length, Entity *entity, OnyxFilePos pos) { + bh_file_contents file_contents; + file_contents.allocator = context->ast_alloc; + file_contents.data = code; + file_contents.length = code_length; + file_contents.filename = bh_aprintf(context->ast_alloc, "(expansion from %s:%d,%d)", pos.filename, pos.line, pos.column); + + OnyxTokenizer tokenizer = onyx_tokenizer_create(context, &file_contents); + onyx_lex_tokens(&tokenizer); + + OnyxParser parser = onyx_parser_create(context, &tokenizer); + parser.package = entity->package; + parser.file_scope = entity->scope; + + AstNode *result = NULL; + switch (kind) { + case PMEK_Expression: result = (AstNode *) onyx_parse_expression(&parser, entity->scope); break; + case PMEK_Statement: result = (AstNode *) onyx_parse_statement(&parser, entity->scope); break; + case PMEK_Top_Level: result = NULL; onyx_parse_top_level_statements(&parser, entity->scope); break; + } + + onyx_parser_free(&parser); + return result; +} + +static b32 handle_common_messages(Context *context, CompilerExtension *ext, int msg_type, OnyxToken *tkn, Entity *entity) { + switch (msg_type) { + case MSG_EXT_INIT: + extension_kill(ext); + ONYX_ERROR(tkn->pos, Error_Critical, "Protocol error when talking to '%s'.", ext->name); + return 0; + + case MSG_EXT_ERROR_REPORT: { + char *filename = extension_recv_str(ext, NULL); + u32 line = extension_recv_int(ext); + u32 column = extension_recv_int(ext); + u32 length = extension_recv_int(ext); + char *msg = extension_recv_str(ext, NULL); + + OnyxFilePos pos; + pos.column = column + (line == tkn->pos.line ? 
2 : 0); + pos.line = line; + pos.filename = tkn->pos.filename; + pos.length = length; + + i32 line_diff = line - tkn->pos.line; + if (line_diff == 0) { + pos.line_start = tkn->pos.line_start; + } else { + char *c = tkn->text; + while (*c && line_diff > 0) { + while (*c && *c != '\n') c++; + line_diff -= 1; + c++; + } + + pos.line_start = c; + } + + ONYX_ERROR(pos, Error_Critical, msg); + break; + } + + case MSG_EXT_INJECT_CODE: { + i32 code_length; + char *code = bh_strdup(context->ast_alloc, extension_recv_str(ext, &code_length)); + if (!code) { + ONYX_ERROR(tkn->pos, Error_Critical, "Code expansion of %d bytes is too large.", code_length); + return 0; + } + + parse_code(context, PMEK_Top_Level, code, code_length, entity, tkn->pos); + break; + } + + default: + extension_kill(ext); + ONYX_ERROR(tkn->pos, Error_Critical, "Protocol error when talking to '%s'.", ext->name); + return 0; + } + + return 1; +} + + +TypeMatch compiler_extension_expand_macro( + Context *context, + int extension_id, + ProceduralMacroExpansionKind kind, + const char *macro_name, + OnyxToken *body, + Entity *entity, + AstNode **out_node, + u32 *expansion_id, + b32 wait_for_response +) { + if (extension_id <= 0 || extension_id > bh_arr_length(context->extensions)) return TYPE_MATCH_FAILED; + + CompilerExtension *ext = &context->extensions[extension_id - 1]; + + if (!ext->alive) return TYPE_MATCH_FAILED; + + if (ext->state != COMP_EXT_STATE_READY && ext->current_expansion_id != (i32) *expansion_id) { + return TYPE_MATCH_YIELD; + } + + bh_arena_clear(&ext->arena); + + // If the extension is in the ready state, then it is waiting for an expansion request. + // We can issue this expansion request and flag that the extension is now in the expanding state. 
+ if (ext->state == COMP_EXT_STATE_READY) { + *expansion_id = ++context->next_expansion_id; + ext->current_expansion_id = *expansion_id; + ext->state = COMP_EXT_STATE_EXPANDING; + + extension_send_int(ext, MSG_HOST_EXPAND_MACRO); + extension_send_int(ext, *expansion_id); + extension_send_int(ext, kind); + extension_send_str(ext, body->pos.filename); + extension_send_int(ext, body->pos.line); + extension_send_int(ext, body->pos.column); + extension_send_int(ext, 0); + extension_send_str(ext, macro_name); + extension_send_bytes(ext, body->text, body->length); + } + + while (1) { + if (!wait_for_response && !extension_poll_recv(ext)) { + return TYPE_MATCH_YIELD; + } + + int msg_type = extension_recv_int(ext); + + if (msg_type == MSG_EXT_EXPANSION) { + u32 id = extension_recv_int(ext); + if (id != *expansion_id) { + // PROTOCOL ERROR + extension_kill(ext); + ONYX_ERROR(body->pos, Error_Critical, "Protocol error when talking to '%s'.", ext->name); + return TYPE_MATCH_FAILED; + } + + int status = extension_recv_int(ext); + if (status == 0) { + int reason = extension_recv_int(ext); + switch (reason) { + // TODO: Make this an enum + case 0: ONYX_ERROR(body->pos, Error_Critical, "Macro expansion '%s' is not supported by '%s'.", macro_name, ext->name); break; + // case 1: ONYX_ERROR(body->pos, Error_Critical, "Macro expansion '%s' failed because of a syntax error.", macro_name); break; + } + return TYPE_MATCH_FAILED; + } + + i32 code_length; + char *code = extension_recv_str(ext, &code_length); + if (!code) { + return TYPE_MATCH_FAILED; + } + + code = bh_strdup(context->ast_alloc, code); + *out_node = parse_code(context, kind, code, code_length, entity, body->pos); + + ext->state = COMP_EXT_STATE_READY; + ext->current_expansion_id = 0; + return TYPE_MATCH_SUCCESS; + } + + if (!handle_common_messages(context, ext, msg_type, body, entity)) { + return TYPE_MATCH_FAILED; + } + } +} + + +TypeMatch compiler_extension_hook_stalled(Context *context, int extension_id) { + if 
(extension_id <= 0 || extension_id > bh_arr_length(context->extensions)) return TYPE_MATCH_FAILED; + + CompilerExtension *ext = &context->extensions[extension_id - 1]; + if (!ext->supports_stalled_hook) return TYPE_MATCH_FAILED; + + if (!ext->alive) return TYPE_MATCH_FAILED; + + if (ext->state != COMP_EXT_STATE_READY) { + return TYPE_MATCH_FAILED; + } + + bh_arena_clear(&ext->arena); + + ext->state = COMP_EXT_STATE_HANDLING_HOOK; + + extension_send_int(ext, MSG_HOST_HOOK); + extension_send_int(ext, HOOK_STALLED); + extension_send_int(ext, HOOK_STALLED); + + OnyxToken *tkn = ext->entity->compiler_extension->token; + + while (1) { + int msg_type = extension_recv_int(ext); + + if (msg_type == MSG_EXT_ACKNOWLEDGE_HOOK) { + u32 id = extension_recv_int(ext); + if (id != HOOK_STALLED) { + // PROTOCOL ERROR + extension_kill(ext); + ONYX_ERROR(tkn->pos, Error_Critical, "Protocol error when talking to '%s'.", ext->name); + return TYPE_MATCH_FAILED; + } + + ext->state = COMP_EXT_STATE_READY; + return TYPE_MATCH_SUCCESS; + } + + if (!handle_common_messages(context, ext, msg_type, tkn, ext->entity)) { + return TYPE_MATCH_FAILED; + } + } +} + diff --git a/compiler/src/lex.c b/compiler/src/lex.c index cc0e4d94f..c5341229b 100644 --- a/compiler/src/lex.c +++ b/compiler/src/lex.c @@ -4,10 +4,10 @@ #include "errors.h" static const char* token_type_names[] = { - "TOKEN_TYPE_UNKNOWN", - "TOKEN_TYPE_END_STREAM", + "UNKNOWN", + "the end of file", - "TOKEN_TYPE_COMMENT", + "a comment", "", // start "package", @@ -67,18 +67,23 @@ static const char* token_type_names[] = { ">>=", ">>>=", "..", + "..=", "~~", "??", - "TOKEN_TYPE_SYMBOL", - "TOKEN_TYPE_LITERAL_STRING", - "TOKEN_TYPE_LITERAL_CHAR", - "TOKEN_TYPE_LITERAL_INTEGER", - "TOKEN_TYPE_LITERAL_FLOAT", + "a symbol", + "a string", + "a character literal", + "an integer", + "a float", "true", "false", - "inserted semicolon", + "an inserted semicolon", + + "a doc comment", + + "a procedural macro body", "TOKEN_TYPE_COUNT" }; @@ -124,8 
+129,10 @@ static inline b32 token_lit(OnyxTokenizer* tokenizer, OnyxToken* tk, char* lit, } const char *token_type_name(TokenType tkn_type) { + static char hack_tmp_buffer[32]; if (tkn_type < Token_Type_Ascii_End) { - return bh_aprintf(global_scratch_allocator, "%c", (char) tkn_type); + bh_snprintf(hack_tmp_buffer, 31, "%c", (char) tkn_type); + return hack_tmp_buffer; } else { return token_type_names[tkn_type - Token_Type_Ascii_End]; } @@ -135,7 +142,9 @@ const char* token_name(OnyxToken * tkn) { TokenType tkn_type = tkn->type; if (tkn_type == Token_Type_Symbol) { - return bh_aprintf(global_scratch_allocator, "%b", tkn->text, tkn->length); + static char hack_tmp_buffer[512]; + bh_snprintf(hack_tmp_buffer, 511, "%b", tkn->text, tkn->length); + return hack_tmp_buffer; } return token_type_name(tkn_type); @@ -211,14 +220,22 @@ OnyxToken* onyx_get_token(OnyxTokenizer* tokenizer) { // Comments if (*tokenizer->curr == '/' && *(tokenizer->curr + 1) == '/') { tokenizer->curr += 2; - tk.type = Token_Type_Comment; + + if (*tokenizer->curr == '/') { + tokenizer->curr += 1; + tk.type = Token_Type_Doc_Comment; + } else { + tk.type = Token_Type_Comment; + } + tk.text = tokenizer->curr; + tk.pos.column = (u16)(tokenizer->curr - tokenizer->line_start) + 1; while (*tokenizer->curr != '\n' && tokenizer->curr != tokenizer->end) { INCREMENT_CURR_TOKEN(tokenizer); } - tk.length = tokenizer->curr - tk.text - 2; + tk.length = tokenizer->curr - tk.text; if (bh_arr_length(tokenizer->tokens) == 0 && bh_str_starts_with(tk.text, "+optional-semicolons")) { tokenizer->optional_semicolons = 1; @@ -288,7 +305,7 @@ OnyxToken* onyx_get_token(OnyxTokenizer* tokenizer) { if (*tokenizer->curr == '\n' && ch == '\'') { tk.pos.length = (u16) len; - onyx_report_error(tk.pos, Error_Critical, "Character literal not terminated by end of line."); + onyx_report_error(tokenizer->context, tk.pos, Error_Critical, "Character literal not terminated by end of line."); break; } @@ -301,7 +318,7 @@ OnyxToken* 
onyx_get_token(OnyxTokenizer* tokenizer) { INCREMENT_CURR_TOKEN(tokenizer); if (tokenizer->curr == tokenizer->end) { - onyx_report_error(tk.pos, Error_Critical, "String literal not closed. String literal starts here."); + onyx_report_error(tokenizer->context, tk.pos, Error_Critical, "String literal not closed. String literal starts here."); break; } } @@ -319,7 +336,7 @@ OnyxToken* onyx_get_token(OnyxTokenizer* tokenizer) { INCREMENT_CURR_TOKEN(tokenizer); INCREMENT_CURR_TOKEN(tokenizer); u32 len = 3; - while (char_is_num(*(tokenizer->curr + 1)) || charset_contains("abcdefABCDEF", *(tokenizer->curr + 1))) { + while (char_is_num(*(tokenizer->curr + 1)) || charset_contains("abcdefABCDEF_", *(tokenizer->curr + 1))) { len++; INCREMENT_CURR_TOKEN(tokenizer); } @@ -337,15 +354,20 @@ OnyxToken* onyx_get_token(OnyxTokenizer* tokenizer) { tk.type = Token_Type_Literal_Integer; b32 hit_decimal = 0; + b32 hit_exponent = 0; if (*tokenizer->curr == '.') hit_decimal = 1; u32 len = 1; while (char_is_num(*(tokenizer->curr + 1)) - || (!hit_decimal && *(tokenizer->curr + 1) == '.' && *(tokenizer->curr + 2) != '.')) { + || (*(tokenizer->curr + 1) == '_') + || (!hit_exponent && *(tokenizer->curr + 1) == 'e') + || (!hit_decimal && !hit_exponent && *(tokenizer->curr + 1) == '.' && *(tokenizer->curr + 2) != '.') + || (hit_exponent && *(tokenizer->curr + 1) == '-')) { len++; INCREMENT_CURR_TOKEN(tokenizer); - if (*tokenizer->curr == '.') hit_decimal = 1; + if (*tokenizer->curr == '.') hit_decimal = 1; + if (*tokenizer->curr == 'e') hit_exponent = 1; } if (*(tokenizer->curr + 1) == 'f') { @@ -355,8 +377,31 @@ OnyxToken* onyx_get_token(OnyxTokenizer* tokenizer) { INCREMENT_CURR_TOKEN(tokenizer); } - if (hit_decimal) tk.type = Token_Type_Literal_Float; + if (hit_decimal || hit_exponent) tk.type = Token_Type_Literal_Float; + + tk.length = len; + + INCREMENT_CURR_TOKEN(tokenizer); + goto token_parsed; + } + + if (tokenizer->curr[0] == '!' 
&& tokenizer->curr[1] == '{') { + INCREMENT_CURR_TOKEN(tokenizer); + INCREMENT_CURR_TOKEN(tokenizer); + + tk.text = tokenizer->curr; + + i32 bracket_count = 0; + i32 len = 0; + while ((bracket_count > 0 || tokenizer->curr[0] != '}') && tokenizer->curr != tokenizer->end) { + if (tokenizer->curr[0] == '{') bracket_count += 1; + if (tokenizer->curr[0] == '}') bracket_count -= 1; + + len++; + INCREMENT_CURR_TOKEN(tokenizer); + } + tk.type = Token_Type_Proc_Macro_Body; tk.length = len; INCREMENT_CURR_TOKEN(tokenizer); @@ -434,7 +479,7 @@ OnyxToken* onyx_get_token(OnyxTokenizer* tokenizer) { case '<': LITERAL_TOKEN("<=", 0, Token_Type_Less_Equal); - LITERAL_TOKEN("<-", 0, Token_Type_Right_Arrow); + LITERAL_TOKEN("<-", 0, Token_Type_Left_Arrow); LITERAL_TOKEN("<<=", 0, Token_Type_Shl_Equal); LITERAL_TOKEN("<<", 0, Token_Type_Shift_Left); break; @@ -488,6 +533,7 @@ OnyxToken* onyx_get_token(OnyxTokenizer* tokenizer) { break; case '.': + LITERAL_TOKEN("..=", 0, Token_Type_Dot_Dot_Equal); LITERAL_TOKEN("..", 0, Token_Type_Dot_Dot); break; @@ -537,7 +583,9 @@ OnyxToken* onyx_get_token(OnyxTokenizer* tokenizer) { case Token_Type_Literal_Float: case Token_Type_Literal_Char: case Token_Type_Empty_Block: + case Token_Type_Proc_Macro_Body: case '?': + case '!': case ')': case '}': case ']': @@ -551,8 +599,10 @@ OnyxToken* onyx_get_token(OnyxTokenizer* tokenizer) { return &tokenizer->tokens[bh_arr_length(tokenizer->tokens) - 1]; } -OnyxTokenizer onyx_tokenizer_create(bh_allocator allocator, bh_file_contents *fc) { +OnyxTokenizer onyx_tokenizer_create(Context *context, bh_file_contents *fc) { OnyxTokenizer tknizer = { + .context = context, + .start = fc->data, .curr = fc->data, .end = bh_pointer_add(fc->data, fc->length), @@ -563,11 +613,11 @@ OnyxTokenizer onyx_tokenizer_create(bh_allocator allocator, bh_file_contents *fc .line_start = fc->data, .tokens = NULL, - .optional_semicolons = context.options->enable_optional_semicolons, + .optional_semicolons = 
context->options->enable_optional_semicolons, .insert_semicolon = 0, }; - bh_arr_new(allocator, tknizer.tokens, 1 << 12); + bh_arr_new(context->token_alloc, tknizer.tokens, 1 << 12); return tknizer; } @@ -581,8 +631,8 @@ void onyx_lex_tokens(OnyxTokenizer* tokenizer) { tk = onyx_get_token(tokenizer); } while (tk->type != Token_Type_End_Stream); - context.lexer_lines_processed += tokenizer->line_number - 1; - context.lexer_tokens_processed += bh_arr_length(tokenizer->tokens); + tokenizer->context->stats.lexer_lines_processed += tokenizer->line_number - 1; + tokenizer->context->stats.lexer_tokens_processed += bh_arr_length(tokenizer->tokens); } b32 token_equals(OnyxToken* tkn1, OnyxToken* tkn2) { diff --git a/compiler/src/library_main.c b/compiler/src/library_main.c new file mode 100644 index 000000000..cf10ce5ed --- /dev/null +++ b/compiler/src/library_main.c @@ -0,0 +1,939 @@ +#define BH_DEFINE +#define BH_NO_TABLE +#define STB_DS_IMPLEMENTATION +#include "bh.h" + +#include "lex.h" +#include "errors.h" +#include "parser.h" +#include "utils.h" +#include "wasm_emit.h" +#include "doc.h" +#include "onyx.h" + +#define STRINGIFY_(x) #x +#define STRINGIFY(x) STRINGIFY_(x) + +// +// Types +// + +struct onyx_context_t { + Context context; +}; + + +// +// Metadata +// + +int32_t onyx_version_major() { + return VERSION_MAJOR; +} + +int32_t onyx_version_minor() { + return VERSION_MINOR; +} + +int32_t onyx_version_patch() { + return VERSION_PATCH; +} + +char *onyx_version_suffix() { + return VERSION_SUFFIX; +} + +char *onyx_version_build_time() { + return __TIMESTAMP__; +} + +char *onyx_version_runtime() { +#ifdef ONYX_RUNTIME_LIBRARY + return STRINGIFY(ONYX_RUNTIME_LIBRARY); +#else + return "none"; +#endif +} + + +// +// Forward declarations of internal procedures +// + +static AstInclude* create_load(Context *context, char* filename, int32_t length); +static void introduce_defined_variables(Context *context); +static void create_and_add_defined_variable(Context *context, 
DefinedVariable *var); +static void link_wasm_module(Context *context); + + +// +// Lifecycle +// + + +onyx_context_t *onyx_context_create() { + onyx_context_t *ctx = malloc(sizeof(*ctx)); + + Context *context = &ctx->context; + memset(context, 0, sizeof *context); + + bh_scratch_init(&context->scratch, bh_heap_allocator(), 256 * 1024); // NOTE: 256 KiB + context->scratch_alloc = bh_scratch_allocator(&context->scratch); + + bh_managed_heap_init(&context->heap); + context->gp_alloc = bh_managed_heap_allocator(&context->heap); + + context->token_alloc = context->gp_alloc; + + // NOTE: Create the arena where tokens and AST nodes will exist + // Prevents nodes from being scattered across memory due to fragmentation + bh_arena_init(&context->ast_arena, context->gp_alloc, 16 * 1024 * 1024); // 16MB + context->ast_alloc = bh_arena_allocator(&context->ast_arena); + + types_init(context); + prepare_builtins(context); + compiler_events_init(context); + + // Options should be moved directly inside of the context instead of through a pointer... 
+ context->options = malloc(sizeof(* context->options)); + memset(context->options, 0, sizeof(* context->options)); + context->options->use_post_mvp_features = 1; + context->options->enable_optional_semicolons = 1; + context->options->generate_type_info = 1; + + OnyxFilePos internal_location = { 0 }; + internal_location.filename = ""; + internal_location.line = 1; + internal_location.column = 1; + context->global_scope = scope_create(context, NULL, internal_location); + + sh_new_arena(context->packages); + bh_arr_new(context->gp_alloc, context->scopes, 128); + + onyx_errors_init(context, &context->loaded_files); + + context->wasm_module = bh_alloc_item(context->gp_alloc, OnyxWasmModule); + onyx_wasm_module_initialize(context, context->wasm_module); + + entity_heap_init(context->gp_alloc, &context->entities); + + return ctx; +} + +void onyx_context_free(onyx_context_t *ctx) { + Context *context = &ctx->context; + + bh_arr_each(Scope *, pscope, context->scopes) { + shfree((*pscope)->symbols); + } + + onyx_wasm_module_free(context->wasm_module); + bh_arena_free(&context->ast_arena); + bh_arr_free(context->loaded_files); + bh_arr_free(context->scopes); + bh_scratch_free(&context->scratch); + bh_managed_heap_free(&context->heap); +} + +void onyx_options_ready(onyx_context_t *ctx) { + Context *context = &ctx->context; + + // NOTE: Add builtin entities to pipeline. 
+ entity_heap_insert(&context->entities, ((Entity) { + .state = Entity_State_Parse_Builtin, + .type = Entity_Type_Load_File, + .package = NULL, + .include = create_load(context, "core:builtin", -1), + })); + + entity_heap_insert(&context->entities, ((Entity) { + .state = Entity_State_Parse_Builtin, + .type = Entity_Type_Load_File, + .package = NULL, + .include = create_load(context, "core:runtime/build_opts", -1), + })); + + if (context->options->runtime != Runtime_Custom) { + // HACK + context->special_global_entities.remaining = 5; + + context->special_global_entities.runtime_info_types_entity = entity_heap_insert(&context->entities, ((Entity) { + .state = Entity_State_Parse, + .type = Entity_Type_Load_File, + .package = NULL, + .include = create_load(context, "core:runtime/info/types", -1), + })); + context->special_global_entities.runtime_info_foreign_entity = entity_heap_insert(&context->entities, ((Entity) { + .state = Entity_State_Parse, + .type = Entity_Type_Load_File, + .package = NULL, + .include = create_load(context, "core:runtime/info/foreign_blocks", -1), + })); + context->special_global_entities.runtime_info_proc_tags_entity = entity_heap_insert(&context->entities, ((Entity) { + .state = Entity_State_Parse, + .type = Entity_Type_Load_File, + .package = NULL, + .include = create_load(context, "core:runtime/info/proc_tags", -1), + })); + context->special_global_entities.runtime_info_global_tags_entity = entity_heap_insert(&context->entities, ((Entity) { + .state = Entity_State_Parse, + .type = Entity_Type_Load_File, + .package = NULL, + .include = create_load(context, "core:runtime/info/global_tags", -1), + })); + context->special_global_entities.runtime_info_stack_trace_entity = entity_heap_insert(&context->entities, ((Entity) { + .state = Entity_State_Parse, + .type = Entity_Type_Load_File, + .package = NULL, + .include = create_load(context, "core:runtime/info/stack_trace", -1), + })); + } + + add_entities_for_node(&context->entities, NULL, (AstNode 
*) &context->builtins.stack_top, context->global_scope, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) &context->builtins.heap_start, context->global_scope, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) &context->builtins.tls_base, context->global_scope, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) &context->builtins.tls_size, context->global_scope, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) &context->builtins.closure_base, context->global_scope, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) &context->builtins.stack_trace, context->global_scope, NULL); + + if (!context->options->no_core) { + entity_heap_insert(&context->entities, ((Entity) { + .state = Entity_State_Parse, + .type = Entity_Type_Load_File, + .package = NULL, + .include = create_load(context, "core:module", -1), + })); + } + + if (context->options->generate_symbol_info_file) { + context->symbol_info = bh_alloc_item(context->gp_alloc, SymbolInfoTable); + bh_imap_init(&context->symbol_info->node_to_id, context->gp_alloc, 512); + bh_arr_new(context->gp_alloc, context->symbol_info->symbols, 128); + bh_arr_new(context->gp_alloc, context->symbol_info->symbols_resolutions, 128); + sh_new_arena(context->symbol_info->files); + } + + if (context->options->generate_odoc) { + context->doc_info = bh_alloc_item(context->gp_alloc, OnyxDocInfo); + memset(context->doc_info, 0, sizeof(OnyxDocInfo)); + bh_arr_new(context->gp_alloc, context->doc_info->procedures, 128); + bh_arr_new(context->gp_alloc, context->doc_info->structures, 128); + bh_arr_new(context->gp_alloc, context->doc_info->enumerations, 128); + } +} + +static void parse_source_file(Context *context, bh_file_contents* file_contents) { + // :Remove passing the allocators as parameters + OnyxTokenizer tokenizer = onyx_tokenizer_create(context, file_contents); + onyx_lex_tokens(&tokenizer); + + file_contents->line_count = tokenizer.line_number; 
+ + OnyxParser parser = onyx_parser_create(context, &tokenizer); + onyx_parse(&parser); + onyx_parser_free(&parser); +} + +static b32 process_source_file(Context *context, char* filename) { + bh_arr_each(bh_file_contents, fc, context->loaded_files) { + // Duplicates are detected here and since these filenames will be the full path, + // string comparing them should be all that is necessary. + if (!strcmp(fc->filename, filename)) return 1; + } + + bh_file file; + bh_file_error err = bh_file_open(&file, filename); + if (err != BH_FILE_ERROR_NONE) { + return 0; + } + + bh_file_contents fc = bh_file_read_contents(context->token_alloc, &file); + bh_file_close(&file); + + bh_arr_push(context->loaded_files, fc); + + // if (context->options->verbose_output == 2) + // bh_printf("Processing source file: %s (%d bytes)\n", file.filename, fc.length); + + parse_source_file(context, &bh_arr_last(context->loaded_files)); + return 1; +} + +static b32 process_load_entity(Context *context, Entity* ent) { + assert(ent->type == Entity_Type_Load_File || ent->type == Entity_Type_Load_Path); + AstInclude* include = ent->include; + + if (include->kind == Ast_Kind_Load_File) { + // :RelativeFiles + const char* parent_file = include->token->pos.filename; + if (parent_file == NULL) parent_file = "."; + + char* parent_folder = bh_path_get_parent(parent_file, context->scratch_alloc); + char* filename = bh_search_for_mapped_file( + include->name, + parent_folder, + ".onyx", + context->options->mapped_folders, + context->gp_alloc + ); + + if (filename == NULL) { + OnyxFilePos error_pos = include->token->pos; + if (error_pos.filename == NULL) { + ONYX_ERROR(error_pos, Error_Command_Line_Arg, "Failed to open file '%s'", include->name); + } else { + ONYX_ERROR(error_pos, Error_Critical, "Failed to open file '%s'", include->name); + } + return 0; + } + + return process_source_file(context, filename); + + } else if (include->kind == Ast_Kind_Load_All) { + const char* parent_file = 
include->token->pos.filename; + if (parent_file == NULL) parent_file = "."; + + char* parent_folder = bh_path_get_parent(parent_file, context->scratch_alloc); + + char* folder = bh_search_for_mapped_file( + include->name, + parent_folder, + "", + context->options->mapped_folders, + context->gp_alloc + ); + bh_path_convert_separators(folder); + + bh_arr(char *) folders_to_process = NULL; + bh_arr_new(context->gp_alloc, folders_to_process, 2); + + bh_arr_push(folders_to_process, bh_strdup(context->scratch_alloc, folder)); + + while (bh_arr_length(folders_to_process) > 0) { + char *folder = bh_arr_pop(folders_to_process); + bh_dir dir = bh_dir_open(folder); + if (dir == NULL) { + ONYX_ERROR(include->token->pos, Error_Critical, "Could not find or open folder '%s'.", folder); + return 0; + } + + bh_dirent entry; + char fullpath[512]; + while (bh_dir_read(dir, &entry)) { + if (entry.type == BH_DIRENT_FILE && bh_str_ends_with(entry.name, ".onyx")) { + bh_snprintf(fullpath, 511, "%s/%s", folder, entry.name); + bh_path_convert_separators(fullpath); + + char* formatted_name = bh_path_get_full_name(fullpath, context->gp_alloc); + + AstInclude* new_include = onyx_ast_node_new(context->ast_alloc, sizeof(AstInclude), Ast_Kind_Load_File); + new_include->token = include->token; + new_include->name = formatted_name; + add_entities_for_node(&context->entities, NULL, (AstNode *) new_include, include->entity->scope, include->entity->package); + } + + if (entry.type == BH_DIRENT_DIRECTORY && include->recursive) { + if (!strcmp(entry.name, ".") || !strcmp(entry.name, "..")) continue; + + bh_snprintf(fullpath, 511, "%s/%s", folder, entry.name); + char* formatted_name = bh_path_get_full_name(fullpath, context->scratch_alloc); // Could this overflow the scratch allocator? 
+ + bh_arr_push(folders_to_process, formatted_name); + } + } + + bh_dir_close(dir); + } + + return 1; + + } else if (include->kind == Ast_Kind_Load_Path) { + ONYX_WARNING(include->token->pos, "'#load_path' has been deprecated and no longer does anything."); + + } else if (include->kind == Ast_Kind_Library_Path) { + bh_arr_push(context->wasm_module->library_paths, include->name); + } + + return 1; +} + +static b32 process_entity(Context *context, Entity* ent) { + EntityState before_state = ent->state; + switch (before_state) { + case Entity_State_Error: + if (ent->type != Entity_Type_Error) { + ONYX_ERROR(ent->expr->token->pos, Error_Critical, "Error entity unexpected. This is definitely a compiler bug"); + } else { + ONYX_ERROR(ent->error->token->pos, Error_Critical, "Static error occured: '%b'", ent->error->error_msg->text, ent->error->error_msg->length); + } + break; + + case Entity_State_Parse_Builtin: + process_load_entity(context, ent); + ent->state = Entity_State_Finalized; + break; + + case Entity_State_Introduce_Symbols: + // Currently, introducing symbols is handled in the checker + // function. Maybe there should be a different place where that happens? 
+ check_entity(context, ent); + break; + + case Entity_State_Parse: + if (!context->builtins_initialized) { + context->builtins_initialized = 1; + initialize_builtins(context); + introduce_build_options(context); + introduce_defined_variables(context); + } + + // GROSS + if (context->special_global_entities.remaining == 0) { + context->special_global_entities.remaining--; + initalize_special_globals(context); + } + + if (process_load_entity(context, ent)) { + // GROSS + if ( ent == context->special_global_entities.runtime_info_types_entity + || ent == context->special_global_entities.runtime_info_proc_tags_entity + || ent == context->special_global_entities.runtime_info_global_tags_entity + || ent == context->special_global_entities.runtime_info_foreign_entity + || ent == context->special_global_entities.runtime_info_stack_trace_entity) { + context->special_global_entities.remaining--; + } + + ent->state = Entity_State_Finalized; + } else { + ent->macro_attempts++; + } + break; + + case Entity_State_Check_Types: check_entity(context, ent); break; + case Entity_State_Code_Gen: emit_entity(context, ent); break; + + default: break; + } + + b32 changed = ent->state != before_state; + return changed; +} + +static void dump_cycles(Context *context) { + context->cycle_detected = 1; + Entity* ent; + + while (1) { + ent = entity_heap_top(&context->entities); + entity_heap_remove_top(&context->entities); + if (ent->state < Entity_State_Code_Gen) process_entity(context, ent); + else break; + + if (bh_arr_length(context->entities.entities) == 0) { + break; + } + } +} + +// TODO: relocate this function +static void send_stalled_hooks(Context *context) { + bh_arr_each(CompilerExtension, ext, context->extensions) { + compiler_extension_hook_stalled(context, ext->id); + } +} + +onyx_pump_t onyx_pump(onyx_context_t *ctx) { + Context *context = &ctx->context; + + compiler_events_clear(context); + + if (bh_arr_is_empty(context->entities.entities)) { + // Once the module has been 
linked, we are all done and ready to say everything compiled successfully! + if (context->wasm_module_linked) return ONYX_PUMP_DONE; + + link_wasm_module(context); + context->wasm_module_linked = 1; + return ONYX_PUMP_DONE; + } + + Entity* ent = entity_heap_top(&context->entities); + + // Mostly a preventative thing to ensure that even if somehow + // errors were left disabled, they are re-enabled in this cycle. + onyx_errors_enable(context); + entity_heap_remove_top(&context->entities); + + u64 perf_start; + EntityType perf_entity_type; + EntityState perf_entity_state; + if (context->options->running_perf) { + perf_start = bh_time_curr_micro(); + perf_entity_type = ent->type; + perf_entity_state = ent->state; + } + + b32 changed = process_entity(context, ent); + + // NOTE: VERY VERY dumb cycle breaking. Basically, remember the first entity that did + // not change (i.e. did not make any progress). Then everytime an entity doesn't change, + // check if it is the same entity. If it is, it means all other entities that were processed + // between the two occurences didn't make any progress either, and there must be a cycle. + // - brendanfh 2021/02/06 + // + // Because of the recent changes to the compiler architecture (again), this condition + // does not always hold anymore. There can be nodes that get scheduled multiple times + // before the "key" node that will unblock the progress. This means a more sophisticated + // cycle detection algorithm must be used. 
+ // + if (!changed) { + if (!context->watermarked_node || context->watermarked_node->macro_attempts < ent->macro_attempts) { + context->watermarked_node = ent; + context->highest_watermark = bh_max(context->highest_watermark, ent->macro_attempts); + } + else if (context->watermarked_node == ent) { + if (ent->macro_attempts > context->highest_watermark) { + entity_heap_insert_existing(&context->entities, ent); + + if (context->cycle_almost_detected == 4) { + dump_cycles(context); + } else if (context->cycle_almost_detected == 3) { + send_stalled_hooks(context); + } else if (context->cycle_almost_detected == 2) { + compiler_event_add(context, 4); + } + + context->cycle_almost_detected += 1; + } + } + } else { + context->watermarked_node = NULL; + context->cycle_almost_detected = 0; + } + + if (onyx_has_errors(context)) { + return ONYX_PUMP_ERRORED; + } + + if (ent->state != Entity_State_Finalized && ent->state != Entity_State_Failed) + entity_heap_insert_existing(&context->entities, ent); + + if (context->options->running_perf) { + u64 perf_end = bh_time_curr_micro(); + + u64 duration = perf_end - perf_start; + context->stats.microseconds_per_type[perf_entity_type] += duration; + context->stats.microseconds_per_state[perf_entity_state] += duration; + } + + return ONYX_PUMP_CONTINUE; +} + +// +// Events +// + +int32_t onyx_event_count(onyx_context_t *ctx) { + return ctx->context.events.event_count; +} + +onyx_event_type_t onyx_event_type(onyx_context_t *ctx, int event_idx) { + if (event_idx >= ctx->context.events.event_count) return ONYX_EVENT_UNKNOWN; + + CompilerEvent *ev = ctx->context.events.first; + while (event_idx-- > 0 && ev) { + ev = ev->next; + } + + if (!ev) return ONYX_EVENT_UNKNOWN; + + return (onyx_event_type_t) ev->type; +} + +int32_t onyx_event_field_int(onyx_context_t *ctx, int event_idx, char *field) { + if (event_idx >= ctx->context.events.event_count) return 0; + + CompilerEvent *ev = ctx->context.events.first; + while (ev && event_idx-- > 0) { + 
ev = ev->next; + } + + if (!ev) return 0; + + CompilerEventField *f = ev->first_field; + while (f && strcmp(f->field, field) != 0) { + f = f->next; + } + + if (!f) return 0; + if (f->type != 1) return 0; + + return f->i; +} + +const char *onyx_event_field_str(onyx_context_t *ctx, int event_idx, char *field) { + if (event_idx >= ctx->context.events.event_count) return ""; + + CompilerEvent *ev = ctx->context.events.first; + while (ev && event_idx-- > 0) { + ev = ev->next; + } + + if (!ev) return ""; + + CompilerEventField *f = ev->first_field; + while (f && strcmp(f->field, field) != 0) { + f = f->next; + } + + if (!f) return ""; + if (f->type != 0) return ""; + + return f->s; +} + + + + + +// +// Options +// +int32_t onyx_set_option_cstr(onyx_context_t *ctx, onyx_option_t opt, char *value) { + return onyx_set_option_bytes(ctx, opt, value, strlen(value)); +} + +int32_t onyx_set_option_bytes(onyx_context_t *ctx, onyx_option_t opt, char *value, int32_t length) { + if (length < 0) length = strlen(value); + return 0; +} + +int32_t onyx_set_option_int(onyx_context_t *ctx, onyx_option_t opt, int32_t value) { + switch (opt) { + case ONYX_OPTION_POST_MVP_FEATURES: ctx->context.options->use_post_mvp_features = value; return 1; + case ONYX_OPTION_MULTI_THREADING: ctx->context.options->use_multi_threading = value; return 1; + case ONYX_OPTION_GENERATE_FOREIGN_INFO: ctx->context.options->generate_foreign_info = value; return 1; + case ONYX_OPTION_GENERATE_TYPE_INFO: ctx->context.options->generate_type_info = value; return 1; + case ONYX_OPTION_GENERATE_METHOD_INFO: ctx->context.options->generate_method_info = value; return 1; + case ONYX_OPTION_GENERATE_DEBUG_INFO: ctx->context.options->debug_info_enabled = value; return 1; + case ONYX_OPTION_GENERATE_STACK_TRACE: ctx->context.options->stack_trace_enabled = value; return 1; + case ONYX_OPTION_GENERATE_NAME_SECTION: ctx->context.options->generate_name_section = value; return 1; + case ONYX_OPTION_GENERATE_SYMBOL_INFO: 
ctx->context.options->generate_symbol_info_file = value; return 1; + case ONYX_OPTION_GENERATE_LSP_INFO: ctx->context.options->generate_lsp_info_file = value; return 1; + case ONYX_OPTION_GENERATE_DOC_INFO: ctx->context.options->generate_odoc = value; return 1; + case ONYX_OPTION_DISABLE_CORE: ctx->context.options->no_core = value; return 1; + case ONYX_OPTION_DISABLE_STALE_CODE: ctx->context.options->no_stale_code = value; return 1; + case ONYX_OPTION_OPTIONAL_SEMICOLONS: ctx->context.options->enable_optional_semicolons = value; return 1; + case ONYX_OPTION_DISABLE_FILE_CONTENTS: ctx->context.options->no_file_contents = value; return 1; + case ONYX_OPTION_DISABLE_EXTENSIONS: ctx->context.options->no_compiler_extensions = value; return 1; + case ONYX_OPTION_PLATFORM: ctx->context.options->runtime = value; return 1; + + default: + break; + } + + return 0; +} + +void onyx_add_defined_var(onyx_context_t *ctx, char *variable, int32_t variable_length, char *value, int32_t value_length) { + if (variable_length < 0) variable_length = strlen(variable); + if (value_length < 0) value_length = strlen(value); + + bh_arr_push(ctx->context.defined_variables, ((DefinedVariable) { + .key = bh_strdup_len(ctx->context.ast_alloc, variable, variable_length), + .value = bh_strdup_len(ctx->context.ast_alloc, value, value_length), + })); +} + +// +// Loading code +// + +/// Adds a file to the compilation, following typical `#load` rules. +/// 1. `foo:file.onyx` will search in the `foo` mapped folder. +/// 2. `file.onyx` will search in the current directory for `file.onyx`. 
+void onyx_include_file(onyx_context_t *ctx, char *filename, int32_t length) { + if (length < 0) length = strlen(filename); + + AstInclude* load_node = create_load(&ctx->context, filename, -1); + add_entities_for_node(&ctx->context.entities, NULL, (AstNode *) load_node, ctx->context.global_scope, NULL); +} + +void onyx_add_mapped_dir(onyx_context_t *ctx, char *mapped_name, int32_t mapped_length, char *dir, int32_t dir_length) { + if (mapped_length < 0) mapped_length = strlen(mapped_name); + if (dir_length < 0) dir_length = strlen(dir); + + bh_arr_push(ctx->context.options->mapped_folders, ((bh_mapped_folder) { + bh_strdup_len(ctx->context.ast_alloc, mapped_name, mapped_length), + bh_strdup_len(ctx->context.ast_alloc, dir, dir_length) + })); +} + +/// Directly injects Onyx code as a new compilation unit +void onyx_inject_code(onyx_context_t *ctx, uint8_t *code, int32_t length) { + assert(0 && "unimplemented"); +} + +// +// Output +// + +int32_t onyx_error_count(onyx_context_t *ctx) { + return bh_arr_length(ctx->context.errors.errors); +} + +const char *onyx_error_message(onyx_context_t *ctx, int32_t error_idx) { + int32_t error_count = onyx_error_count(ctx); + if (error_idx < 0 || error_idx >= error_count) return NULL; + + return ctx->context.errors.errors[error_idx].text; +} + +const char *onyx_error_filename(onyx_context_t *ctx, int32_t error_idx) { + int32_t error_count = onyx_error_count(ctx); + if (error_idx < 0 || error_idx >= error_count) return NULL; + + return ctx->context.errors.errors[error_idx].pos.filename; +} + +int32_t onyx_error_line(onyx_context_t *ctx, int32_t error_idx) { + int32_t error_count = onyx_error_count(ctx); + if (error_idx < 0 || error_idx >= error_count) return 0; + + return ctx->context.errors.errors[error_idx].pos.line; +} + +int32_t onyx_error_column(onyx_context_t *ctx, int32_t error_idx) { + int32_t error_count = onyx_error_count(ctx); + if (error_idx < 0 || error_idx >= error_count) return 0; + + return 
ctx->context.errors.errors[error_idx].pos.column; +} + +int32_t onyx_error_length(onyx_context_t *ctx, int32_t error_idx) { + int32_t error_count = onyx_error_count(ctx); + if (error_idx < 0 || error_idx >= error_count) return 0; + + return ctx->context.errors.errors[error_idx].pos.length; +} + +int32_t onyx_error_line_text(onyx_context_t *ctx, int32_t error_idx, char *line_buffer, int max_length) { + int32_t error_count = onyx_error_count(ctx); + if (error_idx < 0 || error_idx >= error_count) return 0; + + int line_length = 0; + char *walker = ctx->context.errors.errors[error_idx].pos.line_start; + if (!walker) return 0; + + while (*walker && *walker++ != '\n') line_length++; + + if (line_buffer != NULL && max_length > 0) { + i32 to_copy = bh_min(max_length - 1, line_length); + memcpy(line_buffer, ctx->context.errors.errors[error_idx].pos.line_start, to_copy); + line_buffer[to_copy] = '\0'; + } + + return line_length; +} + +onyx_error_t onyx_error_rank(onyx_context_t *ctx, int32_t error_idx) { + int32_t error_count = onyx_error_count(ctx); + if (error_idx < 0 || error_idx >= error_count) return 0; + + return (onyx_error_t) ctx->context.errors.errors[error_idx].rank; +} + + +// +// Code Generation +// + +static void ensure_wasm_has_been_generated(onyx_context_t *ctx) { + if (ctx->context.generated_wasm_buffer.length == 0) { + if (onyx_has_errors(&ctx->context)) return; + onyx_wasm_module_write_to_buffer(ctx->context.wasm_module, &ctx->context.generated_wasm_buffer); + } +} + +static void ensure_js_has_been_generated(onyx_context_t *ctx) { + if (ctx->context.generated_js_buffer.data == NULL) { + if (onyx_has_errors(&ctx->context)) return; + onyx_wasm_module_write_js_partials_to_buffer(ctx->context.wasm_module, &ctx->context.generated_js_buffer); + } +} + +static void ensure_odoc_has_been_generated(onyx_context_t *ctx) { + if (ctx->context.generated_odoc_buffer.data == NULL) { + if (onyx_has_errors(&ctx->context)) return; + onyx_docs_generate_odoc(&ctx->context, 
&ctx->context.generated_odoc_buffer); + } +} + +static void ensure_osym_has_been_generated(onyx_context_t *ctx) { + if (ctx->context.generated_osym_buffer.data == NULL) { + if (onyx_has_errors(&ctx->context)) return; + onyx_docs_emit_symbol_info(&ctx->context, &ctx->context.generated_osym_buffer); + } +} + +int32_t onyx_output_length(onyx_context_t *ctx, onyx_output_type_t type) { + switch (type) { + case ONYX_OUTPUT_TYPE_WASM: + ensure_wasm_has_been_generated(ctx); + return ctx->context.generated_wasm_buffer.length; + + case ONYX_OUTPUT_TYPE_JS: + ensure_js_has_been_generated(ctx); + return ctx->context.generated_js_buffer.length; + + case ONYX_OUTPUT_TYPE_ODOC: + ensure_odoc_has_been_generated(ctx); + return ctx->context.generated_odoc_buffer.length; + + case ONYX_OUTPUT_TYPE_OSYM: + ensure_osym_has_been_generated(ctx); + return ctx->context.generated_osym_buffer.length; + } + + return 0; +} + +void onyx_output_write(onyx_context_t *ctx, onyx_output_type_t type, void *buffer) { + switch (type) { + case ONYX_OUTPUT_TYPE_WASM: + ensure_wasm_has_been_generated(ctx); + memcpy(buffer, ctx->context.generated_wasm_buffer.data, ctx->context.generated_wasm_buffer.length); + break; + + case ONYX_OUTPUT_TYPE_JS: + ensure_js_has_been_generated(ctx); + memcpy(buffer, ctx->context.generated_js_buffer.data, ctx->context.generated_js_buffer.length); + break; + + case ONYX_OUTPUT_TYPE_ODOC: + ensure_odoc_has_been_generated(ctx); + memcpy(buffer, ctx->context.generated_odoc_buffer.data, ctx->context.generated_odoc_buffer.length); + break; + + case ONYX_OUTPUT_TYPE_OSYM: + ensure_osym_has_been_generated(ctx); + memcpy(buffer, ctx->context.generated_osym_buffer.data, ctx->context.generated_osym_buffer.length); + break; + } +} + + +// +// Compilation Info +// + +int64_t onyx_stat(onyx_context_t *ctx, onyx_stat_t stat) { + switch (stat) { + case ONYX_STAT_FILE_COUNT: return bh_arr_length(ctx->context.loaded_files); + case ONYX_STAT_LINE_COUNT: return 
ctx->context.stats.lexer_lines_processed; + case ONYX_STAT_TOKEN_COUNT: return ctx->context.stats.lexer_tokens_processed; + default: return -1; + } +} + +const char *onyx_stat_filepath(onyx_context_t *ctx, int32_t file_index) { + if (file_index < 0 || file_index >= bh_arr_length(ctx->context.loaded_files)) { + return NULL; + } + + return ctx->context.loaded_files[file_index].filename; +} + + + +// +// Running WASM +// + +#ifdef ONYX_RUNTIME_LIBRARY +void onyx_run_wasm(void *buffer, int32_t buffer_length, int argc, char **argv) { + onyx_run_initialize(0, NULL); + + bh_buffer wasm_bytes; + wasm_bytes.data = buffer; + wasm_bytes.length = buffer_length; + + onyx_run_wasm_code(wasm_bytes, argc, argv); +} + +void onyx_run_wasm_with_debug(void *buffer, int32_t buffer_length, int argc, char **argv, char *socket_path) { + onyx_run_initialize(1, socket_path); + + bh_buffer wasm_bytes; + wasm_bytes.data = buffer; + wasm_bytes.length = buffer_length; + + onyx_run_wasm_code(wasm_bytes, argc, argv); +} +#else +void onyx_run_wasm(void *buffer, int32_t buffer_length, int argc, char **argv) { + printf("ERROR: Cannot run WASM code. No runtime was configured at the time Onyx was built"); +} + +void onyx_run_wasm_with_debug(void *buffer, int32_t buffer_length, int argc, char **argv, char *socket_path) { + printf("ERROR: Cannot run WASM code. 
No runtime was configured at the time Onyx was built"); +} +#endif + + + +// Internal procedures + +static AstInclude* create_load(Context *context, char* filename, int32_t length) { + static OnyxToken implicit_load_token = { '#', 1, 0, { 0, 0, 0, 0, 0 } }; + + AstInclude* include_node = onyx_ast_node_new(context->ast_alloc, sizeof(AstInclude), Ast_Kind_Load_File); + include_node->name = bh_strdup_len(context->ast_alloc, filename, length); + include_node->token = &implicit_load_token; + + return include_node; +} + +static void introduce_defined_variables(Context *context) { + bh_arr_each(DefinedVariable, var, context->defined_variables) { + create_and_add_defined_variable(context, var); + } +} + +static void create_and_add_defined_variable(Context *context, DefinedVariable *var) { + OnyxToken *value_token = bh_alloc_item(context->ast_alloc, OnyxToken); + value_token->text = var->value; + value_token->length = strlen(var->value); + + OnyxToken *name_token = bh_alloc_item(context->ast_alloc, OnyxToken); + name_token->text = var->key; + name_token->length = strlen(var->key); + + Package *p = package_lookup(context, "runtime.vars"); + assert(p); + + AstStrLit *value_node = make_string_literal(context, value_token); + add_entities_for_node(&context->entities, NULL, (AstNode *) value_node, NULL, NULL); + + AstBinding *binding = onyx_ast_node_new(context->ast_alloc, sizeof(AstBinding), Ast_Kind_Binding); + binding->token = name_token; + binding->node = (AstNode *) value_node; + + add_entities_for_node(&context->entities, NULL, (AstNode *) binding, p->scope, p); +} + + +static void link_wasm_module(Context *context) { + Package *runtime_var_package = package_lookup(context, "runtime.vars"); + assert(runtime_var_package); + + AstTyped *link_options_node = (AstTyped *) symbol_raw_resolve(context, runtime_var_package->scope, "link_options"); + Type *link_options_type = type_build_from_ast(context, context->builtins.link_options_type); + + assert(unify_node_and_type(context, 
&link_options_node, link_options_type) == TYPE_MATCH_SUCCESS); + + OnyxWasmLinkOptions link_opts; + // CLEANUP: Properly handle this case. + assert(onyx_wasm_build_link_options_from_node(context, &link_opts, link_options_node)); + + onyx_wasm_module_link(context, context->wasm_module, &link_opts); +} diff --git a/compiler/src/onyx.c b/compiler/src/onyx.c index 9e5bc27f5..e73308cce 100644 --- a/compiler/src/onyx.c +++ b/compiler/src/onyx.c @@ -1,761 +1,18 @@ -// #define BH_DEBUG -extern struct bh_allocator global_heap_allocator; - -#define STBDS_REALLOC(_,p,s) (bh_resize(global_heap_allocator, p, s)) -#define STBDS_FREE(_,p) (bh_free(global_heap_allocator, p)) - -#define BH_INTERNAL_ALLOCATOR (global_heap_allocator) - -#define BH_DEFINE -#define BH_NO_TABLE -#define STB_DS_IMPLEMENTATION -#include "bh.h" - -#include "lex.h" -#include "errors.h" -#include "parser.h" -#include "utils.h" -#include "wasm_emit.h" -#include "doc.h" - - -#define VERSION__(m,i,p) "v" #m "." #i "." #p -#define VERSION_(m,i,p) VERSION__(m,i,p) -#define VERSION VERSION_(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) - -#ifdef ONYX_RUNTIME_LIBRARY - #define ONYX_RUNTIME_LIBRARY_MAPPED ONYX_RUNTIME_LIBRARY -#else - #define ONYX_RUNTIME_LIBRARY_MAPPED none -#endif - -#define STRINGIFY_(x) #x -#define STRINGIFY(x) STRINGIFY_(x) - - -Context context; - -#define VERSION_STRING "Onyx toolchain version " VERSION "\n" \ - "Built on " __TIMESTAMP__ "\n" \ - "Runtime: " STRINGIFY(ONYX_RUNTIME_LIBRARY_MAPPED) "\n" - -#define DOCSTRING_HEADER VERSION_STRING \ - "\n" \ - "The toolchain for the Onyx programming language, created by Brendan Hansen.\n" \ - "\n" - - -static const char* top_level_docstring = DOCSTRING_HEADER - "Usage:\n" - "\tonyx \n" - "\n" - "Subcommands:\n" - "\thelp Shows this help message. 
Use \"onyx help \".\n" - "\tbuild Compiles an Onyx program into an executable.\n" -#ifdef ONYX_RUNTIME_LIBRARY - "\trun Compiles and runs an Onyx program, all at once.\n" -#endif - "\tcheck Checks syntax and types of an Onyx program.\n" -#ifdef _BH_LINUX - "\twatch Continuously rebuilds an Onyx program on file changes.\n" -#endif - "\tpackage Package manager\n" - "\tversion Prints version information\n"; - // "\tdoc \n" - -static const char *build_docstring = DOCSTRING_HEADER - "Usage:\n" - "\tonyx %s [-o target_file] OPTIONS\n" - "\n" - "Required:\n" - "\t One or more Onyx files to include in the program.\n" - "\n" - "Options:\n" - "\t-o Specify the target file (default: out.wasm).\n" - "\t --output \n" - "\t-I Include a directory in the search path.\n" - "\t--runtime, -r Specifies the runtime. Can be: onyx, wasi, js, custom.\n" - "\t (default: onyx)\n" - "\t--verbose, -V Verbose output.\n" - "\t -VV Very verbose output.\n" - "\t -VVV Very very verbose output (to be used by compiler developers).\n" - "\t--multi-threaded Enables multi-threading for this compilation.\n" - "\t Automatically enabled for \"onyx\" runtime.\n" - "\t--doc Generates an O-DOC file, a.k.a an Onyx documentation file. Used by onyx-doc-gen.\n" - "\t--tag Generates a C-Tag file.\n" - "\t--syminfo (DEPRECATED) Generates a symbol resolution information file. Used by onyx-lsp.\n" - "\t--lspinfo Generates an LSP information file. Used by onyx-lsp.\n" - "\t--stack-trace Enable dynamic stack trace.\n" - "\t--no-core Disable automatically including \"core/module\".\n" - "\t--no-stale-code Disables use of `#allow_stale_code` directive\n" - "\t--no-type-info Disables generating type information\n" - "\t--generate-method-info Populate method information in type information structures.\n" - "\t Can drastically increase binary size.\n" - "\t--generate-foreign-info Generate information for foreign blocks. 
Rarely needed, so disabled by default.\n" - "\t--wasm-mvp Use only WebAssembly MVP features.\n" - "\t--feature Enable an experimental language feature.\n" - "\n" - "Developer options:\n" - "\t--no-colors Disables colors in the error message.\n" - "\t--no-file-contents Disables '#file_contents' for security.\n" - "\t--error-format (v1|v2) Changes the output error format.\n" - "\t--show-all-errors Print all errors (can result in many consequencial errors from a single error)\n" - "\t--print-function-mappings Prints a mapping from WASM function index to source location.\n" - "\t--print-static-if-results Prints the conditional result of each #if statement. Useful for debugging.\n" - "\n"; - - -static CompileOptions compile_opts_parse(bh_allocator alloc, int argc, char *argv[]) { - CompileOptions options = { - .allocator = alloc, - .action = ONYX_COMPILE_ACTION_PRINT_HELP, - - .verbose_output = 0, - .fun_output = 0, - .print_function_mappings = 0, - .no_file_contents = 0, - - .use_post_mvp_features = 1, - .use_multi_threading = 0, - .generate_foreign_info = 0, - .generate_type_info = 1, - .generate_method_info = 0, - .no_core = 0, - .no_stale_code = 0, - .show_all_errors = 0, - - .enable_optional_semicolons = 0, - - .runtime = Runtime_Onyx, - - .files = NULL, - .target_file = "out.wasm", - - .documentation_file = NULL, - .symbol_info_file = NULL, - .help_subcommand = NULL, - - .defined_variables = NULL, - - .debug_info_enabled = 0, - - .passthrough_argument_count = 0, - .passthrough_argument_data = NULL, - - .generate_tag_file = 0, - .generate_symbol_info_file = 0, - .generate_lsp_info_file = 0, - - .running_perf = 0, - - .error_format = "v1", - }; - - bh_arr_new(alloc, options.files, 2); - bh_arr_new(alloc, options.included_folders, 2); - bh_arr_new(alloc, options.defined_variables, 2); - - char* core_installation = NULL; - - #if defined(_BH_LINUX) || defined(_BH_DARWIN) - core_installation = getenv("ONYX_PATH"); - - if (getenv("ONYX_ERROR_FORMAT")) { - 
options.error_format = getenv("ONYX_ERROR_FORMAT"); - } - #endif - #ifdef _BH_WINDOWS - char *tmp_core_installation = bh_alloc_array(alloc, u8, 512); - char *tmp_error_format = bh_alloc_array(alloc, u8, 512); - - if (GetEnvironmentVariableA("ONYX_PATH", tmp_core_installation, 512) > 0) { - core_installation = tmp_core_installation; - } - if (GetEnvironmentVariableA("ONYX_ERROR_FORMAT", tmp_error_format, 512) > 0) { - options.error_format = tmp_error_format; - } - #endif - - if (core_installation == NULL) { - bh_printf("error: ONYX_PATH environment variable is not set. Please set this to the location of your Onyx installation.\n"); - exit(1); - } - - // NOTE: Add the current folder - bh_arr_push(options.included_folders, core_installation); - bh_arr_push(options.included_folders, "."); - - if (argc == 1) return options; - i32 arg_parse_start = 1; - - if (!strcmp(argv[1], "help")) { - options.action = ONYX_COMPILE_ACTION_PRINT_HELP; - options.help_subcommand = argc > 2 ? argv[2] : NULL; - } - else if (!strcmp(argv[1], "version")) { - options.action = ONYX_COMPILE_ACTION_PRINT_VERSION; - goto skip_parsing_arguments; - } - else if (!strcmp(argv[1], "compile") || !strcmp(argv[1], "build")) { - options.action = ONYX_COMPILE_ACTION_COMPILE; - arg_parse_start = 2; - } - else if (!strcmp(argv[1], "check")) { - options.action = ONYX_COMPILE_ACTION_CHECK; - arg_parse_start = 2; - } - else if (!strcmp(argv[1], "pkg") || !strcmp(argv[1], "package")) { - options.action = ONYX_COMPILE_ACTION_RUN; - options.passthrough_argument_count = argc - 2; - options.passthrough_argument_data = &argv[2]; - options.generate_method_info = 1; // The package manager needs this to be enabled. 
- arg_parse_start = argc; - - bh_arr_push(options.files, bh_aprintf(alloc, "%s/tools/onyx-pkg.onyx", core_installation)); - goto skip_parsing_arguments; - } - #ifdef ONYX_RUNTIME_LIBRARY - else if (!strcmp(argv[1], "run")) { - options.action = ONYX_COMPILE_ACTION_RUN; - arg_parse_start = 2; - } - #endif - #ifdef _BH_LINUX - else if (!strcmp(argv[1], "watch")) { - options.action = ONYX_COMPILE_ACTION_WATCH; - arg_parse_start = 2; - } - // `#ifdef ONYX_RUNTIME_LIBRARY - // `else if (!strcmp(argv[1], "run-watch")) { - // ` options.action = ONYX_COMPILE_ACTION_RUN_WATCH; - // ` arg_parse_start = 2; - // `} - // `#endif - #endif - else { - char *script_filename = bh_aprintf(alloc, "%s/tools/%s.wasm", core_installation, argv[1]); - if (bh_file_exists(script_filename)) { - options.action = ONYX_COMPILE_ACTION_RUN_WASM; - options.target_file = script_filename; - - options.passthrough_argument_count = argc - 2; - options.passthrough_argument_data = &argv[2]; - goto skip_parsing_arguments; - } - - bh_printf("Unknown subcommand: '%s'\n", argv[1]); - bh_printf("Run \"onyx help\" for valid subcommands.\n"); - exit(1); - } - - if (options.action != ONYX_COMPILE_ACTION_PRINT_HELP) { - fori(i, arg_parse_start, argc) { - if (!strcmp(argv[i], "-o") || !strcmp(argv[i], "--output")) { - options.target_file = argv[++i]; - } - else if (!strcmp(argv[i], "--verbose") || !strcmp(argv[i], "-V")) { - options.verbose_output = 1; - } - else if (!strcmp(argv[i], "-VV")) { - options.verbose_output = 2; - } - else if (!strcmp(argv[i], "-VVV")) { - options.verbose_output = 3; - } - else if (!strcmp(argv[i], "--print-function-mappings")) { - options.print_function_mappings = 1; - } - else if (!strcmp(argv[i], "--print-static-if-results")) { - options.print_static_if_results = 1; - } - else if (!strcmp(argv[i], "--no-colors")) { - options.no_colors = 1; - } - else if (!strcmp(argv[i], "--no-file-contents")) { - options.no_file_contents = 1; - } - else if (!strcmp(argv[i], "--wasm-mvp")) { - 
options.use_post_mvp_features = 0; - } - else if (!strcmp(argv[i], "--multi-threaded")) { - options.use_multi_threading = 1; - } - else if (!strcmp(argv[i], "--generate-foreign-info")) { - options.generate_foreign_info = 1; - } - else if (!strcmp(argv[i], "--generate-method-info")) { - options.generate_method_info = 1; - } - else if (!strcmp(argv[i], "--no-type-info")) { - options.generate_type_info = 0; - } - else if (!strcmp(argv[i], "--no-core")) { - options.no_core = 1; - } - else if (!strcmp(argv[i], "--no-stale-code")) { - options.no_stale_code = 1; - } - else if (!strcmp(argv[i], "--show-all-errors")) { - options.show_all_errors = 1; - } - else if (!strcmp(argv[i], "--error-format")) { - options.error_format = argv[++i]; - } - else if (!strcmp(argv[i], "--feature")) { - char *next_arg = argv[++i]; - if (!strcmp(next_arg, "optional-semicolons")) { - options.enable_optional_semicolons = 1; - } - } - else if (!strcmp(argv[i], "-I")) { - bh_arr_push(options.included_folders, argv[++i]); - } - else if (!strncmp(argv[i], "-D", 2)) { - i32 len = strlen(argv[i]); - i32 j=2; - while (argv[i][j] != '=' && j < len) j++; - - if (j < len) argv[i][j] = '\0'; - - DefinedVariable dv; - dv.key = argv[i] + 2; - dv.value = argv[i] + j + 1; - bh_arr_push(options.defined_variables, dv); - } - else if (!strcmp(argv[i], "-r") || !strcmp(argv[i], "--runtime")) { - i += 1; - if (!strcmp(argv[i], "onyx")) options.runtime = Runtime_Onyx; - else if (!strcmp(argv[i], "wasi")) options.runtime = Runtime_Wasi; - else if (!strcmp(argv[i], "js")) options.runtime = Runtime_Js; - else if (!strcmp(argv[i], "custom")) options.runtime = Runtime_Custom; - else { - bh_printf("WARNING: '%s' is not a valid runtime. 
Defaulting to 'onyx'.\n", argv[i]); - options.runtime = Runtime_Onyx; - } - } - else if (!strcmp(argv[i], "--doc")) { - options.documentation_file = argv[++i]; - } - else if (!strcmp(argv[i], "--tag")) { - options.generate_tag_file = 1; - } - else if (!strcmp(argv[i], "--syminfo")) { - options.generate_symbol_info_file = 1; - options.symbol_info_file = argv[++i]; - } - else if (!strcmp(argv[i], "--lspinfo")) { - options.generate_symbol_info_file = 1; - options.generate_lsp_info_file = 1; - options.symbol_info_file = argv[++i]; - } - else if (!strcmp(argv[i], "--debug")) { - options.debug_session = 1; - options.debug_info_enabled = 1; - options.stack_trace_enabled = 1; - } - else if (!strcmp(argv[i], "--debug-info")) { - options.debug_info_enabled = 1; - options.stack_trace_enabled = 1; - } - else if (!strcmp(argv[i], "--stack-trace")) { - options.stack_trace_enabled = 1; - } - else if (!strcmp(argv[i], "--perf")) { - options.running_perf = 1; - } - else if (!strcmp(argv[i], "--")) { - options.passthrough_argument_count = argc - i - 1; - options.passthrough_argument_data = &argv[i + 1]; - break; - } -#if defined(_BH_LINUX) - // NOTE: Fun output is only enabled for Linux because Windows command line - // is not ANSI compatible and for a silly feature, I don't want to learn - // how to properly do arbitrary graphics in it. 
- else if (!strcmp(argv[i], "--fun") || !strcmp(argv[i], "-F")) { - options.fun_output = 1; - } -#endif - else { - if (bh_str_ends_with(argv[i], ".wasm") && options.action == ONYX_COMPILE_ACTION_RUN) { - if (bh_arr_length(options.files) > 0) { - bh_printf("Expected only one '.wasm', or multiple '.onyx' files to be given, not a mixture.\n"); - exit(1); - } - - options.action = ONYX_COMPILE_ACTION_RUN_WASM; - options.target_file = argv[i]; - - options.passthrough_argument_count = argc - i - 1; - options.passthrough_argument_data = &argv[i + 1]; - break; - - } else { - bh_arr_push(options.files, argv[i]); - } - } - } - } - - skip_parsing_arguments: - - // NOTE: Always enable multi-threading for the Onyx runtime. - if (options.runtime == Runtime_Onyx) { - options.use_multi_threading = 1; - } - - return options; -} - -static void compile_opts_free(CompileOptions* opts) { - bh_arr_free(opts->files); - bh_arr_free(opts->included_folders); -} - -static void print_subcommand_help(const char *subcommand) { - if (!strcmp(subcommand, "build") - || !strcmp(subcommand, "run") - || !strcmp(subcommand, "check") - || !strcmp(subcommand, "watch")) { - bh_printf(build_docstring, subcommand); - } - - else { - bh_printf("Unknown subcommand: '%s'\n", subcommand); - bh_printf("Run \"onyx help\" for valid subcommands.\n"); - exit(1); - } -} - - - -typedef enum CompilerProgress { - ONYX_COMPILER_PROGRESS_ERROR, - ONYX_COMPILER_PROGRESS_FAILED_OUTPUT, - ONYX_COMPILER_PROGRESS_SUCCESS -} CompilerProgress; - -static OnyxToken implicit_load_token = { '#', 1, 0, { 0, 0, 0, 0, 0 } }; -static AstInclude* create_load(bh_allocator alloc, char* filename) { - AstInclude* include_node = onyx_ast_node_new(alloc, sizeof(AstInclude), Ast_Kind_Load_File); - include_node->name = filename; - include_node->token = &implicit_load_token; - - return include_node; -} - -static void create_and_add_defined_variable(char *name, char *value) { - OnyxToken *value_token = bh_alloc_item(context.ast_alloc, OnyxToken); - 
value_token->text = value; - value_token->length = strlen(value); - - OnyxToken *name_token = bh_alloc_item(context.ast_alloc, OnyxToken); - name_token->text = name; - name_token->length = strlen(name); - - Package *p = package_lookup("runtime.vars"); - assert(p); - - AstStrLit *value_node = make_string_literal(context.ast_alloc, value_token); - add_entities_for_node(NULL, (AstNode *) value_node, NULL, NULL); - - AstBinding *binding = onyx_ast_node_new(context.ast_alloc, sizeof(AstBinding), Ast_Kind_Binding); - binding->token = name_token; - binding->node = (AstNode *) value_node; - - add_entities_for_node(NULL, (AstNode *) binding, p->scope, p); -} - -static void introduce_defined_variables() { - bh_arr_each(DefinedVariable, dv, context.options->defined_variables) { - create_and_add_defined_variable(dv->key, dv->value); - } -} - -// HACK -static u32 special_global_entities_remaining = 5; -static Entity *runtime_info_types_entity; -static Entity *runtime_info_foreign_entity; -static Entity *runtime_info_proc_tags_entity; -static Entity *runtime_info_global_tags_entity; -static Entity *runtime_info_stack_trace_entity; - -static void context_init(CompileOptions* opts) { - memset(&context, 0, sizeof context); - - types_init(); - prepare_builtins(); - - // HACK - special_global_entities_remaining = 5; - - context.options = opts; - context.cycle_detected = 0; - context.cycle_almost_detected = 0; - - OnyxFilePos internal_location = { 0 }; - internal_location.filename = ""; - internal_location.line = 1; - internal_location.column = 1; - context.global_scope = scope_create(global_heap_allocator, NULL, internal_location); - sh_new_arena(context.packages); - - // NOTE: This will be initialized upon the first call to entity_heap_insert. 
- context.entities.next_id = 0; - context.entities.entities = NULL; - - onyx_errors_init(&context.loaded_files); - - context.token_alloc = global_heap_allocator; - - // NOTE: Create the arena where tokens and AST nodes will exist - // Prevents nodes from being scattered across memory due to fragmentation - bh_arena_init(&context.ast_arena, global_heap_allocator, 16 * 1024 * 1024); // 16MB - context.ast_alloc = bh_arena_allocator(&context.ast_arena); - - context.wasm_module = bh_alloc_item(global_heap_allocator, OnyxWasmModule); - *context.wasm_module = onyx_wasm_module_create(global_heap_allocator); - - entity_heap_init(&context.entities); - - // NOTE: Add builtin entities to pipeline. - entity_heap_insert(&context.entities, ((Entity) { - .state = Entity_State_Parse_Builtin, - .type = Entity_Type_Load_File, - .package = NULL, - .include = create_load(context.ast_alloc, "core/builtin"), - })); - - entity_heap_insert(&context.entities, ((Entity) { - .state = Entity_State_Parse_Builtin, - .type = Entity_Type_Load_File, - .package = NULL, - .include = create_load(context.ast_alloc, "core/runtime/build_opts"), - })); - - if (context.options->runtime != Runtime_Custom) { - runtime_info_types_entity = entity_heap_insert(&context.entities, ((Entity) { - .state = Entity_State_Parse, - .type = Entity_Type_Load_File, - .package = NULL, - .include = create_load(context.ast_alloc, "core/runtime/info/types"), - })); - runtime_info_foreign_entity = entity_heap_insert(&context.entities, ((Entity) { - .state = Entity_State_Parse, - .type = Entity_Type_Load_File, - .package = NULL, - .include = create_load(context.ast_alloc, "core/runtime/info/foreign_blocks"), - })); - runtime_info_proc_tags_entity = entity_heap_insert(&context.entities, ((Entity) { - .state = Entity_State_Parse, - .type = Entity_Type_Load_File, - .package = NULL, - .include = create_load(context.ast_alloc, "core/runtime/info/proc_tags"), - })); - runtime_info_global_tags_entity = 
entity_heap_insert(&context.entities, ((Entity) { - .state = Entity_State_Parse, - .type = Entity_Type_Load_File, - .package = NULL, - .include = create_load(context.ast_alloc, "core/runtime/info/global_tags"), - })); - runtime_info_stack_trace_entity = entity_heap_insert(&context.entities, ((Entity) { - .state = Entity_State_Parse, - .type = Entity_Type_Load_File, - .package = NULL, - .include = create_load(context.ast_alloc, "core/runtime/info/stack_trace"), - })); - } - - builtin_heap_start.entity = NULL; - builtin_stack_top.entity = NULL; - builtin_tls_base.entity = NULL; - builtin_tls_size.entity = NULL; - builtin_closure_base.entity = NULL; - - add_entities_for_node(NULL, (AstNode *) &builtin_stack_top, context.global_scope, NULL); - add_entities_for_node(NULL, (AstNode *) &builtin_heap_start, context.global_scope, NULL); - add_entities_for_node(NULL, (AstNode *) &builtin_tls_base, context.global_scope, NULL); - add_entities_for_node(NULL, (AstNode *) &builtin_tls_size, context.global_scope, NULL); - add_entities_for_node(NULL, (AstNode *) &builtin_closure_base, context.global_scope, NULL); - add_entities_for_node(NULL, (AstNode *) &builtin_stack_trace, context.global_scope, NULL); - - // NOTE: Add all files passed by command line to the queue - bh_arr_each(const char *, filename, opts->files) { - AstInclude* load_node = create_load(context.ast_alloc, (char *) *filename); - add_entities_for_node(NULL, (AstNode *) load_node, context.global_scope, NULL); - } - - if (!context.options->no_core) { - entity_heap_insert(&context.entities, ((Entity) { - .state = Entity_State_Parse, - .type = Entity_Type_Load_File, - .package = NULL, - .include = create_load(context.ast_alloc, "core/module"), - })); - } - - if (context.options->generate_symbol_info_file) { - context.symbol_info = bh_alloc_item(global_heap_allocator, SymbolInfoTable); - bh_imap_init(&context.symbol_info->node_to_id, global_heap_allocator, 512); - bh_arr_new(global_heap_allocator, 
context.symbol_info->symbols, 128); - bh_arr_new(global_heap_allocator, context.symbol_info->symbols_resolutions, 128); - sh_new_arena(context.symbol_info->files); - } - - if (context.options->documentation_file) { - context.doc_info = bh_alloc_item(global_heap_allocator, OnyxDocInfo); - memset(context.doc_info, 0, sizeof(OnyxDocInfo)); - bh_arr_new(global_heap_allocator, context.doc_info->procedures, 128); - bh_arr_new(global_heap_allocator, context.doc_info->structures, 128); - bh_arr_new(global_heap_allocator, context.doc_info->enumerations, 128); - } - - if (context.options->verbose_output > 0) { - bh_printf("File search path:\n"); - bh_arr_each(const char *, p, context.options->included_folders) { - bh_printf("\t%s\n", *p); + if (context->options->verbose_output > 0) { + bh_printf("Mapped folders:\n"); + bh_arr_each(bh_mapped_folder, p, context->options->mapped_folders) { + bh_printf("\t%s: %s\n", p->name, p->folder); } bh_printf("\n"); } -} -static void context_free() { - bh_arena_free(&context.ast_arena); - bh_arr_free(context.loaded_files); -} - -static void parse_source_file(bh_file_contents* file_contents) { - // :Remove passing the allocators as parameters - OnyxTokenizer tokenizer = onyx_tokenizer_create(context.token_alloc, file_contents); - onyx_lex_tokens(&tokenizer); - - file_contents->line_count = tokenizer.line_number; - - OnyxParser parser = onyx_parser_create(context.ast_alloc, &tokenizer); - onyx_parse(&parser); - onyx_parser_free(&parser); -} -static b32 process_source_file(char* filename, OnyxFilePos error_pos) { - bh_arr_each(bh_file_contents, fc, context.loaded_files) { - // Duplicates are detected here and since these filenames will be the full path, - // string comparing them should be all that is necessary. 
- if (!strcmp(fc->filename, filename)) return 1; - } - - bh_file file; - bh_file_error err = bh_file_open(&file, filename); - if (err != BH_FILE_ERROR_NONE) { - if (context.cycle_detected) { - if (error_pos.filename == NULL) { - onyx_report_error(error_pos, Error_Command_Line_Arg, "Failed to open file %s", filename); - } else { - onyx_report_error(error_pos, Error_Critical, "Failed to open file %s", filename); - } - } - return 0; - } - - bh_file_contents fc = bh_file_read_contents(context.token_alloc, &file); - bh_file_close(&file); - - bh_arr_push(context.loaded_files, fc); - - if (context.options->verbose_output == 2) + if (context->options->verbose_output == 2) bh_printf("Processing source file: %s (%d bytes)\n", file.filename, fc.length); - parse_source_file(&bh_arr_last(context.loaded_files)); - return 1; -} - -static b32 process_load_entity(Entity* ent) { - assert(ent->type == Entity_Type_Load_File || ent->type == Entity_Type_Load_Path); - AstInclude* include = ent->include; - - if (include->kind == Ast_Kind_Load_File) { - // :RelativeFiles - const char* parent_file = include->token->pos.filename; - if (parent_file == NULL) parent_file = "."; - - char* parent_folder = bh_path_get_parent(parent_file, global_scratch_allocator); - - char* filename = bh_lookup_file(include->name, parent_folder, ".onyx", 1, context.options->included_folders, 1); - char* formatted_name = bh_strdup(global_heap_allocator, filename); - - return process_source_file(formatted_name, include->token->pos); - - } else if (include->kind == Ast_Kind_Load_All) { - const char* parent_file = include->token->pos.filename; - if (parent_file == NULL) parent_file = "."; - - char* parent_folder = bh_path_get_parent(parent_file, global_scratch_allocator); - char folder[512]; - if (bh_str_starts_with(include->name, "./")) { - bh_snprintf(folder, 511, "%s/%s", parent_folder, include->name + 2); - } else { - bh_snprintf(folder, 511, "%s", include->name); - } - - bh_path_convert_separators(folder); - // 
This does not take into account #load_path'd folders... - - bh_arr(char *) folders_to_process = NULL; - bh_arr_new(global_heap_allocator, folders_to_process, 2); - - bh_arr_push(folders_to_process, bh_strdup(global_scratch_allocator, folder)); - - while (bh_arr_length(folders_to_process) > 0) { - char *folder = bh_arr_pop(folders_to_process); - bh_dir dir = bh_dir_open(folder); - if (dir == NULL) { - onyx_report_error(include->token->pos, Error_Critical, "Could not find or open folder '%s'.", folder); - return 0; - } - - bh_dirent entry; - char fullpath[512]; - while (bh_dir_read(dir, &entry)) { - if (entry.type == BH_DIRENT_FILE && bh_str_ends_with(entry.name, ".onyx")) { - bh_snprintf(fullpath, 511, "%s/%s", folder, entry.name); - bh_path_convert_separators(fullpath); - - char* formatted_name = bh_path_get_full_name(fullpath, global_heap_allocator); - - AstInclude* new_include = onyx_ast_node_new(context.ast_alloc, sizeof(AstInclude), Ast_Kind_Load_File); - new_include->token = include->token; - new_include->name = formatted_name; - add_entities_for_node(NULL, (AstNode *) new_include, include->entity->scope, include->entity->package); - } - - if (entry.type == BH_DIRENT_DIRECTORY && include->recursive) { - if (!strcmp(entry.name, ".") || !strcmp(entry.name, "..")) continue; - - bh_snprintf(fullpath, 511, "%s/%s", folder, entry.name); - char* formatted_name = bh_path_get_full_name(fullpath, global_scratch_allocator); // Could this overflow the scratch allocator? 
- - bh_arr_push(folders_to_process, formatted_name); - } - } - - bh_dir_close(dir); - } - - return 1; - - } else if (include->kind == Ast_Kind_Load_Path) { - bh_arr_push(context.options->included_folders, include->name); - - } else if (include->kind == Ast_Kind_Library_Path) { - bh_arr_push(context.wasm_module->library_paths, include->name); - } - - return 1; -} - -static b32 process_entity(Entity* ent) { static char verbose_output_buffer[512]; - if (context.options->verbose_output == 3) { + if (context->options->verbose_output == 3) { if (ent->expr && ent->expr->token) snprintf(verbose_output_buffer, 511, "%20s | %24s (%d, %d) | %5d | %s:%i:%i \n", @@ -777,86 +34,18 @@ static b32 process_entity(Entity* ent) { (u32) ent->micro_attempts); } - EntityState before_state = ent->state; - switch (before_state) { - case Entity_State_Error: - if (ent->type != Entity_Type_Error) { - onyx_report_error(ent->expr->token->pos, Error_Critical, "Error entity unexpected. This is definitely a compiler bug"); - } else { - onyx_report_error(ent->error->token->pos, Error_Critical, "Static error occured: '%b'", ent->error->error_msg->text, ent->error->error_msg->length); - } - break; - - case Entity_State_Parse_Builtin: - process_load_entity(ent); - ent->state = Entity_State_Finalized; - break; - - case Entity_State_Introduce_Symbols: - // Currently, introducing symbols is handled in the symbol resolution - // function. Maybe there should be a different place where that happens? 
- symres_entity(ent); - break; - - case Entity_State_Parse: - if (!context.builtins_initialized) { - context.builtins_initialized = 1; - initialize_builtins(context.ast_alloc); - introduce_build_options(context.ast_alloc); - introduce_defined_variables(); - } - - // GROSS - if (special_global_entities_remaining == 0) { - special_global_entities_remaining--; - initalize_special_globals(); - } - - if (process_load_entity(ent)) { - // GROSS - if (ent == runtime_info_types_entity - || ent == runtime_info_proc_tags_entity - || ent == runtime_info_global_tags_entity - || ent == runtime_info_foreign_entity - || ent == runtime_info_stack_trace_entity) { - special_global_entities_remaining--; - } - - ent->state = Entity_State_Finalized; - } else { - ent->macro_attempts++; - } - break; - - case Entity_State_Resolve_Symbols: symres_entity(ent); break; - case Entity_State_Check_Types: check_entity(ent); break; - - case Entity_State_Code_Gen: { - if (context.options->action == ONYX_COMPILE_ACTION_CHECK) { - ent->state = Entity_State_Finalized; - break; - } - - emit_entity(ent); - break; - } - - default: break; - } - b32 changed = ent->state != before_state; - if (context.options->verbose_output == 3) { + if (context->options->verbose_output == 3) { if (changed) printf("SUCCESS to %20s | %s", entity_state_strings[ent->state], verbose_output_buffer); else printf("YIELD to %20s | %s", entity_state_strings[ent->state], verbose_output_buffer); } return changed; -} // Just having fun with some visual output - brendanfh 2020/12/14 -#if defined(_BH_LINUX) -static void output_dummy_progress_bar() { - EntityHeap* eh = &context.entities; +#if defined(_BH_LINUX) || defined(_BH_DARWIN) +static void output_dummy_progress_bar(Context *context) { + EntityHeap* eh = &context->entities; if (bh_arr_length(eh->entities) == 0) return; static const char* state_colors[] = { @@ -896,34 +85,13 @@ static void output_dummy_progress_bar() { } #endif -static void dump_cycles() { - context.cycle_detected = 
1; - Entity* ent; - - while (1) { - ent = entity_heap_top(&context.entities); - entity_heap_remove_top(&context.entities); - if (ent->state < Entity_State_Code_Gen) process_entity(ent); - else break; - if (bh_arr_length(context.entities.entities) == 0) { - break; - } - } -} - -static i32 onyx_compile() { - u64 start_time = bh_time_curr(); - - if (context.options->fun_output) + if (context->options->fun_output) printf("\e[2J"); - while (!bh_arr_is_empty(context.entities.entities)) { - Entity* ent = entity_heap_top(&context.entities); - -#if defined(_BH_LINUX) - if (context.options->fun_output) { - output_dummy_progress_bar(); +#if defined(_BH_LINUX) || defined(_BH_DARWIN) + if (context->options->fun_output) { + output_dummy_progress_bar(context); if (ent->expr->token) { OnyxFilePos pos = ent->expr->token->pos; @@ -941,366 +109,29 @@ static i32 onyx_compile() { u64 nano_time = spec.tv_nsec + 1000000000 * (spec.tv_sec % 100); printf("%lu %d %d %d %d %d %d %d\n", nano_time, - bh_arr_length(context.entities.entities), - context.entities.state_count[Entity_State_Introduce_Symbols], - context.entities.state_count[Entity_State_Parse], - context.entities.state_count[Entity_State_Resolve_Symbols], - context.entities.state_count[Entity_State_Check_Types], - context.entities.state_count[Entity_State_Code_Gen], - context.entities.state_count[Entity_State_Finalized]); + bh_arr_length(context->entities.entities), + context->entities.state_count[Entity_State_Introduce_Symbols], + context->entities.state_count[Entity_State_Parse], + context->entities.state_count[Entity_State_Resolve_Symbols], + context->entities.state_count[Entity_State_Check_Types], + context->entities.state_count[Entity_State_Code_Gen], + context->entities.state_count[Entity_State_Finalized]); */ - // Mostly a preventative thing to ensure that even if somehow - // errors were left disabled, they are re-enabled in this cycle. 
- onyx_errors_enable(); - entity_heap_remove_top(&context.entities); - - u64 perf_start; - EntityType perf_entity_type; - EntityState perf_entity_state; - if (context.options->running_perf) { - perf_start = bh_time_curr_micro(); - perf_entity_type = ent->type; - perf_entity_state = ent->state; - } - - b32 changed = process_entity(ent); - - // NOTE: VERY VERY dumb cycle breaking. Basically, remember the first entity that did - // not change (i.e. did not make any progress). Then everytime an entity doesn't change, - // check if it is the same entity. If it is, it means all other entities that were processed - // between the two occurences didn't make any progress either, and there must be a cycle. - // - brendanfh 2021/02/06 - // - // Because of the recent changes to the compiler architecture (again), this condition - // does not always hold anymore. There can be nodes that get scheduled multiple times - // before the "key" node that will unblock the progress. This means a more sophisticated - // cycle detection algorithm must be used. 
- // - static Entity* watermarked_node = NULL; - static u32 highest_watermark = 0; - if (!changed) { - if (!watermarked_node) { - watermarked_node = ent; - highest_watermark = bh_max(highest_watermark, ent->macro_attempts); - } - else if (watermarked_node == ent) { - if (ent->macro_attempts > highest_watermark) { - entity_heap_insert_existing(&context.entities, ent); - - if (context.cycle_almost_detected == 3) { - dump_cycles(); - } else { - context.cycle_almost_detected += 1; - } - } - } - else if (watermarked_node->macro_attempts < ent->macro_attempts) { - watermarked_node = ent; - highest_watermark = bh_max(highest_watermark, ent->macro_attempts); - } - } else { - watermarked_node = NULL; - context.cycle_almost_detected = 0; - } - - if (onyx_has_errors()) { - onyx_errors_print(); - return ONYX_COMPILER_PROGRESS_ERROR; - } - - if (ent->state != Entity_State_Finalized && ent->state != Entity_State_Failed) - entity_heap_insert_existing(&context.entities, ent); - - if (context.options->running_perf) { - u64 perf_end = bh_time_curr_micro(); - - u64 duration = perf_end - perf_start; - context.microseconds_per_type[perf_entity_type] += duration; - context.microseconds_per_state[perf_entity_state] += duration; - } - } - - // - // There should not be any errors printing here, but there might be warnings. - onyx_errors_print(); - u64 duration = bh_time_duration(start_time); - if (context.options->verbose_output > 0) { - // TODO: Replace these with bh_printf when padded formatting is added. 
- printf("\nStatistics:\n"); - printf(" Time taken: %lf ms\n", (double) duration); - printf(" Processed %llu lines (%f lines/second).\n", context.lexer_lines_processed, ((f32) 1000 * context.lexer_lines_processed) / (duration)); - printf(" Processed %llu tokens (%f tokens/second).\n", context.lexer_tokens_processed, ((f32) 1000 * context.lexer_tokens_processed) / (duration)); - printf("\n"); - } - - if (context.options->generate_tag_file) { - onyx_docs_emit_tags("./tags"); + if (context->options->verbose_output > 0) { + printf("Type table size: %d bytes\n", context->wasm_module->type_info_size); } - if (context.options->generate_symbol_info_file) { - onyx_docs_emit_symbol_info(context.options->symbol_info_file); - } - - if (context.options->documentation_file != NULL) { - onyx_docs_emit_odoc(context.options->documentation_file); - } - - if (context.options->running_perf) { + if (context->options->running_perf) { fori (i, 0, Entity_State_Count) { - printf("| %27s | %10llu us |\n", entity_state_strings[i], context.microseconds_per_state[i]); + printf("| %27s | %10llu us |\n", entity_state_strings[i], context->stats.microseconds_per_state[i]); } printf("\n"); fori (i, 0, Entity_Type_Count) { - printf("| %27s | %10llu us |\n", entity_type_strings[i], context.microseconds_per_type[i]); + printf("| %27s | %10llu us |\n", entity_type_strings[i], context->stats.microseconds_per_type[i]); } printf("\n"); } - return ONYX_COMPILER_PROGRESS_SUCCESS; -} - -static void link_wasm_module() { - Package *runtime_var_package = package_lookup("runtime.vars"); - assert(runtime_var_package); - - AstTyped *link_options_node = (AstTyped *) symbol_raw_resolve(runtime_var_package->scope, "link_options"); - Type *link_options_type = type_build_from_ast(context.ast_alloc, builtin_link_options_type); - - assert(unify_node_and_type(&link_options_node, link_options_type) == TYPE_MATCH_SUCCESS); - - OnyxWasmLinkOptions link_opts; - // CLEANUP: Properly handle this case. 
- assert(onyx_wasm_build_link_options_from_node(&link_opts, link_options_node)); - - onyx_wasm_module_link(context.wasm_module, &link_opts); -} - -static CompilerProgress onyx_flush_module() { - link_wasm_module(); - - // NOTE: Output to file - bh_file output_file; - if (bh_file_create(&output_file, context.options->target_file) != BH_FILE_ERROR_NONE) - return ONYX_COMPILER_PROGRESS_FAILED_OUTPUT; - - if (context.options->verbose_output) - bh_printf("Outputting to WASM file: %s\n", output_file.filename); - - // APPARENTLY... the WebAssembly Threading proposal says that the data segment initializations - // in a WASM module are copied into the linear memory EVERY time the module is instantiated, not - // just the first time. This means that if we are happily chugging along and modifying global state - // and then we spawn a thread, that thread will completely wipe all changes to the global and return - // it to its original state. This is horrible obviously, but the only thing that is more horrible is - // that the best way around this is to create a second WASM module that simply initializes the given - // data section. Then have a section module that is actually your code. For right now, this is going - // to be fine since the browser is really the only place that multi-threading can be used to any - // degree of competency. But still... This is god awful and I hope that there is some other way to - // around this down the line. 
- if (context.options->use_multi_threading && !context.options->use_post_mvp_features) { - bh_file data_file; - if (bh_file_create(&data_file, bh_aprintf(global_scratch_allocator, "%s.data", context.options->target_file)) != BH_FILE_ERROR_NONE) - return ONYX_COMPILER_PROGRESS_FAILED_OUTPUT; - - OnyxWasmModule* data_module = bh_alloc_item(global_heap_allocator, OnyxWasmModule); - *data_module = onyx_wasm_module_create(global_heap_allocator); - - data_module->data = context.wasm_module->data; - context.wasm_module->data = NULL; - - onyx_wasm_module_write_to_file(data_module, data_file); - onyx_wasm_module_write_to_file(context.wasm_module, output_file); - - bh_file_close(&data_file); - } else { - onyx_wasm_module_write_to_file(context.wasm_module, output_file); - } - - bh_file_close(&output_file); - - return ONYX_COMPILER_PROGRESS_SUCCESS; -} - -#ifdef ONYX_RUNTIME_LIBRARY -static b32 onyx_run_module(bh_buffer code_buffer) { - onyx_run_initialize(context.options->debug_session); - - if (context.options->verbose_output > 0) - bh_printf("Running program:\n"); - - return onyx_run_wasm(code_buffer, context.options->passthrough_argument_count, context.options->passthrough_argument_data); -} - -static b32 onyx_run_wasm_file(const char *filename) { - bh_file_contents contents = bh_file_read_contents(bh_heap_allocator(), filename); - - bh_buffer code_buffer; - code_buffer.data = contents.data; - code_buffer.length = contents.length; - - return onyx_run_module(code_buffer); -} - -static b32 onyx_run() { - link_wasm_module(); - - bh_buffer code_buffer; - onyx_wasm_module_write_to_buffer(context.wasm_module, &code_buffer); - - return onyx_run_module(code_buffer); - -} -#endif - -static bh_managed_heap mh; - -CompilerProgress do_compilation(CompileOptions *compile_opts) { - bh_scratch_init(&global_scratch, bh_heap_allocator(), 256 * 1024); // NOTE: 256 KiB - global_scratch_allocator = bh_scratch_allocator(&global_scratch); - - bh_managed_heap_init(&mh); - global_heap_allocator = 
bh_managed_heap_allocator(&mh); - // global_heap_allocator = bh_heap_allocator(); - context_init(compile_opts); - - return onyx_compile(); -} - -void cleanup_compilation() { - context_free(); - - bh_scratch_free(&global_scratch); - bh_managed_heap_free(&mh); -} - -#ifdef _BH_LINUX - -#include - -static bh_file_watch watches; - -static void onyx_watch_stop(int sig) { - bh_file_watch_stop(&watches); -} - -static void onyx_watch(CompileOptions *compile_opts) { - signal(SIGINT, onyx_watch_stop); - - b32 running_watch = 1; - - do { - bh_printf("\e[2J\e[?25l\n"); - bh_printf("\e[3;1H"); - - if (do_compilation(compile_opts) == ONYX_COMPILER_PROGRESS_SUCCESS) { - onyx_flush_module(); - bh_printf("\e[92mNo errors.\n"); - } - - char time_buf[128] = {0}; - time_t now = time(NULL); - strftime(time_buf, 128, "%X", localtime(&now)); - bh_printf("\e[1;1H\e[30;105m Onyx " VERSION " \e[30;104m Built %s \e[0m", time_buf); - - i32 errors = bh_arr_length(context.errors.errors); - if (errors == 0) { - bh_printf("\e[30;102m Errors 0 \e[0m"); - } else { - bh_printf("\e[30;101m Error%s %d \e[0m", bh_num_plural(errors), errors); - } - - watches = bh_file_watch_new(); - - bh_arr_each(bh_file_contents, file, context.loaded_files) { - bh_file_watch_add(&watches, file->filename); - } - - cleanup_compilation(); - - if (!bh_file_watch_wait(&watches)) { - running_watch = 0; - } - - bh_file_watch_free(&watches); - } while(running_watch); - - - bh_printf("\e[2J\e[1;1H\e[?25h\n"); -} - -#endif - - - - -int main(int argc, char *argv[]) { - CompileOptions compile_opts = compile_opts_parse(bh_heap_allocator(), argc, argv); - - CompilerProgress compiler_progress = ONYX_COMPILER_PROGRESS_ERROR; - switch (compile_opts.action) { - case ONYX_COMPILE_ACTION_PRINT_HELP: { - if (compile_opts.help_subcommand) { - print_subcommand_help(compile_opts.help_subcommand); - } else { - bh_printf(top_level_docstring); - } - return 0; - } - - case ONYX_COMPILE_ACTION_PRINT_VERSION: { - bh_printf(VERSION_STRING); - return 
0; - } - - case ONYX_COMPILE_ACTION_CHECK: - compiler_progress = do_compilation(&compile_opts); - break; - - case ONYX_COMPILE_ACTION_COMPILE: - compiler_progress = do_compilation(&compile_opts); - if (compiler_progress == ONYX_COMPILER_PROGRESS_SUCCESS) { - onyx_flush_module(); - } - break; - - #ifdef _BH_LINUX - case ONYX_COMPILE_ACTION_WATCH: - onyx_watch(&compile_opts); - break; - #endif - - #ifdef ONYX_RUNTIME_LIBRARY - case ONYX_COMPILE_ACTION_RUN: - compiler_progress = do_compilation(&compile_opts); - if (compiler_progress == ONYX_COMPILER_PROGRESS_SUCCESS) { - if (!onyx_run()) { - compiler_progress = ONYX_COMPILER_PROGRESS_ERROR; - } - } - break; - #endif - - #ifdef ONYX_RUNTIME_LIBRARY - case ONYX_COMPILE_ACTION_RUN_WASM: - global_heap_allocator = bh_heap_allocator(); - context_init(&compile_opts); - compiler_progress = ONYX_COMPILER_PROGRESS_SUCCESS; - if (!onyx_run_wasm_file(context.options->target_file)) { - compiler_progress = ONYX_COMPILER_PROGRESS_ERROR; - } - #endif - - default: break; - } - - if (compiler_progress == ONYX_COMPILER_PROGRESS_FAILED_OUTPUT) { - bh_printf_err("Failed to open file for writing: '%s'\n", compile_opts.target_file); - } - - cleanup_compilation(); - compile_opts_free(&compile_opts); - - return compiler_progress != ONYX_COMPILER_PROGRESS_SUCCESS; -} diff --git a/compiler/src/parser.c b/compiler/src/parser.c index a559fc19f..eca4c6f12 100644 --- a/compiler/src/parser.c +++ b/compiler/src/parser.c @@ -4,9 +4,6 @@ // and declarations to be introduced into scopes. 
#include "astnodes.h" -#undef BH_INTERNAL_ALLOCATOR -#define BH_INTERNAL_ALLOCATOR (global_heap_allocator) - #include "parser.h" #include "lex.h" #include "errors.h" @@ -21,13 +18,21 @@ static AstNode error_node = { Ast_Kind_Error, 0, NULL, NULL }; #define ENTITY_SUBMIT(node) (submit_entity_in_scope(parser, (AstNode *) (node), parser->current_scope, parser->package)) #define ENTITY_SUBMIT_IN_SCOPE(node, scope) (submit_entity_in_scope(parser, (AstNode *) (node), scope, parser->package)) +#undef ONYX_ERROR +#undef ONYX_WARNING +#define ONYX_ERROR(pos, rank, ...) (onyx_report_error(parser->context, (pos), (rank), __VA_ARGS__)) +#define ONYX_WARNING(pos, ...) (onyx_report_warning(parser->context, (pos), __VA_ARGS__)) + +#undef BH_INTERNAL_ALLOCATOR +#define BH_INTERNAL_ALLOCATOR (parser->context->gp_alloc) + void submit_entity_in_scope(OnyxParser* parser, AstNode* node, Scope* scope, Package* package) { if (bh_arr_length(parser->alternate_entity_placement_stack) == 0) { - add_entities_for_node(NULL, node, scope, package); + add_entities_for_node(&parser->context->entities, NULL, node, scope, package); } else { bh_arr(Entity *) *entity_array = bh_arr_last(parser->alternate_entity_placement_stack); - add_entities_for_node(entity_array, node, scope, package); + add_entities_for_node(&parser->context->entities, entity_array, node, scope, package); } } @@ -142,7 +147,7 @@ static OnyxToken* expect_token(OnyxParser* parser, TokenType token_type) { } if (token->type != token_type) { - onyx_report_error(token->pos, Error_Critical, "expected token '%s', got '%s'.", token_type_name(token_type), token_name(token)); + ONYX_ERROR(token->pos, Error_Critical, "expected token '%s', got '%s'.", token_type_name(token_type), token_name(token)); parser->hit_unexpected_token = 1; // :LinearTokenDependent parser->curr = &parser->tokenizer->tokens[bh_arr_length(parser->tokenizer->tokens) - 1]; @@ -203,7 +208,7 @@ static b32 next_tokens_are(OnyxParser* parser, i32 n, ...) 
{ static void expect_no_stored_tags_pos(OnyxParser *parser, OnyxFilePos pos) { if (bh_arr_length(parser->stored_tags) > 0) { - onyx_report_error(pos, Error_Critical, "#tag is not allowed on this element."); + ONYX_ERROR(pos, Error_Critical, "#tag is not allowed on this element."); parser->hit_unexpected_token = 1; } } @@ -258,29 +263,159 @@ static void flush_stored_tags(OnyxParser *parser, bh_arr(AstTyped *) *out_arr) { *out_arr = arr; } +static void flush_doc_tokens(OnyxParser *parser, const char **out_string, OnyxToken **out_token) { + if (parser->last_documentation_token) { + if (out_token) *out_token = parser->last_documentation_token; + parser->last_documentation_token = NULL; + } + + if (bh_arr_is_empty(parser->documentation_tokens)) { + if (out_string) *out_string = ""; + return; + } + + if (!out_string) { + bh_arr_clear(parser->documentation_tokens); + return; + } + + // Build the full doc string from the tokens + + i32 doc_len = 0; + bh_arr_each(OnyxToken *, ptoken, parser->documentation_tokens) { + doc_len += (*ptoken)->length; + doc_len += 1; // For the \n + } + + bh_buffer str_buf; + bh_buffer_init(&str_buf, parser->allocator, doc_len + 1); // +1 for null byte + + bh_arr_each(OnyxToken *, ptoken, parser->documentation_tokens) { + bh_buffer_append(&str_buf, (*ptoken)->text, (*ptoken)->length); + bh_buffer_write_byte(&str_buf, '\n'); + } + bh_arr_clear(parser->documentation_tokens); + + bh_buffer_write_byte(&str_buf, 0); + *out_string = (const char *) str_buf.data; +} + + +static u64 parse_int_token(OnyxToken *int_token) { + u64 value = 0; + + token_toggle_end(int_token); + + char *buf = int_token->text; + i32 i = 0; + i64 base = 10; + + if (buf[0] == '0' && buf[1] == 'x') { + base = 16; + i = 2; + } + + while (i < int_token->length) { + char c = buf[i++]; + + if ('0' <= c && c <= '9') { value *= base; value += (c - '0'); } + if ('A' <= c && c <= 'Z') { value *= base; value += ((c - 'A') + 10); } + if ('a' <= c && c <= 'z') { value *= base; value += ((c - 
'a') + 10); } + } + + token_toggle_end(int_token); + return value; +} + +static f64 parse_float_sign(char **s) { + if (**s == '-') { + *s += 1; + return -1; + } + + if (**s == '+') { + *s += 1; + return 1; + } + + return 1; +} + +static f64 parse_float_digit(char **s, i32 *digit_count) { + f64 value = 0; + while (**s) { + char c = **s; + if ('0' <= c && c <= '9') { + value = value * 10 + (c - '0'); + *s += 1; + *digit_count += 1; + + } + else if (c == '_') { *s += 1; continue; } + else { break; } + } + + return value; +} + +static f64 parse_float_token(OnyxToken *float_token) { + token_toggle_end(float_token); + + char *s = float_token->text; + i32 digit_count = 0; + + f64 sign = parse_float_sign(&s); + f64 value = parse_float_digit(&s, &digit_count); + + if (*s == '.') { + s++; + digit_count = 0; + f64 fraction = parse_float_digit(&s, &digit_count); + while (digit_count > 0) { + digit_count -= 1; + fraction /= 10; + } + value += fraction; + } + + value *= sign; + + if (*s == 'e') { + s++; + + digit_count = 0; + f64 exponent_sign = parse_float_sign(&s); + f64 exponent = parse_float_digit(&s, &digit_count); + + if (exponent_sign > 0) { + while (exponent > 0) { + value *= 10; + exponent -= 1; + } + } else { + while (exponent > 0) { + value /= 10; + exponent -= 1; + } + } + } + + token_toggle_end(float_token); + return value; +} -// TODO: Make parsing numeric literals not rely on the C standard libary. 
static AstNumLit* parse_int_literal(OnyxParser* parser) { AstNumLit* int_node = make_node(AstNumLit, Ast_Kind_NumLit); int_node->token = expect_token(parser, Token_Type_Literal_Integer); int_node->flags |= Ast_Flag_Comptime; - int_node->value.l = 0ll; - - token_toggle_end(int_node->token); - char* first_invalid = NULL; - i64 value = strtoll(int_node->token->text, &first_invalid, 0); - - int_node->value.l = value; - - int_node->type_node = (AstType *) &basic_type_int_unsized; + int_node->value.l = parse_int_token(int_node->token); + int_node->type_node = (AstType *) &parser->context->basic_types.type_int_unsized; // NOTE: Hex literals are unsigned. if (int_node->token->length >= 2 && int_node->token->text[1] == 'x') { int_node->was_hex_literal = 1; } - - token_toggle_end(int_node->token); return int_node; } @@ -288,21 +423,17 @@ static AstNumLit* parse_float_literal(OnyxParser* parser) { AstNumLit* float_node = make_node(AstNumLit, Ast_Kind_NumLit); float_node->token = expect_token(parser, Token_Type_Literal_Float); float_node->flags |= Ast_Flag_Comptime; - float_node->value.d = 0.0; - AstType* type = (AstType *) &basic_type_float_unsized; - token_toggle_end(float_node->token); + AstType* type = (AstType *) &parser->context->basic_types.type_float_unsized; + + float_node->value.d = parse_float_token(float_node->token); if (float_node->token->text[float_node->token->length - 1] == 'f') { - type = (AstType *) &basic_type_f32; - float_node->value.f = strtof(float_node->token->text, NULL); - } else { - float_node->value.d = strtod(float_node->token->text, NULL); + type = (AstType *) &parser->context->basic_types.type_f32; + float_node->value.f = float_node->value.d; } float_node->type_node = type; - - token_toggle_end(float_node->token); return float_node; } @@ -324,13 +455,25 @@ static b32 parse_possible_struct_literal(OnyxParser* parser, AstTyped* left, Ast sl->token = parser->curr; sl->stnode = left; - arguments_initialize(&sl->args); + 
arguments_initialize(parser->context, &sl->args); expect_token(parser, '.'); expect_token(parser, '{'); + if (consume_token_if_next(parser, Token_Type_Dot_Dot)) { + sl->extension_value = parse_expression(parser, 0); + + if (peek_token(0)->type != '}') { + expect_token(parser, ','); + } + } + parse_arguments(parser, '}', &sl->args); + if (sl->extension_value && bh_arr_length(sl->args.values) > 0) { + ONYX_ERROR(sl->token->pos, Error_Critical, "All initializers must be named when using '..value' in a struct literal."); + } + *ret = (AstTyped *) sl; return 1; } @@ -342,7 +485,7 @@ static b32 parse_possible_array_literal(OnyxParser* parser, AstTyped* left, AstT al->token = parser->curr; al->atnode = left; - bh_arr_new(global_heap_allocator, al->values, 4); + bh_arr_new(parser->context->gp_alloc, al->values, 4); fori (i, 0, 4) al->values[i] = NULL; expect_token(parser, '.'); @@ -402,39 +545,66 @@ static void parse_arguments(OnyxParser* parser, TokenType end_token, Arguments* } } +static b32 value_is_placeholder(AstTyped *arg) { + if (arg->kind != Ast_Kind_Symbol) return 0; + if (arg->token->length > 1) return 0; + if (arg->token->text[0] != '_') return 0; + return 1; +} + static AstCall* parse_function_call(OnyxParser *parser, AstTyped *callee) { AstCall* call_node = make_node(AstCall, Ast_Kind_Call); call_node->token = expect_token(parser, '('); call_node->callee = callee; - arguments_initialize(&call_node->args); + arguments_initialize(parser->context, &call_node->args); parse_arguments(parser, ')', &call_node->args); - while (consume_token_if_next(parser, '!')) { - AstCodeBlock* code_block = make_node(AstCodeBlock, Ast_Kind_Code_Block); - code_block->token = parser->curr; - code_block->type_node = builtin_code_type; + // while (consume_token_if_next(parser, '!')) { + // AstCodeBlock* code_block = make_node(AstCodeBlock, Ast_Kind_Code_Block); + // code_block->token = parser->curr; + // code_block->type_node = parser->context->builtins.code_type; - code_block->code = 
(AstNode *) parse_block(parser, 1, NULL); - ((AstBlock *) code_block->code)->rules = Block_Rule_Code_Block; - bh_arr_push(call_node->args.values, (AstTyped *) code_block); - } + // code_block->code = (AstNode *) parse_block(parser, 1, NULL); + // ((AstBlock *) code_block->code)->rules = Block_Rule_Code_Block; + // bh_arr_push(call_node->args.values, (AstTyped *) code_block); + // } // Wrap expressions in AstArgument bh_arr_each(AstTyped *, arg, call_node->args.values) { if ((*arg) == NULL) continue; - *arg = (AstTyped *) make_argument(parser->allocator, *arg); + + if (value_is_placeholder(*arg)) { + if (call_node->placeholder_argument_position > 0) { + ONYX_ERROR((*arg)->token->pos, Error_Critical, "Cannot have more than one placeholder argument ('_')."); + } + + call_node->placeholder_argument_position = (arg - call_node->args.values) + 1; + *arg = NULL; + } else { + *arg = (AstTyped *) make_argument(parser->context, *arg); + } } bh_arr_each(AstNamedValue *, named_value, call_node->args.named_values) { if ((*named_value)->value == NULL) continue; - (*named_value)->value = (AstTyped *) make_argument(parser->allocator, (AstTyped *) (*named_value)->value); + (*named_value)->value = (AstTyped *) make_argument(parser->context, (AstTyped *) (*named_value)->value); } return call_node; } +static b32 parse_placeholder(OnyxParser* parser) { + OnyxToken *sym = peek_token(0); + if (sym->type != Token_Type_Symbol) return 0; + if (sym->length != 1) return 0; + if (sym->text[0] != '_') return 0; + + consume_token(parser); + return 1; +} + static AstTyped* parse_factor(OnyxParser* parser) { AstTyped* retval = NULL; @@ -511,7 +681,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { case '.': { if (parse_possible_struct_literal(parser, NULL, &retval)) return retval; if (parse_possible_array_literal(parser, NULL, &retval)) return retval; - if (parse_possible_unary_field_access(parser, &retval)) return retval; + if (parse_possible_unary_field_access(parser, &retval)) break; goto 
no_match; } @@ -551,7 +721,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { AstSizeOf* so_node = make_node(AstSizeOf, Ast_Kind_Size_Of); so_node->token = expect_token(parser, Token_Type_Keyword_Sizeof); so_node->so_ast_type = (AstType *) parse_type(parser); - so_node->type_node = (AstType *) &basic_type_i32; + so_node->type_node = (AstType *) &parser->context->basic_types.type_i32; retval = (AstTyped *) so_node; break; @@ -561,7 +731,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { AstAlignOf* ao_node = make_node(AstAlignOf, Ast_Kind_Align_Of); ao_node->token = expect_token(parser, Token_Type_Keyword_Alignof); ao_node->ao_ast_type = (AstType *) parse_type(parser); - ao_node->type_node = (AstType *) &basic_type_i32; + ao_node->type_node = (AstType *) &parser->context->basic_types.type_i32; retval = (AstTyped *) ao_node; break; @@ -609,7 +779,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { case Token_Type_Literal_True: { AstNumLit* bool_node = make_node(AstNumLit, Ast_Kind_NumLit); - bool_node->type_node = (AstType *) &basic_type_bool; + bool_node->type_node = (AstType *) &parser->context->basic_types.type_bool; bool_node->token = expect_token(parser, Token_Type_Literal_True); bool_node->value.i = 1; bool_node->flags |= Ast_Flag_Comptime; @@ -619,7 +789,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { case Token_Type_Literal_False: { AstNumLit* bool_node = make_node(AstNumLit, Ast_Kind_NumLit); - bool_node->type_node = (AstType *) &basic_type_bool; + bool_node->type_node = (AstType *) &parser->context->basic_types.type_bool; bool_node->token = expect_token(parser, Token_Type_Literal_False); bool_node->value.i = 0; bool_node->flags |= Ast_Flag_Comptime; @@ -629,7 +799,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { case Token_Type_Keyword_Package: { if (!parser->allow_package_expressions) - onyx_report_warning(peek_token(-1)->pos, "Use of deprecated feature: package expression."); + ONYX_WARNING(peek_token(-1)->pos, "Use of 
deprecated feature: package expression."); retval = (AstTyped *) parse_package_expression(parser); break; @@ -644,7 +814,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { OnyxToken* do_token = expect_token(parser, Token_Type_Keyword_Do); AstDoBlock* do_block = make_node(AstDoBlock, Ast_Kind_Do_Block); do_block->token = do_token; - do_block->type_node = (AstType *) &basic_type_auto_return; + do_block->type_node = (AstType *) &parser->context->basic_types.type_auto_return; if (consume_token_if_next(parser, Token_Type_Right_Arrow)) { do_block->type_node = parse_type(parser); @@ -654,7 +824,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { AstBlock *tmp_block = make_node(AstBlock, Ast_Kind_Block); tmp_block->token = do_token; - tmp_block->binding_scope = scope_create(parser->allocator, parser->current_scope, parser->curr->pos); + tmp_block->binding_scope = scope_create(parser->context, parser->current_scope, parser->curr->pos); tmp_block->binding_scope->name = ""; parser->current_scope = tmp_block->binding_scope; @@ -685,10 +855,10 @@ static AstTyped* parse_factor(OnyxParser* parser) { AstCodeBlock* code_block = make_node(AstCodeBlock, Ast_Kind_Code_Block); code_block->token = expect_token(parser, '['); - assert(builtin_code_type != NULL); - code_block->type_node = builtin_code_type; + assert(parser->context->builtins.code_type != NULL); + code_block->type_node = parser->context->builtins.code_type; - bh_arr_new(global_heap_allocator, code_block->binding_symbols, 4); + bh_arr_new(parser->context->gp_alloc, code_block->binding_symbols, 4); while (!consume_token_if_next(parser, ']')) { if (parser->hit_unexpected_token) return (AstTyped *) code_block; @@ -714,6 +884,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { // :fallthrough } + case Token_Type_Keyword_Struct: case '?': { AstType *type = parse_type(parser); retval = (AstTyped *) type; @@ -729,16 +900,16 @@ static AstTyped* parse_factor(OnyxParser* parser) { case Token_Type_Literal_Char: { 
AstNumLit* char_lit = make_node(AstNumLit, Ast_Kind_NumLit); char_lit->flags |= Ast_Flag_Comptime; - char_lit->type_node = (AstType *) &basic_type_int_unsized; + char_lit->type_node = (AstType *) &parser->context->basic_types.type_int_unsized; char_lit->token = expect_token(parser, Token_Type_Literal_Char); char_lit->was_char_literal = 1; - i8 dest = '\0'; + char dest[2]; i32 length = string_process_escape_seqs((char *) &dest, char_lit->token->text, 1); - char_lit->value.i = (u32) dest; + char_lit->value.i = (u32) dest[0]; if (length != 1) { - onyx_report_error(char_lit->token->pos, Error_Critical, "Expected only a single character in character literal."); + ONYX_ERROR(char_lit->token->pos, Error_Critical, "Expected only a single character in character literal."); } retval = (AstTyped *) char_lit; @@ -750,7 +921,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { AstFileContents* fc = make_node(AstFileContents, Ast_Kind_File_Contents); fc->token = parser->prev - 1; fc->filename_expr = parse_expression(parser, 0); - fc->type = type_make_slice(parser->allocator, &basic_types[Basic_Kind_U8]); + fc->type = type_make_slice(parser->context, parser->context->types.basic[Basic_Kind_U8]); if (parser->current_function_stack && bh_arr_length(parser->current_function_stack) > 0) { bh_arr_push(bh_arr_last(parser->current_function_stack)->nodes_that_need_entities_after_clone, (AstNode *) fc); @@ -766,7 +937,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { OnyxToken* dir_token = parser->curr - 2; OnyxToken* str_token = bh_alloc(parser->allocator, sizeof(OnyxToken)); - str_token->text = bh_strdup(global_heap_allocator, (char *) dir_token->pos.filename); + str_token->text = bh_strdup(parser->context->gp_alloc, (char *) dir_token->pos.filename); str_token->length = strlen(dir_token->pos.filename); str_token->pos = dir_token->pos; str_token->type = Token_Type_Literal_String; @@ -782,30 +953,30 @@ static AstTyped* parse_factor(OnyxParser* parser) { else if 
(parse_possible_directive(parser, "line")) { OnyxToken* dir_token = parser->curr - 2; - AstNumLit* line_num = make_int_literal(parser->allocator, dir_token->pos.line); + AstNumLit* line_num = make_int_literal(parser->context, dir_token->pos.line); retval = (AstTyped *) line_num; break; } else if (parse_possible_directive(parser, "column")) { OnyxToken* dir_token = parser->curr - 2; - AstNumLit* col_num = make_int_literal(parser->allocator, dir_token->pos.column); + AstNumLit* col_num = make_int_literal(parser->context, dir_token->pos.column); retval = (AstTyped *) col_num; break; } else if (parse_possible_directive(parser, "char")) { AstNumLit* char_lit = make_node(AstNumLit, Ast_Kind_NumLit); char_lit->flags |= Ast_Flag_Comptime; - char_lit->type_node = (AstType *) &basic_type_int_unsized; + char_lit->type_node = (AstType *) &parser->context->basic_types.type_int_unsized; char_lit->token = expect_token(parser, Token_Type_Literal_String); char_lit->was_char_literal = 1; - i8 dest = '\0'; + char dest[2]; i32 length = string_process_escape_seqs((char *) &dest, char_lit->token->text, 1); - char_lit->value.i = (u32) dest; + char_lit->value.i = (u32) dest[0]; if (length != 1) { - onyx_report_error(char_lit->token->pos, Error_Critical, "Expected only a single character in character literal."); + ONYX_ERROR(char_lit->token->pos, Error_Critical, "Expected only a single character in character literal."); } retval = (AstTyped *) char_lit; @@ -826,7 +997,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { solid->poly_proc = (AstFunction *) parse_factor(parser); solid->known_polyvars = NULL; - bh_arr_new(global_heap_allocator, solid->known_polyvars, 2); + bh_arr_new(parser->context->gp_alloc, solid->known_polyvars, 2); expect_token(parser, '{'); while (!consume_token_if_next(parser, '}')) { @@ -856,7 +1027,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { AstDirectiveDefined* defined = make_node(AstDirectiveDefined, Ast_Kind_Directive_Defined); // 
:LinearTokenDependent defined->token = parser->curr - 1; - defined->type_node = (AstType *) &basic_type_bool; + defined->type_node = (AstType *) &parser->context->basic_types.type_bool; parser->allow_package_expressions = 1; expect_token(parser, '('); @@ -883,7 +1054,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { parser->parse_calls = 1; if (consume_token_if_next(parser, '(')) { - bh_arr_new(global_heap_allocator, insert->binding_exprs, 4); + bh_arr_new(parser->context->gp_alloc, insert->binding_exprs, 4); while (!consume_token_if_next(parser, ')')) { if (parser->hit_unexpected_token) break; @@ -914,7 +1085,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { else if (parse_possible_directive(parser, "first")) { AstDirectiveFirst *first = make_node(AstDirectiveFirst, Ast_Kind_Directive_First); first->token = parser->curr - 1; - first->type_node = (AstType *) &basic_type_bool; + first->type_node = (AstType *) &parser->context->basic_types.type_bool; retval = (AstTyped *) first; break; @@ -923,7 +1094,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { AstDirectiveExportName *export_name = make_node(AstDirectiveExportName, Ast_Kind_Directive_Export_Name); export_name->token = parser->curr - 1; export_name->func = (AstFunction *) parse_factor(parser); - export_name->type_node = builtin_string_type; + export_name->type_node = parser->context->builtins.string_type; retval = (AstTyped *) export_name; break; @@ -931,7 +1102,7 @@ static AstTyped* parse_factor(OnyxParser* parser) { else if (parse_possible_directive(parser, "this_package")) { AstPackage *this_package = make_node(AstPackage, Ast_Kind_Directive_This_Package); this_package->token = parser->curr - 1; - this_package->type_node = builtin_package_id_type; + this_package->type_node = parser->context->builtins.package_id_type; ENTITY_SUBMIT(this_package); retval = (AstTyped *) this_package; @@ -939,20 +1110,20 @@ static AstTyped* parse_factor(OnyxParser* parser) { } else if 
(parse_possible_directive(parser, "Self")) { if (parser->injection_point == NULL) { - onyx_report_error((parser->curr - 2)->pos, Error_Critical, "#Self is only allowed in an #inject block."); + ONYX_ERROR((parser->curr - 2)->pos, Error_Critical, "#Self is only allowed in an #inject block."); } retval = (AstTyped *) parser->injection_point; break; } - onyx_report_error(parser->curr->pos, Error_Critical, "Invalid directive in expression."); + ONYX_ERROR(parser->curr->pos, Error_Critical, "Invalid directive in expression."); return NULL; } default: no_match: - onyx_report_error(parser->curr->pos, Error_Critical, "Unexpected token '%s'.", token_name(parser->curr)); + ONYX_ERROR(parser->curr->pos, Error_Critical, "Unexpected token '%s'.", token_name(parser->curr)); return NULL; } @@ -1014,6 +1185,17 @@ static AstTyped* parse_factor(OnyxParser* parser) { break; } + case '!': { + AstUnaryOp* unop = make_node(AstUnaryOp, Ast_Kind_Unary_Op); + unop->token = expect_token(parser, '!'); + unop->operation = Unary_Op_Unwrap; + + unop->expr = retval; + + retval = (AstTyped *) unop; + break; + } + case Token_Type_Inserted_Semicolon: { // // This is a special case for -> method calls because they should be able to be split across @@ -1036,11 +1218,11 @@ static AstTyped* parse_factor(OnyxParser* parser) { method_call->left = retval; OnyxToken *method_name = expect_token(parser, Token_Type_Symbol); - AstNode *method = make_symbol(context.ast_alloc, method_name); + AstNode *method = make_symbol(parser->context, method_name); if (parser->curr->type != '(') { // CLEANUP: This error message is horrendous. - onyx_report_error(parser->curr->pos, Error_Critical, "Bad method call. Expected object->method(arguments), got something else."); + ONYX_ERROR(parser->curr->pos, Error_Critical, "Bad method call. 
Expected object->method(arguments), got something else."); break; } @@ -1050,6 +1232,17 @@ static AstTyped* parse_factor(OnyxParser* parser) { break; } + case Token_Type_Proc_Macro_Body: { + OnyxToken *tkn = expect_token(parser, Token_Type_Proc_Macro_Body); + AstProceduralExpansion *proc_expand = make_node(AstProceduralExpansion, Ast_Kind_Procedural_Expansion); + proc_expand->token = tkn - 1; + proc_expand->expansion_body = tkn; + proc_expand->proc_macro = retval; + + retval = (AstTyped *) proc_expand; + break; + } + default: goto factor_parsed; } } @@ -1149,6 +1342,7 @@ static BinaryOp binary_op_from_token_type(TokenType t) { case Token_Type_Pipe: return Binary_Op_Pipe; case Token_Type_Dot_Dot: return Binary_Op_Range; + case Token_Type_Dot_Dot_Equal: return Binary_Op_Range_Equal; case '[': return Binary_Op_Subscript; case Token_Type_Question_Question: return Binary_Op_Coalesce; default: return Binary_Op_Count; @@ -1159,8 +1353,20 @@ static BinaryOp binary_op_from_current_token(OnyxParser *parser) { BinaryOp op = binary_op_from_token_type(parser->curr->type); if (op == Binary_Op_Count && parser->curr->type == Token_Type_Inserted_Semicolon) { - if (peek_token(1)->type == Token_Type_Pipe) { + int n = 1; + + while (peek_token(n)->type == Token_Type_Comment) { + n++; + } + + if (peek_token(n)->type == Token_Type_Pipe) { + // This is a slight hack. Though we have peeked ahead n tokens in order + // to skip the potential comments, `consume_token` will eat the comments + // automatically, so we don't need to call `consume_token` n times, just + // once. 
+ // - brendanfh, 2024/09/25 consume_token(parser); + op = Binary_Op_Pipe; } } @@ -1187,7 +1393,7 @@ static AstTyped* parse_compound_expression(OnyxParser* parser, b32 assignment_al AstCompound* compound = make_node(AstCompound, Ast_Kind_Compound); compound->token = parser->curr; - bh_arr_new(global_heap_allocator, compound->exprs, 2); + bh_arr_new(parser->context->gp_alloc, compound->exprs, 2); bh_arr_push(compound->exprs, first); while (consume_token_if_next(parser, ',')) { @@ -1210,7 +1416,7 @@ static AstTyped* parse_compound_expression(OnyxParser* parser, b32 assignment_al static AstTyped* parse_expression(OnyxParser* parser, b32 assignment_allowed) { bh_arr(AstBinaryOp*) tree_stack = NULL; - bh_arr_new(global_heap_allocator, tree_stack, 4); + bh_arr_new(parser->context->gp_alloc, tree_stack, 4); bh_arr_set_length(tree_stack, 0); AstTyped* left = parse_factor(parser); @@ -1257,6 +1463,10 @@ static AstTyped* parse_expression(OnyxParser* parser, b32 assignment_allowed) { AstBinaryOp* bin_op; if (bin_op_kind == Binary_Op_Pipe) bin_op = make_node(AstBinaryOp, Ast_Kind_Pipe); else if (bin_op_kind == Binary_Op_Range) bin_op = (AstBinaryOp *) make_node(AstRangeLiteral, Ast_Kind_Range_Literal); + else if (bin_op_kind == Binary_Op_Range_Equal) { + bin_op = (AstBinaryOp *) make_node(AstRangeLiteral, Ast_Kind_Range_Literal); + ((AstRangeLiteral *) bin_op)->inclusive = 1; + } else bin_op = make_node(AstBinaryOp, Ast_Kind_Binary_Op); bin_op->token = bin_op_tok; @@ -1421,7 +1631,7 @@ static AstFor* parse_for_stmt(OnyxParser* parser) { || next_tokens_are(parser, 2, Token_Type_Symbol, ',') ) { for_node->var = make_local( - parser->allocator, + parser->context, expect_token(parser, Token_Type_Symbol), NULL ); @@ -1432,9 +1642,9 @@ static AstFor* parse_for_stmt(OnyxParser* parser) { if (consume_token_if_next(parser, ',')) { for_node->index_var = make_local( - parser->allocator, + parser->context, expect_token(parser, Token_Type_Symbol), - (AstType *) &basic_type_u32 + (AstType 
*) &parser->context->basic_types.type_u32 ); if (consume_token_if_next(parser, ':')) { @@ -1448,7 +1658,7 @@ static AstFor* parse_for_stmt(OnyxParser* parser) { static char it_name[] = "it "; static OnyxToken it_token = { Token_Type_Symbol, 2, it_name, { 0 } }; - AstLocal* var_node = make_local(parser->allocator, &it_token, NULL); + AstLocal* var_node = make_local(parser->context, &it_token, NULL); for_node->var = var_node; } @@ -1462,11 +1672,14 @@ static AstSwitchCase* parse_case_stmt(OnyxParser* parser) { AstSwitchCase *sc_node = make_node(AstSwitchCase, Ast_Kind_Switch_Case); sc_node->token = expect_token(parser, Token_Type_Keyword_Case); - if (parse_possible_directive(parser, "default")) { + if ( + parse_possible_directive(parser, "default") || + parse_placeholder(parser) + ) { sc_node->is_default = 1; } else { - bh_arr_new(global_heap_allocator, sc_node->values, 1); + bh_arr_new(parser->context->gp_alloc, sc_node->values, 1); parser->parse_quick_functions = 0; AstTyped* value = parse_expression(parser, 1); @@ -1491,7 +1704,7 @@ static AstSwitchCase* parse_case_stmt(OnyxParser* parser) { is_pointer = 1; OnyxToken *capture_symbol = expect_token(parser, Token_Type_Symbol); - AstLocal *capture = make_local(parser->allocator, capture_symbol, NULL); + AstLocal *capture = make_local(parser->context, capture_symbol, NULL); sc_node->capture = capture; sc_node->capture_is_by_pointer = is_pointer; @@ -1557,7 +1770,7 @@ static i32 parse_possible_compound_symbol_declaration(OnyxParser* parser, AstNod // At this point, we are sure it is a compound declaration. 
AstCompound* local_compound = make_node(AstCompound, Ast_Kind_Compound); - bh_arr_new(global_heap_allocator, local_compound->exprs, token_offset / 2); + bh_arr_new(parser->context->gp_alloc, local_compound->exprs, token_offset / 2); AstLocal* first_local = NULL; AstLocal* prev_local = NULL; @@ -1566,11 +1779,11 @@ static i32 parse_possible_compound_symbol_declaration(OnyxParser* parser, AstNod if (parser->hit_unexpected_token) return 1; OnyxToken* local_sym = expect_token(parser, Token_Type_Symbol); - AstNode* sym_node = make_symbol(parser->allocator, local_sym); + AstNode* sym_node = make_symbol(parser->context, local_sym); bh_arr_push(local_compound->exprs, (AstTyped *) sym_node); if (!consume_token_if_next(parser, '~')) { - AstLocal* new_local = make_local(parser->allocator, local_sym, NULL); + AstLocal* new_local = make_local(parser->context, local_sym, NULL); if (prev_local == NULL) { first_local = new_local; } else { @@ -1584,19 +1797,17 @@ static i32 parse_possible_compound_symbol_declaration(OnyxParser* parser, AstNod expect_token(parser, ':'); - if (parser->curr->type == '=') { - AstBinaryOp* assignment = make_binary_op(parser->allocator, Binary_Op_Assign, (AstTyped *) local_compound, NULL); - assignment->token = expect_token(parser, '='); - assignment->right = parse_compound_expression(parser, 0); - - prev_local->next = (AstNode *) assignment; - - } else { + if (parser->curr->type != '=') { AstType* type_for_all = NULL; // See comment in parse_possible_symbol_declaration about "#auto" if (!parse_possible_directive(parser, "auto")) { type_for_all = parse_type(parser); + + // Placeholders (_) are discarded and allow for type inference. 
+ if (value_is_placeholder((AstTyped *) type_for_all)) { + type_for_all = NULL; + } } forll (AstLocal, local, first_local, next) { @@ -1604,6 +1815,14 @@ static i32 parse_possible_compound_symbol_declaration(OnyxParser* parser, AstNod } } + if (parser->curr->type == '=') { + AstBinaryOp* assignment = make_binary_op(parser->context, Binary_Op_Assign, (AstTyped *) local_compound, NULL); + assignment->token = expect_token(parser, '='); + assignment->right = parse_compound_expression(parser, 0); + + prev_local->next = (AstNode *) assignment; + } + *ret = (AstNode *) first_local; return 1; } @@ -1646,10 +1865,15 @@ static i32 parse_possible_symbol_declaration(OnyxParser* parser, AstNode** ret) // typed on the first assignment. } else { type_node = parse_type(parser); + + // Placeholders (_) are discarded and allow for type inference. + if (value_is_placeholder((AstTyped *) type_node)) { + type_node = NULL; + } } } - AstLocal* local = make_local(parser->allocator, symbol, type_node); + AstLocal* local = make_local(parser->context, symbol, type_node); *ret = (AstNode *) local; if (parser->curr->type == '=') { @@ -1683,6 +1907,10 @@ static AstReturn* parse_return_stmt(OnyxParser* parser) { return_node->count += 1; } + if (parse_possible_directive(parser, "from_proc")) { + return_node->from_proc = 1; + } + AstTyped* expr = NULL; if (parser->curr->type != ';' && parser->curr->type != Token_Type_Inserted_Semicolon) { @@ -1740,11 +1968,13 @@ static AstNode* parse_statement(OnyxParser* parser) { case Token_Type_Literal_String: case Token_Type_Keyword_Cast: retval = (AstNode *) parse_compound_expression(parser, 1); + if (!retval) break; + if (retval->kind == Ast_Kind_Call || retval->kind == Ast_Kind_Method_Call) { if (parser->curr->type == '{') { AstCodeBlock* code_block = make_node(AstCodeBlock, Ast_Kind_Code_Block); code_block->token = parser->curr; - code_block->type_node = builtin_code_type; + code_block->type_node = parser->context->builtins.code_type; code_block->code = 
(AstNode *) parse_block(parser, 1, NULL); ((AstBlock *) code_block->code)->rules = Block_Rule_Code_Block; @@ -1753,13 +1983,13 @@ static AstNode* parse_statement(OnyxParser* parser) { if (dest->kind == Ast_Kind_Method_Call) { dest = (AstCall *) ((AstBinaryOp *) dest)->right; if (dest->kind != Ast_Kind_Call) { - onyx_report_error(retval->token->pos, Error_Critical, "Expected function call on right side of '->'."); + ONYX_ERROR(retval->token->pos, Error_Critical, "Expected function call on right side of '->'."); needs_semicolon = 0; break; } } - bh_arr_push(dest->args.values, (AstTyped *) make_argument(context.ast_alloc, (AstTyped *) code_block)); + bh_arr_push(dest->args.values, (AstTyped *) make_argument(parser->context, (AstTyped *) code_block)); needs_semicolon = 0; } } @@ -1816,8 +2046,24 @@ static AstNode* parse_statement(OnyxParser* parser) { case Token_Type_Keyword_Use: { needs_semicolon = 0; - OnyxToken *use_token = expect_token(parser, Token_Type_Keyword_Use); - parse_import_statement(parser, use_token); + if (next_tokens_are(parser, 3, Token_Type_Keyword_Use, Token_Type_Symbol, ':')) { + OnyxToken *use_token = expect_token(parser, Token_Type_Keyword_Use); + + AstLocal *out = NULL; + i32 res = parse_possible_symbol_declaration(parser, (AstNode **) &out); + if (res == 2) { + ONYX_ERROR(use_token->pos, Error_Critical, "You cannot 'use' a binding in this way. 
Remove the 'use'."); + parser->hit_unexpected_token = 1; + break; + } + + out->auto_dispose = 1; + retval = (AstNode *) out; + + } else { + OnyxToken *use_token = expect_token(parser, Token_Type_Keyword_Use); + parse_import_statement(parser, use_token); + } break; } @@ -1833,22 +2079,22 @@ static AstNode* parse_statement(OnyxParser* parser) { sym_token->text = bh_strdup(parser->allocator, "__saved_context "); sym_token->pos = ((OnyxFilePos) {0}); - AstNode *sym_node = make_symbol(parser->allocator, sym_token); + AstNode *sym_node = make_symbol(parser->context, sym_token); - AstLocal* context_tmp = make_local(parser->allocator, sym_token, NULL); + AstLocal* context_tmp = make_local(parser->context, sym_token, NULL); retval = (AstNode *) context_tmp; AstBinaryOp* assignment = make_node(AstBinaryOp, Ast_Kind_Binary_Op); assignment->token = directive_token; assignment->operation = Binary_Op_Assign; assignment->left = (AstTyped *) sym_node; - assignment->right = builtin_context_variable; + assignment->right = parser->context->builtins.context_variable; context_tmp->next = (AstNode *) assignment; AstBinaryOp* assignment2 = make_node(AstBinaryOp, Ast_Kind_Binary_Op); assignment2->token = directive_token + 1; assignment2->operation = Binary_Op_Assign; - assignment2->left = builtin_context_variable; + assignment2->left = parser->context->builtins.context_variable; assignment2->right = (AstTyped *) sym_node; AstDefer* defer_node = make_node(AstDefer, Ast_Kind_Defer); @@ -1896,6 +2142,15 @@ static AstNode* parse_statement(OnyxParser* parser) { break; } + if (parse_possible_directive(parser, "error")) { + AstDirectiveError *error = make_node(AstDirectiveError, Ast_Kind_Directive_Error); + error->token = parser->curr - 2; + error->error_msg = expect_token(parser, Token_Type_Literal_String); + + ENTITY_SUBMIT(error); + break; + } + if (next_tokens_are(parser, 2, '#', Token_Type_Symbol)) { retval = (AstNode *) parse_factor(parser); break; @@ -1907,15 +2162,41 @@ static AstNode* 
parse_statement(OnyxParser* parser) { } if (needs_semicolon) { - if (!consume_token_if_next(parser, ';')) { - onyx_report_error((parser->curr - 1)->pos, Error_Critical, "Expected a semi-colon after this token."); - parser->hit_unexpected_token = 1; + // Allows for not needing the semicolon when the '}' is on the same line. + // if x { print() } + if (peek_token(0)->type != '}') { + if (!consume_token_if_next(parser, ';')) { + ONYX_ERROR((parser->curr - 1)->pos, Error_Critical, "Expected a semi-colon after this token."); + parser->hit_unexpected_token = 1; + } } } return retval; } +static AstNode *parse_statements_until(OnyxParser *parser, TokenType end_token) { + AstNode **next = NULL; + AstNode *root = NULL; + while (!consume_token_if_next(parser, end_token)) { + AstNode *stmt = parse_statement(parser); + if (parser->hit_unexpected_token) { + break; + } + + if (!root) { + root = stmt; + next = &root->next; + } else { + *next = stmt; + while (stmt->next != NULL) stmt = stmt->next; + next = &stmt->next; + } + } + + return root; +} + static AstBlock* parse_block(OnyxParser* parser, b32 make_a_new_scope, char* block_name) { AstBlock* block = make_node(AstBlock, Ast_Kind_Block); block->rules = Block_Rule_Normal; @@ -1927,7 +2208,7 @@ static AstBlock* parse_block(OnyxParser* parser, b32 make_a_new_scope, char* blo } if (make_a_new_scope) { - block->binding_scope = scope_create(parser->allocator, parser->current_scope, parser->curr->pos); + block->binding_scope = scope_create(parser->context, parser->current_scope, parser->curr->pos); block->binding_scope->name = block_name; parser->current_scope = block->binding_scope; } @@ -1967,7 +2248,7 @@ static void parse_polymorphic_variable(OnyxParser* parser, AstType*** next_inser bh_arr(AstPolyParam) pv = NULL; if (parser->polymorph_context.poly_params == NULL) - onyx_report_error(parser->curr->pos, Error_Critical, "Polymorphic variable not valid here."); + ONYX_ERROR(parser->curr->pos, Error_Critical, "Polymorphic variable not 
valid here."); else pv = *parser->polymorph_context.poly_params; @@ -2000,44 +2281,66 @@ static void parse_polymorphic_variable(OnyxParser* parser, AstType*** next_inser } } -static AstType* parse_compound_type(OnyxParser* parser) { - // CLEANUP this is little weird having this here because it means that this parses: - // - // foo :: (x: (something_here: i32)) -> void --- - // +static AstType* parse_return_type(OnyxParser* parser, bh_arr(AstLocal *) *pimplicit_locals) { + b32 values_are_named = 0; + OnyxToken *last_named_value = NULL; + + if (!consume_token_if_next(parser, '(')) { + return parse_type(parser); + } + if (next_tokens_are(parser, 2, Token_Type_Symbol, ':')) { - consume_tokens(parser, 2); + values_are_named = 1; + last_named_value = expect_token(parser, Token_Type_Symbol); + consume_tokens(parser, 1); } - AstType* first = parse_type(parser); + AstType* return_type = parse_type(parser); - if (parser->curr->type == ',') { - AstCompoundType* ctype = make_node(AstCompoundType, Ast_Kind_Type_Compound); - ctype->token = parser->curr; + bh_arr(AstLocal *) implicit_locals = NULL; + if (pimplicit_locals && values_are_named) { + implicit_locals = *pimplicit_locals; + if (!implicit_locals) bh_arr_new(parser->context->gp_alloc, implicit_locals, 2); - bh_arr_new(global_heap_allocator, ctype->types, 2); - bh_arr_push(ctype->types, first); + bh_arr_push(implicit_locals, make_local(parser->context, last_named_value, return_type)); + } - while (consume_token_if_next(parser, ',')) { - if (parser->hit_unexpected_token) return (AstType *) ctype; + if (parser->curr->type != ',') { + expect_token(parser, ')'); + if (pimplicit_locals) *pimplicit_locals = implicit_locals; + return return_type; + } - if (next_tokens_are(parser, 2, Token_Type_Symbol, ':')) { - consume_tokens(parser, 2); - } + AstCompoundType* ctype = make_node(AstCompoundType, Ast_Kind_Type_Compound); + ctype->token = parser->curr; + + bh_arr_new(parser->context->gp_alloc, ctype->types, 2); + 
bh_arr_push(ctype->types, return_type); - bh_arr_push(ctype->types, parse_type(parser)); + while (consume_token_if_next(parser, ',')) { + if (parser->hit_unexpected_token) return (AstType *) ctype; + + if (values_are_named) { + last_named_value = expect_token(parser, Token_Type_Symbol); + expect_token(parser, ':'); } - return (AstType *) ctype; + return_type = parse_type(parser); + bh_arr_push(ctype->types, return_type); - } else { - return first; + if (pimplicit_locals && values_are_named) { + bh_arr_push(implicit_locals, make_local(parser->context, last_named_value, return_type)); + } } + + expect_token(parser, ')'); + if (pimplicit_locals) *pimplicit_locals = implicit_locals; + return (AstType *) ctype; } static AstType* parse_function_type(OnyxParser* parser, OnyxToken* proc_token) { bh_arr(AstType *) params = NULL; - bh_arr_new(global_scratch_allocator, params, 4); + bh_arr_new(parser->context->scratch_alloc, params, 4); bh_arr_set_length(params, 0); expect_token(parser, '('); @@ -2054,9 +2357,8 @@ static AstType* parse_function_type(OnyxParser* parser, OnyxToken* proc_token) { expect_token(parser, ','); } - AstType* return_type = (AstType *) &basic_type_void; - if (consume_token_if_next(parser, Token_Type_Right_Arrow)) - return_type = parse_type(parser); + expect_token(parser, Token_Type_Right_Arrow); + AstType* return_type = parse_return_type(parser, NULL); i64 param_count = bh_arr_length(params); AstFunctionType* new = onyx_ast_node_new(parser->allocator, @@ -2076,7 +2378,9 @@ static AstType* parse_type(OnyxParser* parser) { AstType* root = NULL; AstType** next_insertion = &root; - while (1) { + b32 type_can_be_done = 0; + + while (next_insertion != NULL) { if (parser->hit_unexpected_token) return root; switch ((u16) parser->curr->type) { @@ -2089,6 +2393,7 @@ static AstType* parse_type(OnyxParser* parser) { *next_insertion = (AstType *) new; next_insertion = &new->elem; + type_can_be_done = 0; break; } @@ -2127,11 +2432,13 @@ static AstType* 
parse_type(OnyxParser* parser) { expect_token(parser, ']'); *next_insertion = (AstType *) new; next_insertion = &((AstSliceType *) new)->elem; + type_can_be_done = 0; break; } case '$': { parse_polymorphic_variable(parser, &next_insertion); + type_can_be_done = 1; break; } @@ -2153,7 +2460,7 @@ static AstType* parse_type(OnyxParser* parser) { OnyxToken* paren_token = expect_token(parser, '('); bh_arr(AstNode *) params = NULL; - bh_arr_new(global_heap_allocator, params, 2); + bh_arr_new(parser->context->gp_alloc, params, 2); while (!consume_token_if_next(parser, ')')) { if (parser->hit_unexpected_token) break; @@ -2176,6 +2483,7 @@ static AstType* parse_type(OnyxParser* parser) { if (peek_token(0)->type != '.') next_insertion = NULL; + type_can_be_done = 1; break; } @@ -2183,6 +2491,7 @@ static AstType* parse_type(OnyxParser* parser) { AstStructType* s_node = parse_struct(parser); *next_insertion = (AstType *) s_node; next_insertion = NULL; + type_can_be_done = 1; break; } @@ -2196,57 +2505,52 @@ static AstType* parse_type(OnyxParser* parser) { case '-': { *next_insertion = (AstType *) parse_expression(parser, 0); next_insertion = NULL; + type_can_be_done = 1; break; } case '(': { OnyxToken* matching = find_matching_paren(parser->curr); - - // :LinearTokenDependent - if ((matching + 1)->type == Token_Type_Right_Arrow) { - *next_insertion = parse_function_type(parser, parser->curr); - - } else { - expect_token(parser, '('); - *next_insertion = parse_compound_type(parser); - expect_token(parser, ')'); - } - + *next_insertion = parse_function_type(parser, parser->curr); next_insertion = NULL; + type_can_be_done = 1; break; } case Token_Type_Keyword_Typeof: { *next_insertion = (AstType *) parse_typeof(parser); next_insertion = NULL; + type_can_be_done = 1; break; } case '?': { - assert(builtin_optional_type); + assert(parser->context->builtins.optional_type); bh_arr(AstNode *) params = NULL; - bh_arr_new(global_heap_allocator, params, 1); + 
bh_arr_new(parser->context->gp_alloc, params, 1); bh_arr_set_length(params, 1); AstPolyCallType* pc_type = make_node(AstPolyCallType, Ast_Kind_Poly_Call_Type); pc_type->token = expect_token(parser, '?'); - pc_type->callee = builtin_optional_type; + pc_type->callee = parser->context->builtins.optional_type; pc_type->params = params; *next_insertion = (AstType *) pc_type; next_insertion = (AstType **) ¶ms[0]; + type_can_be_done = 0; break; } case '#': { if (parse_possible_directive(parser, "Self")) { if (parser->injection_point == NULL) { - onyx_report_error((parser->curr - 2)->pos, Error_Critical, "#Self is only allowed in an #inject block."); + ONYX_ERROR((parser->curr - 2)->pos, Error_Critical, "#Self is only allowed in an #inject block."); } *next_insertion = (AstType *) parser->injection_point; next_insertion = NULL; + type_can_be_done = 1; break; } } @@ -2258,6 +2562,7 @@ static AstType* parse_type(OnyxParser* parser) { field->expr = (AstTyped *) *next_insertion; *next_insertion = (AstType *) field; + type_can_be_done = 1; break; } @@ -2265,8 +2570,14 @@ static AstType* parse_type(OnyxParser* parser) { next_insertion = NULL; break; } + } - if (next_insertion == NULL) break; + if (!type_can_be_done) { + if (root) { + ONYX_ERROR(root->token->pos, Error_Critical, "Incomplete type when parsing."); + } else { + ONYX_ERROR(parser->curr->pos, Error_Critical, "Expected a type here."); + } } return root; @@ -2285,14 +2596,14 @@ static AstTypeOf* parse_typeof(OnyxParser* parser) { static void type_create_scope(OnyxParser *parser, Scope ** scope, OnyxToken* token) { if (scope && !*scope) { - *scope = scope_create(context.ast_alloc, parser->current_scope, token->pos); + *scope = scope_create(parser->context, parser->current_scope, token->pos); if (bh_arr_length(parser->current_symbol_stack) == 0) { (*scope)->name = ""; } else { OnyxToken* current_symbol = bh_arr_last(parser->current_symbol_stack); - (*scope)->name = bh_aprintf(global_heap_allocator, "%b", 
current_symbol->text, current_symbol->length); + (*scope)->name = bh_aprintf(parser->context->gp_alloc, "%b", current_symbol->text, current_symbol->length); } } } @@ -2315,7 +2626,7 @@ static AstStructType* parse_struct(OnyxParser* parser) { // Parse polymorphic parameters if (consume_token_if_next(parser, '(')) { bh_arr(AstPolyStructParam) poly_params = NULL; - bh_arr_new(global_heap_allocator, poly_params, 1); + bh_arr_new(parser->context->gp_alloc, poly_params, 1); while (!consume_token_if_next(parser, ')')) { if (parser->hit_unexpected_token) return NULL; @@ -2348,7 +2659,7 @@ static AstStructType* parse_struct(OnyxParser* parser) { parse_constraints(parser, &s_node->constraints); } - bh_arr_new(global_heap_allocator, s_node->members, 4); + bh_arr_new(parser->context->gp_alloc, s_node->members, 4); // Parse directives while (parser->curr->type == '#') { @@ -2373,7 +2684,7 @@ static AstStructType* parse_struct(OnyxParser* parser) { OnyxToken* directive_token = expect_token(parser, '#'); OnyxToken* symbol_token = expect_token(parser, Token_Type_Symbol); - onyx_report_error(directive_token->pos, Error_Critical, "unknown directive '#%b'.", symbol_token->text, symbol_token->length); + ONYX_ERROR(directive_token->pos, Error_Critical, "unknown directive '#%b'.", symbol_token->text, symbol_token->length); } } @@ -2383,7 +2694,7 @@ static AstStructType* parse_struct(OnyxParser* parser) { b32 member_is_used = 0; bh_arr(OnyxToken *) member_list_temp = NULL; - bh_arr_new(global_heap_allocator, member_list_temp, 4); + bh_arr_new(parser->context->gp_alloc, member_list_temp, 4); while (!consume_token_if_next(parser, '}')) { if (parser->hit_unexpected_token) return s_node; @@ -2431,6 +2742,11 @@ static AstStructType* parse_struct(OnyxParser* parser) { bh_arr_clear(member_list_temp); while (!consume_token_if_next(parser, ':')) { if (parser->hit_unexpected_token) return NULL; + if (parser->curr->type == Token_Type_Doc_Comment) { + consume_token(parser); + continue; + } + 
bh_arr_push(member_list_temp, expect_token(parser, Token_Type_Symbol)); if (parser->curr->type != ':') @@ -2456,8 +2772,8 @@ static AstStructType* parse_struct(OnyxParser* parser) { // them out of discussion for now. Initialized members should be treated special and // deserve their own line. if (bh_arr_length(member_list_temp) > 1) { - if (member_is_used) onyx_report_error((member_list_temp[0] - 1)->pos, Error_Critical, "'use' is only allowed for a single struct member declaration. Try splitting this compound declaration into multiple lines."); - if (initial_value) onyx_report_error(initial_value->token->pos, Error_Critical, "Intialized values are only allowed on single struct member declarations. Try splitting this compound initializer into multiple lines."); + if (member_is_used) ONYX_ERROR((member_list_temp[0] - 1)->pos, Error_Critical, "'use' is only allowed for a single struct member declaration. Try splitting this compound declaration into multiple lines."); + if (initial_value) ONYX_ERROR(initial_value->token->pos, Error_Critical, "Intialized values are only allowed on single struct member declarations. 
Try splitting this compound initializer into multiple lines."); } bh_arr_each(OnyxToken *, member_name, member_list_temp) { @@ -2510,12 +2826,12 @@ static AstUnionType* parse_union(OnyxParser* parser) { AstType *backing_type = parse_type(parser); u_node->tag_backing_type = backing_type; } else { - u_node->tag_backing_type = (AstType *) &basic_type_u32; + u_node->tag_backing_type = NULL; } if (consume_token_if_next(parser, '(')) { bh_arr(AstPolyStructParam) poly_params = NULL; - bh_arr_new(global_heap_allocator, poly_params, 1); + bh_arr_new(parser->context->gp_alloc, poly_params, 1); while (!consume_token_if_next(parser, ')')) { if (parser->hit_unexpected_token) return NULL; @@ -2549,7 +2865,7 @@ static AstUnionType* parse_union(OnyxParser* parser) { } parser->current_scope = scope_symbols_in_unions_should_be_bound_to; - bh_arr_new(global_heap_allocator, u_node->variants, 4); + bh_arr_new(parser->context->gp_alloc, u_node->variants, 4); expect_token(parser, '{'); while (!consume_token_if_next(parser, '}')) { @@ -2580,6 +2896,10 @@ static AstUnionType* parse_union(OnyxParser* parser) { variant->meta_tags = meta_tags; variant->token = expect_token(parser, Token_Type_Symbol); + if (consume_token_if_next(parser, Token_Type_Keyword_As)) { + variant->explicit_tag_value = parse_factor(parser); + } + expect_token(parser, ':'); variant->type_node = parse_type(parser); @@ -2607,7 +2927,7 @@ static AstInterface* parse_interface(OnyxParser* parser) { AstInterface *interface = make_node(AstInterface, Ast_Kind_Interface); interface->token = expect_token(parser, Token_Type_Keyword_Interface); - bh_arr_new(global_heap_allocator, interface->params, 2); + bh_arr_new(parser->context->gp_alloc, interface->params, 2); expect_token(parser, '('); while (!consume_token_if_next(parser, ')')) { @@ -2629,8 +2949,8 @@ static AstInterface* parse_interface(OnyxParser* parser) { return interface; } - bh_arr_new(global_heap_allocator, interface->exprs, 2); - bh_arr_new(global_heap_allocator, 
interface->sentinels, 2); + bh_arr_new(parser->context->gp_alloc, interface->exprs, 2); + bh_arr_new(parser->context->gp_alloc, interface->sentinels, 2); type_create_scope(parser, &interface->scope, interface->token); parser->current_scope = interface->scope; @@ -2724,7 +3044,7 @@ static AstConstraint* parse_constraint(OnyxParser* parser) { constraint->token = constraint->interface->token; - bh_arr_new(global_heap_allocator, constraint->args, 2); + bh_arr_new(parser->context->gp_alloc, constraint->args, 2); expect_token(parser, '('); while (!consume_token_if_next(parser, ')')) { @@ -2750,7 +3070,7 @@ static AstConstraint* parse_constraint(OnyxParser* parser) { } static void parse_constraints(OnyxParser* parser, ConstraintContext *out_constraints) { - bh_arr_new(global_heap_allocator, out_constraints->constraints, 2); + bh_arr_new(parser->context->gp_alloc, out_constraints->constraints, 2); expect_token(parser, Token_Type_Keyword_Where); @@ -2767,7 +3087,7 @@ static AstCaptureBlock *parse_capture_list(OnyxParser* parser, TokenType end_tok AstCaptureBlock *captures = make_node(AstCaptureBlock, Ast_Kind_Capture_Block); captures->token = parser->curr - 1; - bh_arr_new(global_heap_allocator, captures->captures, 2); + bh_arr_new(parser->context->gp_alloc, captures->captures, 2); while (!consume_token_if_next(parser, end_token)) { if (parser->hit_unexpected_token) break; @@ -2796,16 +3116,10 @@ static void parse_function_params(OnyxParser* parser, AstFunction* func) { assert(parser->polymorph_context.poly_params != NULL); bh_arr(AstParam) param_buffer=NULL; - bh_arr_new(global_heap_allocator, param_buffer, 2); + bh_arr_new(parser->context->gp_alloc, param_buffer, 2); OnyxToken* symbol; while (!consume_token_if_next(parser, ')')) { - if (consume_token_if_next(parser, '[') && !func->captures) { - func->captures = parse_capture_list(parser, ']'); - consume_token_if_next(parser, ','); - continue; - } - do { if (parser->hit_unexpected_token) return; @@ -2819,7 +3133,7 @@ 
static void parse_function_params(OnyxParser* parser, AstFunction* func) { symbol = expect_token(parser, Token_Type_Symbol); curr_param.vararg_kind = VA_Kind_Not_VA; - curr_param.local = make_local(parser->allocator, symbol, NULL); + curr_param.local = make_local(parser->context, symbol, NULL); curr_param.local->kind = Ast_Kind_Param; if (param_use) { @@ -2927,7 +3241,7 @@ static AstOverloadedFunction* parse_overloaded_function(OnyxParser* parser, Onyx // This could be checked elsewhere? if (locked && local) { - onyx_report_error(token->pos, Error_Critical, "Only one of '#locked' and '#local' can be use at a time."); + ONYX_ERROR(token->pos, Error_Critical, "Only one of '#locked' and '#local' can be use at a time."); } AstOverloadedFunction* ofunc = make_node(AstOverloadedFunction, Ast_Kind_Overloaded_Function); @@ -2936,7 +3250,7 @@ static AstOverloadedFunction* parse_overloaded_function(OnyxParser* parser, Onyx ofunc->locked = locked; ofunc->only_local_functions = local; - bh_arr_new(global_heap_allocator, ofunc->overloads, 4); + bh_arr_new(parser->context->gp_alloc, ofunc->overloads, 4); if (peek_token(0)->type == Token_Type_Right_Arrow) { expect_token(parser, Token_Type_Right_Arrow); @@ -2976,26 +3290,47 @@ static AstFunction* parse_function_definition(OnyxParser* parser, OnyxToken* tok flush_stored_tags(parser, &func_def->tags); - bh_arr_new(global_heap_allocator, func_def->params, 4); + bh_arr_new(parser->context->gp_alloc, func_def->params, 4); bh_arr(AstPolyParam) polymorphic_vars = NULL; - bh_arr_new(global_heap_allocator, polymorphic_vars, 4); - // defer bh_arr_free(polymorphic_vars); + bh_arr_new(parser->context->gp_alloc, polymorphic_vars, 4); + void *prev_poly_params = parser->polymorph_context.poly_params; parser->polymorph_context.poly_params = &polymorphic_vars; parse_function_params(parser, func_def); - parser->polymorph_context.poly_params = NULL; + parser->polymorph_context.poly_params = prev_poly_params; + + if (bh_arr_length(polymorphic_vars) > 
0) { + func_def->kind = Ast_Kind_Polymorphic_Proc; + func_def->poly_params = polymorphic_vars; - func_def->return_type = (AstType *) &basic_type_void; + } else { + bh_arr_free(polymorphic_vars); + } + + func_def->return_type = (AstType *) &parser->context->basic_types.type_void; char* name = NULL; if (bh_arr_length(parser->current_symbol_stack) > 0) { OnyxToken *current_symbol = bh_arr_last(parser->current_symbol_stack); - name = bh_aprintf(global_heap_allocator, "%b", current_symbol->text, current_symbol->length); + name = bh_aprintf(parser->context->gp_alloc, "%b", current_symbol->text, current_symbol->length); + } + + if (consume_token_if_next(parser, Token_Type_Keyword_Use)) { + expect_token(parser, '('); + func_def->captures = parse_capture_list(parser, ')'); + consume_token_if_next(parser, ','); + + if (bh_arr_length(parser->current_function_stack) > 1) { + AstFunction *parent_func = parser->current_function_stack[bh_arr_length(parser->current_function_stack) - 2]; + if (parent_func->kind == Ast_Kind_Polymorphic_Proc) { + func_def->flags |= Ast_Flag_Function_Is_Lambda_Inside_PolyProc; + } + } } if (consume_token_if_next(parser, Token_Type_Fat_Right_Arrow)) { - func_def->return_type = (AstType *) &basic_type_auto_return; + func_def->return_type = (AstType *) &parser->context->basic_types.type_auto_return; if (parser->curr->type == '{') { func_def->body = parse_block(parser, 1, name); @@ -3020,9 +3355,13 @@ static AstFunction* parse_function_definition(OnyxParser* parser, OnyxToken* tok if (consume_token_if_next(parser, Token_Type_Right_Arrow)) { if (parse_possible_directive(parser, "auto")) { - func_def->return_type = (AstType *) &basic_type_auto_return; + func_def->return_type = (AstType *) &parser->context->basic_types.type_auto_return; } else { - func_def->return_type = parse_type(parser); + func_def->return_type = parse_return_type(parser, &func_def->named_return_locals); + + if (value_is_placeholder((AstTyped *) func_def->return_type)) { + 
func_def->return_type = (AstType *) &parser->context->basic_types.type_auto_return; + } } } @@ -3059,7 +3398,7 @@ static AstFunction* parse_function_definition(OnyxParser* parser, OnyxToken* tok OnyxToken* directive_token = expect_token(parser, '#'); OnyxToken* symbol_token = expect_token(parser, Token_Type_Symbol); - onyx_report_error(directive_token->pos, Error_Critical, "unknown directive '#%b'.", symbol_token->text, symbol_token->length); + ONYX_ERROR(directive_token->pos, Error_Critical, "unknown directive '#%b'.", symbol_token->text, symbol_token->length); } } @@ -3069,59 +3408,31 @@ static AstFunction* parse_function_definition(OnyxParser* parser, OnyxToken* tok func_def->closing_brace = parser->curr - 1; function_defined: - if (bh_arr_length(polymorphic_vars) > 0) { - func_def->kind = Ast_Kind_Polymorphic_Proc; - func_def->poly_params = polymorphic_vars; - - } else { - bh_arr_free(polymorphic_vars); - } - bh_arr_pop(parser->current_function_stack); return func_def; } static b32 parse_possible_function_definition_no_consume(OnyxParser* parser) { - if (parser->curr->type == '(') { - OnyxToken* matching_paren = find_matching_paren(parser->curr); - if (matching_paren == NULL) return 0; - - if (next_tokens_are(parser, 3, '(', ')', Token_Type_Fat_Right_Arrow)) return 0; - - // :LinearTokenDependent - OnyxToken* token_after_paren = matching_paren + 1; + if (parser->curr->type != '(') return 0; - // Allow for: - // foo :: () - // -> i32 {} - // - // bar :: () - // { } - if (token_after_paren->type == Token_Type_Inserted_Semicolon) - token_after_paren += 1; - - if (token_after_paren->type != Token_Type_Right_Arrow - && token_after_paren->type != '{' - && token_after_paren->type != Token_Type_Keyword_Do - && token_after_paren->type != Token_Type_Empty_Block - && token_after_paren->type != Token_Type_Keyword_Where - && token_after_paren->type != Token_Type_Fat_Right_Arrow) - return 0; + if (peek_token(1)->type == ')') { + return 1; + } - // :LinearTokenDependent - b32 
is_params = (parser->curr + 1) == matching_paren; - OnyxToken* tmp_token = parser->curr; - while (!is_params && tmp_token < matching_paren) { - if (tmp_token->type == ':') is_params = 1; + int offset = 1; - tmp_token++; - } +keep_going: + if (peek_token(offset)->type == Token_Type_Keyword_Use) offset += 1; + if (peek_token(offset)->type == '$') offset += 1; + if (peek_token(offset)->type == Token_Type_Symbol) { + offset += 1; + if (peek_token(offset)->type == ',') { + offset += 1; + goto keep_going; - if (peek_token(1)->type == '[' && (matching_paren - 1)->type == ']') { - is_params = 1; + } else if (peek_token(offset)->type == ':') { + return 1; } - - return is_params; } return 0; @@ -3155,22 +3466,33 @@ static b32 parse_possible_quick_function_definition_no_consume(OnyxParser* parse if (parser->curr->type != '(') return 0; - OnyxToken* matching_paren = find_matching_paren(parser->curr); - if (matching_paren == NULL) return 0; + i32 offset = 1; + while (peek_token(offset)->type != ')') { + if (peek_token(offset)->type != Token_Type_Symbol) return 0; + offset += 1; + + if (peek_token(offset)->type == ')') break; + + if (peek_token(offset)->type != ',') return 0; + offset += 1; + } // :LinearTokenDependent - OnyxToken* token_after_paren = matching_paren + 1; - if (token_after_paren->type != Token_Type_Fat_Right_Arrow) - return 0; + OnyxToken* token_after_paren = peek_token(offset + 1); + if (token_after_paren->type == Token_Type_Fat_Right_Arrow) + return 1; - return 1; + if (token_after_paren->type == Token_Type_Keyword_Use) + return 1; + + return 0; } static b32 parse_possible_quick_function_definition(OnyxParser* parser, AstTyped** ret) { if (!parse_possible_quick_function_definition_no_consume(parser)) return 0; bh_arr(QuickParam) params=NULL; - bh_arr_new(global_heap_allocator, params, 4); + bh_arr_new(parser->context->gp_alloc, params, 4); OnyxToken* proc_token; AstCaptureBlock *captures = NULL; @@ -3186,33 +3508,40 @@ static b32 
parse_possible_quick_function_definition(OnyxParser* parser, AstTyped while (!consume_token_if_next(parser, ')')) { if (parser->hit_unexpected_token) return 0; - if (consume_token_if_next(parser, '[') && !captures) { - captures = parse_capture_list(parser, ']'); + QuickParam param = { 0 }; + if (consume_token_if_next(parser, '$')) param.is_baked = 1; + param.token = expect_token(parser, Token_Type_Symbol); - } else { - QuickParam param = { 0 }; - if (consume_token_if_next(parser, '$')) param.is_baked = 1; - param.token = expect_token(parser, Token_Type_Symbol); - - bh_arr_push(params, param); - } + bh_arr_push(params, param); if (parser->curr->type != ')') { expect_token(parser, ','); } } + + if (consume_token_if_next(parser, Token_Type_Keyword_Use)) { + expect_token(parser, '('); + captures = parse_capture_list(parser, ')'); + } } expect_token(parser, Token_Type_Fat_Right_Arrow); bh_arr(AstNode *) poly_params=NULL; - bh_arr_new(global_heap_allocator, poly_params, bh_arr_length(params)); + bh_arr_new(parser->context->gp_alloc, poly_params, bh_arr_length(params)); bh_arr_each(QuickParam, param, params) { char text[512]; memset(text, 0, 512); strncat(text, "__type_", 511); + token_toggle_end(param->token); - strncat(text, param->token->text, 511); + if (!strcmp(param->token->text, "_")) { + int index = param - params; + int len = strnlen(text, 511); + snprintf(text + len, 511 - len, "%d", index); + } else { + strncat(text, param->token->text, 511); + } token_toggle_end(param->token); OnyxToken* new_token = bh_alloc(parser->allocator, sizeof(OnyxToken)); @@ -3221,16 +3550,16 @@ static b32 parse_possible_quick_function_definition(OnyxParser* parser, AstTyped new_token->text = bh_strdup(parser->allocator, text); new_token->pos = param->token->pos; - AstNode* type_node = make_symbol(parser->allocator, new_token); + AstNode* type_node = make_symbol(parser->context, new_token); type_node->flags |= Ast_Flag_Symbol_Is_PolyVar; bh_arr_push(poly_params, type_node); } 
AstFunction* poly_proc = make_node(AstFunction, Ast_Kind_Polymorphic_Proc); - bh_arr_new(global_heap_allocator, poly_proc->params, bh_arr_length(params)); + bh_arr_new(parser->context->gp_alloc, poly_proc->params, bh_arr_length(params)); fori (i, 0, bh_arr_length(params)) { - AstLocal* param_local = make_local(parser->allocator, params[i].token, (AstType *) poly_params[i]); + AstLocal* param_local = make_local(parser->context, params[i].token, (AstType *) poly_params[i]); param_local->kind = Ast_Kind_Param; bh_arr_push(poly_proc->params, ((AstParam) { @@ -3250,16 +3579,16 @@ static b32 parse_possible_quick_function_definition(OnyxParser* parser, AstTyped char* name = NULL; if (bh_arr_length(parser->current_symbol_stack) > 0) { OnyxToken *current_symbol = bh_arr_last(parser->current_symbol_stack); - name = bh_aprintf(global_heap_allocator, "%b", current_symbol->text, current_symbol->length); + name = bh_aprintf(parser->context->gp_alloc, "%b", current_symbol->text, current_symbol->length); } body_block = parse_block(parser, 1, name); - return_type = (AstType *) &basic_type_auto_return; + return_type = (AstType *) &parser->context->basic_types.type_auto_return; } else { AstTyped* body = parse_expression(parser, 0); if (body == NULL) { - onyx_report_error(parser->curr->pos, Error_Critical, "Expected an expression here."); + ONYX_ERROR(parser->curr->pos, Error_Critical, "Expected an expression here."); parser->hit_unexpected_token = 1; return 0; } @@ -3283,7 +3612,7 @@ static b32 parse_possible_quick_function_definition(OnyxParser* parser, AstTyped poly_proc->return_type = (AstType *) return_type; poly_proc->captures = captures; - bh_arr_new(global_heap_allocator, poly_proc->poly_params, bh_arr_length(params)); + bh_arr_new(parser->context->gp_alloc, poly_proc->poly_params, bh_arr_length(params)); fori (i, 0, bh_arr_length(params)) { bh_arr_push(poly_proc->poly_params, ((AstPolyParam) { .kind = PPK_Poly_Type, @@ -3325,7 +3654,7 @@ static AstEnumType* 
parse_enum_declaration(OnyxParser* parser) { AstEnumType* enum_node = make_node(AstEnumType, Ast_Kind_Enum_Type); enum_node->token = expect_token(parser, Token_Type_Keyword_Enum); - bh_arr_new(global_heap_allocator, enum_node->values, 4); + bh_arr_new(parser->context->gp_alloc, enum_node->values, 4); while (parser->curr->type == '#') { if (parser->hit_unexpected_token) return enum_node; @@ -3336,11 +3665,11 @@ static AstEnumType* parse_enum_declaration(OnyxParser* parser) { OnyxToken* directive_token = expect_token(parser, '#'); OnyxToken* symbol_token = expect_token(parser, Token_Type_Symbol); - onyx_report_error(directive_token->pos, Error_Critical, "unknown directive '#%b'.", symbol_token->text, symbol_token->length); + ONYX_ERROR(directive_token->pos, Error_Critical, "unknown directive '#%b'.", symbol_token->text, symbol_token->length); } } - AstType* backing = (AstType *) &basic_type_u32; + AstType* backing = (AstType *) &parser->context->basic_types.type_u32; if (consume_token_if_next(parser, '(')) { AstTyped* backing_sym = make_node(AstTyped, Ast_Kind_Symbol); backing_sym->token = expect_token(parser, Token_Type_Symbol); @@ -3354,6 +3683,10 @@ static AstEnumType* parse_enum_declaration(OnyxParser* parser) { while (!consume_token_if_next(parser, '}')) { if (parser->hit_unexpected_token) return enum_node; + if (parser->curr->type == Token_Type_Doc_Comment) { + consume_token(parser); + continue; + } AstEnumValue* evalue = make_node(AstEnumValue, Ast_Kind_Enum_Value); evalue->token = expect_token(parser, Token_Type_Symbol); @@ -3382,7 +3715,7 @@ static AstIf* parse_static_if_stmt(OnyxParser* parser, b32 parse_block_as_statem static_if_node->cond = parse_expression(parser, 0); - bh_arr_new(global_heap_allocator, static_if_node->true_entities, 2); + bh_arr_new(parser->context->gp_alloc, static_if_node->true_entities, 2); bh_arr_push(parser->alternate_entity_placement_stack, &static_if_node->true_entities); if (parse_block_as_statements) { @@ -3401,7 +3734,7 @@ 
static AstIf* parse_static_if_stmt(OnyxParser* parser, b32 parse_block_as_statem bh_arr_pop(parser->alternate_entity_placement_stack); if (consume_token_if_next(parser, Token_Type_Keyword_Else)) { - bh_arr_new(global_heap_allocator, static_if_node->false_entities, 2); + bh_arr_new(parser->context->gp_alloc, static_if_node->false_entities, 2); bh_arr_push(parser->alternate_entity_placement_stack, &static_if_node->false_entities); if (parse_block_as_statements) { @@ -3458,7 +3791,7 @@ static AstMacro* parse_macro(OnyxParser* parser) { return macro; } - onyx_report_error(parser->curr->pos, Error_Critical, "'macro' expects to be followed by a producure definition."); + ONYX_ERROR(parser->curr->pos, Error_Critical, "'macro' expects to be followed by a producure definition."); return NULL; } @@ -3469,7 +3802,7 @@ static AstDirectiveInit* parse_init_directive(OnyxParser *parser, OnyxToken *tok parser->parse_calls = 0; while (parse_possible_directive(parser, "after")) { if (parser->hit_unexpected_token) return init; - if (init->dependencies == NULL) bh_arr_new(global_heap_allocator, init->dependencies, 2); + if (init->dependencies == NULL) bh_arr_new(parser->context->gp_alloc, init->dependencies, 2); AstTyped *dependency = parse_expression(parser, 0); bh_arr_push(init->dependencies, (AstDirectiveInit *) dependency); @@ -3495,9 +3828,9 @@ static AstForeignBlock* parse_foreign_block(OnyxParser* parser, OnyxToken *token // // This has a fun implication that there cannot be foreign blocks in the builtin // or type_info packages, as those are loaded before foreign_block_type has a value. 
- fb->type_node = foreign_block_type; + fb->type_node = parser->context->builtins.foreign_block_type; - bh_arr_new(global_heap_allocator, fb->captured_entities, 4); + bh_arr_new(parser->context->gp_alloc, fb->captured_entities, 4); bh_arr_push(parser->alternate_entity_placement_stack, &fb->captured_entities); expect_token(parser, '{'); @@ -3510,6 +3843,31 @@ static AstForeignBlock* parse_foreign_block(OnyxParser* parser, OnyxToken *token return fb; } +static AstCompilerExtension* parse_compiler_extension(OnyxParser* parser, OnyxToken *token) { + AstCompilerExtension *ext = make_node(AstCompilerExtension, Ast_Kind_Compiler_Extension); + ext->token = token; + + ext->name = expect_token(parser, Token_Type_Literal_String); + + bh_arr_new(parser->context->gp_alloc, ext->proc_macros, 2); + expect_token(parser, '{'); + while (!consume_token_if_next(parser, '}')) { + if (parser->hit_unexpected_token) break; + + AstProceduralMacro *pmacro = make_node(AstProceduralMacro, Ast_Kind_Procedural_Macro); + pmacro->token = expect_token(parser, Token_Type_Symbol); + pmacro->extension = ext; + + bh_arr_push(ext->proc_macros, pmacro); + + if (parser->curr->type != '}') + expect_token(parser, ','); + } + + ENTITY_SUBMIT(ext); + return ext; +} + static AstTyped* parse_top_level_expression(OnyxParser* parser) { if (parser->curr->type == Token_Type_Keyword_Global) return parse_global_declaration(parser); if (parser->curr->type == Token_Type_Keyword_Struct) return (AstTyped *) parse_struct(parser); @@ -3543,6 +3901,7 @@ static AstTyped* parse_top_level_expression(OnyxParser* parser) { AstDistinctType *distinct = make_node(AstDistinctType, Ast_Kind_Distinct_Type); distinct->token = parser->curr - 2; distinct->base_type = parse_type(parser); + type_create_scope(parser, &distinct->scope, distinct->token); return (AstTyped *) distinct; } @@ -3550,33 +3909,13 @@ static AstTyped* parse_top_level_expression(OnyxParser* parser) { AstForeignBlock *foreign = parse_foreign_block(parser, parser->curr 
- 2); return (AstTyped *) foreign; } - } - return parse_expression(parser, 1); -} - -static char* generate_name_within_scope(OnyxParser* parser, OnyxToken* symbol) { - char name[512]; - memset(name, 0, 512); - - bh_arr(char *) names=NULL; - bh_arr_new(global_heap_allocator, names, 4); - - Scope* scope = parser->current_scope; - while (scope != NULL) { - bh_arr_push(names, scope->name); - scope = scope->parent; - } - - bh_arr_each(char *, n, names) { - if (*n == NULL) continue; - - strncat(name, *n, 511); - strncat(name, ".", 511); + if (parse_possible_directive(parser, "compiler_extension") || parse_possible_directive(parser, "extension")) { + return (AstTyped *) parse_compiler_extension(parser, parser->curr - 2); + } } - bh_arr_free(names); - return bh_aprintf(global_heap_allocator, "%s%b", name, symbol->text, symbol->length); + return parse_expression(parser, 1); } static AstBinding* parse_top_level_binding(OnyxParser* parser, OnyxToken* symbol) { @@ -3594,7 +3933,8 @@ static AstBinding* parse_top_level_binding(OnyxParser* parser, OnyxToken* symbol if (func->intrinsic_name == NULL) func->intrinsic_name = symbol; - func->name = generate_name_within_scope(parser, symbol); + func->name = generate_name_within_scope(parser->context, parser->current_scope, symbol); + func->assembly_name = func->name; func->flags &= ~Ast_Flag_Function_Is_Lambda; break; } @@ -3603,13 +3943,13 @@ static AstBinding* parse_top_level_binding(OnyxParser* parser, OnyxToken* symbol AstMacro* macro = (AstMacro *) node; AstFunction* func = (AstFunction *) macro->body; - func->name = generate_name_within_scope(parser, symbol); + func->name = generate_name_within_scope(parser->context, parser->current_scope, symbol); break; } case Ast_Kind_Directive_Init: break; - case Ast_Kind_Global: ((AstGlobal *) node)->name = generate_name_within_scope(parser, symbol); + case Ast_Kind_Global: ((AstGlobal *) node)->name = generate_name_within_scope(parser->context, parser->current_scope, symbol); case 
Ast_Kind_Overloaded_Function: case Ast_Kind_StrLit: @@ -3625,7 +3965,7 @@ static AstBinding* parse_top_level_binding(OnyxParser* parser, OnyxToken* symbol case Ast_Kind_Distinct_Type: case Ast_Kind_Union_Type: case Ast_Kind_Poly_Union_Type: - ((AstStructType *) node)->name = generate_name_within_scope(parser, symbol); + ((AstStructType *) node)->name = generate_name_within_scope(parser->context, parser->current_scope, symbol); goto default_case; case Ast_Kind_Type_Alias: @@ -3656,8 +3996,85 @@ static AstBinding* parse_top_level_binding(OnyxParser* parser, OnyxToken* symbol return binding; } + +static void parse_implicit_injection(OnyxParser* parser) { + if (parser->injection_point) { + ONYX_ERROR(parser->curr->pos, Error_Critical, "Implicit injection is not allowed here."); + parser->hit_unexpected_token = 1; + return; + } + + AstFieldAccess *injection_expression = (AstFieldAccess *) parse_type(parser); + + if (peek_token(0)->type == Token_Type_Proc_Macro_Body) { + AstProceduralExpansion *proc_expand = make_node(AstProceduralExpansion, Ast_Kind_Procedural_Expansion); + proc_expand->token = injection_expression->token; + proc_expand->expansion_body = expect_token(parser, Token_Type_Proc_Macro_Body); + proc_expand->proc_macro = (AstTyped *) injection_expression; + + ENTITY_SUBMIT(proc_expand); + return; + } + + // Experimental syntax for overload adding. + // + // overload :: #match {} + // overload <- (...) { ... 
} + // + // if (peek_token(0)->type == Token_Type_Left_Arrow) { + // AstDirectiveAddOverload *add_overload = make_node(AstDirectiveAddOverload, Ast_Kind_Directive_Add_Overload); + // add_overload->overloaded_function = (AstNode *) injection_expression; + // add_overload->token = expect_token(parser, Token_Type_Left_Arrow); + // add_overload->order = parser->overload_count++; + // add_overload->overload = parse_expression(parser, 0); + + // if (add_overload->overload) { + // add_overload->overload->flags &= ~Ast_Flag_Function_Is_Lambda; + // } + + // ENTITY_SUBMIT(add_overload); + // return; + // } + + if (injection_expression->kind != Ast_Kind_Field_Access) { + ONYX_ERROR(parser->curr->pos, Error_Critical, "Expected binding target to end in something like '.xyz'."); + parser->hit_unexpected_token = 1; + return; + } + + AstInjection *inject = make_node(AstInjection, Ast_Kind_Injection); + inject->token = injection_expression->token; + inject->full_loc = (AstTyped *) injection_expression; + + AstTyped *target = injection_expression->expr; + parser->injection_point = target; + + if (next_tokens_are(parser, 2, ':', ':')) { + consume_token(parser); + inject->binding = parse_top_level_binding(parser, inject->token); + if (inject->binding) { + flush_doc_tokens(parser, &inject->binding->documentation_string, &inject->binding->documentation_token_old); + } + + } else { + AstMemRes* memres = parse_memory_reservation(parser, inject->token, 0); + + inject->binding = make_node(AstBinding, Ast_Kind_Binding); + inject->binding->token = inject->token; + inject->binding->node = (AstNode *) memres; + } + + ENTITY_SUBMIT(inject); + + parser->injection_point = NULL; + return; +} + + static void parse_top_level_statement(OnyxParser* parser) { AstFlags private_kind = 0; + + retry_because_inserted_semicolon: if (bh_arr_length(parser->scope_flags) > 0) private_kind = bh_arr_last(parser->scope_flags); @@ -3702,6 +4119,14 @@ static void parse_top_level_statement(OnyxParser* parser) { } case 
Token_Type_Symbol: { + // Handle implicit injections as 'Foo.bar ::' or 'Foo(T).bar ::' + if ( peek_token(1)->type == '.' + || peek_token(1)->type == '(' + || peek_token(1)->type == Token_Type_Left_Arrow) { + parse_implicit_injection(parser); + return; + } + OnyxToken* symbol = expect_token(parser, Token_Type_Symbol); if (next_tokens_are(parser, 2, ':', ':')) { @@ -3711,6 +4136,7 @@ static void parse_top_level_statement(OnyxParser* parser) { binding = parse_top_level_binding(parser, symbol); bh_arr_pop(parser->current_symbol_stack); + // bh_printf("%b: %d\n", symbol->text, symbol->length, private_kind); if (binding != NULL) binding->flags |= private_kind; goto submit_binding_to_entities; @@ -3736,6 +4162,14 @@ static void parse_top_level_statement(OnyxParser* parser) { ENTITY_SUBMIT(retval); return; } + + ONYX_ERROR(parser->curr->pos, Error_Critical, "Unexpected '(' at top-level."); + parser->hit_unexpected_token = 1; + break; + } + + case Token_Type_Doc_Comment: { + bh_arr_push(parser->documentation_tokens, expect_token(parser, Token_Type_Doc_Comment)); break; } @@ -3840,6 +4274,10 @@ static void parse_top_level_statement(OnyxParser* parser) { operator->order = parser->overload_count++; } + if (next_tokens_are(parser, 2, ':', ':')) { + consume_tokens(parser, 2); + } + operator->overload = parse_expression(parser, 0); ENTITY_SUBMIT(operator); @@ -3887,7 +4325,7 @@ static void parse_top_level_statement(OnyxParser* parser) { consume_token_if_next(parser, Token_Type_Inserted_Semicolon); if (peek_token(0)->type == '{') { if (parser->injection_point) { - onyx_report_error(dir_token->pos, Error_Critical, "#inject blocks cannot be nested."); + ONYX_ERROR(dir_token->pos, Error_Critical, "#inject blocks cannot be nested."); return; } @@ -3903,16 +4341,15 @@ static void parse_top_level_statement(OnyxParser* parser) { // See comment above if (next_tokens_are(parser, 2, ':', ':')) { - consume_tokens(parser, 2); + consume_token(parser); } AstInjection *inject = 
make_node(AstInjection, Ast_Kind_Injection); inject->token = dir_token; inject->full_loc = (AstTyped *) injection_point; - inject->to_inject = parse_top_level_expression(parser); - if (parser->last_documentation_token) { - inject->documentation = parser->last_documentation_token; - parser->last_documentation_token = NULL; + inject->binding = parse_top_level_binding(parser, injection_point->token); + if (inject->binding) { + flush_doc_tokens(parser, &inject->binding->documentation_string, &inject->binding->documentation_token_old); } ENTITY_SUBMIT(inject); @@ -3955,6 +4392,24 @@ static void parse_top_level_statement(OnyxParser* parser) { ENTITY_SUBMIT(library); return; } + else if (parse_possible_directive(parser, "js")) { + AstJsNode *jsNode = make_node(AstJsNode, Ast_Kind_Js_Code); + jsNode->token = parser->curr - 2; + jsNode->order = 0xffffffff; + + if (parse_possible_directive(parser, "order")) { + jsNode->order_expr = parse_expression(parser, 0); + } + + if (parse_possible_directive(parser, "file")) { + jsNode->filepath = parse_expression(parser, 0); + } else { + jsNode->code = parse_expression(parser, 0); + } + + ENTITY_SUBMIT(jsNode); + return; + } else if (parse_possible_directive(parser, "doc")) { // This is a future feature I want to add to the language, proper docstrings. 
// For now (and so I start documenting thing...), #doc can be used anywhere @@ -3962,15 +4417,29 @@ static void parse_top_level_statement(OnyxParser* parser) { parser->last_documentation_token = expect_token(parser, Token_Type_Literal_String); return; } + else if (parse_possible_directive(parser, "wasm_section")) { + AstDirectiveWasmSection *section = make_node(AstDirectiveWasmSection, Ast_Kind_Directive_Wasm_Section); + section->token = parser->curr - 2; + section->section_name = parse_expression(parser, 0); + + if (parse_possible_directive(parser, "file")) { + section->from_file = 1; + } + + section->section_contents = parse_expression(parser, 0); + + ENTITY_SUBMIT(section); + return; + } else { OnyxToken* directive_token = expect_token(parser, '#'); OnyxToken* symbol_token = parser->curr; consume_token(parser); - onyx_report_error(directive_token->pos, Error_Critical, "Unknown directive '#%b'.", symbol_token->text, symbol_token->length); + ONYX_ERROR(directive_token->pos, Error_Critical, "Unknown directive '#%b'.", symbol_token->text, symbol_token->length); if (symbol_token->type > Token_Type_Keyword_Start && symbol_token->type < Token_Type_Keyword_End) { - onyx_report_error(directive_token->pos, Error_Critical, "Did you mean the keyword, '%s'?", + ONYX_ERROR(directive_token->pos, Error_Critical, "Did you mean the keyword, '%s'?", token_name(symbol_token)); } @@ -3981,11 +4450,14 @@ static void parse_top_level_statement(OnyxParser* parser) { } case ';': - case Token_Type_Inserted_Semicolon: break; + case Token_Type_Inserted_Semicolon: + consume_token(parser); + goto retry_because_inserted_semicolon; + default: - onyx_report_error(parser->curr->pos, Error_Critical, "Unexpected token in top-level statement, '%s'", token_name(parser->curr)); + ONYX_ERROR(parser->curr->pos, Error_Critical, "Unexpected token in top-level statement, '%s'", token_name(parser->curr)); parser->hit_unexpected_token = 1; break; } @@ -3996,10 +4468,7 @@ static void 
parse_top_level_statement(OnyxParser* parser) { { if (!binding) return; - if (parser->last_documentation_token) { - binding->documentation = parser->last_documentation_token; - parser->last_documentation_token = NULL; - } + flush_doc_tokens(parser, &binding->documentation_string, &binding->documentation_token_old); // // If this binding is inside an #inject block, @@ -4011,8 +4480,7 @@ static void parse_top_level_statement(OnyxParser* parser) { injection->token = parser->injection_point->token; injection->dest = parser->injection_point; injection->symbol = binding->token; - injection->to_inject = (AstTyped *) binding->node; - injection->documentation = binding->documentation; + injection->binding = binding; ENTITY_SUBMIT(injection); return; @@ -4032,7 +4500,7 @@ static void parse_top_level_statement(OnyxParser* parser) { } static b32 parse_package_name(OnyxParser *parser, AstPackage *package) { - bh_arr_new(global_heap_allocator, package->path, 2); + bh_arr_new(parser->context->gp_alloc, package->path, 2); while (parser->curr->type == Token_Type_Symbol) { if (parser->hit_unexpected_token) return 0; @@ -4048,7 +4516,7 @@ static b32 parse_package_name(OnyxParser *parser, AstPackage *package) { total_package_name_length += (*token)->length + 1; } - char* package_name = bh_alloc_array(context.ast_alloc, char, total_package_name_length); + char* package_name = bh_alloc_array(parser->context->ast_alloc, char, total_package_name_length); *package_name = '\0'; bh_arr_each(OnyxToken *, token, package->path) { @@ -4068,7 +4536,7 @@ static b32 parse_package_name(OnyxParser *parser, AstPackage *package) { static AstPackage* parse_package_expression(OnyxParser* parser) { AstPackage* package_node = make_node(AstPackage, Ast_Kind_Package); package_node->flags |= Ast_Flag_Comptime; - package_node->type_node = builtin_package_id_type; + package_node->type_node = parser->context->builtins.package_id_type; package_node->token = expect_token(parser, Token_Type_Keyword_Package); if 
(!parse_package_name(parser, package_node)) return NULL; @@ -4079,7 +4547,7 @@ static AstPackage* parse_package_expression(OnyxParser* parser) { static void parse_import_statement(OnyxParser* parser, OnyxToken *token) { AstPackage* package_node = make_node(AstPackage, Ast_Kind_Package); package_node->flags |= Ast_Flag_Comptime; - package_node->type_node = builtin_package_id_type; + package_node->type_node = parser->context->builtins.package_id_type; package_node->token = token; if (peek_token(0)->type == Token_Type_Keyword_Package) { @@ -4119,7 +4587,7 @@ static void parse_import_statement(OnyxParser* parser, OnyxToken *token) { goto import_parsed; } - bh_arr_new(global_heap_allocator, import_node->only, 4); + bh_arr_new(parser->context->gp_alloc, import_node->only, 4); while (!consume_token_if_next(parser, '}')) { if (parser->hit_unexpected_token) return; @@ -4145,7 +4613,7 @@ static void parse_import_statement(OnyxParser* parser, OnyxToken *token) { static Package* parse_file_package(OnyxParser* parser) { if (parser->curr->type != Token_Type_Keyword_Package) { - return package_lookup_or_create("main", context.global_scope, parser->allocator, parser->curr->pos); + return package_lookup_or_create(parser->context, "main", parser->context->global_scope, parser->curr->pos); } AstPackage* package_node = parse_package_expression(parser); @@ -4159,19 +4627,19 @@ static Package* parse_file_package(OnyxParser* parser) { token_toggle_end(*symbol); strncat(aggregate_name, (*symbol)->text, 2047); - Package* newpackage = package_lookup_or_create(aggregate_name, context.global_scope, parser->allocator, package_node->token->pos); + Package* newpackage = package_lookup_or_create(parser->context, aggregate_name, parser->context->global_scope, package_node->token->pos); newpackage->parent_id = prevpackage ? 
prevpackage->id : 0xffffffff; AstPackage* pnode = make_node(AstPackage, Ast_Kind_Package); pnode->token = *symbol; pnode->package = newpackage; pnode->package_name = newpackage->name; - pnode->type_node = builtin_package_id_type; + pnode->type_node = parser->context->builtins.package_id_type; pnode->flags |= Ast_Flag_Comptime; if (prevpackage != NULL) { - symbol_subpackage_introduce(prevpackage, (*symbol)->text, pnode); - package_reinsert_use_packages(prevpackage); + symbol_subpackage_introduce(parser->context, prevpackage, (*symbol)->text, pnode); + package_reinsert_use_packages(parser->context, prevpackage); } token_toggle_end(*symbol); @@ -4188,7 +4656,7 @@ static Package* parse_file_package(OnyxParser* parser) { static void parse_top_level_statements_until(OnyxParser* parser, TokenType tt) { while (parser->curr->type != tt) { if (parser->hit_unexpected_token) break; - if (onyx_has_errors()) break; + if (onyx_has_errors(parser->context)) break; parse_top_level_statement(parser); consume_token_if_next(parser, ';'); } @@ -4197,18 +4665,19 @@ static void parse_top_level_statements_until(OnyxParser* parser, TokenType tt) { // NOTE: This returns a void* so I don't need to cast it everytime I use it void* onyx_ast_node_new(bh_allocator alloc, i32 size, AstKind kind) { - void* node = bh_alloc(alloc, size); + AstNode* node = bh_alloc(alloc, size); memset(node, 0, size); - *(AstKind *) node = kind; + node->kind = kind; return node; } -OnyxParser onyx_parser_create(bh_allocator alloc, OnyxTokenizer *tokenizer) { +OnyxParser onyx_parser_create(Context *context, OnyxTokenizer *tokenizer) { OnyxParser parser; - parser.allocator = alloc; + parser.allocator = context->ast_alloc; + parser.context = context; parser.tokenizer = tokenizer; parser.curr = tokenizer->tokens; parser.prev = NULL; @@ -4226,17 +4695,19 @@ OnyxParser onyx_parser_create(bh_allocator alloc, OnyxTokenizer *tokenizer) { parser.injection_point = NULL; parser.last_documentation_token = NULL; 
parser.allow_package_expressions = 0; + parser.documentation_tokens = NULL; parser.polymorph_context = (PolymorphicContext) { .root_node = NULL, .poly_params = NULL, }; - bh_arr_new(global_heap_allocator, parser.alternate_entity_placement_stack, 4); - bh_arr_new(global_heap_allocator, parser.current_symbol_stack, 4); - bh_arr_new(global_heap_allocator, parser.scope_flags, 4); - bh_arr_new(global_heap_allocator, parser.stored_tags, 4); - bh_arr_new(global_heap_allocator, parser.current_function_stack, 4); + bh_arr_new(context->gp_alloc, parser.alternate_entity_placement_stack, 4); + bh_arr_new(context->gp_alloc, parser.current_symbol_stack, 4); + bh_arr_new(context->gp_alloc, parser.scope_flags, 4); + bh_arr_new(context->gp_alloc, parser.stored_tags, 4); + bh_arr_new(context->gp_alloc, parser.current_function_stack, 4); + bh_arr_new(context->gp_alloc, parser.documentation_tokens, 8); return parser; } @@ -4247,22 +4718,61 @@ void onyx_parser_free(OnyxParser* parser) { bh_arr_free(parser->scope_flags); bh_arr_free(parser->stored_tags); bh_arr_free(parser->current_function_stack); + bh_arr_free(parser->documentation_tokens); +} + +AstTyped *onyx_parse_expression(OnyxParser *parser, Scope *scope) { + parser->current_scope = scope; + AstTyped *expr = parse_expression(parser, 0); + + return expr; +} + +AstNode *onyx_parse_statement(OnyxParser *parser, Scope *scope) { + parser->current_scope = scope; + AstNode *stmt = parse_statements_until(parser, Token_Type_End_Stream); + + return stmt; +} + +void onyx_parse_top_level_statements(OnyxParser *parser, Scope *scope) { + if (peek_token(0)->type == Token_Type_Keyword_Package) { + parser->package = parse_file_package(parser); + assert(parser->package); + } + + parser->current_scope = scope; + parse_top_level_statements_until(parser, Token_Type_End_Stream); } void onyx_parse(OnyxParser *parser) { // NOTE: Skip comments at the beginning of the file while (consume_token_if_next(parser, Token_Type_Comment)); + while 
(parser->curr->type == Token_Type_Doc_Comment) { + bh_arr_push(parser->documentation_tokens, expect_token(parser, Token_Type_Doc_Comment)); + } + parser->package = parse_file_package(parser); - parser->file_scope = scope_create(parser->allocator, parser->package->private_scope, parser->tokenizer->tokens[0].pos); + assert(parser->package); + + { + const char *doc_string = NULL; + flush_doc_tokens(parser, &doc_string, NULL); + if (doc_string && strlen(doc_string) > 0) { + bh_arr_push(parser->package->doc_strings, doc_string); + } + } + + parser->file_scope = scope_create(parser->context, parser->package->private_scope, parser->tokenizer->tokens[0].pos); parser->current_scope = parser->file_scope; consume_token_if_next(parser, ';'); if (parse_possible_directive(parser, "allow_stale_code") && !parser->package->is_included_somewhere - && !context.options->no_stale_code) { - bh_arr_new(global_heap_allocator, parser->package->buffered_entities, 32); + && !parser->context->options->no_stale_code) { + bh_arr_new(parser->context->gp_alloc, parser->package->buffered_entities, 32); bh_arr_push(parser->alternate_entity_placement_stack, &parser->package->buffered_entities); } @@ -4272,7 +4782,7 @@ void onyx_parse(OnyxParser *parser) { OnyxToken *doc_string = expect_token(parser, Token_Type_Literal_String); consume_token_if_next(parser, ';'); - bh_arr_push(parser->package->doc_strings, doc_string); + bh_arr_push(parser->package->doc_string_tokens, doc_string); } parse_top_level_statements_until(parser, Token_Type_End_Stream); diff --git a/compiler/src/polymorph.h b/compiler/src/polymorph.h index 953a15d9e..de47331dd 100644 --- a/compiler/src/polymorph.h +++ b/compiler/src/polymorph.h @@ -3,37 +3,20 @@ // Polymorphic Procedures // -// This flag is used by some of the procedures that try working with polymorphic things, -// but need to wait until more information is known. 
Instead of passing a out parameter -// into each of these procedures, a single global variable is used instead. If the type -// checker ever gets multi-threaded, this would have to become a threadlocal variable. -static b32 flag_to_yield = 0; - -// This flag is used in the very special case that you are passing a polymorphic procedure -// to a polymorphic procedure, and you have enough information to instantiate said procedure -// in order to resolve the type of one of the return values. -static b32 doing_nested_polymorph_lookup = 0; - -// The name is pretty self-descriptive, but this is a node that is returned from things -// like polymorphic_proc_lookup when it is determined that everything works so far, but -// the caller must yield in order to finish checking this polymorphic procedure. -AstTyped node_that_signals_a_yield = { Ast_Kind_Function, 0 }; -AstTyped node_that_signals_failure = { Ast_Kind_Error, 0 }; - -static void ensure_polyproc_cache_is_created(AstFunction* pp) { +static void ensure_polyproc_cache_is_created(Context *context, AstFunction* pp) { if (pp->concrete_funcs == NULL) sh_new_arena(pp->concrete_funcs); - if (pp->active_queries.hashes == NULL) bh_imap_init(&pp->active_queries, global_heap_allocator, 31); + if (pp->active_queries.hashes == NULL) bh_imap_init(&pp->active_queries, context->gp_alloc, 31); } -void insert_poly_sln_into_scope(Scope* scope, AstPolySolution *sln) { +void insert_poly_sln_into_scope(Context *context, Scope* scope, AstPolySolution *sln) { AstNode *node = NULL; switch (sln->kind) { case PSK_Type: - node = onyx_ast_node_new(context.ast_alloc, sizeof(AstTypeRawAlias), Ast_Kind_Type_Raw_Alias); + node = onyx_ast_node_new(context->ast_alloc, sizeof(AstTypeRawAlias), Ast_Kind_Type_Raw_Alias); ((AstTypeRawAlias *) node)->token = sln->poly_sym->token; ((AstTypeRawAlias *) node)->to = sln->type; - ((AstTypeRawAlias *) node)->type = &basic_types[Basic_Kind_Type_Index]; + ((AstTypeRawAlias *) node)->type = 
context->types.basic[Basic_Kind_Type_Index]; ((AstTypeRawAlias *) node)->type_id = sln->type->id; break; @@ -41,7 +24,7 @@ void insert_poly_sln_into_scope(Scope* scope, AstPolySolution *sln) { // CLEANUP: Maybe clone this? // assert(sln->value->flags & Ast_Flag_Comptime); if ((sln->value->flags & Ast_Flag_Comptime) == 0) { - onyx_report_error(sln->value->token->pos, Error_Critical, "Expected value to be compile time known."); + ONYX_ERROR(sln->value->token->pos, Error_Critical, "Expected value to be compile time known."); return; } @@ -51,19 +34,19 @@ void insert_poly_sln_into_scope(Scope* scope, AstPolySolution *sln) { case PSK_Undefined: assert("Unexpected PSK_Undefined" && 0); break; } - symbol_introduce(scope, sln->poly_sym->token, node); + symbol_introduce(context, scope, sln->poly_sym->token, node); } -static void insert_poly_slns_into_scope(Scope* scope, bh_arr(AstPolySolution) slns) { +static void insert_poly_slns_into_scope(Context *context, Scope* scope, bh_arr(AstPolySolution) slns) { bh_arr_each(AstPolySolution, sln, slns) { - insert_poly_sln_into_scope(scope, sln); + insert_poly_sln_into_scope(context, scope, sln); } } // NOTE: This might return a volatile string. Do not store it without copying it. -static char* build_poly_solution_key(AstPolySolution* sln) { +static char* build_poly_solution_key(Context *context, AstPolySolution* sln) { if (sln->kind == PSK_Type) { - return (char *) type_get_unique_name(sln->type); + return bh_aprintf(context->ast_alloc, "@%d", sln->type->id); } else if (sln->kind == PSK_Value) { static char buffer[256]; @@ -88,17 +71,18 @@ static char* build_poly_solution_key(AstPolySolution* sln) { } // NOTE: This returns a volatile string. Do not store it without copying it. 
-static char* build_poly_slns_unique_key(bh_arr(AstPolySolution) slns) { +static char* build_poly_slns_unique_key(Context *context, bh_arr(AstPolySolution) slns) { static char key_buf[1024]; fori (i, 0, 1024) key_buf[i] = 0; bh_arr_each(AstPolySolution, sln, slns) { + if (sln != slns) strncat(key_buf, "$", 1023); + token_toggle_end(sln->poly_sym->token); strncat(key_buf, sln->poly_sym->token->text, 1023); strncat(key_buf, "=", 1023); - strncat(key_buf, build_poly_solution_key(sln), 1023); - strncat(key_buf, ";", 1023); + strncat(key_buf, build_poly_solution_key(context, sln), 1023); token_toggle_end(sln->poly_sym->token); } @@ -109,11 +93,11 @@ static char* build_poly_slns_unique_key(bh_arr(AstPolySolution) slns) { // NOTE: This function adds a solidified function to the entity heap for it to be processed // later. It optionally can start the function header entity at the code generation state if // the header has already been processed. -static b32 add_solidified_function_entities(AstSolidifiedFunction *solidified_func) { +static b32 add_solidified_function_entities(Context *context, AstSolidifiedFunction *solidified_func) { solidified_func->func->flags |= Ast_Flag_From_Polymorphism; Entity func_header_entity = { - .state = Entity_State_Resolve_Symbols, + .state = Entity_State_Check_Types, .type = Entity_Type_Function_Header, .function = solidified_func->func, .package = NULL, @@ -121,15 +105,15 @@ static b32 add_solidified_function_entities(AstSolidifiedFunction *solidified_fu }; Entity func_entity = { - .state = Entity_State_Resolve_Symbols, + .state = Entity_State_Check_Types, .type = Entity_Type_Function, .function = solidified_func->func, .package = NULL, .scope = solidified_func->func->poly_scope, }; - Entity* entity_header = entity_heap_insert(&context.entities, func_header_entity); - Entity* entity_body = entity_heap_insert(&context.entities, func_entity); + Entity* entity_header = entity_heap_insert(&context->entities, func_header_entity); + Entity* 
entity_body = entity_heap_insert(&context->entities, func_entity); solidified_func->func_header_entity = entity_header; solidified_func->func->entity_header = entity_header; @@ -143,6 +127,7 @@ static b32 add_solidified_function_entities(AstSolidifiedFunction *solidified_fu // generate the header of the function, which is useful for cases such as checking if a // set of arguments works for a polymorphic overload option. static AstSolidifiedFunction generate_solidified_function( + Context *context, AstFunction* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn, @@ -156,16 +141,16 @@ static AstSolidifiedFunction generate_solidified_function( if (tkn) poly_scope_pos = tkn->pos; if (header_only) { - solidified_func.func = (AstFunction *) clone_function_header(context.ast_alloc, pp); + solidified_func.func = (AstFunction *) clone_function_header(context, pp); solidified_func.func->flags |= Ast_Flag_Incomplete_Body; } else { - solidified_func.func = (AstFunction *) ast_clone(context.ast_alloc, pp); + solidified_func.func = (AstFunction *) ast_clone(context, pp); } assert(pp->parent_scope_of_poly_proc); - solidified_func.func->poly_scope = scope_create(context.ast_alloc, pp->parent_scope_of_poly_proc, poly_scope_pos); - insert_poly_slns_into_scope(solidified_func.func->poly_scope, slns); + solidified_func.func->poly_scope = scope_create(context, pp->parent_scope_of_poly_proc, poly_scope_pos); + insert_poly_slns_into_scope(context, solidified_func.func->poly_scope, slns); solidified_func.func->flags |= Ast_Flag_From_Polymorphism; solidified_func.func->generated_from = tkn; @@ -178,12 +163,12 @@ static AstSolidifiedFunction generate_solidified_function( u32 removed_params = 0; bh_arr_each(AstPolyParam, param, pp->poly_params) { if (param->implicit_interface) { - AstConstraint *constraint = onyx_ast_node_new(context.ast_alloc, sizeof(AstConstraint), Ast_Kind_Constraint); + AstConstraint *constraint = onyx_ast_node_new(context->ast_alloc, sizeof(AstConstraint), 
Ast_Kind_Constraint); constraint->interface = (AstInterface *) param->implicit_interface; constraint->token = constraint->interface->token; - bh_arr_new(global_heap_allocator, constraint->args, 1); - bh_arr_push(constraint->args, (AstTyped *) ast_clone(context.ast_alloc, param->poly_sym)); + bh_arr_new(context->gp_alloc, constraint->args, 1); + bh_arr_push(constraint->args, (AstTyped *) ast_clone(context, param->poly_sym)); bh_arr_push(solidified_func.func->constraints.constraints, constraint); } @@ -197,16 +182,16 @@ static AstSolidifiedFunction generate_solidified_function( return solidified_func; } -static void ensure_solidified_function_has_body(AstFunction* pp, AstSolidifiedFunction *solidified_func) { +static void ensure_solidified_function_has_body(Context *context, AstFunction* pp, AstSolidifiedFunction *solidified_func) { if (solidified_func->func->flags & Ast_Flag_Incomplete_Body) { - clone_function_body(context.ast_alloc, solidified_func->func, pp); + clone_function_body(context, solidified_func->func, pp); // HACK: I'm asserting that this function should return without an error, because // the only case where it can return an error is if there was a problem with the // header. This should never be the case in this situation, since the header would // have to have successfully passed type checking before it would become a solidified // procedure. - assert(add_solidified_function_entities(solidified_func)); + assert(add_solidified_function_entities(context, solidified_func)); solidified_func->func->flags &= ~Ast_Flag_Incomplete_Body; } @@ -241,9 +226,9 @@ typedef struct PolySolveElem { // This function utilizes a basic breadth-first search of the type_expr and actual type // trees, always moving along them in parallel, so when the target is reached (if it is // ever reached), the "actual" is the matched type/value. 
-static PolySolveResult solve_poly_type(AstNode* target, AstType* type_expr, Type* actual) { +static PolySolveResult solve_poly_type(Context *context, AstNode* target, AstType* type_expr, Type* actual) { bh_arr(PolySolveElem) elem_queue = NULL; - bh_arr_new(global_heap_allocator, elem_queue, 4); + bh_arr_new(context->gp_alloc, elem_queue, 4); PolySolveResult result = { PSK_Undefined, { NULL } }; @@ -260,7 +245,7 @@ static PolySolveResult solve_poly_type(AstNode* target, AstType* type_expr, Type // This check does not strictly need the `type_auto_return` check, // but it does prevent bugs if the auto return type placeholder is // accidentally inserted into the real type. - if (elem.type_expr == (AstType *) target && elem.actual != &type_auto_return) { + if (elem.type_expr == (AstType *) target && elem.actual != context->types.auto_return) { result.kind = elem.kind; assert(elem.kind != PSK_Undefined); @@ -314,7 +299,7 @@ static PolySolveResult solve_poly_type(AstNode* target, AstType* type_expr, Type // CLEANUP: Making an integer literal every time is very very very gross. This should // at least be cached or something. 
- .value = (AstTyped *) make_int_literal(context.ast_alloc, elem.actual->Array.count) + .value = (AstTyped *) make_int_literal(context, elem.actual->Array.count) })); bh_arr_push(elem_queue, ((PolySolveElem) { @@ -381,7 +366,7 @@ static PolySolveResult solve_poly_type(AstNode* target, AstType* type_expr, Type } case Ast_Kind_Call: { - AstPolyCallType *pct = convert_call_to_polycall((AstCall *) elem.type_expr); + AstPolyCallType *pct = convert_call_to_polycall(context, (AstCall *) elem.type_expr); elem.type_expr = (AstType *) pct; // fallthrough @@ -489,32 +474,32 @@ static AstTyped* lookup_param_in_arguments(AstFunction* func, AstPolyParam* para return NULL; } -static AstTyped* try_lookup_based_on_partial_function_type(AstFunction *pp, AstFunctionType *ft) { +static AstTyped* try_lookup_based_on_partial_function_type(Context *context, AstFunction *pp, AstFunctionType *ft) { if (ft->partial_function_type == NULL) { AstType *old_return_type = ft->return_type; - ft->return_type = (AstType *) &basic_type_void; - ft->partial_function_type = type_build_from_ast(context.ast_alloc, (AstType *) ft); + ft->return_type = (AstType *) &context->basic_types.type_void; + ft->partial_function_type = type_build_from_ast(context, (AstType *) ft); ft->return_type = old_return_type; if (!ft->partial_function_type) { - doing_nested_polymorph_lookup = 1; + context->polymorph.doing_nested_polymorph_lookup = 1; return NULL; } assert(ft->partial_function_type); } - AstTyped *result = (AstTyped *) polymorphic_proc_lookup(pp, PPLM_By_Function_Type, ft->partial_function_type, pp->token); + AstTyped *result = (AstTyped *) polymorphic_proc_lookup(context, pp, PPLM_By_Function_Type, ft->partial_function_type, pp->token); // If the result is not ready (NULL, yield flag, no type, or `type_auto_return` as return type), wait. 
if (result && ( result->type == NULL - || (result->type->kind == Type_Kind_Function && result->type->Function.return_type == &type_auto_return))) + || (result->type->kind == Type_Kind_Function && result->type->Function.return_type == context->types.auto_return))) { - doing_nested_polymorph_lookup = 1; + context->polymorph.doing_nested_polymorph_lookup = 1; result = NULL; } - if (result == &node_that_signals_a_yield) { - doing_nested_polymorph_lookup = 1; + if (result == &context->node_that_signals_a_yield) { + context->polymorph.doing_nested_polymorph_lookup = 1; result = NULL; } @@ -525,7 +510,7 @@ static AstTyped* try_lookup_based_on_partial_function_type(AstFunction *pp, AstF // information. It is asssumed that the "param" is of kind PPK_Poly_Type. This function uses // either the arguments provided, or a function type to compare against to pattern match for // the type that the parameter but be. -static void solve_for_polymorphic_param_type(PolySolveResult* resolved, AstFunction* func, AstPolyParam* param, PolyProcLookupMethod pp_lookup, ptr actual, OnyxError* err_msg) { +static void solve_for_polymorphic_param_type(Context *context, PolySolveResult* resolved, AstFunction* func, AstPolyParam* param, PolyProcLookupMethod pp_lookup, ptr actual, OnyxError* err_msg) { Type* actual_type = NULL; b32 can_strip_pointer = 0; @@ -563,11 +548,11 @@ static void solve_for_polymorphic_param_type(PolySolveResult* resolved, AstFunct } if (all_types) - typed_param = try_lookup_based_on_partial_function_type((AstFunction *) potential, ft); + typed_param = try_lookup_based_on_partial_function_type(context, (AstFunction *) potential, ft); skip_nested_polymorph_case: - actual_type = query_expression_type(typed_param); + actual_type = query_expression_type(context, typed_param); if (actual_type == NULL) { if (typed_param) err_msg->pos = typed_param->token->pos; @@ -594,7 +579,7 @@ static void solve_for_polymorphic_param_type(PolySolveResult* resolved, AstFunct default: return; } - 
PolySolveResult res = solve_poly_type(param->poly_sym, param->type_expr, actual_type); + PolySolveResult res = solve_poly_type(context, param->poly_sym, param->type_expr, actual_type); // If we are trying to match against an "address of" node that was // placed because of a method call, there's a small chance that the @@ -605,16 +590,16 @@ static void solve_for_polymorphic_param_type(PolySolveResult* resolved, AstFunct // outer most pointer node. If everything succeeds, the pointer node // will be removed when the actual value is checked later. if (res.kind == PSK_Undefined && can_strip_pointer) { - res = solve_poly_type(param->poly_sym, param->type_expr, actual_type->Pointer.elem); + res = solve_poly_type(context, param->poly_sym, param->type_expr, actual_type->Pointer.elem); } if (res.kind == PSK_Undefined) { err_msg->pos = param->poly_sym->token->pos; - err_msg->text = bh_aprintf(global_scratch_allocator, + err_msg->text = bh_aprintf(context->scratch_alloc, "Unable to solve for polymorphic variable '%b', given the type '%s'.", param->poly_sym->token->text, param->poly_sym->token->length, - type_get_name(actual_type)); + type_get_name(context, actual_type)); } *resolved = res; @@ -627,7 +612,7 @@ static void solve_for_polymorphic_param_type(PolySolveResult* resolved, AstFunct // CLEANUP: This function is kind of gross at the moment, because it handles different cases for // the argument kind. When type expressions (type_expr) become first-class types in the type // system, this code should be able to be a lot cleaner. 
-static void solve_for_polymorphic_param_value(PolySolveResult* resolved, AstFunction* func, AstPolyParam* param, PolyProcLookupMethod pp_lookup, ptr actual, OnyxError* err_msg) { +static void solve_for_polymorphic_param_value(Context *context, PolySolveResult* resolved, AstFunction* func, AstPolyParam* param, PolyProcLookupMethod pp_lookup, ptr actual, OnyxError* err_msg) { if (pp_lookup != PPLM_By_Arguments) { err_msg->text = "Function type cannot be used to solved for baked parameter value."; return; @@ -647,28 +632,28 @@ static void solve_for_polymorphic_param_value(PolySolveResult* resolved, AstFunc Type* param_type = NULL; AstType *param_type_expr = func->params[param->idx].local->type_node; - if (param_type_expr == (AstType *) &basic_type_type_expr) { + if (param_type_expr == (AstType *) &context->basic_types.type_type_expr) { if (!node_is_type((AstNode *) value)) { if (err_msg) { err_msg->pos = value->token->pos; - err_msg->text = bh_aprintf(global_scratch_allocator, + err_msg->text = bh_aprintf(context->scratch_alloc, "Expected type expression here, got a '%s'.", - type_get_name(value->type)); + type_get_name(context, value->type)); } return; } - Type* resolved_type = type_build_from_ast(context.ast_alloc, (AstType *) value); - if (resolved_type == NULL) flag_to_yield = 1; + Type* resolved_type = type_build_from_ast(context, (AstType *) value); + if (resolved_type == NULL) context->polymorph.flag_to_yield = 1; *resolved = ((PolySolveResult) { PSK_Type, .actual = resolved_type }); } else { - resolve_expression_type(value); + resolve_expression_type(context, value); - param_type = type_build_from_ast(context.ast_alloc, param_type_expr); + param_type = type_build_from_ast(context, param_type_expr); if (param_type == NULL) { - flag_to_yield = 1; + context->polymorph.flag_to_yield = 1; err_msg->text = "Waiting to know type for polymorphic value."; return; } @@ -678,24 +663,24 @@ static void solve_for_polymorphic_param_value(PolySolveResult* resolved, AstFunc 
value_to_use = (AstTyped *) get_function_from_node((AstNode *) value); } - TypeMatch tm = unify_node_and_type(&value_to_use, param_type); + TypeMatch tm = unify_node_and_type(context, &value_to_use, param_type); if (tm == TYPE_MATCH_FAILED) { if (err_msg) { err_msg->pos = param->poly_sym->token->pos; - err_msg->text = bh_aprintf(global_scratch_allocator, + err_msg->text = bh_aprintf(context->scratch_alloc, "The procedure '%s' expects a value of type '%s' for %d%s baked parameter '%b', got '%s'.", - get_function_name(func), - type_get_name(param_type), + get_function_name(context, func), + type_get_name(context, param_type), param->idx + 1, bh_num_suffix(param->idx + 1), param->poly_sym->token->text, param->poly_sym->token->length, - node_get_type_name(value_to_use)); + node_get_type_name(context, value_to_use)); } return; } - if (tm == TYPE_MATCH_YIELD) flag_to_yield = 1; + if (tm == TYPE_MATCH_YIELD) context->polymorph.flag_to_yield = 1; if ((value_to_use->flags & Ast_Flag_Comptime) == 0) { if (err_msg) { @@ -713,7 +698,7 @@ static void solve_for_polymorphic_param_value(PolySolveResult* resolved, AstFunc } } -TypeMatch find_polymorphic_sln(AstPolySolution *out, AstPolyParam *param, AstFunction *func, PolyProcLookupMethod pp_lookup, ptr actual, OnyxError* err_msg) { +TypeMatch find_polymorphic_sln(Context *context, AstPolySolution *out, AstPolyParam *param, AstFunction *func, PolyProcLookupMethod pp_lookup, ptr actual, OnyxError* err_msg) { if (err_msg) { err_msg->pos = func->token->pos; err_msg->rank = Error_Critical; @@ -722,19 +707,19 @@ TypeMatch find_polymorphic_sln(AstPolySolution *out, AstPolyParam *param, AstFun // NOTE: Solve for the polymorphic parameter's value PolySolveResult resolved = { PSK_Undefined }; switch (param->kind) { - case PPK_Poly_Type: solve_for_polymorphic_param_type (&resolved, func, param, pp_lookup, actual, err_msg); break; - case PPK_Baked_Value: solve_for_polymorphic_param_value(&resolved, func, param, pp_lookup, actual, err_msg); 
break; + case PPK_Poly_Type: solve_for_polymorphic_param_type (context, &resolved, func, param, pp_lookup, actual, err_msg); break; + case PPK_Baked_Value: solve_for_polymorphic_param_value(context, &resolved, func, param, pp_lookup, actual, err_msg); break; default: if (err_msg) err_msg->text = "Invalid polymorphic parameter kind. This is a compiler bug."; } - if (doing_nested_polymorph_lookup) { - doing_nested_polymorph_lookup = 0; + if (context->polymorph.doing_nested_polymorph_lookup) { + context->polymorph.doing_nested_polymorph_lookup = 0; return TYPE_MATCH_SPECIAL; } - if (flag_to_yield) { - flag_to_yield = 0; + if (context->polymorph.flag_to_yield) { + context->polymorph.flag_to_yield = 0; return TYPE_MATCH_YIELD; } @@ -756,7 +741,7 @@ TypeMatch find_polymorphic_sln(AstPolySolution *out, AstPolyParam *param, AstFun // NOTE: If no error message has been assigned to why this polymorphic parameter // resolution was unsuccessful, provide a basic dummy one. if (err_msg && err_msg->text == NULL) - err_msg->text = bh_aprintf(global_scratch_allocator, + err_msg->text = bh_aprintf(context->scratch_alloc, "Unable to solve for polymorphic variable '%b'.", param->poly_sym->token->text, param->poly_sym->token->length); @@ -769,8 +754,8 @@ TypeMatch find_polymorphic_sln(AstPolySolution *out, AstPolyParam *param, AstFun // NOTE: The job of this function is to take a polymorphic procedure, as well as a method of // solving for the polymorphic variables, in order to return an array of the solutions for all // of the polymorphic variables. 
-static bh_arr(AstPolySolution) find_polymorphic_slns(AstFunction* pp, PolyProcLookupMethod pp_lookup, ptr actual, OnyxToken *tkn, b32 necessary) { - ensure_polyproc_cache_is_created(pp); +static bh_arr(AstPolySolution) find_polymorphic_slns(Context *context, AstFunction* pp, PolyProcLookupMethod pp_lookup, ptr actual, OnyxToken *tkn, b32 necessary) { + ensure_polyproc_cache_is_created(context, pp); if (bh_imap_has(&pp->active_queries, (u64) actual)) { AstPolyQuery *query = (AstPolyQuery *) bh_imap_get(&pp->active_queries, (u64) actual); assert(query->kind == Ast_Kind_Polymorph_Query); @@ -779,68 +764,67 @@ static bh_arr(AstPolySolution) find_polymorphic_slns(AstFunction* pp, PolyProcLo if (query->entity->state == Entity_State_Finalized) return query->slns; if (query->entity->state == Entity_State_Failed) return NULL; - flag_to_yield = 1; + context->polymorph.flag_to_yield = 1; return NULL; } bh_arr(AstPolySolution) slns = NULL; - bh_arr_new(global_heap_allocator, slns, bh_arr_length(pp->poly_params)); + bh_arr_new(context->gp_alloc, slns, bh_arr_length(pp->poly_params)); // NOTE: "known solutions" are given through a '#solidify' directive. If this polymorphic // procedure is the result of a partially applied solidification, this array will be non- // empty and these solutions will be used. 
bh_arr_each(AstPolySolution, known_sln, pp->known_slns) bh_arr_push(slns, *known_sln); - AstPolyQuery *query = onyx_ast_node_new(context.ast_alloc, sizeof(AstPolyQuery), Ast_Kind_Polymorph_Query); + AstPolyQuery *query = onyx_ast_node_new(context->ast_alloc, sizeof(AstPolyQuery), Ast_Kind_Polymorph_Query); query->token = pp->token; query->proc = pp; query->pp_lookup = pp_lookup; query->given = actual; query->error_loc = tkn; query->slns = slns; - query->function_header = clone_function_header(context.ast_alloc, pp); + query->function_header = clone_function_header(context, pp); query->function_header->flags |= Ast_Flag_Header_Check_No_Error; query->function_header->scope = NULL; query->error_on_fail = necessary; - query->successful_symres = 1; bh_imap_put(&pp->active_queries, (u64) actual, (u64) query); - add_entities_for_node(NULL, (AstNode *) query, NULL, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) query, NULL, NULL); - flag_to_yield = 1; + context->polymorph.flag_to_yield = 1; return NULL; } // NOTE: The job of this function is to be a wrapper to other functions, providing an error // message if a solution could not be found. This can't be merged with polymorphic_proc_solidify // because polymorphic_proc_try_solidify uses the aforementioned function. -AstFunction* polymorphic_proc_lookup(AstFunction* pp, PolyProcLookupMethod pp_lookup, ptr actual, OnyxToken* tkn) { +AstFunction* polymorphic_proc_lookup(Context *context, AstFunction* pp, PolyProcLookupMethod pp_lookup, ptr actual, OnyxToken* tkn) { // Ensure the polymorphic procedure is ready to be solved for. 
assert(pp->entity); - if (pp->entity->state < Entity_State_Check_Types) return (AstFunction *) &node_that_signals_a_yield; + if (pp->entity->state < Entity_State_Check_Types) return (AstFunction *) &context->node_that_signals_a_yield; - ensure_polyproc_cache_is_created(pp); + ensure_polyproc_cache_is_created(context, pp); - bh_arr(AstPolySolution) slns = find_polymorphic_slns(pp, pp_lookup, actual, tkn, 1); + bh_arr(AstPolySolution) slns = find_polymorphic_slns(context, pp, pp_lookup, actual, tkn, 1); if (slns == NULL) { - if (flag_to_yield) { - flag_to_yield = 0; - return (AstFunction *) &node_that_signals_a_yield; + if (context->polymorph.flag_to_yield) { + context->polymorph.flag_to_yield = 0; + return (AstFunction *) &context->node_that_signals_a_yield; } return NULL; } - AstFunction* result = polymorphic_proc_solidify(pp, slns, tkn); + AstFunction* result = polymorphic_proc_solidify(context, pp, slns, tkn); return result; } -AstFunction* polymorphic_proc_solidify(AstFunction* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn) { - ensure_polyproc_cache_is_created(pp); +AstFunction* polymorphic_proc_solidify(Context *context, AstFunction* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn) { + ensure_polyproc_cache_is_created(context, pp); // NOTE: Check if a version of this polyproc has already been created. - char* unique_key = build_poly_slns_unique_key(slns); + char* unique_key = build_poly_slns_unique_key(context, slns); i32 index = shgeti(pp->concrete_funcs, unique_key); if (index != -1) { AstSolidifiedFunction solidified_func = pp->concrete_funcs[index].value; @@ -848,7 +832,7 @@ AstFunction* polymorphic_proc_solidify(AstFunction* pp, bh_arr(AstPolySolution) // NOTE: If this solution was originally created from a "build_only_header" call, then the body // will not have been or type checked, or anything. This ensures that the body is copied, the // entities are created and entered into the pipeline. 
- ensure_solidified_function_has_body(pp, &solidified_func); + ensure_solidified_function_has_body(context, pp, &solidified_func); // NOTE: Again, if this came from a "build_only_header" call, then there was no known token and // the "generated_from" member will be null. It is best to set it here so errors reported in that @@ -859,18 +843,27 @@ AstFunction* polymorphic_proc_solidify(AstFunction* pp, bh_arr(AstPolySolution) return solidified_func.func; } - AstSolidifiedFunction solidified_func = generate_solidified_function(pp, slns, tkn, 0); - add_solidified_function_entities(&solidified_func); + AstSolidifiedFunction solidified_func = generate_solidified_function(context, pp, slns, tkn, 0); + add_solidified_function_entities(context, &solidified_func); // NOTE: Cache the function for later use, reducing duplicate functions. shput(pp->concrete_funcs, unique_key, solidified_func); - return (AstFunction *) &node_that_signals_a_yield; + if (solidified_func.func->name) { + solidified_func.func->assembly_name = bh_aprintf( + context->gp_alloc, + "%s$%s", + solidified_func.func->name, + unique_key + ); + } + + return (AstFunction *) &context->node_that_signals_a_yield; } // NOTE: This can return either a AstFunction or an AstFunction, depending if enough parameters were // supplied to remove all the polymorphic variables from the function. 
-AstNode* polymorphic_proc_try_solidify(AstFunction* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn) { +AstNode* polymorphic_proc_try_solidify(Context *context, AstFunction* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn) { i32 valid_argument_count = 0; bh_arr_each(AstPolySolution, sln, slns) { @@ -887,10 +880,10 @@ AstNode* polymorphic_proc_try_solidify(AstFunction* pp, bh_arr(AstPolySolution) valid_argument_count++; } else { if (pp->name) { - onyx_report_error(tkn->pos, Error_Critical, "'%b' is not a type variable of '%s'.", + ONYX_ERROR(tkn->pos, Error_Critical, "'%b' is not a type variable of '%s'.", sln->poly_sym->token->text, sln->poly_sym->token->length, pp->name); } else { - onyx_report_error(tkn->pos, Error_Critical, "'%b' is not a type variable of '%b'.", + ONYX_ERROR(tkn->pos, Error_Critical, "'%b' is not a type variable of '%b'.", sln->poly_sym->token->text, sln->poly_sym->token->length, pp->token->text, pp->token->length); } @@ -899,22 +892,22 @@ AstNode* polymorphic_proc_try_solidify(AstFunction* pp, bh_arr(AstPolySolution) } if (valid_argument_count == bh_arr_length(pp->poly_params)) { - return (AstNode *) polymorphic_proc_solidify(pp, slns, tkn); + return (AstNode *) polymorphic_proc_solidify(context, pp, slns, tkn); } else { // HACK: Some of these initializations assume that the entity for this polyproc has // made it through the symbol resolution phase. 
// - brendanfh 2020/12/25 - AstFunction* new_pp = onyx_ast_node_new(context.ast_alloc, sizeof(AstFunction), Ast_Kind_Polymorphic_Proc); + AstFunction* new_pp = onyx_ast_node_new(context->ast_alloc, sizeof(AstFunction), Ast_Kind_Polymorphic_Proc); memcpy(new_pp, pp, sizeof(AstFunction)); new_pp->token = tkn; - new_pp->poly_params = bh_arr_copy(context.ast_alloc, pp->poly_params); + new_pp->poly_params = bh_arr_copy(context->ast_alloc, pp->poly_params); - ensure_polyproc_cache_is_created(pp); + ensure_polyproc_cache_is_created(context, pp); new_pp->concrete_funcs = pp->concrete_funcs; new_pp->known_slns = NULL; - bh_arr_new(global_heap_allocator, new_pp->known_slns, bh_arr_length(pp->known_slns) + bh_arr_length(slns)); + bh_arr_new(context->gp_alloc, new_pp->known_slns, bh_arr_length(pp->known_slns) + bh_arr_length(slns)); bh_arr_each(AstPolySolution, sln, pp->known_slns) bh_arr_push(new_pp->known_slns, *sln); bh_arr_each(AstPolySolution, sln, slns) bh_arr_push(new_pp->known_slns, *sln); @@ -923,24 +916,24 @@ AstNode* polymorphic_proc_try_solidify(AstFunction* pp, bh_arr(AstPolySolution) } } -AstFunction* polymorphic_proc_build_only_header(AstFunction* pp, PolyProcLookupMethod pp_lookup, ptr actual) { - ensure_polyproc_cache_is_created(pp); - bh_arr(AstPolySolution) slns = find_polymorphic_slns(pp, pp_lookup, actual, NULL, 0); - if (flag_to_yield) { - flag_to_yield = 0; - return (AstFunction *) &node_that_signals_a_yield; +AstFunction* polymorphic_proc_build_only_header(Context *context, AstFunction* pp, PolyProcLookupMethod pp_lookup, ptr actual) { + ensure_polyproc_cache_is_created(context, pp); + bh_arr(AstPolySolution) slns = find_polymorphic_slns(context, pp, pp_lookup, actual, NULL, 0); + if (context->polymorph.flag_to_yield) { + context->polymorph.flag_to_yield = 0; + return (AstFunction *) &context->node_that_signals_a_yield; } if (slns == NULL) return NULL; - ensure_polyproc_cache_is_created(pp); + ensure_polyproc_cache_is_created(context, pp); - return 
polymorphic_proc_build_only_header_with_slns(pp, slns, 0); + return polymorphic_proc_build_only_header_with_slns(context, pp, slns, 0); } -AstFunction* polymorphic_proc_build_only_header_with_slns(AstFunction* pp, bh_arr(AstPolySolution) slns, b32 error_if_failed) { +AstFunction* polymorphic_proc_build_only_header_with_slns(Context *context, AstFunction* pp, bh_arr(AstPolySolution) slns, b32 error_if_failed) { AstSolidifiedFunction solidified_func; - char* unique_key = build_poly_slns_unique_key(slns); + char* unique_key = build_poly_slns_unique_key(context, slns); i32 index = shgeti(pp->concrete_funcs, unique_key); if (index != -1) { solidified_func = pp->concrete_funcs[index].value; @@ -949,33 +942,33 @@ AstFunction* polymorphic_proc_build_only_header_with_slns(AstFunction* pp, bh_ar // NOTE: This function is only going to have the header of it correctly created. // Nothing should happen to this function's body or else the original will be corrupted. // - brendanfh 2021/01/10 - solidified_func = generate_solidified_function(pp, slns, NULL, 1); + solidified_func = generate_solidified_function(context, pp, slns, NULL, 1); } if (solidified_func.func_header_entity) { if (solidified_func.func_header_entity->state == Entity_State_Finalized) return solidified_func.func; if (solidified_func.func_header_entity->state == Entity_State_Failed) return NULL; - return (AstFunction *) &node_that_signals_a_yield; + return (AstFunction *) &context->node_that_signals_a_yield; } BH_MASK_SET(solidified_func.func->flags, !error_if_failed, Ast_Flag_Header_Check_No_Error); Entity func_header_entity = { - .state = Entity_State_Resolve_Symbols, + .state = Entity_State_Check_Types, .type = Entity_Type_Temp_Function_Header, .function = solidified_func.func, .package = NULL, .scope = solidified_func.func->poly_scope, }; - Entity* func_header_entity_ptr = entity_heap_insert(&context.entities, func_header_entity); + Entity* func_header_entity_ptr = entity_heap_insert(&context->entities, 
func_header_entity); solidified_func.func_header_entity = func_header_entity_ptr; // NOTE: Cache the function for later use. shput(pp->concrete_funcs, unique_key, solidified_func); - return (AstFunction *) &node_that_signals_a_yield; + return (AstFunction *) &context->node_that_signals_a_yield; } typedef struct AutoPolymorphVariable { @@ -985,11 +978,11 @@ typedef struct AutoPolymorphVariable { AstType **replace; } AutoPolymorphVariable; -// This should be called after all the parameter types have been symresed, but before anything +// This should be called after all the parameter types have had their symbols resolved, but before anything // happens to the body. -b32 potentially_convert_function_to_polyproc(AstFunction *func) { +b32 potentially_convert_function_to_polyproc(Context *context, AstFunction *func) { bh_arr(AutoPolymorphVariable) auto_vars = NULL; - bh_arr_new(global_heap_allocator, auto_vars, 2); + bh_arr_new(context->gp_alloc, auto_vars, 2); u32 param_idx = 0; bh_arr_each(AstParam, param, func->params) { @@ -1039,7 +1032,7 @@ b32 potentially_convert_function_to_polyproc(AstFunction *func) { if (bh_arr_length(auto_vars) == 0) return 0; - bh_arr_new(global_heap_allocator, func->poly_params, bh_arr_length(auto_vars)); + bh_arr_new(context->gp_alloc, func->poly_params, bh_arr_length(auto_vars)); param_idx = 0; bh_arr_each(AutoPolymorphVariable, apv, auto_vars) { @@ -1048,11 +1041,11 @@ b32 potentially_convert_function_to_polyproc(AstFunction *func) { pp.kind = PPK_Poly_Type; pp.implicit_interface = NULL; - AstPolyCallType* pcall = onyx_ast_node_new(context.ast_alloc, sizeof(AstPolyCallType), Ast_Kind_Poly_Call_Type); + AstPolyCallType* pcall = onyx_ast_node_new(context->ast_alloc, sizeof(AstPolyCallType), Ast_Kind_Poly_Call_Type); pcall->callee = *apv->replace; pcall->token = pcall->callee->token; pcall->flags |= Ast_Flag_Poly_Call_From_Auto; - bh_arr_new(global_heap_allocator, pcall->params, apv->variable_count); + bh_arr_new(context->gp_alloc,
pcall->params, apv->variable_count); AstType *dealiased_base_type = (AstType *) strip_aliases((AstNode *) apv->base_type); @@ -1065,13 +1058,13 @@ b32 potentially_convert_function_to_polyproc(AstFunction *func) { *apv->replace = (AstType *) pcall; fori (i, 0, apv->variable_count) { - OnyxToken* name_token = bh_alloc_item(context.ast_alloc, OnyxToken); - name_token->text = bh_aprintf(context.ast_alloc, "__autopoly_var_%d\0", param_idx); + OnyxToken* name_token = bh_alloc_item(context->ast_alloc, OnyxToken); + name_token->text = bh_aprintf(context->ast_alloc, "__autopoly_var_%d\0", param_idx); name_token->length = strlen(name_token->text); name_token->type = Token_Type_Symbol; name_token->pos = pcall->token->pos; - pp.poly_sym = make_symbol(context.ast_alloc, name_token); + pp.poly_sym = make_symbol(context, name_token); pp.poly_sym->flags |= Ast_Flag_Symbol_Is_PolyVar; bh_arr_push(pcall->params, pp.poly_sym); bh_arr_push(func->poly_params, pp); @@ -1079,7 +1072,7 @@ b32 potentially_convert_function_to_polyproc(AstFunction *func) { } } - convert_function_to_polyproc(func); + convert_function_to_polyproc(context, func); bh_arr_each(AstParam, param, func->params) { param->local->flags |= Ast_Flag_Param_Symbol_Dirty; @@ -1102,18 +1095,18 @@ b32 potentially_convert_function_to_polyproc(AstFunction *func) { // The above documentation is very incorrect but I don't want to fix it right now. Basically, polymorphic // structures now have a delay instantiation phase and are not forced to be completed immediately. -char* build_poly_struct_name(char *name, Type* type) { +char* build_poly_struct_name(Context *context, char *name, Type* type) { char name_buf[256]; fori (i, 0, 256) name_buf[i] = 0; // Special case for `? T` if (type->kind == Type_Kind_Union - && type->Union.constructed_from == builtin_optional_type) { + && type->Union.constructed_from == context->builtins.optional_type) { strncat(name_buf, "? 
", 255); - strncat(name_buf, type_get_name(type->Union.poly_sln[0].type), 255); + strncat(name_buf, type_get_name(context, type->Union.poly_sln[0].type), 255); - return bh_aprintf(global_heap_allocator, "%s", name_buf); + return bh_aprintf(context->gp_alloc, "%s", name_buf); } bh_arr(AstPolySolution) slns = NULL; @@ -1131,7 +1124,7 @@ char* build_poly_struct_name(char *name, Type* type) { switch (ptype->kind) { case PSK_Undefined: assert(0); break; - case PSK_Type: strncat(name_buf, type_get_name(ptype->type), 255); break; + case PSK_Type: strncat(name_buf, type_get_name(context, ptype->type), 255); break; case PSK_Value: { // FIX AstNode* value = strip_aliases((AstNode *) ptype->value); @@ -1157,10 +1150,10 @@ char* build_poly_struct_name(char *name, Type* type) { } strncat(name_buf, ")", 255); - return bh_aprintf(global_heap_allocator, "%s", name_buf); + return bh_aprintf(context->gp_alloc, "%s", name_buf); } -Type* polymorphic_struct_lookup(AstPolyStructType* ps_type, bh_arr(AstPolySolution) slns, OnyxFilePos pos, b32 error_if_failed) { +Type* polymorphic_struct_lookup(Context *context, AstPolyStructType* ps_type, bh_arr(AstPolySolution) slns, OnyxFilePos pos, b32 error_if_failed) { if (ps_type->scope == NULL) { return NULL; } @@ -1172,7 +1165,7 @@ Type* polymorphic_struct_lookup(AstPolyStructType* ps_type, bh_arr(AstPolySoluti } if (bh_arr_length(slns) != bh_arr_length(ps_type->poly_params)) { - onyx_report_error(pos, Error_Critical, "Wrong number of arguments for '%s'. Expected %d, got %d.", + ONYX_ERROR(pos, Error_Critical, "Wrong number of arguments for '%s'. 
Expected %d, got %d.", ps_type->name, bh_arr_length(ps_type->poly_params), bh_arr_length(slns)); @@ -1186,7 +1179,7 @@ Type* polymorphic_struct_lookup(AstPolyStructType* ps_type, bh_arr(AstPolySoluti i++; } - char* unique_key = build_poly_slns_unique_key(slns); + char* unique_key = build_poly_slns_unique_key(context, slns); i32 index = shgeti(ps_type->concrete_structs, unique_key); if (index != -1) { AstStructType* concrete_struct = ps_type->concrete_structs[index].value; @@ -1196,44 +1189,44 @@ Type* polymorphic_struct_lookup(AstPolyStructType* ps_type, bh_arr(AstPolySoluti } if (concrete_struct->entity_type->state == Entity_State_Failed) { - return (Type *) &node_that_signals_failure; + return (Type *) &context->node_that_signals_failure; } - Type* cs_type = type_build_from_ast(context.ast_alloc, (AstType *) concrete_struct); + Type* cs_type = type_build_from_ast(context, (AstType *) concrete_struct); if (!cs_type) return NULL; cs_type->Struct.constructed_from = (AstType *) ps_type; - if (cs_type->Struct.poly_sln == NULL) cs_type->Struct.poly_sln = bh_arr_copy(global_heap_allocator, slns); - if (cs_type->Struct.name == NULL) cs_type->Struct.name = build_poly_struct_name(ps_type->name, cs_type); + if (cs_type->Struct.poly_sln == NULL) cs_type->Struct.poly_sln = bh_arr_copy(context->gp_alloc, slns); + if (cs_type->Struct.name == NULL) cs_type->Struct.name = build_poly_struct_name(context, ps_type->name, cs_type); return cs_type; } - Scope* sln_scope = scope_create(context.ast_alloc, ps_type->scope, ps_type->token->pos); - insert_poly_slns_into_scope(sln_scope, slns); + Scope* sln_scope = scope_create(context, ps_type->scope, ps_type->token->pos); + insert_poly_slns_into_scope(context, sln_scope, slns); - AstStructType* concrete_struct = (AstStructType *) ast_clone(context.ast_alloc, ps_type->base_struct); - concrete_struct->scope = scope_create(context.ast_alloc, sln_scope, ps_type->token->pos); + AstStructType* concrete_struct = (AstStructType *) 
ast_clone(context, ps_type->base_struct); + concrete_struct->scope = scope_create(context, sln_scope, ps_type->token->pos); concrete_struct->polymorphic_error_loc = pos; BH_MASK_SET(concrete_struct->flags, !error_if_failed, Ast_Flag_Header_Check_No_Error); i64 arg_count = bh_arr_length(ps_type->poly_params); - bh_arr_new(global_heap_allocator, concrete_struct->polymorphic_argument_types, arg_count); + bh_arr_new(context->gp_alloc, concrete_struct->polymorphic_argument_types, arg_count); bh_arr_set_length(concrete_struct->polymorphic_argument_types, arg_count); - concrete_struct->polymorphic_arguments = bh_arr_copy(global_heap_allocator, slns); + concrete_struct->polymorphic_arguments = bh_arr_copy(context->gp_alloc, slns); fori (i, 0, (i64) bh_arr_length(ps_type->poly_params)) { - concrete_struct->polymorphic_argument_types[i] = (AstType *) ast_clone(context.ast_alloc, ps_type->poly_params[i].type_node); + concrete_struct->polymorphic_argument_types[i] = (AstType *) ast_clone(context, ps_type->poly_params[i].type_node); } shput(ps_type->concrete_structs, unique_key, concrete_struct); - add_entities_for_node(NULL, (AstNode *) concrete_struct, sln_scope, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) concrete_struct, sln_scope, NULL); return NULL; } -Type* polymorphic_union_lookup(AstPolyUnionType* pu_type, bh_arr(AstPolySolution) slns, OnyxFilePos pos, b32 error_if_failed) { +Type* polymorphic_union_lookup(Context *context, AstPolyUnionType* pu_type, bh_arr(AstPolySolution) slns, OnyxFilePos pos, b32 error_if_failed) { if (pu_type->scope == NULL) { return NULL; } @@ -1245,7 +1238,7 @@ Type* polymorphic_union_lookup(AstPolyUnionType* pu_type, bh_arr(AstPolySolution } if (bh_arr_length(slns) != bh_arr_length(pu_type->poly_params)) { - onyx_report_error(pos, Error_Critical, "Wrong number of arguments for '%s'. Expected %d, got %d.", + ONYX_ERROR(pos, Error_Critical, "Wrong number of arguments for '%s'. 
Expected %d, got %d.", pu_type->name, bh_arr_length(pu_type->poly_params), bh_arr_length(slns)); @@ -1259,7 +1252,7 @@ Type* polymorphic_union_lookup(AstPolyUnionType* pu_type, bh_arr(AstPolySolution i++; } - char* unique_key = build_poly_slns_unique_key(slns); + char* unique_key = build_poly_slns_unique_key(context, slns); i32 index = shgeti(pu_type->concrete_unions, unique_key); if (index != -1) { AstUnionType* concrete_union = pu_type->concrete_unions[index].value; @@ -1269,39 +1262,39 @@ Type* polymorphic_union_lookup(AstPolyUnionType* pu_type, bh_arr(AstPolySolution } if (concrete_union->entity->state == Entity_State_Failed) { - return (Type *) &node_that_signals_failure; + return (Type *) &context->node_that_signals_failure; } - Type* cu_type = type_build_from_ast(context.ast_alloc, (AstType *) concrete_union); + Type* cu_type = type_build_from_ast(context, (AstType *) concrete_union); if (!cu_type) return NULL; cu_type->Union.constructed_from = (AstType *) pu_type; - if (cu_type->Union.poly_sln == NULL) cu_type->Union.poly_sln = bh_arr_copy(global_heap_allocator, slns); - cu_type->Union.name = build_poly_struct_name(pu_type->name, cu_type); + if (cu_type->Union.poly_sln == NULL) cu_type->Union.poly_sln = bh_arr_copy(context->gp_alloc, slns); + cu_type->Union.name = build_poly_struct_name(context, pu_type->name, cu_type); return cu_type; } - Scope* sln_scope = scope_create(context.ast_alloc, pu_type->scope, pu_type->token->pos); - insert_poly_slns_into_scope(sln_scope, slns); + Scope* sln_scope = scope_create(context, pu_type->scope, pu_type->token->pos); + insert_poly_slns_into_scope(context, sln_scope, slns); - AstUnionType* concrete_union = (AstUnionType *) ast_clone(context.ast_alloc, pu_type->base_union); - concrete_union->scope = scope_create(context.ast_alloc, sln_scope, pu_type->token->pos); + AstUnionType* concrete_union = (AstUnionType *) ast_clone(context, pu_type->base_union); + concrete_union->scope = scope_create(context, sln_scope, 
pu_type->token->pos); concrete_union->polymorphic_error_loc = pos; BH_MASK_SET(concrete_union->flags, !error_if_failed, Ast_Flag_Header_Check_No_Error); i64 arg_count = bh_arr_length(pu_type->poly_params); - bh_arr_new(global_heap_allocator, concrete_union->polymorphic_argument_types, arg_count); + bh_arr_new(context->gp_alloc, concrete_union->polymorphic_argument_types, arg_count); bh_arr_set_length(concrete_union->polymorphic_argument_types, arg_count); - concrete_union->polymorphic_arguments = bh_arr_copy(global_heap_allocator, slns); + concrete_union->polymorphic_arguments = bh_arr_copy(context->gp_alloc, slns); concrete_union->name = pu_type->name; fori (i, 0, (i64) bh_arr_length(pu_type->poly_params)) { - concrete_union->polymorphic_argument_types[i] = (AstType *) ast_clone(context.ast_alloc, pu_type->poly_params[i].type_node); + concrete_union->polymorphic_argument_types[i] = (AstType *) ast_clone(context, pu_type->poly_params[i].type_node); } shput(pu_type->concrete_unions, unique_key, concrete_union); - add_entities_for_node(NULL, (AstNode *) concrete_union, sln_scope, NULL); + add_entities_for_node(&context->entities, NULL, (AstNode *) concrete_union, sln_scope, NULL); return NULL; } diff --git a/compiler/src/symres.c b/compiler/src/symres.c deleted file mode 100644 index 8425a0723..000000000 --- a/compiler/src/symres.c +++ /dev/null @@ -1,1997 +0,0 @@ -#define BH_DEBUG -#include "parser.h" -#include "utils.h" -#include "astnodes.h" -#include "errors.h" -#include "doc.h" - -// :EliminatingSymres - notes the places where too much work is being done in symbol resolution - -// Variables used during the symbol resolution phase. -static Scope* current_scope = NULL; -static b32 report_unresolved_symbols = 1; -static b32 resolved_a_symbol = 0; - -static Entity* current_entity = NULL; - -#define SYMRES(kind, ...) 
do { \ - SymresStatus ss = symres_ ## kind (__VA_ARGS__); \ - if (ss > Symres_Errors_Start) return ss; \ - } while (0) - -#define SYMRES_INVISIBLE(kind, node, ...) do { \ - (node)->flags |= Ast_Flag_Symbol_Invisible; \ - SymresStatus ss = symres_ ## kind (__VA_ARGS__); \ - (node)->flags &= ~Ast_Flag_Symbol_Invisible; \ - if (ss > Symres_Errors_Start) return ss; \ - } while (0) - -typedef enum SymresStatus { - Symres_Success, - Symres_Complete, - Symres_Goto_Parse, - - Symres_Errors_Start, - Symres_Yield_Macro, - Symres_Yield_Micro, - Symres_Error, -} SymresStatus; - -static SymresStatus symres_type(AstType** type); -static SymresStatus symres_local(AstLocal** local); -static SymresStatus symres_call(AstCall** pcall); -static SymresStatus symres_size_of(AstSizeOf* so); -static SymresStatus symres_align_of(AstAlignOf* so); -static SymresStatus symres_field_access(AstFieldAccess** fa); -static SymresStatus symres_compound(AstCompound* compound); -static SymresStatus symres_expression(AstTyped** expr); -static SymresStatus symres_return(AstReturn* ret); -static SymresStatus symres_if(AstIfWhile* ifnode); -static SymresStatus symres_while(AstIfWhile* whilenode); -static SymresStatus symres_for(AstFor* fornode); -static SymresStatus symres_case(AstSwitchCase *casenode); -static SymresStatus symres_switch(AstSwitch* switchnode); -static SymresStatus symres_directive_solidify(AstDirectiveSolidify** psolid); -static SymresStatus symres_directive_defined(AstDirectiveDefined** pdefined); -static SymresStatus symres_directive_insert(AstDirectiveInsert* insert); -static SymresStatus symres_statement_chain(AstNode** walker); -static SymresStatus symres_statement(AstNode** stmt, b32 *remove); -static SymresStatus symres_block(AstBlock* block); -static SymresStatus symres_function_header(AstFunction* func); -static SymresStatus symres_function(AstFunction* func); -static SymresStatus symres_global(AstGlobal* global); -static SymresStatus 
symres_overloaded_function(AstOverloadedFunction* ofunc); -static SymresStatus symres_package(AstPackage* package); -static SymresStatus symres_enum(AstEnumType* enum_node); -static SymresStatus symres_memres_type(AstMemRes** memres); -static SymresStatus symres_memres(AstMemRes** memres); -static SymresStatus symres_struct_defaults(AstType* st); -static SymresStatus symres_static_if(AstIf* static_if); -static SymresStatus symres_macro(AstMacro* macro); -static SymresStatus symres_constraint(AstConstraint* constraint); -static SymresStatus symres_polyquery(AstPolyQuery *query); - -static void scope_enter(Scope* new_scope) { - current_scope = new_scope; -} - -static void scope_leave() { - current_scope = current_scope->parent; -} - -static SymresStatus symres_symbol(AstNode** symbol_node) { - OnyxToken* token = (*symbol_node)->token; - AstNode* res = symbol_resolve(current_scope, token); - - if (!res) { // :SymresStall - if (report_unresolved_symbols) { - token_toggle_end(token); - char *closest = find_closest_symbol_in_scope_and_parents(current_scope, token->text); - token_toggle_end(token); - - if (closest) onyx_report_error(token->pos, Error_Critical, "Unable to resolve symbol '%b'. 
Did you mean '%s'?", token->text, token->length, closest); - else onyx_report_error(token->pos, Error_Critical, "Unable to resolve symbol '%b'.", token->text, token->length); - - return Symres_Error; - } else { - return Symres_Yield_Macro; - } - - } else { - track_resolution_for_symbol_info(*symbol_node, res); - *symbol_node = res; - resolved_a_symbol = 1; - } - - return Symres_Success; -} - -static SymresStatus symres_struct_type(AstStructType* s_node) { - if (s_node->flags & Ast_Flag_Type_Is_Resolved) return Symres_Success; - - s_node->flags |= Ast_Flag_Type_Is_Resolved; - s_node->flags |= Ast_Flag_Comptime; - - assert(s_node->scope); - scope_enter(s_node->scope); - - if (s_node->min_size_) SYMRES(expression, &s_node->min_size_); - if (s_node->min_alignment_) SYMRES(expression, &s_node->min_alignment_); - - if (s_node->polymorphic_argument_types) { - assert(s_node->polymorphic_arguments); - - SymresStatus ss = Symres_Success, result; - fori (i, 0, (i64) bh_arr_length(s_node->polymorphic_argument_types)) { - result = symres_type(&s_node->polymorphic_argument_types[i]); - if (result > ss) ss = result; - - if (s_node->polymorphic_arguments[i].value) { - result = symres_expression(&s_node->polymorphic_arguments[i].value); - if (result > ss) ss = result; - } - } - } - - if (s_node->constraints.constraints) { - bh_arr_each(AstConstraint *, constraint, s_node->constraints.constraints) { - SYMRES(constraint, *constraint); - } - } - - fori (i, 0, bh_arr_length(s_node->members)) { - AstStructMember *member = s_node->members[i]; - track_declaration_for_symbol_info(member->token->pos, (AstNode *) member); - - if (member->type_node) { - SymresStatus ss = symres_type(&member->type_node); - if (ss != Symres_Success) { - s_node->flags &= ~Ast_Flag_Type_Is_Resolved; - scope_leave(); - return ss; - } - } - } - - scope_leave(); - return Symres_Success; -} - -static SymresStatus symres_union_type(AstUnionType* u_node) { - if (u_node->flags & Ast_Flag_Type_Is_Resolved) return 
Symres_Success; - u_node->flags |= Ast_Flag_Comptime; - - SYMRES(type, &u_node->tag_backing_type); - - if (u_node->meta_tags) { - bh_arr_each(AstTyped *, meta, u_node->meta_tags) { - SYMRES(expression, meta); - } - } - - u_node->flags |= Ast_Flag_Type_Is_Resolved; - - assert(u_node->scope); - scope_enter(u_node->scope); - - if (u_node->polymorphic_argument_types) { - assert(u_node->polymorphic_arguments); - - SymresStatus ss = Symres_Success, result; - fori (i, 0, (i64) bh_arr_length(u_node->polymorphic_argument_types)) { - result = symres_type(&u_node->polymorphic_argument_types[i]); - if (result > ss) ss = result; - - if (u_node->polymorphic_arguments[i].value) { - result = symres_expression(&u_node->polymorphic_arguments[i].value); - if (result > ss) ss = result; - } - } - } - - if (u_node->constraints.constraints) { - bh_arr_each(AstConstraint *, constraint, u_node->constraints.constraints) { - SYMRES(constraint, *constraint); - } - } - - fori (i, 0, bh_arr_length(u_node->variants)) { - AstUnionVariant *variant = u_node->variants[i]; - track_declaration_for_symbol_info(variant->token->pos, (AstNode *) variant); - - assert(variant->type_node); - SymresStatus ss = symres_type(&variant->type_node); - if (ss != Symres_Success) { - u_node->flags &= ~Ast_Flag_Type_Is_Resolved; - scope_leave(); - return ss; - } - } - - scope_leave(); - return Symres_Success; -} - -static SymresStatus symres_type(AstType** type) { - // Don't make this kill all symbol resolution if the type is null. 
- if (!type || !*type) return Symres_Success; - - switch ((*type)->kind) { - case Ast_Kind_Symbol: SYMRES(symbol, (AstNode **) type); break; - case Ast_Kind_Basic_Type: break; - case Ast_Kind_Type_Alias: SYMRES(type, &((AstTypeAlias *) *type)->to); break; - case Ast_Kind_Field_Access: { - SYMRES(field_access, (AstFieldAccess **) type); - break; - } - - case Ast_Kind_Pointer_Type: SYMRES(type, &((AstPointerType *) *type)->elem); break; - case Ast_Kind_Slice_Type: SYMRES(type, &((AstSliceType *) *type)->elem); break; - case Ast_Kind_DynArr_Type: SYMRES(type, &((AstDynArrType *) *type)->elem); break; - case Ast_Kind_VarArg_Type: SYMRES(type, &((AstVarArgType *) *type)->elem); break; - case Ast_Kind_Multi_Pointer_Type: SYMRES(type, &((AstMultiPointerType *) *type)->elem); break; - - case Ast_Kind_Function_Type: { - AstFunctionType* ftype = (AstFunctionType *) *type; - - if (ftype->param_count > 0) { - fori (i, 0, (i64) ftype->param_count) { - SYMRES(type, &ftype->params[i]); - } - } - - SYMRES(type, &ftype->return_type); - break; - } - - case Ast_Kind_Struct_Type: SYMRES(struct_type, (AstStructType *) *type); break; - case Ast_Kind_Union_Type: SYMRES(union_type, (AstUnionType *) *type); break; - case Ast_Kind_Array_Type: { - AstArrayType* a_node = (AstArrayType *) *type; - - if (a_node->count_expr) SYMRES(expression, &a_node->count_expr); - SYMRES(type, &a_node->elem); - break; - } - - case Ast_Kind_Enum_Type: break; - - case Ast_Kind_Poly_Struct_Type: { - AstPolyStructType* pst_node = (AstPolyStructType *) *type; - assert(pst_node->scope); - break; - } - - case Ast_Kind_Poly_Union_Type: { - AstPolyUnionType* put_node = (AstPolyUnionType *) *type; - assert(put_node->scope); - break; - } - - case Ast_Kind_Poly_Call_Type: { - AstPolyCallType* pc_node = (AstPolyCallType *) *type; - - SYMRES(type, &pc_node->callee); - - bh_arr_each(AstNode *, param, pc_node->params) { - if (node_is_type(*param)) { - SYMRES(type, (AstType **) param); - } else { - SYMRES(expression, 
(AstTyped **) param); - } - } - break; - } - - case Ast_Kind_Type_Compound: { - AstCompoundType* ctype = (AstCompoundType *) *type; - - bh_arr_each(AstType *, type, ctype->types) SYMRES(type, type); - break; - } - - case Ast_Kind_Alias: { - AstAlias* alias = (AstAlias *) *type; - SYMRES_INVISIBLE(type, alias, (AstType **) &alias->alias); - - break; - } - - case Ast_Kind_Typeof: { - AstTypeOf* type_of = (AstTypeOf *) *type; - SYMRES(expression, &type_of->expr); - break; - } - - case Ast_Kind_Distinct_Type: { - AstDistinctType *distinct = (AstDistinctType *) *type; - SYMRES(type, &distinct->base_type); - break; - } - - default: break; - } - - return Symres_Success; -} - -static SymresStatus symres_local(AstLocal** local) { - SYMRES(type, &(*local)->type_node); - - if ((*local)->token != NULL) - symbol_introduce(current_scope, (*local)->token, (AstNode *) *local); - - return Symres_Success; -} - -static SymresStatus symres_arguments(Arguments* args) { - bh_arr_each(AstTyped *, arg, args->values) - SYMRES(expression, arg); - - bh_arr_each(AstNamedValue *, named_arg, args->named_values) - SYMRES(expression, &(*named_arg)->value); - - return Symres_Success; -} - -static SymresStatus symres_call(AstCall** pcall) { - AstCall *call = *pcall; - SYMRES(expression, (AstTyped **) &call->callee); - SYMRES(arguments, &call->args); - - AstNode* callee = strip_aliases((AstNode *) call->callee); - if (callee->kind == Ast_Kind_Poly_Struct_Type || - callee->kind == Ast_Kind_Poly_Union_Type) { - *pcall = (AstCall *) convert_call_to_polycall(call); - SYMRES(type, (AstType **) pcall); - return Symres_Success; - } - - return Symres_Success; -} - -static SymresStatus symres_size_of(AstSizeOf* so) { - SYMRES(type, &so->type_node); - SYMRES(type, &so->so_ast_type); - - return Symres_Success; -} - -static SymresStatus symres_align_of(AstAlignOf* ao) { - SYMRES(type, &ao->type_node); - SYMRES(type, &ao->ao_ast_type); - - return Symres_Success; -} - -static SymresStatus 
symres_field_access(AstFieldAccess** fa) { - if ((*fa)->expr == NULL) return Symres_Error; - SYMRES(expression, &(*fa)->expr); - if ((*fa)->expr == NULL) return Symres_Error; - - AstTyped* expr = (AstTyped *) strip_aliases((AstNode *) (*fa)->expr); - - b32 force_a_lookup = 0; - - if (expr->kind == Ast_Kind_Struct_Type || - expr->kind == Ast_Kind_Poly_Struct_Type || - expr->kind == Ast_Kind_Enum_Type || - expr->kind == Ast_Kind_Type_Raw_Alias || - expr->kind == Ast_Kind_Union_Type || - expr->kind == Ast_Kind_Poly_Union_Type || - expr->kind == Ast_Kind_Interface) { - force_a_lookup = 1; - } - - // - // If we are trying to access a field on an alias, we have to make sure - // the alias is "ready" to have a symbol looked up inside of it. This means - // the alias should have passed symbol resolution. If not, force a lookup - // and yield if the alias was not ready. - if ((*fa)->expr->kind == Ast_Kind_Alias) { - if ((*fa)->expr->entity && (*fa)->expr->entity->state < Entity_State_Check_Types) { - force_a_lookup = 1; - } - } - - AstNode* resolution = try_symbol_resolve_from_node((AstNode *) expr, (*fa)->token); - if (resolution) { - track_resolution_for_symbol_info((AstNode *) *fa, resolution); - *((AstNode **) fa) = resolution; - - } else if (expr->kind == Ast_Kind_Package) { - if (report_unresolved_symbols) { - token_toggle_end((*fa)->token); - char *closest = find_closest_symbol_in_node((AstNode *) expr, (*fa)->token->text); - token_toggle_end((*fa)->token); - - AstPackage *package = (AstPackage *) strip_aliases((AstNode *) (*fa)->expr); - char *package_name = "unknown (compiler bug)"; - if (package && package->package) { - package_name = package->package->name; - } - - if (closest) { - onyx_report_error((*fa)->token->pos, Error_Critical, "'%b' was not found in package '%s'. 
Did you mean '%s'?", - (*fa)->token->text, - (*fa)->token->length, - package_name, - closest); - } else { - onyx_report_error((*fa)->token->pos, Error_Critical, "'%b' was not found in package '%s'. Perhaps it is defined in a file that was not loaded?", - (*fa)->token->text, - (*fa)->token->length, - package_name); - } - return Symres_Error; - - } else { - return Symres_Yield_Macro; - } - - } else if (force_a_lookup) { - if (context.cycle_detected || context.cycle_almost_detected >= 2) { - onyx_report_error((*fa)->token->pos, Error_Critical, "'%b' does not exist here. This is a bad error message.", - (*fa)->token->text, - (*fa)->token->length); - return Symres_Error; - } - - return Symres_Yield_Macro; - } - - return Symres_Success; -} - -static SymresStatus symres_compound(AstCompound* compound) { - bh_arr_each(AstTyped *, expr, compound->exprs) { - SYMRES(expression, expr); - } - - return Symres_Success; -} - -static SymresStatus symres_if_expression(AstIfExpression* if_expr) { - SYMRES(expression, &if_expr->cond); - SYMRES(expression, &if_expr->true_expr); - SYMRES(expression, &if_expr->false_expr); - return Symres_Success; -} - -static SymresStatus symres_pipe(AstBinaryOp** pipe) { - AstCall* call_node = (AstCall *) (*pipe)->right; - SYMRES(expression, (AstTyped **) &call_node); - SYMRES(expression, &(*pipe)->left); - - if (call_node->kind != Ast_Kind_Call) { - onyx_report_error((*pipe)->token->pos, Error_Critical, "Pipe operator expected call on right side."); - return Symres_Error; - } - - if ((*pipe)->left == NULL) return Symres_Error; - - // :EliminatingSymres - bh_arr_insertn(call_node->args.values, 0, 1); - call_node->args.values[0] = (AstTyped *) make_argument(context.ast_alloc, (*pipe)->left); - call_node->next = (*pipe)->next; - - // NOTE: Not a BinaryOp node - *pipe = (AstBinaryOp *) call_node; - - return Symres_Success; -} - -// CLEANUP: This is an experimental feature and might be removed in the future. 
-// I noticed a common pattern when writing in Onyx is something that looks like this: -// -// foo.member_function(^foo, ...) -// -// I decided it would be worth adding a bit of syntactic sugar for such as call. I -// decided to use the '->' operator for this purpose. The snippet below is the exact -// same as the snippet above (after the nodes have been processed by the function below) -// -// foo->member_function(...) -static SymresStatus symres_method_call(AstBinaryOp** mcall) { - // :EliminatingSymres - - // We have to check this no matter what, because if we return to symbol resolution - // the left hand side could be something different. In particular this was a problem - // when expanding `some_map["value"]->unwrap()`, as the left hand side expands to a - // macro. - SYMRES(expression, &(*mcall)->left); - - if (((*mcall)->flags & Ast_Flag_Has_Been_Symres) == 0) { - if ((*mcall)->left == NULL) return Symres_Error; - - if ((*mcall)->right->kind != Ast_Kind_Call) { - onyx_report_error((*mcall)->token->pos, Error_Critical, "'->' expected procedure call on right side."); - return Symres_Error; - } - - // - // This is a small hack that makes chaining method calls - // work. Because check_method_call replaces the method call - // and marks it as completed, if there are multiple references - // to the same method call node, one of them will be left dangling. - // To remedy this, an alias node an be placed around the method call - // so that when check_method_call replaces it, it is replaced - // within the alias, and all references are updated. 
- if ((*mcall)->left->kind == Ast_Kind_Method_Call) { - AstAlias *left_alias = onyx_ast_node_new(context.ast_alloc, sizeof(AstAlias), Ast_Kind_Alias); - left_alias->token = (*mcall)->left->token; - left_alias->alias = (*mcall)->left; - - (*mcall)->left = (AstTyped *) left_alias; - } - - AstFieldAccess* implicit_field_access = make_field_access(context.ast_alloc, (*mcall)->left, NULL); - implicit_field_access->token = ((AstCall *) (*mcall)->right)->callee->token; - ((AstCall *) (*mcall)->right)->callee = (AstTyped *) implicit_field_access; - (*mcall)->flags |= Ast_Flag_Has_Been_Symres; - } - - SYMRES(expression, (AstTyped **) &(*mcall)->right); - - // TODO: This doesn't look right? Does this ever happen? Check this... - if ((*mcall)->right->kind != Ast_Kind_Call) { - *mcall = (AstBinaryOp *) (*mcall)->right; - } - - return Symres_Success; -} - -static SymresStatus symres_unaryop(AstUnaryOp** unaryop) { - if ((*unaryop)->operation == Unary_Op_Cast) { - SYMRES(type, &(*unaryop)->type_node); - } - - SYMRES(expression, &(*unaryop)->expr); - - return Symres_Success; -} - -static SymresStatus symres_struct_literal(AstStructLiteral* sl) { - if (sl->stnode != NULL) SYMRES(expression, &sl->stnode); - - // :EliminatingSymres - sl->type_node = (AstType *) sl->stnode; - while (sl->type_node && sl->type_node->kind == Ast_Kind_Type_Alias) - sl->type_node = ((AstTypeAlias *) sl->type_node)->to; - - SYMRES(arguments, &sl->args); - - return Symres_Success; -} - -static SymresStatus symres_array_literal(AstArrayLiteral* al) { - if (al->atnode != NULL) SYMRES(expression, &al->atnode); - - // :EliminatingSymres - al->type_node = (AstType *) al->atnode; - while (al->type_node && al->type_node->kind == Ast_Kind_Type_Alias) - al->type_node = ((AstTypeAlias *) al->type_node)->to; - - bh_arr_each(AstTyped *, expr, al->values) - SYMRES(expression, expr); - - return Symres_Success; -} - -static SymresStatus symres_address_of(AstAddressOf** paof) { - AstAddressOf *aof = (AstAddressOf *) *paof; 
- SYMRES(expression, &aof->expr); - - AstTyped *expr = (AstTyped *) strip_aliases((AstNode *) aof->expr); - if (node_is_type((AstNode *) expr)) { - AstPointerType *pt = onyx_ast_node_new(context.ast_alloc, sizeof(AstPointerType), Ast_Kind_Pointer_Type); - pt->token = aof->token; - pt->elem = (AstType *) expr; - pt->next = aof->next; - *paof = (AstAddressOf *) pt; - SYMRES(type, (AstType **) &pt); - return Symres_Success; - } - - return Symres_Success; -} - -static SymresStatus symres_expression(AstTyped** expr) { - if (node_is_type((AstNode *) *expr)) { - SYMRES(type, (AstType **) expr); - return Symres_Success; - } - - switch ((*expr)->kind) { - case Ast_Kind_Symbol: - SYMRES(symbol, (AstNode **) expr); - - // HACK? - // I don't know how I never ran into this problem before, - // but when a symbol is resolved, there is never a "double - // check" that its type node is symbol resolved as well. - // This only proved to be an issue when using constraint - // sentinels, so I only added that case here. This should - // maybe be considered in the future because I think this - // lack of double checking could be causing other bugs. 
- if ((*expr)->kind == Ast_Kind_Constraint_Sentinel) { - SYMRES(type, &(*expr)->type_node); - } - break; - - case Ast_Kind_Binary_Op: - SYMRES(expression, &((AstBinaryOp *)(*expr))->left); - SYMRES(expression, &((AstBinaryOp *)(*expr))->right); - break; - - case Ast_Kind_Unary_Op: SYMRES(unaryop, (AstUnaryOp **) expr); break; - case Ast_Kind_Call: SYMRES(call, (AstCall **) expr); break; - case Ast_Kind_Argument: SYMRES(expression, &((AstArgument *) *expr)->value); break; - case Ast_Kind_Block: SYMRES(block, (AstBlock *) *expr); break; - case Ast_Kind_Dereference: SYMRES(expression, &((AstDereference *)(*expr))->expr); break; - case Ast_Kind_Field_Access: SYMRES(field_access, (AstFieldAccess **) expr); break; - case Ast_Kind_Pipe: SYMRES(pipe, (AstBinaryOp **) expr); break; - case Ast_Kind_Method_Call: SYMRES(method_call, (AstBinaryOp **) expr); break; - case Ast_Kind_Size_Of: SYMRES(size_of, (AstSizeOf *)*expr); break; - case Ast_Kind_Align_Of: SYMRES(align_of, (AstAlignOf *)*expr); break; - case Ast_Kind_Address_Of: SYMRES(address_of, (AstAddressOf **) expr); break; - case Ast_Kind_Alias: { - AstAlias *alias = (AstAlias *) *expr; - SYMRES_INVISIBLE(expression, alias, &alias->alias); - break; - } - - case Ast_Kind_Range_Literal: - SYMRES(expression, &((AstRangeLiteral *)(*expr))->low); - SYMRES(expression, &((AstRangeLiteral *)(*expr))->high); - - // :EliminatingSymres - SYMRES(type, &builtin_range_type); - (*expr)->type_node = builtin_range_type; - break; - - case Ast_Kind_Polymorphic_Proc: - if (((AstFunction *) *expr)->captures) { - ((AstFunction *) *expr)->scope_to_lookup_captured_values = current_scope; - } - break; - - case Ast_Kind_Function: - if (((AstFunction *) *expr)->captures) { - ((AstFunction *) *expr)->scope_to_lookup_captured_values = current_scope; - } - - SYMRES(type, &(*expr)->type_node); - break; - - case Ast_Kind_NumLit: - SYMRES(type, &(*expr)->type_node); - break; - - case Ast_Kind_StrLit: { - AstStrLit* str = (AstStrLit *) *expr; - if 
(str->is_cstr) { - SYMRES(type, &builtin_cstring_type); - str->type_node = builtin_cstring_type; - - } else { - SYMRES(type, &builtin_string_type); - str->type_node = builtin_string_type; - } - break; - } - - case Ast_Kind_Slice: - case Ast_Kind_Subscript: - SYMRES(expression, &((AstSubscript *)(*expr))->addr); - SYMRES(expression, &((AstSubscript *)(*expr))->expr); - break; - - case Ast_Kind_Struct_Literal: - SYMRES(struct_literal, (AstStructLiteral *)(*expr)); - break; - - case Ast_Kind_Array_Literal: - SYMRES(array_literal, (AstArrayLiteral *)(*expr)); - break; - - case Ast_Kind_Directive_Solidify: - SYMRES(directive_solidify, (AstDirectiveSolidify **) expr); - break; - - case Ast_Kind_Directive_Defined: - SYMRES(directive_defined, (AstDirectiveDefined **) expr); - break; - - case Ast_Kind_Compound: - SYMRES(compound, (AstCompound *) *expr); - break; - - case Ast_Kind_Package: - SYMRES(package, (AstPackage *) *expr); - break; - - case Ast_Kind_If_Expression: - SYMRES(if_expression, (AstIfExpression *) *expr); - break; - - case Ast_Kind_Directive_Insert: - SYMRES(directive_insert, (AstDirectiveInsert *) *expr); - break; - - case Ast_Kind_Do_Block: { - Scope* old_current_scope = current_scope; - SYMRES(type, &(*expr)->type_node); - SYMRES(block, ((AstDoBlock *) *expr)->block); - current_scope = old_current_scope; - break; - } - - case Ast_Kind_Param: - if ((*expr)->flags & Ast_Flag_Param_Symbol_Dirty) { - assert((*expr)->token->type == Token_Type_Symbol); - *expr = (AstTyped *) make_symbol(context.ast_alloc, (*expr)->token); - SYMRES(expression, expr); - } - break; - - case Ast_Kind_Constraint_Sentinel: { - AstTyped *sentinel = (AstTyped *) *expr; - SYMRES(type, &sentinel->type_node); - break; - } - - case Ast_Kind_Directive_Export_Name: { - AstDirectiveExportName *ename = (AstDirectiveExportName *) *expr; - SYMRES(expression, (AstTyped **) &ename->func); - break; - } - - case Ast_Kind_Switch: { - SYMRES(switch, (AstSwitch *) *expr); - break; - } - - default: 
break; - } - - return Symres_Success; -} - -static SymresStatus symres_return(AstReturn* ret) { - if (ret->expr) - SYMRES(expression, &ret->expr); - - return Symres_Success; -} - -static SymresStatus symres_if(AstIfWhile* ifnode) { - if (ifnode->kind == Ast_Kind_Static_If) { - if ((ifnode->flags & Ast_Flag_Static_If_Resolved) == 0) { - if (context.cycle_detected) { - onyx_report_error(ifnode->token->pos, Error_Waiting_On, "Waiting on static if resolution."); - return Symres_Error; - } else { - return Symres_Yield_Macro; - } - } - - if (static_if_resolution(ifnode)) { - if (ifnode->true_stmt != NULL) SYMRES(statement, (AstNode **) &ifnode->true_stmt, NULL); - - } else { - if (ifnode->false_stmt != NULL) SYMRES(statement, (AstNode **) &ifnode->false_stmt, NULL); - } - - } else { - if (ifnode->initialization != NULL) { - ifnode->scope = scope_create(context.ast_alloc, current_scope, ifnode->token->pos); - scope_enter(ifnode->scope); - - SYMRES(statement_chain, &ifnode->initialization); - } - - SYMRES(expression, &ifnode->cond); - - // NOTE: These are statements because "elseif" means the `false_stmt` has an if node. 
- if (ifnode->true_stmt != NULL) SYMRES(statement, (AstNode **) &ifnode->true_stmt, NULL); - if (ifnode->false_stmt != NULL) SYMRES(statement, (AstNode **) &ifnode->false_stmt, NULL); - - if (ifnode->initialization != NULL) scope_leave(); - } - - return Symres_Success; -} - -static SymresStatus symres_while(AstIfWhile* whilenode) { - if (whilenode->initialization != NULL) { - whilenode->scope = scope_create(context.ast_alloc, current_scope, whilenode->token->pos); - scope_enter(whilenode->scope); - - SYMRES(statement_chain, &whilenode->initialization); - } - - SYMRES(expression, &whilenode->cond); - - if (whilenode->true_stmt) SYMRES(block, whilenode->true_stmt); - if (whilenode->false_stmt) SYMRES(block, whilenode->false_stmt); - - if (whilenode->initialization != NULL) scope_leave(); - - return Symres_Success; -} - -static SymresStatus symres_for(AstFor* fornode) { - fornode->scope = scope_create(context.ast_alloc, current_scope, fornode->token->pos); - scope_enter(fornode->scope); - SYMRES(expression, &fornode->iter); - SYMRES(local, &fornode->var); - - // Right now, the index variable is optional - if (fornode->index_var) { - SYMRES(local, &fornode->index_var); - } - - SYMRES(block, fornode->stmt); - scope_leave(); - - return Symres_Success; -} - -static SymresStatus symres_case(AstSwitchCase *casenode) { - if (!casenode->is_default) { - bh_arr_each(AstTyped *, expr, casenode->values) { - SYMRES(expression, expr); - } - } - - if (casenode->capture) { - if (casenode->scope == NULL) { - casenode->scope = scope_create(context.ast_alloc, current_scope, casenode->token->pos); - symbol_introduce(casenode->scope, casenode->capture->token, (AstNode *) casenode->capture); - } - - scope_enter(casenode->scope); - } - - if (casenode->body_is_expr) { - SYMRES(expression, &casenode->expr); - } else { - SYMRES(block, casenode->block); - } - - if (casenode->capture) { - scope_leave(); - } - - return Symres_Success; -} - -static SymresStatus symres_switch(AstSwitch* switchnode) 
{ - if (switchnode->initialization != NULL) { - switchnode->scope = scope_create(context.ast_alloc, current_scope, switchnode->token->pos); - scope_enter(switchnode->scope); - - SYMRES(statement_chain, &switchnode->initialization); - } - - SYMRES(expression, &switchnode->expr); - - if (switchnode->cases == NULL) { - SYMRES(block, switchnode->case_block); - } else { - bh_arr_each(AstSwitchCase *, pcase, switchnode->cases) { - SYMRES(case, *pcase); - } - - if (switchnode->default_case) { - if (switchnode->is_expr) { - SYMRES(expression, (AstTyped **) &switchnode->default_case); - } else { - SYMRES(block, switchnode->default_case); - } - } - } - - if (switchnode->switch_kind == Switch_Kind_Use_Equals && switchnode->case_exprs) { - bh_arr_each(CaseToBlock, ctb, switchnode->case_exprs) { - SYMRES(expression, (AstTyped **) &ctb->comparison); - } - } - - if (switchnode->initialization != NULL) scope_leave(); - - return Symres_Success; -} - -static SymresStatus symres_directive_solidify(AstDirectiveSolidify** psolid) { - AstDirectiveSolidify* solid = *psolid; - - SYMRES(expression, (AstTyped **) &solid->poly_proc); - if (solid->poly_proc && solid->poly_proc->kind == Ast_Kind_Directive_Solidify) { - AstFunction* potentially_resolved_proc = (AstFunction *) ((AstDirectiveSolidify *) solid->poly_proc)->resolved_proc; - if (!potentially_resolved_proc) return Symres_Yield_Micro; - - solid->poly_proc = potentially_resolved_proc; - } - - if (!solid->poly_proc || solid->poly_proc->kind != Ast_Kind_Polymorphic_Proc) { - onyx_report_error(solid->token->pos, Error_Critical, "Expected polymorphic procedure in #solidify directive."); - return Symres_Error; - } - - bh_arr_each(AstPolySolution, sln, solid->known_polyvars) { - // HACK: This assumes that 'ast_type' and 'value' are at the same offset. 
- SYMRES(expression, &sln->value); - } - - return Symres_Success; -} - -static SymresStatus symres_directive_defined(AstDirectiveDefined** pdefined) { - AstDirectiveDefined* defined = *pdefined; - - b32 has_to_be_resolved = context.cycle_almost_detected >= 1; - - onyx_errors_disable(); - resolved_a_symbol = 0; - SymresStatus ss = symres_expression(&defined->expr); - if (has_to_be_resolved && ss != Symres_Success && !resolved_a_symbol) { - // The symbol definitely was not found and there is no chance that it could be found. - defined->is_defined = 0; - - onyx_errors_enable(); - return Symres_Success; - } - - if (ss == Symres_Success) { - defined->is_defined = 1; - - onyx_errors_enable(); - return Symres_Success; - } - - onyx_errors_enable(); - return Symres_Yield_Macro; -} - -static SymresStatus symres_directive_insert(AstDirectiveInsert* insert) { - SYMRES(expression, &insert->code_expr); - bh_arr_each(AstTyped *, pexpr, insert->binding_exprs) { - SYMRES(expression, pexpr); - } - return Symres_Success; -} - -static SymresStatus symres_capture_block(AstCaptureBlock *block, Scope *captured_scope) { - bh_arr_each(AstCaptureLocal *, capture, block->captures) { - OnyxToken *token = (*capture)->token; - AstTyped *resolved = (AstTyped *) symbol_resolve(captured_scope, token); - - if (!resolved) { - // Should this do a yield? In there any case that that would make sense? 
- onyx_report_error(token->pos, Error_Critical, "'%b' is not found in the enclosing scope.", - token->text, token->length); - return Symres_Error; - } - - (*capture)->captured_value = resolved; - } - - bh_arr_each(AstCaptureLocal *, capture, block->captures) { - symbol_introduce(current_scope, (*capture)->token, (AstNode *) *capture); - } - - return Symres_Success; -} - -static SymresStatus symres_statement(AstNode** stmt, b32 *remove) { - if (remove) *remove = 0; - - switch ((*stmt)->kind) { - case Ast_Kind_Return: SYMRES(return, (AstReturn *) *stmt); break; - case Ast_Kind_If: SYMRES(if, (AstIfWhile *) *stmt); break; - case Ast_Kind_Static_If: SYMRES(if, (AstIfWhile *) *stmt); break; - case Ast_Kind_While: SYMRES(while, (AstIfWhile *) *stmt); break; - case Ast_Kind_For: SYMRES(for, (AstFor *) *stmt); break; - case Ast_Kind_Switch: SYMRES(switch, (AstSwitch *) *stmt); break; - case Ast_Kind_Call: SYMRES(call, (AstCall **) stmt); break; - case Ast_Kind_Argument: SYMRES(expression, (AstTyped **) &((AstArgument *) *stmt)->value); break; - case Ast_Kind_Block: SYMRES(block, (AstBlock *) *stmt); break; - case Ast_Kind_Defer: SYMRES(statement, &((AstDefer *) *stmt)->stmt, NULL); break; - case Ast_Kind_Switch_Case: SYMRES(case, (AstSwitchCase *) *stmt); break; - case Ast_Kind_Jump: break; - case Ast_Kind_Directive_Remove: break; - - case Ast_Kind_Local: - // if (remove) *remove = 1; - SYMRES(local, (AstLocal **) stmt); - break; - - case Ast_Kind_Import: - if (remove) *remove = 1; - break; - - default: SYMRES(expression, (AstTyped **) stmt); break; - } - - return Symres_Success; -} - -static SymresStatus symres_statement_chain(AstNode** walker) { - b32 remove = 0; - - while (*walker) { - SYMRES(statement, walker, &remove); - if (remove) { - remove = 0; - AstNode* tmp = (*walker)->next; - (*walker)->next = NULL; - (*walker) = tmp; - - } else { - walker = &(*walker)->next; - } - } - return Symres_Success; -} - -static SymresStatus symres_block(AstBlock* block) { - if 
(block->rules & Block_Rule_New_Scope) { - if (block->scope == NULL) - block->scope = scope_create(context.ast_alloc, current_scope, block->token->pos); - - scope_enter(block->scope); - } - - if (block->binding_scope != NULL) - scope_include(current_scope, block->binding_scope, block->token->pos); - - if (block->quoted_block_capture_scope != NULL) - scope_include(current_scope, block->quoted_block_capture_scope, block->token->pos); - - if (block->body) { - AstNode** start = &block->body; - fori (i, 0, block->statement_idx) { - start = &(*start)->next; - } - - b32 remove = 0; - - while (*start) { - SymresStatus cs = symres_statement(start, &remove); - - if (remove) { - remove = 0; - AstNode* tmp = (*start)->next; - (*start)->next = NULL; - (*start) = tmp; - - } else { - switch (cs) { - case Symres_Success: - start = &(*start)->next; - block->statement_idx++; - break; - - default: - return cs; - } - } - } - - block->statement_idx = 0; - } - - if (block->rules & Block_Rule_New_Scope) - scope_leave(); - - return Symres_Success; -} - -SymresStatus symres_function_header(AstFunction* func) { - func->flags |= Ast_Flag_Comptime; - - if (!(func->flags & Ast_Flag_Function_Is_Lambda) && func->captures) { - onyx_report_error(func->captures->token->pos, Error_Critical, "This procedure cannot capture values as it is not defined in an expression."); - return Symres_Error; - } - - if (func->captures && !func->scope_to_lookup_captured_values) { - if (func->flags & Ast_Flag_Function_Is_Lambda_Inside_PolyProc) return Symres_Complete; - - return Symres_Yield_Macro; - } - - if (func->scope == NULL) - func->scope = scope_create(context.ast_alloc, current_scope, func->token->pos); - - if (func->constraints.constraints != NULL && func->constraints.constraints_met == 0) { - bh_arr_each(AstConstraint *, constraint, func->constraints.constraints) { - SYMRES(constraint, *constraint); - } - - // Return early here to finish checking constraints in the checker. 
- // Will resume here after constraints have been met. - return Symres_Success; - } - - scope_enter(func->scope); - - bh_arr_each(AstParam, param, func->params) { - if (param->default_value != NULL) { - SYMRES(expression, ¶m->default_value); - if (onyx_has_errors()) return Symres_Error; - } - } - - bh_arr_each(AstParam, param, func->params) { - symbol_introduce(current_scope, param->local->token, (AstNode *) param->local); - } - - bh_arr_each(AstParam, param, func->params) { - if (param->local->type_node != NULL) { - SYMRES_INVISIBLE(type, param->local, ¶m->local->type_node); - } - } - - if (potentially_convert_function_to_polyproc(func)) { - return Symres_Complete; - } - - if (func->nodes_that_need_entities_after_clone && bh_arr_length(func->nodes_that_need_entities_after_clone) > 0 && func->entity) { - bh_arr_each(AstNode *, node, func->nodes_that_need_entities_after_clone) { - // This makes a lot of assumptions about how these nodes are being processed, - // and I don't want to start using this with other nodes without considering - // what the ramifications of that is. - assert((*node)->kind == Ast_Kind_Static_If || (*node)->kind == Ast_Kind_File_Contents - || (*node)->kind == Ast_Kind_Function || (*node)->kind == Ast_Kind_Polymorphic_Proc); - - // Need to use current_scope->parent because current_scope is the function body scope. 
- Scope *scope = current_scope->parent; - - if ((*node)->kind == Ast_Kind_Static_If) { - AstIf *static_if = (AstIf *) *node; - assert(static_if->defined_in_scope); - scope = static_if->defined_in_scope; - - if (func->poly_scope) { - scope = scope_create(context.ast_alloc, scope, static_if->token->pos); - scope_include(scope, func->poly_scope, static_if->token->pos); - } - } - - add_entities_for_node(NULL, *node, scope, func->entity->package); - } - - bh_arr_set_length(func->nodes_that_need_entities_after_clone, 0); - } - - if (func->deprecated_warning) { - SYMRES(expression, (AstTyped **) &func->deprecated_warning); - } - - bh_arr_each(AstTyped *, pexpr, func->tags) { - SYMRES(expression, pexpr); - } - - if (func->foreign.import_name) { - SYMRES(expression, &func->foreign.module_name); - SYMRES(expression, &func->foreign.import_name); - } - - if (func->captures) { - SYMRES(capture_block, func->captures, func->scope_to_lookup_captured_values); - } - - SYMRES(type, &func->return_type); - - if (context.options->stack_trace_enabled) { - OnyxToken *stack_trace_token = bh_alloc_item(context.ast_alloc, OnyxToken); - stack_trace_token->type = Token_Type_Symbol; - stack_trace_token->length = 13; - stack_trace_token->text = bh_strdup(context.ast_alloc, "__stack_trace "); - stack_trace_token->pos = func->token->pos; - - if (!func->stack_trace_local) { - assert(builtin_stack_trace_type); - func->stack_trace_local = make_local(context.ast_alloc, stack_trace_token, builtin_stack_trace_type); - func->stack_trace_local->flags |= Ast_Flag_Decl_Followed_By_Init; - } - - SYMRES(local, &func->stack_trace_local); - } - - scope_leave(); - - return Symres_Success; -} - -SymresStatus symres_function(AstFunction* func) { - if (func->entity_header && func->entity_header->state < Entity_State_Check_Types) return Symres_Yield_Macro; - if (func->kind == Ast_Kind_Polymorphic_Proc) return Symres_Complete; - if (func->flags & Ast_Flag_Function_Is_Lambda_Inside_PolyProc) return Symres_Complete; - 
assert(func->scope); - - scope_enter(func->scope); - - if ((func->flags & Ast_Flag_Has_Been_Symres) == 0) { - // :EliminatingSymres - bh_arr_each(AstParam, param, func->params) { - // CLEANUP: Currently, in order to 'use' parameters, the type must be completely - // resolved and built. This is excessive because all that should need to be known - // is the names of the members, since all that happens is implicit field accesses - // are placed in the scope. So instead, there should be a way to just query all the - // member names in the structure, without needing to know their type. This would be - // easy if it were not for 'use' statements in structs. It is made even more complicated - // by this situtation: - // - // Foo :: struct (T: type_expr) { - // use t : T; - // - // something_else := 5 + 6 * 8; - // } - // - // The 'use t : T' member requires completely knowing the type of T, to know which - // members should be brought in. At the moment, that requires completely building the - // type of Foo($T). 
- if (param->is_used && !param->use_processed) { - if (param->local->type_node != NULL && param->local->type == NULL) { - param->local->type = type_build_from_ast(context.ast_alloc, param->local->type_node); - - if (param->local->type == NULL) return Symres_Yield_Macro; - } - - if (type_is_struct(param->local->type)) { - Type* st; - if (param->local->type->kind == Type_Kind_Struct) { - st = param->local->type; - } else { - st = param->local->type->Pointer.elem; - } - - if (st->Struct.status != SPS_Uses_Done) return Symres_Yield_Macro; - - fori (i, 0, shlen(st->Struct.members)) { - StructMember* value = st->Struct.members[i].value; - AstFieldAccess* fa = make_field_access(context.ast_alloc, (AstTyped *) param->local, value->name); - symbol_raw_introduce(current_scope, value->name, param->local->token->pos, (AstNode *) fa); - } - - param->use_processed = 1; - - } else if (param->local->type != NULL) { - onyx_report_error(param->local->token->pos, Error_Critical, "Can only 'use' structures or pointers to structures."); - - } else { - // :ExplicitTyping - onyx_report_error(param->local->token->pos, Error_Critical, "Cannot deduce type of parameter '%b'; Try adding it explicitly.", - param->local->token->text, - param->local->token->length); - } - } - } - - func->flags |= Ast_Flag_Has_Been_Symres; - } - - SYMRES(block, func->body); - - scope_leave(); - return Symres_Success; -} - -static SymresStatus symres_global(AstGlobal* global) { - SYMRES(type, &global->type_node); - return Symres_Success; -} - -static SymresStatus symres_overloaded_function(AstOverloadedFunction* ofunc) { - bh_arr_each(OverloadOption, overload, ofunc->overloads) { - SYMRES(expression, &overload->option); - } - - if (ofunc->expected_return_node) { - SYMRES(type, &ofunc->expected_return_node); - } - - return Symres_Success; -} - -static SymresStatus symres_package(AstPackage* package) { - if (package->package == NULL) { - if (!package->package_name) return Symres_Error; - - package->package = 
package_lookup(package->package_name); - } - - if (package->package) { - package_mark_as_used(package->package); - return Symres_Success; - } else { - if (report_unresolved_symbols) { - onyx_report_error(package->token->pos, Error_Critical, - "Package '%s' not found in included source files.", - package->package_name); - return Symres_Error; - } else { - return Symres_Yield_Macro; - } - } -} - -static SymresStatus symres_enum(AstEnumType* enum_node) { - if (!enum_node->backing_type) { - if (enum_node->backing == NULL) return Symres_Error; - if (enum_node->backing->kind == Ast_Kind_Symbol) SYMRES(symbol, (AstNode **) &enum_node->backing); - - enum_node->backing_type = type_build_from_ast(context.ast_alloc, enum_node->backing); - } - - if (enum_node->scope == NULL) { - enum_node->scope = scope_create(context.ast_alloc, current_scope, enum_node->token->pos); - - symbol_raw_introduce(enum_node->scope, "__backing_type", enum_node->token->pos, (AstNode *) enum_node->backing); - - type_build_from_ast(context.ast_alloc, (AstType *) enum_node); - } - - scope_enter(enum_node->scope); - - // :EliminatingSymres - u64 next_assign_value = enum_node->is_flags ? 
1 : 0; - bh_arr_each(AstEnumValue *, value, enum_node->values) { - if ((*value)->flags & Ast_Flag_Has_Been_Checked) continue; - - (*value)->type = enum_node->etcache; - (*value)->flags |= Ast_Flag_Comptime; - - if ((*value)->value != NULL) { - SYMRES(expression, &(*value)->value); - - if ((*value)->value->kind == Ast_Kind_Enum_Value) { - (*value)->value = ((AstEnumValue *) (*value)->value)->value; - (*value)->value->type = enum_node->etcache; - } - - if ((*value)->value->kind == Ast_Kind_NumLit) { - AstNumLit *n_value = (AstNumLit *) (*value)->value; - resolve_expression_type((AstTyped *) n_value); - - if (type_is_small_integer(n_value->type)) { - next_assign_value = n_value->value.i; - } else if (type_is_integer(n_value->type)) { - next_assign_value = n_value->value.l; - } else { - onyx_report_error((*value)->token->pos, Error_Critical, "expected numeric integer literal for enum initialization, got '%s'", type_get_name(n_value->type)); - return Symres_Error; - } - - n_value->type = enum_node->etcache; - - } else { - if ((*value)->entity == NULL) { - add_entities_for_node(NULL, (AstNode *) (*value), enum_node->scope, NULL); - } - - if (context.cycle_detected) { - onyx_report_error((*value)->token->pos, Error_Critical, "Expected compile time known value for enum initialization."); - return Symres_Error; - } - - return Symres_Yield_Macro; - } - - } else { - AstNumLit* num = make_int_literal(context.ast_alloc, next_assign_value); - num->type = enum_node->etcache; - - (*value)->value = (AstTyped *) num; - } - - symbol_introduce(enum_node->scope, (*value)->token, (AstNode *) (*value)); - - (*value)->flags |= Ast_Flag_Comptime | Ast_Flag_Has_Been_Checked; - - if (enum_node->is_flags) { - next_assign_value <<= 1; - } else { - next_assign_value++; - } - } - - scope_leave(); - - // HACK this ensure that you can only lookup symbols in an Enum that are actually defined in the enum. 
- // However, during the symbol resolution of the values in an enum, they need to be able to see the - // enclosing scope. - enum_node->scope->parent = NULL; - - return Symres_Success; -} - -static SymresStatus symres_memres_type(AstMemRes** memres) { - SYMRES(type, &(*memres)->type_node); - return Symres_Success; -} - -static SymresStatus symres_memres(AstMemRes** memres) { - if ((*memres)->initial_value != NULL) { - SYMRES(expression, &(*memres)->initial_value); - } - - bh_arr_each(AstTyped *, ptag, (*memres)->tags) { - SYMRES(expression, ptag); - } - - return Symres_Success; -} - -static SymresStatus symres_struct_defaults(AstType* t) { - if (t->kind != Ast_Kind_Struct_Type) return Symres_Error; - - AstStructType* st = (AstStructType *) t; - if (st->scope) scope_enter(st->scope); - - if (st->meta_tags) { - bh_arr_each(AstTyped *, meta, st->meta_tags) { - SYMRES(expression, meta); - } - } - - bh_arr_each(AstStructMember *, smem, st->members) { - if ((*smem)->initial_value != NULL) { - SYMRES(expression, &(*smem)->initial_value); - } - - if ((*smem)->meta_tags != NULL) { - bh_arr_each(AstTyped *, meta, (*smem)->meta_tags) { - SYMRES(expression, meta); - } - } - } - - if (st->scope) scope_leave(); - return Symres_Success; -} - -static SymresStatus symres_polyproc(AstFunction* pp) { - pp->flags |= Ast_Flag_Comptime; - pp->parent_scope_of_poly_proc = current_scope; - - bh_arr_each(AstPolyParam, p, pp->poly_params) { - if (p->kind != PSK_Value) continue; - - AstParam *param = &pp->params[p->idx]; - if (param->default_value != NULL) { - SYMRES(expression, ¶m->default_value); - if (onyx_has_errors()) return Symres_Error; - } - } - - return Symres_Success; -} - -static SymresStatus symres_static_if(AstIf* static_if) { - if (static_if->flags & Ast_Flag_Dead) return Symres_Complete; - - SYMRES(expression, &static_if->cond); - return Symres_Success; -} - -static SymresStatus symres_process_directive(AstNode* directive) { - // :EliminatingSymres - switch (directive->kind) { 
- case Ast_Kind_Directive_Add_Overload: { - AstDirectiveAddOverload *add_overload = (AstDirectiveAddOverload *) directive; - - SYMRES(expression, (AstTyped **) &add_overload->overloaded_function); - if (add_overload->overloaded_function == NULL) return Symres_Error; // NOTE: Error message will already be generated - - AstOverloadedFunction *ofunc = (AstOverloadedFunction *) strip_aliases((AstNode *) add_overload->overloaded_function); - if (ofunc->kind == Ast_Kind_Symbol) { - if (context.cycle_detected) { - onyx_report_error(add_overload->token->pos, Error_Waiting_On, "Waiting for matched procedure to be known."); - return Symres_Error; - } - - return Symres_Yield_Macro; - } - - if (ofunc->kind != Ast_Kind_Overloaded_Function) { - onyx_report_error(add_overload->token->pos, Error_Critical, "#match directive expects a matched procedure, got '%s'.", - onyx_ast_node_kind_string(ofunc->kind)); - return Symres_Error; - } - - if (ofunc->locked) { - onyx_report_error(add_overload->token->pos, Error_Critical, "Cannot add match option here as the original #match was declared as #locked."); - onyx_report_error(ofunc->token->pos, Error_Critical, "Here is the original #match."); - return Symres_Error; - } - - if (ofunc->only_local_functions) { - if (!token_same_file(add_overload->token, ofunc->token)) { - onyx_report_error(add_overload->token->pos, Error_Critical, "Cannot add match option here as this option is not within the same file as the original #match declared with #local."); - onyx_report_error(ofunc->token->pos, Error_Critical, "Here is the original #match."); - return Symres_Error; - } - } - - SYMRES(expression, (AstTyped **) &add_overload->overload); - add_overload->overload->flags &= ~Ast_Flag_Function_Is_Lambda; - - add_overload_option(&ofunc->overloads, add_overload->order, add_overload->overload); - break; - } - - case Ast_Kind_Directive_Operator: { - AstDirectiveOperator *operator = (AstDirectiveOperator *) directive; - SYMRES(expression, &operator->overload); 
- if (!operator->overload) return Symres_Error; - - AstFunction* overload = get_function_from_node((AstNode *) operator->overload); - if (overload == NULL) { - onyx_report_error(operator->token->pos, Error_Critical, "This cannot be used as an operator overload."); - return Symres_Error; - } - - overload->flags &= ~Ast_Flag_Function_Is_Lambda; - - // First try unary operator overloading - // CLEANUP This is not written well at all... - if (operator->operator == Binary_Op_Count) { - if (bh_arr_length(overload->params) != 1) { - onyx_report_error(operator->token->pos, Error_Critical, "Expected exactly 1 argument for unary operator overload."); - return Symres_Error; - } - - UnaryOp unop = Unary_Op_Count; - if (operator->operator_token->type == (TokenType) '?') { - unop = Unary_Op_Try; - } - - if (unop == Unary_Op_Count) { - onyx_report_error(operator->token->pos, Error_Critical, "Unknown operator."); - return Symres_Error; - } - - add_overload_option(&unary_operator_overloads[unop], operator->order, operator->overload); - return Symres_Success; - } - - if (operator->operator != Binary_Op_Subscript_Equals && bh_arr_length(overload->params) != 2) { - onyx_report_error(operator->token->pos, Error_Critical, "Expected exactly 2 arguments for binary operator overload."); - return Symres_Error; - } - - add_overload_option(&operator_overloads[operator->operator], operator->order, operator->overload); - break; - } - - case Ast_Kind_Directive_Export: { - AstDirectiveExport *export = (AstDirectiveExport *) directive; - SYMRES(expression, &export->export); - SYMRES(expression, &export->export_name_expr); - - if (export->export->kind == Ast_Kind_Polymorphic_Proc) { - onyx_report_error(export->token->pos, Error_Critical, "Cannot export a polymorphic function."); - return Symres_Error; - } - - if (export->export->kind == Ast_Kind_Function) { - AstFunction *func = (AstFunction *) export->export; - func->is_exported = 1; - - if (func->is_foreign) { - 
onyx_report_error(export->token->pos, Error_Critical, "Cannot export a foreign function."); - return Symres_Error; - } - - if (func->is_intrinsic) { - onyx_report_error(export->token->pos, Error_Critical, "Cannot export an intrinsic function."); - return Symres_Error; - } - } - - break; - } - - case Ast_Kind_Directive_Init: { - AstDirectiveInit *init = (AstDirectiveInit *) directive; - SYMRES(expression, &init->init_proc); - - if (init->dependencies) { - bh_arr_each(AstDirectiveInit *, dependency, init->dependencies) { - SYMRES(expression, (AstTyped **) dependency); - } - } - - break; - } - - case Ast_Kind_Directive_Library: { - AstDirectiveLibrary *library = (AstDirectiveLibrary *) directive; - SYMRES(expression, &library->library_symbol); - break; - } - - case Ast_Kind_Injection: { - AstInjection *inject = (AstInjection *) directive; - - if (inject->dest == NULL) { - if (inject->full_loc == NULL) return Symres_Error; - - AstTyped *full_loc = (AstTyped *) strip_aliases((AstNode *) inject->full_loc); - - if (full_loc->kind != Ast_Kind_Field_Access) { - onyx_report_error(inject->token->pos, Error_Critical, "#inject expects a dot expression (a.b) for the injection point."); - return Symres_Error; - } - - AstFieldAccess *acc = (AstFieldAccess *) full_loc; - inject->dest = acc->expr; - inject->symbol = acc->token; - } - - SYMRES(expression, &inject->dest); - SYMRES(expression, &inject->to_inject); - break; - } - - case Ast_Kind_Directive_This_Package: { - AstPackage *package = (AstPackage *) directive; - package->kind = Ast_Kind_Package; - package->package = current_entity->package; - return Symres_Complete; - } - - default: assert("Bad directive in symres_process_directive" && 0); break; - } - - return Symres_Success; -} - -static SymresStatus symres_macro(AstMacro* macro) { - macro->flags |= Ast_Flag_Comptime; - - if (macro->body->kind == Ast_Kind_Function) { - SYMRES(function_header, (AstFunction *) macro->body); - } - else if (macro->body->kind == 
Ast_Kind_Polymorphic_Proc) { - SYMRES(polyproc, (AstFunction *) macro->body); - } - - return Symres_Success; -} - -static SymresStatus symres_interface(AstInterface* interface) { - bh_arr_each(InterfaceParam, param, interface->params) { - SYMRES(type, ¶m->value_type); - } - - return Symres_Success; -} - -static SymresStatus symres_constraint(AstConstraint* constraint) { - switch (constraint->phase) { - case Constraint_Phase_Cloning_Expressions: - case Constraint_Phase_Waiting_To_Be_Queued: { - SYMRES(expression, (AstTyped **) &constraint->interface); - - bh_arr_each(AstTyped *, arg, constraint->args) { - SYMRES(expression, arg); - } - - return Symres_Success; - } - - case Constraint_Phase_Checking_Expressions: { - SymresStatus ss; - onyx_errors_disable(); - - fori (i, constraint->expr_idx, bh_arr_length(constraint->exprs)) { - InterfaceConstraint* ic = &constraint->exprs[i]; - - // Most of this logic was directly copied from the - // check_constraint code. There might be a better - // way to factor this? 
- ss = symres_expression(&ic->expr); - if (ss == Symres_Yield_Macro) { - onyx_errors_enable(); - return ss; - } - - if (ss == Symres_Error && !ic->invert_condition) { - goto constraint_error; - } - - if (ss == Symres_Success && ic->invert_condition) { - goto constraint_error; - } - - if (ic->expected_type_expr) { - ss = symres_type(&ic->expected_type_expr); - if (ss == Symres_Yield_Macro) { - onyx_errors_enable(); - return ss; - } - } - - continue; - - constraint_error: - onyx_errors_enable(); - *constraint->report_status = Constraint_Check_Status_Failed; - return Symres_Error; - } - - onyx_errors_enable(); - return Symres_Success; - } - - default: break; - } - - return Symres_Success; -} - -static SymresStatus symres_polyquery(AstPolyQuery *query) { - // :EliminatingSymres - query->successful_symres = 0; - - if (query->function_header->scope == NULL) - query->function_header->scope = scope_create(context.ast_alloc, query->proc->parent_scope_of_poly_proc, query->token->pos); - - scope_enter(query->function_header->scope); - - u32 idx = 0; - bh_arr_each(AstParam, param, query->function_header->params) { - bh_arr_each(AstPolyParam, pp, query->proc->poly_params) { - if (pp->kind == PPK_Baked_Value && pp->idx == idx) goto skip_introducing_symbol; - } - - symbol_introduce(current_scope, param->local->token, (AstNode *) param->local); - - skip_introducing_symbol: - idx++; - } - - bh_arr_each(AstParam, param, query->function_header->params) { - if (param->local->type_node != NULL) { - resolved_a_symbol = 0; - - onyx_errors_disable(); - param->local->flags |= Ast_Flag_Symbol_Invisible; - symres_type(¶m->local->type_node); - param->local->flags &= ~Ast_Flag_Symbol_Invisible; - onyx_errors_enable(); - - if (resolved_a_symbol) query->successful_symres = 1; - } - } - - scope_leave(); - return Symres_Success; -} - -static SymresStatus symres_foreign_block(AstForeignBlock *fb) { - if (fb->scope == NULL) - fb->scope = scope_create(context.ast_alloc, current_scope, 
fb->token->pos); - - SYMRES(expression, &fb->module_name); - - if (fb->module_name->kind != Ast_Kind_StrLit) { - onyx_report_error(fb->token->pos, Error_Critical, "Expected module name to be a compile-time string literal."); - return Symres_Error; - } - - bh_arr_each(Entity *, pent, fb->captured_entities) { - Entity *ent = *pent; - if (ent->type == Entity_Type_Function_Header) { - if (ent->function->body->next != NULL) { - onyx_report_error(ent->function->token->pos, Error_Critical, "Procedures declared in a #foreign block should not have bodies."); - return Symres_Error; - } - - ent->function->foreign.import_name = (AstTyped *) make_string_literal(context.ast_alloc, ent->function->intrinsic_name); - ent->function->foreign.module_name = fb->module_name; - ent->function->is_foreign = 1; - ent->function->is_foreign_dyncall = fb->uses_dyncall; - ent->function->entity = NULL; - ent->function->entity_header = NULL; - ent->function->entity_body = NULL; - - add_entities_for_node(NULL, (AstNode *) ent->function, ent->scope, ent->package); - continue; - } - - if (ent->type == Entity_Type_Binding) { - AstBinding* new_binding = onyx_ast_node_new(context.ast_alloc, sizeof(AstBinding), Ast_Kind_Binding); - new_binding->token = ent->binding->token; - new_binding->node = ent->binding->node; - - Entity e; - memset(&e, 0, sizeof(e)); - e.type = Entity_Type_Binding; - e.state = Entity_State_Introduce_Symbols; - e.binding = new_binding; - e.scope = fb->scope; - e.package = ent->package; - - entity_heap_insert(&context.entities, e); - } - - if (ent->type != Entity_Type_Function) { - entity_heap_insert_existing(&context.entities, ent); - } - } - - return Symres_Complete; -} - -static SymresStatus symres_include(AstInclude* include) { - if (include->name != NULL) return Symres_Goto_Parse; - - SYMRES(expression, &include->name_node); - - if (include->name_node->kind != Ast_Kind_StrLit) { - onyx_report_error(include->token->pos, Error_Critical, "Expected compile-time known string literal 
here. Got '%s'.", onyx_ast_node_kind_string(include->name_node->kind)); - return Symres_Error; - } - - OnyxToken* str_token = include->name_node->token; - if (str_token != NULL) { - token_toggle_end(str_token); - include->name = bh_strdup(context.ast_alloc, str_token->text); - string_process_escape_seqs(include->name, include->name, strlen(include->name)); - token_toggle_end(str_token); - } - - return Symres_Goto_Parse; -} - -static SymresStatus symres_file_contents(AstFileContents* fc) { - SYMRES(expression, &fc->filename_expr); - - if (fc->filename_expr->kind != Ast_Kind_StrLit) { - onyx_report_error(fc->token->pos, Error_Critical, "Expected given expression to be a compile-time stirng literal."); - return Symres_Error; - } - - return Symres_Success; -} - -static SymresStatus symres_import(AstImport* import) { - AstPackage* package = import->imported_package; - SYMRES(package, package); - - if (import->import_package_itself) { - OnyxToken *name = bh_arr_last(package->path); - name = import->qualified_package_name ? import->qualified_package_name : name; - - symbol_introduce( - current_entity->scope, - name, - (AstNode *) package); - } - - if (import->specified_imports) { - package_track_use_package(package->package, import->entity); - - Scope *import_scope = package->package->scope; - if (import_scope == current_scope) return Symres_Complete; - - // use X { * } - if (import->only == NULL) { - OnyxFilePos pos = import->token->pos; - scope_include(current_scope, import_scope, pos); - return Symres_Complete; - } - - - // use X { a, b, c } - bh_arr_each(QualifiedImport, qi, import->only) { - AstNode* imported = symbol_resolve(import_scope, qi->symbol_name); - if (imported == NULL) { // :SymresStall - if (report_unresolved_symbols) { - // TODO: Change package->name to package->qualified_name when - // merged with the documentation generation branch. 
- onyx_report_error(qi->symbol_name->pos, Error_Critical, - "The symbol '%b' was not found the package '%s'.", - qi->symbol_name->text, qi->symbol_name->length, package->package->name); - - return Symres_Error; - } else { - return Symres_Yield_Macro; - } - } - - symbol_introduce(current_scope, qi->as_name, imported); - } - } - - return Symres_Complete; -} - -void symres_entity(Entity* ent) { - current_entity = ent; - if (ent->scope) scope_enter(ent->scope); - - report_unresolved_symbols = context.cycle_detected; - - SymresStatus ss = Symres_Success; - EntityState next_state = Entity_State_Check_Types; - - switch (ent->type) { - case Entity_Type_Binding: { - symbol_introduce(current_scope, ent->binding->token, ent->binding->node); - track_documentation_for_symbol_info(ent->binding->node, ent->binding->documentation); - track_declaration_for_tags((AstNode *) ent->binding); - - if (context.doc_info) { - onyx_docs_submit(context.doc_info, ent->binding); - } - - package_reinsert_use_packages(ent->package); - next_state = Entity_State_Finalized; - break; - } - - case Entity_Type_Static_If: ss = symres_static_if(ent->static_if); break; - - case Entity_Type_Load_Path: - case Entity_Type_Load_File: ss = symres_include(ent->include); break; - case Entity_Type_File_Contents: ss = symres_file_contents(ent->file_contents); break; - - case Entity_Type_Foreign_Function_Header: - case Entity_Type_Temp_Function_Header: - case Entity_Type_Function_Header: ss = symres_function_header(ent->function); break; - case Entity_Type_Function: ss = symres_function(ent->function); break; - - case Entity_Type_Global_Header: ss = symres_global(ent->global); break; - - case Entity_Type_Import: ss = symres_import(ent->import); break; - - - case Entity_Type_Polymorphic_Proc: ss = symres_polyproc(ent->poly_proc); - next_state = Entity_State_Finalized; - break; - - case Entity_Type_Interface: ss = symres_interface(ent->interface); break; - - case Entity_Type_Overloaded_Function: ss = 
symres_overloaded_function(ent->overloaded_function); break; - case Entity_Type_Expression: ss = symres_expression(&ent->expr); break; - case Entity_Type_Type_Alias: ss = symres_type(&ent->type_alias); break; - case Entity_Type_Enum: ss = symres_enum(ent->enum_type); break; - case Entity_Type_Memory_Reservation_Type: ss = symres_memres_type(&ent->mem_res); break; - case Entity_Type_Memory_Reservation: ss = symres_memres(&ent->mem_res); break; - case Entity_Type_String_Literal: ss = symres_expression(&ent->expr); break; - case Entity_Type_Struct_Member_Default: ss = symres_struct_defaults((AstType *) ent->type_alias); break; - case Entity_Type_Process_Directive: ss = symres_process_directive((AstNode *) ent->expr); break; - case Entity_Type_Macro: ss = symres_macro(ent->macro); break; - case Entity_Type_Constraint_Check: ss = symres_constraint(ent->constraint); break; - case Entity_Type_Polymorph_Query: ss = symres_polyquery(ent->poly_query); break; - case Entity_Type_Foreign_Block: ss = symres_foreign_block(ent->foreign_block); - if (context.options->generate_foreign_info) { - next_state = Entity_State_Check_Types; - ss = Symres_Success; - } - break; - - default: break; - } - - if (ss == Symres_Yield_Macro) ent->macro_attempts++; - if (ss == Symres_Yield_Micro) ent->micro_attempts++; - if (ss == Symres_Complete) ent->state = Entity_State_Finalized; - if (ss == Symres_Goto_Parse) ent->state = Entity_State_Parse; - if (ss == Symres_Error) ent->state = Entity_State_Failed; - if (ss == Symres_Success) { - ent->macro_attempts = 0; - ent->micro_attempts = 0; - ent->state = next_state; - } - - current_scope = NULL; - current_entity = NULL; -} diff --git a/compiler/src/types.c b/compiler/src/types.c index d886c66a7..1cba7b8f0 100644 --- a/compiler/src/types.c +++ b/compiler/src/types.c @@ -6,104 +6,90 @@ #include "errors.h" #include "parser.h" -// NOTE: These have to be in the same order as Basic -Type basic_types[] = { - { Type_Kind_Basic, 0, 0, (AstType *) 
&basic_type_void, { Basic_Kind_Void, 0, 0, 1, "void" } }, - - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_bool, { Basic_Kind_Bool, Basic_Flag_Boolean, 1, 1, "bool" } }, - - { Type_Kind_Basic, 0, 0, NULL, { Basic_Kind_Int_Unsized, Basic_Flag_Integer, 0, 0, "unsized int" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_i8, { Basic_Kind_I8, Basic_Flag_Integer, 1, 1, "i8" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_u8, { Basic_Kind_U8, Basic_Flag_Integer | Basic_Flag_Unsigned, 1, 1, "u8" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_i16, { Basic_Kind_I16, Basic_Flag_Integer, 2, 2, "i16" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_u16, { Basic_Kind_U16, Basic_Flag_Integer | Basic_Flag_Unsigned, 2, 2, "u16" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_i32, { Basic_Kind_I32, Basic_Flag_Integer, 4, 4, "i32" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_u32, { Basic_Kind_U32, Basic_Flag_Integer | Basic_Flag_Unsigned, 4, 4, "u32" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_i64, { Basic_Kind_I64, Basic_Flag_Integer, 8, 8, "i64" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_u64, { Basic_Kind_U64, Basic_Flag_Integer | Basic_Flag_Unsigned, 8, 8, "u64" } }, - - { Type_Kind_Basic, 0, 0, NULL, { Basic_Kind_Float_Unsized, Basic_Flag_Float, 0, 0, "unsized float" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_f32, { Basic_Kind_F32, Basic_Flag_Float, 4, 4, "f32" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_f64, { Basic_Kind_F64, Basic_Flag_Float, 8, 4, "f64" } }, - - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_rawptr, { Basic_Kind_Rawptr, Basic_Flag_Pointer, POINTER_SIZE, POINTER_SIZE, "rawptr" } }, - - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_i8x16, { Basic_Kind_I8X16, Basic_Flag_SIMD, 16, 16, "i8x16" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_i16x8, { Basic_Kind_I16X8, Basic_Flag_SIMD, 16, 16, "i16x8" } }, - { Type_Kind_Basic, 0, 0, (AstType *) 
&basic_type_i32x4, { Basic_Kind_I32X4, Basic_Flag_SIMD, 16, 16, "i32x4" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_i64x2, { Basic_Kind_I64X2, Basic_Flag_SIMD, 16, 16, "i64x2" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_f32x4, { Basic_Kind_F32X4, Basic_Flag_SIMD, 16, 16, "f32x4" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_f64x2, { Basic_Kind_F64X2, Basic_Flag_SIMD, 16, 16, "f64x2" } }, - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_v128, { Basic_Kind_V128, Basic_Flag_SIMD, 16, 16, "v128" } }, - - { Type_Kind_Basic, 0, 0, (AstType *) &basic_type_type_expr, { Basic_Kind_Type_Index, Basic_Flag_Type_Index, 4, 4, "type_expr" } }, -}; - -// TODO: Document this!! - bh_imap type_map; -static bh_imap type_pointer_map; -static bh_imap type_multi_pointer_map; -static bh_imap type_array_map; -static bh_imap type_slice_map; -static bh_imap type_dynarr_map; -static bh_imap type_vararg_map; -static Table(u64) type_func_map; - -static Type* type_create(TypeKind kind, bh_allocator a, u32 extra_type_pointer_count) { - Type* type = bh_alloc(a, sizeof(Type) + sizeof(Type *) * extra_type_pointer_count); +static Type* type_create(Context *context, TypeKind kind, u32 extra_type_pointer_count) { + Type* type = bh_alloc(context->ast_alloc, sizeof(Type) + sizeof(Type *) * extra_type_pointer_count); memset(type, 0, sizeof(Type)); type->kind = kind; - type->ast_type = NULL; return type; } -static void type_register(Type* type) { - type->id = ++context.next_type_id; +static void type_register(Context *context, Type* type) { + type->id = ++context->next_type_id; if (type->ast_type) type->ast_type->type_id = type->id; - bh_imap_put(&type_map, type->id, (u64) type); + bh_imap_put(&context->types.type_map, type->id, (u64) type); } -void types_init() { -#define MAKE_MAP(x) (memset(&x, 0, sizeof(x)), bh_imap_init(&x, global_heap_allocator, 255)) - MAKE_MAP(type_map); - MAKE_MAP(type_pointer_map); - MAKE_MAP(type_multi_pointer_map); - MAKE_MAP(type_array_map); 
- MAKE_MAP(type_slice_map); - MAKE_MAP(type_dynarr_map); - MAKE_MAP(type_vararg_map); +void types_init(Context *context) { +#define MAKE_MAP(x) (memset(&x, 0, sizeof(x)), bh_imap_init(&x, context->gp_alloc, 255)) + MAKE_MAP(context->types.type_map); + MAKE_MAP(context->types.pointer_map); + MAKE_MAP(context->types.multi_pointer_map); + MAKE_MAP(context->types.array_map); + MAKE_MAP(context->types.slice_map); + MAKE_MAP(context->types.dynarr_map); + MAKE_MAP(context->types.vararg_map); +#undef MAKE_MAP - type_func_map = NULL; - sh_new_arena(type_func_map); + context->types.func_map = NULL; + sh_new_arena(context->types.func_map); - fori (i, 0, Basic_Kind_Count) type_register(&basic_types[i]); -#undef MAKE_MAP + fori (i, 0, Basic_Kind_Count) { + context->types.basic[i] = bh_alloc_item(context->ast_alloc, Type); + } + + *context->types.basic[Basic_Kind_Void] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_void, { Basic_Kind_Void, 0, 0, 1, "void" } }); + *context->types.basic[Basic_Kind_Bool] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_bool, { Basic_Kind_Bool, Basic_Flag_Boolean, 1, 1, "bool" } }); + *context->types.basic[Basic_Kind_Int_Unsized] = ((Type) { Type_Kind_Basic, 0, 0, NULL, { Basic_Kind_Int_Unsized, Basic_Flag_Integer, 0, 0, "unsized int" } }); + *context->types.basic[Basic_Kind_I8] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_i8, { Basic_Kind_I8, Basic_Flag_Integer, 1, 1, "i8" } }); + *context->types.basic[Basic_Kind_U8] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_u8, { Basic_Kind_U8, Basic_Flag_Integer | Basic_Flag_Unsigned, 1, 1, "u8" } }); + *context->types.basic[Basic_Kind_I16] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_i16, { Basic_Kind_I16, Basic_Flag_Integer, 2, 2, "i16" } }); + *context->types.basic[Basic_Kind_U16] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_u16, { 
Basic_Kind_U16, Basic_Flag_Integer | Basic_Flag_Unsigned, 2, 2, "u16" } }); + *context->types.basic[Basic_Kind_I32] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_i32, { Basic_Kind_I32, Basic_Flag_Integer, 4, 4, "i32" } }); + *context->types.basic[Basic_Kind_U32] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_u32, { Basic_Kind_U32, Basic_Flag_Integer | Basic_Flag_Unsigned, 4, 4, "u32" } }); + *context->types.basic[Basic_Kind_I64] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_i64, { Basic_Kind_I64, Basic_Flag_Integer, 8, 8, "i64" } }); + *context->types.basic[Basic_Kind_U64] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_u64, { Basic_Kind_U64, Basic_Flag_Integer | Basic_Flag_Unsigned, 8, 8, "u64" } }); + *context->types.basic[Basic_Kind_Float_Unsized] = ((Type) { Type_Kind_Basic, 0, 0, NULL, { Basic_Kind_Float_Unsized, Basic_Flag_Float, 0, 0, "unsized float" } }); + *context->types.basic[Basic_Kind_F32] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_f32, { Basic_Kind_F32, Basic_Flag_Float, 4, 4, "f32" } }); + *context->types.basic[Basic_Kind_F64] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_f64, { Basic_Kind_F64, Basic_Flag_Float, 8, 4, "f64" } }); + *context->types.basic[Basic_Kind_Rawptr] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_rawptr, { Basic_Kind_Rawptr, Basic_Flag_Pointer, POINTER_SIZE, POINTER_SIZE, "rawptr" } }); + *context->types.basic[Basic_Kind_I8X16] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_i8x16, { Basic_Kind_I8X16, Basic_Flag_SIMD, 16, 16, "i8x16" } }); + *context->types.basic[Basic_Kind_I16X8] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_i16x8, { Basic_Kind_I16X8, Basic_Flag_SIMD, 16, 16, "i16x8" } }); + *context->types.basic[Basic_Kind_I32X4] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) 
&context->basic_types.type_i32x4, { Basic_Kind_I32X4, Basic_Flag_SIMD, 16, 16, "i32x4" } }); + *context->types.basic[Basic_Kind_I64X2] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_i64x2, { Basic_Kind_I64X2, Basic_Flag_SIMD, 16, 16, "i64x2" } }); + *context->types.basic[Basic_Kind_F32X4] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_f32x4, { Basic_Kind_F32X4, Basic_Flag_SIMD, 16, 16, "f32x4" } }); + *context->types.basic[Basic_Kind_F64X2] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_f64x2, { Basic_Kind_F64X2, Basic_Flag_SIMD, 16, 16, "f64x2" } }); + *context->types.basic[Basic_Kind_V128] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_v128, { Basic_Kind_V128, Basic_Flag_SIMD, 16, 16, "v128" } }); + *context->types.basic[Basic_Kind_Type_Index] = ((Type) { Type_Kind_Basic, 0, 0, (AstType *) &context->basic_types.type_type_expr, { Basic_Kind_Type_Index, Basic_Flag_Type_Index, 4, 4, "type_expr" } }); + + fori (i, 0, Basic_Kind_Count) { + type_register(context, context->types.basic[i]); + } } -void types_dump_type_info() { - bh_arr_each(bh__imap_entry, entry, type_map.entries) { - bh_printf("%d -> %s\n", entry->key, type_get_name((Type *) entry->value)); +void types_dump_type_info(Context *context) { + bh_arr_each(bh__imap_entry, entry, context->types.type_map.entries) { + bh_printf("%d -> %s\n", entry->key, type_get_name(context, (Type *) entry->value)); } } -Type* type_lookup_by_id(u32 id) { - if (bh_imap_has(&type_map, id)) { - return (Type *) bh_imap_get(&type_map, id); +Type* type_lookup_by_id(Context *context, u32 id) { + if (bh_imap_has(&context->types.type_map, id)) { + return (Type *) bh_imap_get(&context->types.type_map, id); } return NULL; } -b32 types_are_compatible_(Type* t1, Type* t2, b32 recurse_pointers) { +b32 types_are_compatible(Context *context, Type* t1, Type* t2) { // NOTE: If they are pointing to the same thing, // it is safe to assume 
they are the same type if (t1 == t2) return 1; if (t1 == NULL || t2 == NULL) return 0; if (t1->id == t2->id) return 1; - if (t1 == &type_auto_return || t2 == &type_auto_return) { + if (t1 == context->types.auto_return || t2 == context->types.auto_return) { return 0; } @@ -125,9 +111,7 @@ b32 types_are_compatible_(Type* t1, Type* t2, b32 recurse_pointers) { case Type_Kind_Pointer: { if (t2->kind == Type_Kind_Pointer) { - if (!recurse_pointers) return 1; - - if (types_are_compatible(t1->Pointer.elem, t2->Pointer.elem)) return 1; + if (types_are_compatible(context, t1->Pointer.elem, t2->Pointer.elem)) return 1; if (t1->Pointer.elem->kind == Type_Kind_Struct && t2->Pointer.elem->kind == Type_Kind_Struct) { Type* t1_struct = t1->Pointer.elem; @@ -135,16 +119,14 @@ b32 types_are_compatible_(Type* t1, Type* t2, b32 recurse_pointers) { bh_arr(StructMember *) members = t1_struct->Struct.memarr; if (bh_arr_length(members) > 0 && members[0]->used) - return types_are_compatible(t2_struct,members[0]->type); + return types_are_compatible(context, t2_struct,members[0]->type); } } // Pointers promote to multi-pointers // &u8 -> [&] u8 if (t2->kind == Type_Kind_MultiPointer) { - if (!recurse_pointers) return 1; - - if (types_are_compatible(t1->Pointer.elem, t2->Pointer.elem)) return 1; + if (types_are_compatible(context,t1->Pointer.elem, t2->Pointer.elem)) return 1; } // Pointer decays to rawptr @@ -157,9 +139,7 @@ b32 types_are_compatible_(Type* t1, Type* t2, b32 recurse_pointers) { // Multi-pointer decays to pointer // [&] u8 -> &u8 if (t2->kind == Type_Kind_Pointer) { - if (!recurse_pointers) return 1; - - if (types_are_compatible(t1->MultiPointer.elem, t2->Pointer.elem)) return 1; + if (types_are_compatible(context, t1->MultiPointer.elem, t2->Pointer.elem)) return 1; } // Multi-pointer decays to rawptr @@ -174,7 +154,7 @@ b32 types_are_compatible_(Type* t1, Type* t2, b32 recurse_pointers) { if (t1->Array.count != 0) if (t1->Array.count != t2->Array.count) return 0; - return 
types_are_compatible(t1->Array.elem, t2->Array.elem); + return types_are_compatible(context, t1->Array.elem, t2->Array.elem); } case Type_Kind_Struct: { @@ -195,11 +175,11 @@ b32 types_are_compatible_(Type* t1, Type* t2, b32 recurse_pointers) { if (t2->kind != Type_Kind_Function) return 0; if (t1->Function.param_count != t2->Function.param_count) return 0; - if (!types_are_compatible(t1->Function.return_type, t2->Function.return_type)) return 0; + if (!types_are_compatible(context, t1->Function.return_type, t2->Function.return_type)) return 0; if (t1->Function.param_count > 0) { fori (i, 0, t1->Function.param_count) { - if (!types_are_compatible(t1->Function.params[i], t2->Function.params[i])) return 0; + if (!types_are_compatible(context, t1->Function.params[i], t2->Function.params[i])) return 0; } } @@ -208,17 +188,17 @@ b32 types_are_compatible_(Type* t1, Type* t2, b32 recurse_pointers) { case Type_Kind_Slice: { if (t2->kind != Type_Kind_Slice) return 0; - return types_are_compatible(t1->Slice.elem, t2->Slice.elem); + return types_are_compatible(context, t1->Slice.elem, t2->Slice.elem); } case Type_Kind_VarArgs: { if (t2->kind != Type_Kind_VarArgs) return 0; - return types_are_compatible(t1->VarArgs.elem, t2->VarArgs.elem); + return types_are_compatible(context, t1->VarArgs.elem, t2->VarArgs.elem); } case Type_Kind_DynArray: { if (t2->kind != Type_Kind_DynArray) return 0; - return types_are_compatible(t1->DynArray.elem, t2->DynArray.elem); + return types_are_compatible(context, t1->DynArray.elem, t2->DynArray.elem); } case Type_Kind_Compound: { @@ -226,7 +206,7 @@ b32 types_are_compatible_(Type* t1, Type* t2, b32 recurse_pointers) { if (t1->Compound.count != t2->Compound.count) return 0; fori (i, 0, (i64) t1->Compound.count) { - if (!types_are_compatible(t1->Compound.types[i], t2->Compound.types[i])) return 0; + if (!types_are_compatible(context, t1->Compound.types[i], t2->Compound.types[i])) return 0; } return 1; @@ -240,6 +220,10 @@ b32 
types_are_compatible_(Type* t1, Type* t2, b32 recurse_pointers) { // If the above cases didn't catch it, then these union types are not compatible. return 0; + case Type_Kind_Invalid: + // I'm not 100% sure when this can happen, but if this happens, the types will automatically not match. + return 0; + default: assert("Invalid type" && 0); break; @@ -248,10 +232,6 @@ b32 types_are_compatible_(Type* t1, Type* t2, b32 recurse_pointers) { return 0; } -b32 types_are_compatible(Type* t1, Type* t2) { - return types_are_compatible_(t1, t2, 1); -} - u32 type_size_of(Type* type) { if (type == NULL) return 0; @@ -302,20 +282,22 @@ static b32 type_is_ready_to_be_used_in_construction(Type *t) { } } -static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b32 accept_partial_types) { +static Type* type_build_from_ast_inner(Context *context, AstType* type_node, b32 accept_partial_types) { if (type_node == NULL) return NULL; + bh_allocator alloc = context->ast_alloc; + switch (type_node->kind) { case Ast_Kind_Pointer_Type: { - Type *inner_type = type_build_from_ast_inner(alloc, ((AstPointerType *) type_node)->elem, accept_partial_types); - Type *ptr_type = type_make_pointer(alloc, inner_type); + Type *inner_type = type_build_from_ast_inner(context, ((AstPointerType *) type_node)->elem, accept_partial_types); + Type *ptr_type = type_make_pointer(context, inner_type); if (ptr_type) ptr_type->ast_type = type_node; return ptr_type; } case Ast_Kind_Multi_Pointer_Type: { - Type *inner_type = type_build_from_ast_inner(alloc, ((AstMultiPointerType *) type_node)->elem, accept_partial_types); - Type *ptr_type = type_make_multi_pointer(alloc, inner_type); + Type *inner_type = type_build_from_ast_inner(context, ((AstMultiPointerType *) type_node)->elem, accept_partial_types); + Type *ptr_type = type_make_multi_pointer(context, inner_type); if (ptr_type) ptr_type->ast_type = type_node; return ptr_type; } @@ -324,10 +306,10 @@ static Type* 
type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b AstFunctionType* ftype_node = (AstFunctionType *) type_node; u64 param_count = ftype_node->param_count; - Type* return_type = type_build_from_ast_inner(alloc, ftype_node->return_type, accept_partial_types); + Type* return_type = type_build_from_ast_inner(context, ftype_node->return_type, accept_partial_types); if (return_type == NULL) return NULL; - Type* func_type = type_create(Type_Kind_Function, alloc, param_count); + Type* func_type = type_create(context, Type_Kind_Function, param_count); func_type->ast_type = type_node; func_type->Function.param_count = param_count; func_type->Function.needed_param_count = param_count; @@ -336,27 +318,27 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b if (param_count > 0) { fori (i, 0, (i64) param_count) { - func_type->Function.params[i] = type_build_from_ast_inner(alloc, ftype_node->params[i], accept_partial_types); + func_type->Function.params[i] = type_build_from_ast_inner(context, ftype_node->params[i], accept_partial_types); // LEAK LEAK LEAK if (func_type->Function.params[i] == NULL) return NULL; } } - char* name = (char *) type_get_unique_name(func_type); - if (func_type->Function.return_type != &type_auto_return) { - i32 index = shgeti(type_func_map, name); + char* name = (char *) type_get_unique_name(context, func_type); + if (func_type->Function.return_type != context->types.auto_return) { + i32 index = shgeti(context->types.func_map, name); if (index != -1) { - u64 id = type_func_map[index].value; - Type* existing_type = (Type *) bh_imap_get(&type_map, id); + u64 id = context->types.func_map[index].value; + Type* existing_type = (Type *) bh_imap_get(&context->types.type_map, id); // LEAK LEAK LEAK the func_type that is created return existing_type; } } - type_register(func_type); - shput(type_func_map, name, func_type->id); + type_register(context, func_type); + shput(context->types.func_map, name, func_type->id); 
return func_type; } @@ -364,50 +346,50 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b case Ast_Kind_Array_Type: { AstArrayType* a_node = (AstArrayType *) type_node; - Type *elem_type = type_build_from_ast_inner(alloc, a_node->elem, accept_partial_types); + Type *elem_type = type_build_from_ast_inner(context, a_node->elem, accept_partial_types); if (elem_type == NULL) return NULL; u32 count = 0; if (a_node->count_expr) { if (a_node->count_expr->type == NULL) - a_node->count_expr->type = type_build_from_ast(alloc, a_node->count_expr->type_node); + a_node->count_expr->type = type_build_from_ast(context, a_node->count_expr->type_node); if (node_is_auto_cast((AstNode *) a_node->count_expr)) { a_node->count_expr = ((AstUnaryOp *) a_node)->expr; } - resolve_expression_type(a_node->count_expr); + resolve_expression_type(context, a_node->count_expr); // NOTE: Currently, the count_expr has to be an I32 literal if (a_node->count_expr->type->kind != Type_Kind_Basic || a_node->count_expr->type->Basic.kind != Basic_Kind_I32) { - onyx_report_error(type_node->token->pos, Error_Critical, "Array type expects type 'i32' for size, got '%s'.", - type_get_name(a_node->count_expr->type)); + ONYX_ERROR(type_node->token->pos, Error_Critical, "Array type expects type 'i32' for size, got '%s'.", + type_get_name(context, a_node->count_expr->type)); return NULL; } b32 valid = 0; - count = get_expression_integer_value(a_node->count_expr, &valid); + count = get_expression_integer_value(context, a_node->count_expr, &valid); if (!valid) { if (!(a_node->count_expr->flags & Ast_Flag_Comptime)) { - onyx_report_error(a_node->token->pos, Error_Critical, "Array type size must be a constant."); + ONYX_ERROR(a_node->token->pos, Error_Critical, "Array type size must be a constant."); } else { - onyx_report_error(a_node->token->pos, Error_Critical, "Array type size expression must be 'i32', got '%s'.", - type_get_name(a_node->count_expr->type)); + 
ONYX_ERROR(a_node->token->pos, Error_Critical, "Array type size expression must be 'i32', got '%s'.", + type_get_name(context, a_node->count_expr->type)); } return NULL; } if ((i32)count < 0) { - onyx_report_error(a_node->token->pos, Error_Critical, "Array type size must be a positive integer."); + ONYX_ERROR(a_node->token->pos, Error_Critical, "Array type size must be a positive integer."); return NULL; } } - Type* array_type = type_make_array(alloc, elem_type, count); + Type* array_type = type_make_array(context, elem_type, count); if (array_type) array_type->ast_type = type_node; return array_type; } @@ -420,7 +402,7 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b Type* s_type; if (s_node->pending_type == NULL) { - s_type = type_create(Type_Kind_Struct, alloc, 0); + s_type = type_create(context, Type_Kind_Struct, 0); s_node->pending_type = s_type; s_type->ast_type = type_node; @@ -430,11 +412,12 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b s_type->Struct.constructed_from = NULL; s_type->Struct.poly_sln = NULL; s_type->Struct.status = SPS_Start; - type_register(s_type); + s_type->Struct.scope = s_node->scope; + type_register(context, s_type); s_type->Struct.memarr = NULL; sh_new_arena(s_type->Struct.members); - bh_arr_new(global_heap_allocator, s_type->Struct.memarr, s_type->Struct.mem_count); + bh_arr_new(context->gp_alloc, s_type->Struct.memarr, s_type->Struct.mem_count); } else { s_type = s_node->pending_type; @@ -453,11 +436,11 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b u32 idx = 0; bh_arr_each(AstStructMember *, member, s_node->members) { if ((*member)->type == NULL) - (*member)->type = type_build_from_ast_inner(alloc, (*member)->type_node, 1); + (*member)->type = type_build_from_ast_inner(context, (*member)->type_node, 1); if ((*member)->type == NULL) { - if (context.cycle_detected) { - onyx_report_error((* member)->token->pos, Error_Critical, 
"Unable to figure out the type of this structure member."); + if (context->cycle_detected) { + ONYX_ERROR((* member)->token->pos, Error_Critical, "Unable to figure out the type of this structure member."); } s_node->pending_type_is_valid = 0; @@ -471,7 +454,7 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b mem_alignment = type_alignment_of((*member)->type); if (mem_alignment <= 0) { - onyx_report_error((*member)->token->pos, Error_Critical, "Invalid member type: %s. Has alignment %d", type_get_name((*member)->type), mem_alignment); + ONYX_ERROR((*member)->token->pos, Error_Critical, "Invalid member type: %s. Has alignment %d", type_get_name(context, (*member)->type), mem_alignment); return NULL; } @@ -483,16 +466,16 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b token_toggle_end((*member)->token); if (shgeti(s_type->Struct.members, (*member)->token->text) != -1) { - onyx_report_error((*member)->token->pos, Error_Critical, "Duplicate struct member, '%s'.", (*member)->token->text); + ONYX_ERROR((*member)->token->pos, Error_Critical, "Duplicate struct member, '%s'.", (*member)->token->text); token_toggle_end((*member)->token); return NULL; } - StructMember* smem = bh_alloc_item(alloc, StructMember); + StructMember* smem = bh_alloc_item(context->ast_alloc, StructMember); smem->offset = offset; smem->type = (*member)->type; smem->idx = idx; - smem->name = bh_strdup(alloc, (*member)->token->text); + smem->name = bh_strdup(context->ast_alloc, (*member)->token->text); smem->token = (*member)->token; smem->initial_value = &(*member)->initial_value; smem->meta_tags = (*member)->meta_tags; @@ -517,13 +500,13 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b idx++; } - u32 min_alignment = get_expression_integer_value(s_node->min_alignment_, NULL); + u32 min_alignment = get_expression_integer_value(context, s_node->min_alignment_, NULL); alignment = bh_max(min_alignment, 
alignment); if (!s_node->is_packed) { bh_align(size, alignment); } - u32 min_size = get_expression_integer_value(s_node->min_size_, NULL); + u32 min_size = get_expression_integer_value(context, s_node->min_size_, NULL); size = bh_max(min_size, size); s_type->Struct.alignment = alignment; @@ -538,7 +521,7 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b if (enum_node->etcache) return enum_node->etcache; if (enum_node->backing_type == NULL) return NULL; - Type* enum_type = type_create(Type_Kind_Enum, alloc, 0); + Type* enum_type = type_create(context, Type_Kind_Enum, 0); enum_node->etcache = enum_type; enum_type->ast_type = type_node; @@ -546,24 +529,24 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b enum_type->Enum.name = enum_node->name; enum_type->Enum.is_flags = enum_node->is_flags; - type_register(enum_type); + type_register(context, enum_type); return enum_type; } case Ast_Kind_Slice_Type: { - Type* slice_type = type_make_slice(alloc, type_build_from_ast_inner(alloc, ((AstSliceType *) type_node)->elem, accept_partial_types)); + Type* slice_type = type_make_slice(context, type_build_from_ast_inner(context, ((AstSliceType *) type_node)->elem, accept_partial_types)); if (slice_type) slice_type->ast_type = type_node; return slice_type; } case Ast_Kind_DynArr_Type: { - Type* dynarr_type = type_make_dynarray(alloc, type_build_from_ast_inner(alloc, ((AstDynArrType *) type_node)->elem, accept_partial_types)); + Type* dynarr_type = type_make_dynarray(context, type_build_from_ast_inner(context, ((AstDynArrType *) type_node)->elem, accept_partial_types)); if (dynarr_type) dynarr_type->ast_type = type_node; return dynarr_type; } case Ast_Kind_VarArg_Type: { - Type* va_type = type_make_varargs(alloc, type_build_from_ast_inner(alloc, ((AstVarArgType *) type_node)->elem, accept_partial_types)); + Type* va_type = type_make_varargs(context, type_build_from_ast_inner(context, ((AstVarArgType *) 
type_node)->elem, accept_partial_types)); if (va_type) va_type->ast_type = type_node; return va_type; } @@ -573,7 +556,7 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b } case Ast_Kind_Type_Alias: { - Type* type = type_build_from_ast_inner(alloc, ((AstTypeAlias *) type_node)->to, accept_partial_types); + Type* type = type_build_from_ast_inner(context, ((AstTypeAlias *) type_node)->to, accept_partial_types); if (type && type->ast_type) type_node->type_id = type->id; return type; } @@ -584,24 +567,26 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b case Ast_Kind_Poly_Struct_Type: { if (type_node->type_id != 0) return NULL; - Type* p_type = type_create(Type_Kind_PolyStruct, alloc, 0); + Type* p_type = type_create(context, Type_Kind_PolyStruct, 0); p_type->ast_type = type_node; p_type->PolyStruct.name = ((AstPolyStructType *) type_node)->name; p_type->PolyStruct.meta_tags = ((AstPolyStructType *) type_node)->base_struct->meta_tags; + p_type->PolyStruct.scope = ((AstPolyStructType *) type_node)->scope; - type_register(p_type); + type_register(context, p_type); return NULL; } case Ast_Kind_Poly_Union_Type: { if (type_node->type_id != 0) return NULL; - Type* p_type = type_create(Type_Kind_PolyUnion, alloc, 0); + Type* p_type = type_create(context, Type_Kind_PolyUnion, 0); p_type->ast_type = type_node; p_type->PolyUnion.name = ((AstPolyUnionType *) type_node)->name; p_type->PolyUnion.meta_tags = ((AstPolyUnionType *) type_node)->base_union->meta_tags; + p_type->PolyUnion.scope = ((AstPolyUnionType *) type_node)->scope; - type_register(p_type); + type_register(context, p_type); return NULL; } @@ -617,16 +602,16 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b // If it is an unresolved field access or symbol, just return because an error will be printed elsewhere. 
if (pc_type->callee->kind == Ast_Kind_Field_Access || pc_type->callee->kind == Ast_Kind_Symbol) return NULL; - onyx_report_error(pc_type->token->pos, Error_Critical, "Cannot instantiate a concrete type off of a non-polymorphic type."); - onyx_report_error(pc_type->callee->token->pos, Error_Critical, "Here is the type trying to be instantiated. (%s)", onyx_ast_node_kind_string(pc_type->callee->kind)); + ONYX_ERROR(pc_type->token->pos, Error_Critical, "Cannot instantiate a concrete type off of a non-polymorphic type."); + ONYX_ERROR(pc_type->callee->token->pos, Error_Critical, "Here is the type trying to be instantiated. (%s)", onyx_ast_node_kind_string(pc_type->callee->kind)); return NULL; } bh_arr(AstPolySolution) slns = NULL; - bh_arr_new(global_heap_allocator, slns, bh_arr_length(pc_type->params)); + bh_arr_new(context->gp_alloc, slns, bh_arr_length(pc_type->params)); bh_arr_each(AstNode *, given, pc_type->params) { if (node_is_type(*given)) { - Type* param_type = type_build_from_ast_inner(alloc, (AstType *) *given, 1); + Type* param_type = type_build_from_ast_inner(context, (AstType *) *given, 1); // LEAK LEAK LEAK if (param_type == NULL) return NULL; @@ -646,11 +631,13 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b Type* concrete = NULL; if (pc_type->callee->kind == Ast_Kind_Poly_Struct_Type) { AstPolyStructType* ps_type = (AstPolyStructType *) pc_type->callee; - concrete = polymorphic_struct_lookup(ps_type, slns, pc_type->token->pos, (pc_type->flags & Ast_Flag_Header_Check_No_Error) == 0); + type_build_from_ast_inner(context, (AstType *) ps_type, 0); + concrete = polymorphic_struct_lookup(context, ps_type, slns, pc_type->token->pos, (pc_type->flags & Ast_Flag_Header_Check_No_Error) == 0); } else if (pc_type->callee->kind == Ast_Kind_Poly_Union_Type) { AstPolyUnionType* pu_type = (AstPolyUnionType *) pc_type->callee; - concrete = polymorphic_union_lookup(pu_type, slns, pc_type->token->pos, (pc_type->flags & 
Ast_Flag_Header_Check_No_Error) == 0); + type_build_from_ast_inner(context, (AstType *) pu_type, 0); + concrete = polymorphic_union_lookup(context, pu_type, slns, pc_type->token->pos, (pc_type->flags & Ast_Flag_Header_Check_No_Error) == 0); } // This should be copied in the previous function. @@ -658,7 +645,7 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b bh_arr_free(slns); if (!concrete) return NULL; - if (concrete == (Type *) &node_that_signals_failure) return concrete; + if (concrete == (Type *) &context->node_that_signals_failure) return concrete; pc_type->resolved_type = concrete; return concrete; } @@ -668,13 +655,13 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b i64 type_count = bh_arr_length(ctype->types); - Type* comp_type = type_create(Type_Kind_Compound, alloc, type_count); + Type* comp_type = type_create(context, Type_Kind_Compound, type_count); comp_type->Compound.size = 0; comp_type->Compound.count = type_count; fori (i, 0, type_count) { assert(ctype->types[i] != NULL); - comp_type->Compound.types[i] = type_build_from_ast_inner(alloc, ctype->types[i], accept_partial_types); + comp_type->Compound.types[i] = type_build_from_ast_inner(context, ctype->types[i], accept_partial_types); // LEAK LEAK LEAK if (comp_type->Compound.types[i] == NULL) return NULL; @@ -685,16 +672,16 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b bh_align(comp_type->Compound.size, 4); comp_type->Compound.linear_members = NULL; - bh_arr_new(global_heap_allocator, comp_type->Compound.linear_members, comp_type->Compound.count); - build_linear_types_with_offset(comp_type, &comp_type->Compound.linear_members, 0); + bh_arr_new(context->gp_alloc, comp_type->Compound.linear_members, comp_type->Compound.count); + build_linear_types_with_offset(context, comp_type, &comp_type->Compound.linear_members, 0); - type_register(comp_type); + type_register(context, comp_type); return 
comp_type; } case Ast_Kind_Alias: { AstAlias* alias = (AstAlias *) type_node; - return type_build_from_ast_inner(alloc, (AstType *) alias->alias, accept_partial_types); + return type_build_from_ast_inner(context, (AstType *) alias->alias, accept_partial_types); } case Ast_Kind_Typeof: { @@ -710,16 +697,17 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b AstDistinctType* distinct = (AstDistinctType *) type_node; if (distinct->dtcache) return distinct->dtcache; - Type *base_type = type_build_from_ast(alloc, distinct->base_type); + Type *base_type = type_build_from_ast(context, distinct->base_type); if (base_type == NULL) return NULL; - Type *distinct_type = type_create(Type_Kind_Distinct, alloc, 0); + Type *distinct_type = type_create(context, Type_Kind_Distinct, 0); distinct_type->Distinct.base_type = base_type; distinct_type->Distinct.name = distinct->name; + distinct_type->Distinct.scope = distinct->scope; distinct_type->ast_type = type_node; distinct->dtcache = distinct_type; - type_register(distinct_type); + type_register(context, distinct_type); return distinct_type; } @@ -730,7 +718,7 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b Type *u_type; if (union_->pending_type == NULL) { - u_type = type_create(Type_Kind_Union, alloc, 0); + u_type = type_create(context, Type_Kind_Union, 0); union_->pending_type = u_type; u_type->ast_type = type_node; @@ -738,12 +726,13 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b u_type->Union.meta_tags = union_->meta_tags; u_type->Union.constructed_from = NULL; u_type->Union.status = SPS_Start; - type_register(u_type); + u_type->Union.scope = union_->scope; + type_register(context, u_type); u_type->Union.variants = NULL; u_type->Union.variants_ordered = NULL; sh_new_arena(u_type->Union.variants); - bh_arr_new(global_heap_allocator, u_type->Union.variants_ordered, bh_arr_length(union_->variants)); + 
bh_arr_new(context->gp_alloc, u_type->Union.variants_ordered, bh_arr_length(union_->variants)); } else { u_type = union_->pending_type; } @@ -755,12 +744,12 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b bh_arr_each(AstUnionVariant *, pvariant, union_->variants) { AstUnionVariant *variant = *pvariant; if (!variant->type) { - variant->type = type_build_from_ast_inner(alloc, variant->type_node, 1); + variant->type = type_build_from_ast_inner(context, variant->type_node, 1); } if (!variant->type) { - if (context.cycle_detected) { - onyx_report_error(variant->token->pos, Error_Critical, "Unable to figure out the type of this union variant."); + if (context->cycle_detected) { + ONYX_ERROR(variant->token->pos, Error_Critical, "Unable to figure out the type of this union variant."); } union_->pending_type_is_valid = 0; @@ -783,14 +772,15 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b u32 alignment = 0; u32 next_tag_value = 0; - AstEnumType* tag_enum_node = onyx_ast_node_new(alloc, sizeof(AstEnumType), Ast_Kind_Enum_Type); + assert(union_->tag_backing_type); + + AstEnumType* tag_enum_node = onyx_ast_node_new(context->ast_alloc, sizeof(AstEnumType), Ast_Kind_Enum_Type); tag_enum_node->token = union_->token; - tag_enum_node->name = bh_aprintf(alloc, "%s.tag_enum", union_->name); - tag_enum_node->backing_type = type_build_from_ast(alloc, union_->tag_backing_type); - bh_arr_new(alloc, tag_enum_node->values, bh_arr_length(union_->variants)); + tag_enum_node->name = bh_aprintf(context->ast_alloc, "%s.tag_enum", union_->name); + tag_enum_node->backing_type = type_build_from_ast(context, union_->tag_backing_type); + bh_arr_new(context->ast_alloc, tag_enum_node->values, bh_arr_length(union_->variants)); - void add_entities_for_node(bh_arr(Entity *) *target_arr, AstNode* node, Scope* scope, Package* package); // HACK - add_entities_for_node(NULL, (AstNode *) tag_enum_node, union_->entity->scope, 
union_->entity->package); + add_entities_for_node(&context->entities, NULL, (AstNode *) tag_enum_node, union_->entity->scope, union_->entity->package); // // Create variant instances @@ -800,7 +790,7 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b u32 var_alignment = type_alignment_of(variant->type); if (var_alignment <= 0) { - onyx_report_error(variant->token->pos, Error_Critical, "Invalid variant type '%s', has alignment %d", type_get_name(variant->type), var_alignment); + ONYX_ERROR(variant->token->pos, Error_Critical, "Invalid variant type '%s', has alignment %d", type_get_name(context, variant->type), var_alignment); return NULL; } @@ -808,7 +798,7 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b token_toggle_end(variant->token); if (shgeti(u_type->Union.variants, variant->token->text) != -1) { - onyx_report_error(variant->token->pos, Error_Critical, "Duplicate union variant, '%s'.", variant->token->text); + ONYX_ERROR(variant->token->pos, Error_Critical, "Duplicate union variant, '%s'.", variant->token->text); token_toggle_end(variant->token); return NULL; } @@ -816,21 +806,33 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b u32 type_size = type_size_of(variant->type); size = bh_max(size, type_size); - UnionVariant* uv = bh_alloc_item(alloc, UnionVariant); - uv->name = bh_strdup(alloc, variant->token->text); + UnionVariant* uv = bh_alloc_item(context->ast_alloc, UnionVariant); + uv->name = bh_strdup(context->ast_alloc, variant->token->text); uv->token = variant->token; - uv->tag_value = next_tag_value++; uv->meta_tags = variant->meta_tags; uv->type = variant->type; + if (variant->explicit_tag_value) { + b32 success; + uv->tag_value = get_expression_integer_value(context, variant->explicit_tag_value, &success); + next_tag_value = uv->tag_value + 1; + + if (!success) { + ONYX_ERROR(variant->token->pos, Error_Critical, "Expected a compile-time known integer 
for explicit value of variant."); + return NULL; + } + } else { + uv->tag_value = next_tag_value++; + } + shput(u_type->Union.variants, variant->token->text, uv); token_toggle_end(variant->token); bh_arr_push(u_type->Union.variants_ordered, uv); - AstEnumValue *ev = onyx_ast_node_new(alloc, sizeof(AstEnumValue), Ast_Kind_Enum_Value); + AstEnumValue *ev = onyx_ast_node_new(context->ast_alloc, sizeof(AstEnumValue), Ast_Kind_Enum_Value); ev->token = uv->token; - ev->value = (AstTyped *) make_int_literal(alloc, uv->tag_value); + ev->value = (AstTyped *) make_int_literal(context, uv->tag_value); bh_arr_push(tag_enum_node->values, ev); } @@ -839,7 +841,7 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b u_type->Union.alignment = alignment; u_type->Union.size = size + alignment; // Add the size of the tag - u_type->Union.tag_type = type_build_from_ast(alloc, (AstType *) tag_enum_node); + u_type->Union.tag_type = type_build_from_ast(context, (AstType *) tag_enum_node); u_type->Union.status = SPS_Uses_Done; return u_type; @@ -853,18 +855,18 @@ static Type* type_build_from_ast_inner(bh_allocator alloc, AstType* type_node, b // If this function returns NULL, then the caller MUST yield because the type may still be constructed in the future. // If there was an error constructing the type, then this function will report that directly. -Type *type_build_from_ast(bh_allocator alloc, AstType* type_node) { - return type_build_from_ast_inner(alloc, type_node, 0); +Type *type_build_from_ast(Context *context, AstType* type_node) { + return type_build_from_ast_inner(context, type_node, 0); } // CLEANUP: This needs to be merged with the very similar code from up above. 
-Type* type_build_function_type(bh_allocator alloc, AstFunction* func) { +Type* type_build_function_type(Context *context, AstFunction* func) { u64 param_count = bh_arr_length(func->params); - Type* return_type = type_build_from_ast(alloc, func->return_type); + Type* return_type = type_build_from_ast(context, func->return_type); if (return_type == NULL) return NULL; - Type* func_type = type_create(Type_Kind_Function, alloc, param_count); + Type* func_type = type_create(context, Type_Kind_Function, param_count); func_type->Function.param_count = param_count; func_type->Function.needed_param_count = 0; func_type->Function.vararg_arg_pos = -1; @@ -884,25 +886,25 @@ Type* type_build_function_type(bh_allocator alloc, AstFunction* func) { } // CopyPaste from above in type_build_from_ast - char* name = (char *) type_get_unique_name(func_type); - if (func_type->Function.return_type != &type_auto_return) { - i32 index = shgeti(type_func_map, name); + char* name = (char *) type_get_unique_name(context, func_type); + if (func_type->Function.return_type != context->types.auto_return) { + i32 index = shgeti(context->types.func_map, name); if (index != -1) { - u64 id = type_func_map[index].value; - Type* existing_type = (Type *) bh_imap_get(&type_map, id); + u64 id = context->types.func_map[index].value; + Type* existing_type = (Type *) bh_imap_get(&context->types.type_map, id); // LEAK LEAK LEAK the func_type that is created return existing_type; } } - type_register(func_type); - shput(type_func_map, name, func_type->id); + type_register(context, func_type); + shput(context->types.func_map, name, func_type->id); return func_type; } -Type* type_build_compound_type(bh_allocator alloc, AstCompound* compound) { +Type* type_build_compound_type(Context *context, AstCompound* compound) { i64 expr_count = bh_arr_length(compound->exprs); fori (i, 0, expr_count) { if (compound->exprs[i]->type == NULL) return NULL; @@ -913,7 +915,7 @@ Type* type_build_compound_type(bh_allocator alloc, 
AstCompound* compound) { } } - Type* comp_type = type_create(Type_Kind_Compound, alloc, expr_count); + Type* comp_type = type_create(context, Type_Kind_Compound, expr_count); comp_type->Compound.size = 0; comp_type->Compound.count = expr_count; @@ -926,19 +928,19 @@ Type* type_build_compound_type(bh_allocator alloc, AstCompound* compound) { bh_align(comp_type->Compound.size, 4); comp_type->Compound.linear_members = NULL; - bh_arr_new(global_heap_allocator, comp_type->Compound.linear_members, comp_type->Compound.count); - build_linear_types_with_offset(comp_type, &comp_type->Compound.linear_members, 0); + bh_arr_new(context->gp_alloc, comp_type->Compound.linear_members, comp_type->Compound.count); + build_linear_types_with_offset(context, comp_type, &comp_type->Compound.linear_members, 0); - type_register(comp_type); + type_register(context, comp_type); return comp_type; } -Type* type_build_implicit_type_of_struct_literal(bh_allocator alloc, AstStructLiteral* lit, b32 is_query) { +Type* type_build_implicit_type_of_struct_literal(Context *context, AstStructLiteral* lit, b32 is_query) { if (lit->generated_inferred_type) { return lit->generated_inferred_type; } - Type* type = type_create(Type_Kind_Struct, alloc, 0); + Type* type = type_create(context, Type_Kind_Struct, 0); type->ast_type = NULL; type->Struct.name = NULL; type->Struct.mem_count = bh_arr_length(lit->args.named_values); @@ -946,11 +948,11 @@ Type* type_build_implicit_type_of_struct_literal(bh_allocator alloc, AstStructLi type->Struct.constructed_from = NULL; type->Struct.status = SPS_Start; type->Struct.poly_sln = NULL; - type_register(type); + type_register(context, type); type->Struct.memarr = NULL; sh_new_arena(type->Struct.members); - bh_arr_new(global_heap_allocator, type->Struct.memarr, type->Struct.mem_count); + bh_arr_new(context->gp_alloc, type->Struct.memarr, type->Struct.mem_count); u32 size = 0; u32 offset = 0; @@ -959,10 +961,10 @@ Type* type_build_implicit_type_of_struct_literal(bh_allocator 
alloc, AstStructLi bh_arr_each(AstNamedValue *, pnv, lit->args.named_values) { AstNamedValue *nv = *pnv; - Type* member_type = resolve_expression_type(nv->value); + Type* member_type = resolve_expression_type(context, nv->value); if (member_type == NULL) { if (!is_query) { - onyx_report_error(nv->value->token->pos, Error_Critical, "Unable to resolve type of this member when trying to construct an inferred type of the structure literal."); + ONYX_ERROR(nv->value->token->pos, Error_Critical, "Unable to resolve type of this member when trying to construct an inferred type of the structure literal."); } return NULL; @@ -984,11 +986,11 @@ Type* type_build_implicit_type_of_struct_literal(bh_allocator alloc, AstStructLi return NULL; } - StructMember *smem = bh_alloc_item(alloc, StructMember); + StructMember *smem = bh_alloc_item(context->ast_alloc, StructMember); smem->offset = offset; smem->type = member_type; smem->idx = idx; - smem->name = bh_strdup(alloc, nv->token->text); + smem->name = bh_strdup(context->ast_alloc, nv->token->text); smem->token = nv->token; smem->meta_tags = NULL; smem->included_through_use = 0; @@ -1021,148 +1023,156 @@ Type* type_build_implicit_type_of_struct_literal(bh_allocator alloc, AstStructLi return type; } -Type* type_make_pointer(bh_allocator alloc, Type* to) { +Type* type_make_pointer(Context *context, Type* to) { if (to == NULL) return NULL; - if (to == (Type *) &node_that_signals_failure) return to; + if (to == (Type *) &context->node_that_signals_failure) return to; assert(to->id > 0); - u64 ptr_id = bh_imap_get(&type_pointer_map, to->id); + u64 ptr_id = bh_imap_get(&context->types.pointer_map, to->id); if (ptr_id > 0) { - Type* ptr_type = (Type *) bh_imap_get(&type_map, ptr_id); + Type* ptr_type = (Type *) bh_imap_get(&context->types.type_map, ptr_id); return ptr_type; } else { - Type* ptr_type = type_create(Type_Kind_Pointer, alloc, 0); + Type* ptr_type = type_create(context, Type_Kind_Pointer, 0); ptr_type->Pointer.base.flags |= 
Basic_Flag_Pointer; ptr_type->Pointer.base.size = POINTER_SIZE; ptr_type->Pointer.elem = to; - type_register(ptr_type); - bh_imap_put(&type_pointer_map, to->id, ptr_type->id); + type_register(context, ptr_type); + bh_imap_put(&context->types.pointer_map, to->id, ptr_type->id); return ptr_type; } } -Type* type_make_multi_pointer(bh_allocator alloc, Type* to) { +Type* type_make_multi_pointer(Context *context, Type* to) { if (to == NULL) return NULL; - if (to == (Type *) &node_that_signals_failure) return to; + if (to == (Type *) &context->node_that_signals_failure) return to; assert(to->id > 0); - u64 ptr_id = bh_imap_get(&type_multi_pointer_map, to->id); + u64 ptr_id = bh_imap_get(&context->types.multi_pointer_map, to->id); if (ptr_id > 0) { - Type* ptr_type = (Type *) bh_imap_get(&type_map, ptr_id); + Type* ptr_type = (Type *) bh_imap_get(&context->types.type_map, ptr_id); return ptr_type; } else { - Type* ptr_type = type_create(Type_Kind_MultiPointer, alloc, 0); + Type* ptr_type = type_create(context, Type_Kind_MultiPointer, 0); ptr_type->MultiPointer.base.flags |= Basic_Flag_Pointer; ptr_type->MultiPointer.base.flags |= Basic_Flag_Multi_Pointer; ptr_type->MultiPointer.base.size = POINTER_SIZE; ptr_type->MultiPointer.elem = to; - type_register(ptr_type); - bh_imap_put(&type_multi_pointer_map, to->id, ptr_type->id); + type_register(context, ptr_type); + bh_imap_put(&context->types.multi_pointer_map, to->id, ptr_type->id); return ptr_type; } } -Type* type_make_array(bh_allocator alloc, Type* to, u32 count) { +Type* type_make_array(Context *context, Type* to, u32 count) { if (to == NULL) return NULL; - if (to == (Type *) &node_that_signals_failure) return to; + if (to == (Type *) &context->node_that_signals_failure) return to; assert(to->id > 0); u64 key = ((((u64) to->id) << 32) | (u64) count); - u64 array_id = bh_imap_get(&type_array_map, key); + u64 array_id = bh_imap_get(&context->types.array_map, key); if (array_id > 0) { - Type* array_type = (Type *) 
bh_imap_get(&type_map, array_id); + Type* array_type = (Type *) bh_imap_get(&context->types.type_map, array_id); return array_type; } else { - Type* arr_type = type_create(Type_Kind_Array, alloc, 0); + Type* arr_type = type_create(context, Type_Kind_Array, 0); arr_type->Array.count = count; arr_type->Array.elem = to; arr_type->Array.size = count * type_size_of(to); - type_register(arr_type); - bh_imap_put(&type_array_map, key, arr_type->id); + type_register(context, arr_type); + bh_imap_put(&context->types.array_map, key, arr_type->id); return arr_type; } } -Type* type_make_slice(bh_allocator alloc, Type* of) { +Type* type_make_slice(Context *context, Type* of) { if (of == NULL) return NULL; - if (of == (Type *) &node_that_signals_failure) return of; + if (of == (Type *) &context->node_that_signals_failure) return of; assert(of->id > 0); - u64 slice_id = bh_imap_get(&type_slice_map, of->id); + u64 slice_id = bh_imap_get(&context->types.slice_map, of->id); if (slice_id > 0) { - Type* slice_type = (Type *) bh_imap_get(&type_map, slice_id); + Type* slice_type = (Type *) bh_imap_get(&context->types.type_map, slice_id); return slice_type; } else { - Type* slice_type = type_create(Type_Kind_Slice, alloc, 0); - type_register(slice_type); - bh_imap_put(&type_slice_map, of->id, slice_type->id); + Type* slice_type = type_create(context, Type_Kind_Slice, 0); + type_register(context, slice_type); + bh_imap_put(&context->types.slice_map, of->id, slice_type->id); - type_make_multi_pointer(alloc, of); + type_make_multi_pointer(context, of); slice_type->Slice.elem = of; + AstPolyStructType* pslice_type = (AstPolyStructType *) context->builtins.slice_type; + OnyxFilePos pos = { 0 }; + slice_type->Slice.scope = scope_create(context, pslice_type->scope, pos); + return slice_type; } } -Type* type_make_dynarray(bh_allocator alloc, Type* of) { +Type* type_make_dynarray(Context *context, Type* of) { if (of == NULL) return NULL; - if (of == (Type *) &node_that_signals_failure) return of; 
+ if (of == (Type *) &context->node_that_signals_failure) return of; assert(of->id > 0); - u64 dynarr_id = bh_imap_get(&type_dynarr_map, of->id); + u64 dynarr_id = bh_imap_get(&context->types.dynarr_map, of->id); if (dynarr_id > 0) { - Type* dynarr = (Type *) bh_imap_get(&type_map, dynarr_id); + Type* dynarr = (Type *) bh_imap_get(&context->types.type_map, dynarr_id); return dynarr; } else { - Type* dynarr = type_create(Type_Kind_DynArray, alloc, 0); - type_register(dynarr); - bh_imap_put(&type_dynarr_map, of->id, dynarr->id); + Type* dynarr = type_create(context, Type_Kind_DynArray, 0); + type_register(context, dynarr); + bh_imap_put(&context->types.dynarr_map, of->id, dynarr->id); - type_make_multi_pointer(alloc, of); + type_make_multi_pointer(context, of); dynarr->DynArray.elem = of; + AstPolyStructType* dynarr_type = (AstPolyStructType *) context->builtins.array_type; + OnyxFilePos pos = { 0 }; + dynarr->DynArray.scope = scope_create(context, dynarr_type->scope, pos); + return dynarr; } } -Type* type_make_varargs(bh_allocator alloc, Type* of) { +Type* type_make_varargs(Context *context, Type* of) { if (of == NULL) return NULL; - if (of == (Type *) &node_that_signals_failure) return of; + if (of == (Type *) &context->node_that_signals_failure) return of; assert(of->id > 0); - u64 vararg_id = bh_imap_get(&type_vararg_map, of->id); + u64 vararg_id = bh_imap_get(&context->types.vararg_map, of->id); if (vararg_id > 0) { - Type* va_type = (Type *) bh_imap_get(&type_map, vararg_id); + Type* va_type = (Type *) bh_imap_get(&context->types.type_map, vararg_id); return va_type; } else { - Type* va_type = type_create(Type_Kind_VarArgs, alloc, 0); - type_register(va_type); - bh_imap_put(&type_vararg_map, of->id, va_type->id); + Type* va_type = type_create(context, Type_Kind_VarArgs, 0); + type_register(context, va_type); + bh_imap_put(&context->types.vararg_map, of->id, va_type->id); - type_make_multi_pointer(alloc, of); + type_make_multi_pointer(context, of); 
va_type->VarArgs.elem = of; return va_type; } } -void build_linear_types_with_offset(Type* type, bh_arr(TypeWithOffset)* pdest, u32 offset) { +void build_linear_types_with_offset(Context *context, Type* type, bh_arr(TypeWithOffset)* pdest, u32 offset) { if (type->kind == Type_Kind_Compound) { u32 elem_offset = 0; fori (i, 0, type->Compound.count) { - build_linear_types_with_offset(type->Compound.types[i], pdest, offset + elem_offset); + build_linear_types_with_offset(context, type->Compound.types[i], pdest, offset + elem_offset); elem_offset += bh_max(type_size_of(type->Compound.types[i]), 4); } @@ -1170,8 +1180,8 @@ void build_linear_types_with_offset(Type* type, bh_arr(TypeWithOffset)* pdest, u u32 mem_count = type_structlike_mem_count(type); StructMember smem = { 0 }; fori (i, 0, mem_count) { - type_lookup_member_by_idx(type, i, &smem); - build_linear_types_with_offset(smem.type, pdest, offset + smem.offset); + type_lookup_member_by_idx(context, type, i, &smem); + build_linear_types_with_offset(context, smem.type, pdest, offset + smem.offset); } } else { @@ -1186,7 +1196,7 @@ void build_linear_types_with_offset(Type* type, bh_arr(TypeWithOffset)* pdest, u } } -b32 type_struct_member_apply_use(bh_allocator alloc, Type *s_type, StructMember *smem) { +b32 type_struct_member_apply_use(Context *context, Type *s_type, StructMember *smem) { Type* used_type = smem->type; b32 type_is_pointer = 0; @@ -1196,7 +1206,7 @@ b32 type_struct_member_apply_use(bh_allocator alloc, Type *s_type, StructMember } if (used_type->kind != Type_Kind_Struct) { - onyx_report_error(smem->token->pos, Error_Critical, "Can only use things of structure, or pointer to structure type."); + ONYX_ERROR(smem->token->pos, Error_Critical, "Can only use things of structure, or pointer to structure type."); return 0; } @@ -1213,11 +1223,11 @@ b32 type_struct_member_apply_use(bh_allocator alloc, Type *s_type, StructMember } if (shgeti(s_type->Struct.members, nsmem->name) != -1) { - 
onyx_report_error(smem->token->pos, Error_Critical, "Used name '%s' conflicts with existing struct member.", nsmem->name); + ONYX_ERROR(smem->token->pos, Error_Critical, "Used name '%s' conflicts with existing struct member.", nsmem->name); return 0; } - StructMember* new_smem = bh_alloc_item(alloc, StructMember); + StructMember* new_smem = bh_alloc_item(context->ast_alloc, StructMember); new_smem->type = nsmem->type; new_smem->name = nsmem->name; new_smem->meta_tags = nsmem->meta_tags; @@ -1243,33 +1253,33 @@ b32 type_struct_member_apply_use(bh_allocator alloc, Type *s_type, StructMember return 1; } -const char* type_get_unique_name(Type* type) { +const char* type_get_unique_name(Context *context, Type* type) { if (type == NULL) return "unknown"; switch (type->kind) { case Type_Kind_Basic: return type->Basic.name; - case Type_Kind_Pointer: return bh_aprintf(global_scratch_allocator, "&%s", type_get_unique_name(type->Pointer.elem)); - case Type_Kind_MultiPointer: return bh_aprintf(global_scratch_allocator, "[&] %s", type_get_unique_name(type->Pointer.elem)); - case Type_Kind_Array: return bh_aprintf(global_scratch_allocator, "[%d] %s", type->Array.count, type_get_unique_name(type->Array.elem)); + case Type_Kind_Pointer: return bh_aprintf(context->scratch_alloc, "&%s", type_get_unique_name(context, type->Pointer.elem)); + case Type_Kind_MultiPointer: return bh_aprintf(context->scratch_alloc, "[&] %s", type_get_unique_name(context, type->Pointer.elem)); + case Type_Kind_Array: return bh_aprintf(context->scratch_alloc, "[%d] %s", type->Array.count, type_get_unique_name(context, type->Array.elem)); case Type_Kind_Struct: if (type->Struct.name) - return bh_aprintf(global_scratch_allocator, "%s@%l", type->Struct.name, type->id); + return bh_aprintf(context->scratch_alloc, "%s@%l", type->Struct.name, type->id); else - return bh_aprintf(global_scratch_allocator, "%s@%l", "", type->id); + return bh_aprintf(context->scratch_alloc, "%s@%l", "", type->id); case Type_Kind_Enum: 
if (type->Enum.name) - return bh_aprintf(global_scratch_allocator, "%s@%l", type->Enum.name, type->id); + return bh_aprintf(context->scratch_alloc, "%s@%l", type->Enum.name, type->id); else - return bh_aprintf(global_scratch_allocator, "%s@%l", "", type->id); + return bh_aprintf(context->scratch_alloc, "%s@%l", "", type->id); case Type_Kind_Union: if (type->Union.name) - return bh_aprintf(global_scratch_allocator, "%s@%l", type->Union.name, type->id); + return bh_aprintf(context->scratch_alloc, "%s@%l", type->Union.name, type->id); else - return bh_aprintf(global_scratch_allocator, "%s@%l", "", type->id); + return bh_aprintf(context->scratch_alloc, "%s@%l", "", type->id); - case Type_Kind_Slice: return bh_aprintf(global_scratch_allocator, "[] %s", type_get_unique_name(type->Slice.elem)); - case Type_Kind_VarArgs: return bh_aprintf(global_scratch_allocator, "..%s", type_get_unique_name(type->VarArgs.elem)); - case Type_Kind_DynArray: return bh_aprintf(global_scratch_allocator, "[..] %s", type_get_unique_name(type->DynArray.elem)); + case Type_Kind_Slice: return bh_aprintf(context->scratch_alloc, "[] %s", type_get_unique_name(context, type->Slice.elem)); + case Type_Kind_VarArgs: return bh_aprintf(context->scratch_alloc, "..%s", type_get_unique_name(context, type->VarArgs.elem)); + case Type_Kind_DynArray: return bh_aprintf(context->scratch_alloc, "[..] 
%s", type_get_unique_name(context, type->DynArray.elem)); case Type_Kind_Function: { char buf[1024]; @@ -1277,7 +1287,7 @@ const char* type_get_unique_name(Type* type) { strncat(buf, "(", 1023); fori (i, 0, type->Function.param_count) { - strncat(buf, type_get_unique_name(type->Function.params[i]), 1023); + strncat(buf, type_get_unique_name(context, type->Function.params[i]), 1023); if (i >= type->Function.needed_param_count) strncat(buf, "?", 1023); @@ -1287,9 +1297,9 @@ const char* type_get_unique_name(Type* type) { } strncat(buf, ") -> ", 1023); - strncat(buf, type_get_unique_name(type->Function.return_type), 1023); + strncat(buf, type_get_unique_name(context, type->Function.return_type), 1023); - return bh_aprintf(global_scratch_allocator, "%s", buf); + return bh_aprintf(context->scratch_alloc, "%s", buf); } case Type_Kind_Compound: { @@ -1298,31 +1308,31 @@ const char* type_get_unique_name(Type* type) { strncat(buf, "(", 1023); fori (i, 0, type->Compound.count) { - strncat(buf, type_get_unique_name(type->Compound.types[i]), 1023); + strncat(buf, type_get_unique_name(context, type->Compound.types[i]), 1023); if (i != type->Compound.count - 1) strncat(buf, ", ", 1023); } strncat(buf, ")", 1023); - return bh_aprintf(global_scratch_allocator, "%s", buf); + return bh_aprintf(context->scratch_alloc, "%s", buf); } case Type_Kind_Distinct: { - return bh_aprintf(global_scratch_allocator, "%s@%l", type->Distinct.name, type->id); + return bh_aprintf(context->scratch_alloc, "%s@%l", type->Distinct.name, type->id); } default: return "unknown (not null)"; } } -const char* type_get_name(Type* type) { +const char* type_get_name(Context *context, Type* type) { if (type == NULL) return "unknown"; switch (type->kind) { case Type_Kind_Basic: return type->Basic.name; - case Type_Kind_Pointer: return bh_aprintf(global_scratch_allocator, "&%s", type_get_name(type->Pointer.elem)); - case Type_Kind_MultiPointer: return bh_aprintf(global_scratch_allocator, "[&] %s", 
type_get_name(type->Pointer.elem)); - case Type_Kind_Array: return bh_aprintf(global_scratch_allocator, "[%d] %s", type->Array.count, type_get_name(type->Array.elem)); + case Type_Kind_Pointer: return bh_aprintf(context->scratch_alloc, "&%s", type_get_name(context, type->Pointer.elem)); + case Type_Kind_MultiPointer: return bh_aprintf(context->scratch_alloc, "[&] %s", type_get_name(context, type->Pointer.elem)); + case Type_Kind_Array: return bh_aprintf(context->scratch_alloc, "[%d] %s", type->Array.count, type_get_name(context, type->Array.elem)); case Type_Kind_PolyStruct: return type->PolyStruct.name; @@ -1348,9 +1358,9 @@ const char* type_get_name(Type* type) { else return ""; - case Type_Kind_Slice: return bh_aprintf(global_scratch_allocator, "[] %s", type_get_name(type->Slice.elem)); - case Type_Kind_VarArgs: return bh_aprintf(global_scratch_allocator, "..%s", type_get_name(type->VarArgs.elem)); - case Type_Kind_DynArray: return bh_aprintf(global_scratch_allocator, "[..] %s", type_get_name(type->DynArray.elem)); + case Type_Kind_Slice: return bh_aprintf(context->scratch_alloc, "[] %s", type_get_name(context, type->Slice.elem)); + case Type_Kind_VarArgs: return bh_aprintf(context->scratch_alloc, "..%s", type_get_name(context, type->VarArgs.elem)); + case Type_Kind_DynArray: return bh_aprintf(context->scratch_alloc, "[..] 
%s", type_get_name(context, type->DynArray.elem)); case Type_Kind_Function: { char buf[512]; @@ -1358,15 +1368,15 @@ const char* type_get_name(Type* type) { strncat(buf, "(", 511); fori (i, 0, type->Function.param_count) { - strncat(buf, type_get_name(type->Function.params[i]), 511); + strncat(buf, type_get_name(context, type->Function.params[i]), 511); if (i != type->Function.param_count - 1) strncat(buf, ", ", 511); } strncat(buf, ") -> ", 511); - strncat(buf, type_get_name(type->Function.return_type), 511); + strncat(buf, type_get_name(context, type->Function.return_type), 511); - return bh_aprintf(global_scratch_allocator, "%s", buf); + return bh_aprintf(context->scratch_alloc, "%s", buf); } case Type_Kind_Compound: { @@ -1375,17 +1385,17 @@ const char* type_get_name(Type* type) { strncat(buf, "(", 511); fori (i, 0, type->Compound.count) { - strncat(buf, type_get_name(type->Compound.types[i]), 511); + strncat(buf, type_get_name(context, type->Compound.types[i]), 511); if (i != type->Compound.count - 1) strncat(buf, ", ", 511); } strncat(buf, ")", 511); - return bh_aprintf(global_scratch_allocator, "%s", buf); + return bh_aprintf(context->scratch_alloc, "%s", buf); } case Type_Kind_Distinct: { - return bh_aprintf(global_scratch_allocator, "%s", type->Distinct.name); + return bh_aprintf(context->scratch_alloc, "%s", type->Distinct.name); } default: return "unknown"; @@ -1426,31 +1436,31 @@ b32 type_is_ready_for_lookup(Type* type) { } static const StructMember slice_members[] = { - { 0, 0, NULL, "data", NULL, NULL, -1, 0, 0 }, - { POINTER_SIZE, 1, &basic_types[Basic_Kind_U32], "count", NULL, NULL, -1, 0, 0 }, - { POINTER_SIZE, 1, &basic_types[Basic_Kind_U32], "size", NULL, NULL, -1, 0, 0 }, - { POINTER_SIZE, 1, &basic_types[Basic_Kind_U32], "length", NULL, NULL, -1, 0, 0 }, + { 0, 0, NULL, "data", NULL, NULL, -1, 0, 0 }, + { POINTER_SIZE, 1, NULL, "count", NULL, NULL, -1, 0, 0 }, + { POINTER_SIZE, 1, NULL, "size", NULL, NULL, -1, 0, 0 }, + { POINTER_SIZE, 1, NULL, 
"length", NULL, NULL, -1, 0, 0 }, }; static const StructMember array_members[] = { - { 0, 0, NULL, "data", NULL, NULL, -1, 0, 0 }, - { POINTER_SIZE, 1, &basic_types[Basic_Kind_U32], "count", NULL, NULL, -1, 0, 0 }, - { POINTER_SIZE + 4, 2, &basic_types[Basic_Kind_U32], "capacity", NULL, NULL, -1, 0, 0 }, - { POINTER_SIZE + 8, 3, NULL, "allocator", NULL, NULL, -1, 0, 0 }, - { POINTER_SIZE, 1, &basic_types[Basic_Kind_U32], "size", NULL, NULL, -1, 0, 0 }, - { POINTER_SIZE, 1, &basic_types[Basic_Kind_U32], "length", NULL, NULL, -1, 0, 0 }, + { 0, 0, NULL, "data", NULL, NULL, -1, 0, 0 }, + { POINTER_SIZE, 1, NULL, "count", NULL, NULL, -1, 0, 0 }, + { POINTER_SIZE + 4, 2, NULL, "capacity", NULL, NULL, -1, 0, 0 }, + { POINTER_SIZE + 8, 3, NULL, "allocator", NULL, NULL, -1, 0, 0 }, + { POINTER_SIZE, 1, NULL, "size", NULL, NULL, -1, 0, 0 }, + { POINTER_SIZE, 1, NULL, "length", NULL, NULL, -1, 0, 0 }, }; static const StructMember func_members[] = { - { 0, 0, &basic_types[Basic_Kind_U32], "__funcidx", NULL, NULL, -1, 0, 0 }, - { POINTER_SIZE, 1, &basic_types[Basic_Kind_Rawptr], "closure", NULL, NULL, -1, 0, 0 }, + { 0, 0, NULL, "__funcidx", NULL, NULL, -1, 0, 0 }, + { POINTER_SIZE, 1, NULL, "closure", NULL, NULL, -1, 0, 0 }, }; static const StructMember union_members[] = { { 0, 0, NULL, "tag", NULL, NULL, -1, 0, 0 }, }; -b32 type_lookup_member(Type* type, char* member, StructMember* smem) { +b32 type_lookup_member(Context *context, Type* type, char* member, StructMember* smem) { if (type->kind == Type_Kind_Pointer) type = type->Pointer.elem; switch (type->kind) { @@ -1468,7 +1478,8 @@ b32 type_lookup_member(Type* type, char* member, StructMember* smem) { fori (i, 0, (i64) (sizeof(slice_members) / sizeof(StructMember))) { if (strcmp(slice_members[i].name, member) == 0) { *smem = slice_members[i]; - if (smem->idx == 0) smem->type = type_make_multi_pointer(context.ast_alloc, type->Slice.elem); + if (smem->idx == 0) smem->type = type_make_multi_pointer(context, type->Slice.elem); 
+ else smem->type = context->types.basic[Basic_Kind_U32]; return 1; } @@ -1480,8 +1491,9 @@ b32 type_lookup_member(Type* type, char* member, StructMember* smem) { fori (i, 0, (i64) (sizeof(array_members) / sizeof(StructMember))) { if (strcmp(array_members[i].name, member) == 0) { *smem = array_members[i]; - if (smem->idx == 0) smem->type = type_make_multi_pointer(context.ast_alloc, type->DynArray.elem); - if (smem->idx == 3) smem->type = type_build_from_ast(context.ast_alloc, builtin_allocator_type); + if (smem->idx == 0) smem->type = type_make_multi_pointer(context, type->DynArray.elem); + else if (smem->idx == 3) smem->type = type_build_from_ast(context, context->builtins.allocator_type); + else smem->type = context->types.basic[Basic_Kind_U32]; return 1; } @@ -1493,6 +1505,9 @@ b32 type_lookup_member(Type* type, char* member, StructMember* smem) { fori (i, 0, (i64) (sizeof(func_members) / sizeof(StructMember))) { if (strcmp(func_members[i].name, member) == 0) { *smem = func_members[i]; + if (smem->idx == 0) smem->type = context->types.basic[Basic_Kind_U32]; + if (smem->idx == 1) smem->type = context->types.basic[Basic_Kind_Rawptr]; + return 1; } } @@ -1512,7 +1527,7 @@ b32 type_lookup_member(Type* type, char* member, StructMember* smem) { } } -b32 type_lookup_member_by_idx(Type* type, i32 idx, StructMember* smem) { +b32 type_lookup_member_by_idx(Context *context, Type* type, i32 idx, StructMember* smem) { while (type->kind == Type_Kind_Distinct) type = type->Distinct.base_type; if (type->kind == Type_Kind_Pointer) type = type->Pointer.elem; @@ -1533,7 +1548,8 @@ b32 type_lookup_member_by_idx(Type* type, i32 idx, StructMember* smem) { if (idx > 2) return 0; *smem = slice_members[idx]; - if (smem->idx == 0) smem->type = type_make_multi_pointer(context.ast_alloc, type->Slice.elem); + if (smem->idx == 0) smem->type = type_make_multi_pointer(context, type->Slice.elem); + else smem->type = context->types.basic[Basic_Kind_U32]; return 1; } @@ -1542,8 +1558,9 @@ b32 
type_lookup_member_by_idx(Type* type, i32 idx, StructMember* smem) { if (idx > 4) return 0; *smem = array_members[idx]; - if (idx == 0) smem->type = type_make_multi_pointer(context.ast_alloc, type->DynArray.elem); - if (idx == 3) smem->type = type_build_from_ast(context.ast_alloc, builtin_allocator_type); + if (idx == 0) smem->type = type_make_multi_pointer(context, type->DynArray.elem); + else if (idx == 3) smem->type = type_build_from_ast(context, context->builtins.allocator_type); + else smem->type = context->types.basic[Basic_Kind_U32]; return 1; } @@ -1552,6 +1569,9 @@ b32 type_lookup_member_by_idx(Type* type, i32 idx, StructMember* smem) { if (idx > 1) return 0; *smem = func_members[idx]; + if (idx == 0) smem->type = context->types.basic[Basic_Kind_U32]; + if (idx == 1) smem->type = context->types.basic[Basic_Kind_Rawptr]; + return 1; } @@ -1587,18 +1607,18 @@ i32 type_linear_member_count(Type* type) { } } -b32 type_linear_member_lookup(Type* type, i32 idx, TypeWithOffset* two) { +b32 type_linear_member_lookup(Context *context, Type* type, i32 idx, TypeWithOffset* two) { while (type->kind == Type_Kind_Distinct) type = type->Distinct.base_type; switch (type->kind) { case Type_Kind_Slice: case Type_Kind_VarArgs: { if (idx == 0) { - two->type = type_make_multi_pointer(context.ast_alloc, type->Slice.elem); + two->type = type_make_multi_pointer(context, type->Slice.elem); two->offset = 0; } if (idx == 1) { - two->type = &basic_types[Basic_Kind_U32]; + two->type = context->types.basic[Basic_Kind_U32]; two->offset = POINTER_SIZE; } @@ -1606,20 +1626,20 @@ b32 type_linear_member_lookup(Type* type, i32 idx, TypeWithOffset* two) { } case Type_Kind_DynArray: { if (idx == 0) { - two->type = type_make_multi_pointer(context.ast_alloc, type->DynArray.elem); + two->type = type_make_multi_pointer(context, type->DynArray.elem); two->offset = 0; } if (idx == 1) { - two->type = &basic_types[Basic_Kind_U32]; + two->type = context->types.basic[Basic_Kind_U32]; two->offset = 
POINTER_SIZE; } if (idx == 2) { - two->type = &basic_types[Basic_Kind_U32]; + two->type = context->types.basic[Basic_Kind_U32]; two->offset = POINTER_SIZE + 4; } if (idx == 3 || idx == 4) { - Type* allocator_type = type_build_from_ast(context.ast_alloc, builtin_allocator_type); - type_linear_member_lookup(allocator_type, idx - 3, two); + Type* allocator_type = type_build_from_ast(context, context->builtins.allocator_type); + type_linear_member_lookup(context, allocator_type, idx - 3, two); two->offset += POINTER_SIZE + 8; } @@ -1629,11 +1649,11 @@ b32 type_linear_member_lookup(Type* type, i32 idx, TypeWithOffset* two) { case Type_Kind_Function: if (idx == 0) { - two->type = &basic_types[Basic_Kind_U32]; + two->type = context->types.basic[Basic_Kind_U32]; two->offset = 0; } if (idx == 1) { - two->type = &basic_types[Basic_Kind_Rawptr]; + two->type = context->types.basic[Basic_Kind_Rawptr]; two->offset = POINTER_SIZE; } return 1; @@ -1807,6 +1827,7 @@ b32 type_is_structlike_strict(Type* type) { b32 type_should_be_passed_like_a_struct(Type *type) { if (type == NULL) return 0; if (type->kind == Type_Kind_Struct) return 1; + if (type->kind == Type_Kind_Array) return 1; if (type->kind == Type_Kind_Slice) return 1; if (type->kind == Type_Kind_DynArray) return 1; if (type->kind == Type_Kind_Function) return 1; @@ -1849,6 +1870,7 @@ b32 type_is_sl_constructable(Type* type) { case Type_Kind_DynArray: return 1; case Type_Kind_Function: return 1; case Type_Kind_Union: return 1; + case Type_Kind_Array: return 1; default: return 0; } } diff --git a/compiler/src/utils.c b/compiler/src/utils.c index 002a0c93f..7f04ee805 100644 --- a/compiler/src/utils.c +++ b/compiler/src/utils.c @@ -1,6 +1,3 @@ -#define BH_INTERNAL_ALLOCATOR (global_heap_allocator) -#define BH_DEBUG - #include "utils.h" #include "lex.h" #include "astnodes.h" @@ -10,100 +7,94 @@ #include "errors.h" #include "doc.h" -bh_scratch global_scratch; -bh_allocator global_scratch_allocator; - -bh_managed_heap global_heap; 
-bh_allocator global_heap_allocator; - // // Program info and packages // -Package* package_lookup(char* package_name) { - i32 index = shgeti(context.packages, package_name); +Package* package_lookup(Context *context, char* package_name) { + i32 index = shgeti(context->packages, package_name); if (index != -1) { - return context.packages[index].value; + return context->packages[index].value; } else { return NULL; } } -Package* package_lookup_or_create(char* package_name, Scope* parent_scope, bh_allocator alloc, OnyxFilePos pos) { - i32 index = shgeti(context.packages, package_name); +Package* package_lookup_or_create(Context *context, char* package_name, Scope* parent_scope, OnyxFilePos pos) { + i32 index = shgeti(context->packages, package_name); if (index != -1) { - return context.packages[index].value; + return context->packages[index].value; } else { - Package* package = bh_alloc_item(alloc, Package); + Package* package = bh_alloc_item(context->ast_alloc, Package); - char* pac_name = bh_alloc_array(alloc, char, strlen(package_name) + 1); + char* pac_name = bh_alloc_array(context->ast_alloc, char, strlen(package_name) + 1); memcpy(pac_name, package_name, strlen(package_name) + 1); pac_name[strlen(package_name)] = '\0'; package->name = pac_name; package->unqualified_name = pac_name + bh_str_last_index_of(pac_name, '.'); package->use_package_entities = NULL; - package->id = ++context.next_package_id; + package->id = ++context->next_package_id; package->parent_id = -1; - bh_arr_new(global_heap_allocator, package->sub_packages, 4); + bh_arr_new(context->gp_alloc, package->sub_packages, 4); if (!strcmp(pac_name, "builtin")) { - package->private_scope = scope_create(alloc, context.global_scope, pos); - package->scope = context.global_scope; + package->private_scope = scope_create(context, context->global_scope, pos); + package->scope = context->global_scope; } else { - package->scope = scope_create(alloc, parent_scope, pos); - package->private_scope = 
scope_create(alloc, package->scope, pos); + package->scope = scope_create(context, parent_scope, pos); + package->private_scope = scope_create(context, package->scope, pos); } - shput(context.packages, pac_name, package); + shput(context->packages, pac_name, package); // The builtin package is special. The 'builtin' symbol will be // accessible even if you do not `use builtin`. if (!strcmp(pac_name, "builtin")) { - AstPackage* package_node = onyx_ast_node_new(alloc, sizeof(AstPackage), Ast_Kind_Package); + AstPackage* package_node = onyx_ast_node_new(context->ast_alloc, sizeof(AstPackage), Ast_Kind_Package); package_node->package_name = package->name; package_node->package = package; - package_node->type_node = builtin_package_id_type; + package_node->type_node = context->builtins.package_id_type; package_node->flags |= Ast_Flag_Comptime; - symbol_raw_introduce(context.global_scope, pac_name, pos, (AstNode *) package_node); + symbol_raw_introduce(context, context->global_scope, pac_name, pos, (AstNode *) package_node); } return package; } } -void package_track_use_package(Package* package, Entity* entity) { +void package_track_use_package(Context *context, Package* package, Entity* entity) { assert(entity); if (package->use_package_entities == NULL) { - bh_arr_new(global_heap_allocator, package->use_package_entities, 4); + bh_arr_new(context->gp_alloc, package->use_package_entities, 4); } bh_arr_push(package->use_package_entities, entity); } -void package_reinsert_use_packages(Package* package) { +void package_reinsert_use_packages(Context *context, Package* package) { if (!package) return; if (!package->use_package_entities) return; bh_arr_each(Entity *, use_package, package->use_package_entities) { - (*use_package)->state = Entity_State_Resolve_Symbols; + (*use_package)->state = Entity_State_Check_Types; (*use_package)->macro_attempts = 0; - entity_heap_insert_existing(&context.entities, *use_package); + entity_heap_insert_existing(&context->entities, 
*use_package); } bh_arr_set_length(package->use_package_entities, 0); } -void package_mark_as_used(Package* package) { +void package_mark_as_used(Context *context, Package* package) { if (!package) return; if (package->is_included_somewhere) return; package->is_included_somewhere = 1; bh_arr_each(Entity *, pent, package->buffered_entities) { - entity_heap_insert_existing(&context.entities, *pent); + entity_heap_insert_existing(&context->entities, *pent); } bh_arr_clear(package->buffered_entities); @@ -115,44 +106,50 @@ void package_mark_as_used(Package* package) { // Scoping // -Scope* scope_create(bh_allocator a, Scope* parent, OnyxFilePos created_at) { - Scope* scope = bh_alloc_item(a, Scope); - scope->id = ++context.next_scope_id; +Scope* scope_create(Context *context, Scope* parent, OnyxFilePos created_at) { + Scope* scope = bh_alloc_item(context->ast_alloc, Scope); + bh_arr_push(context->scopes, scope); + + scope->id = ++context->next_scope_id; scope->parent = parent; scope->created_at = created_at; scope->name = NULL; + // This will be set on the first symbol insertion. 
scope->symbols = NULL; - sh_new_arena(scope->symbols); return scope; } -void scope_include(Scope* target, Scope* source, OnyxFilePos pos) { +void scope_include(Context *context, Scope* target, Scope* source, OnyxFilePos pos) { fori (i, 0, shlen(source->symbols)) { - symbol_raw_introduce(target, source->symbols[i].key, pos, source->symbols[i].value); + symbol_raw_introduce(context, target, source->symbols[i].key, pos, source->symbols[i].value); } } -b32 symbol_introduce(Scope* scope, OnyxToken* tkn, AstNode* symbol) { +b32 symbol_introduce(Context *context, Scope* scope, OnyxToken* tkn, AstNode* symbol) { token_toggle_end(tkn); - b32 ret = symbol_raw_introduce(scope, tkn->text, tkn->pos, symbol); + b32 ret = symbol_raw_introduce(context, scope, tkn->text, tkn->pos, symbol); token_toggle_end(tkn); return ret; } -b32 symbol_raw_introduce(Scope* scope, char* name, OnyxFilePos pos, AstNode* symbol) { +b32 symbol_raw_introduce(Context *context, Scope* scope, char* name, OnyxFilePos pos, AstNode* symbol) { + if (!scope->symbols) { + sh_new_arena(scope->symbols); + } + if (strcmp(name, "_")) { i32 index = shgeti(scope->symbols, name); if (index != -1) { AstNode *node = scope->symbols[index].value; if (node != symbol) { - onyx_report_error(pos, Error_Critical, "Redeclaration of symbol '%s'.", name); + ONYX_ERROR(pos, Error_Critical, "Redeclaration of symbol '%s'.", name); if (node->token) { - onyx_report_error(node->token->pos, Error_Critical, "Previous declaration was here."); + ONYX_ERROR(node->token->pos, Error_Critical, "Previous declaration was here."); } return 0; @@ -162,16 +159,19 @@ b32 symbol_raw_introduce(Scope* scope, char* name, OnyxFilePos pos, AstNode* sym } shput(scope->symbols, name, symbol); - track_declaration_for_symbol_info(pos, symbol); + track_declaration_for_symbol_info(context, pos, symbol); return 1; } -void symbol_builtin_introduce(Scope* scope, char* sym, AstNode *node) { +void symbol_builtin_introduce(Context *context, Scope* scope, char* sym, 
AstNode *node) { + if (!scope->symbols) sh_new_arena(scope->symbols); + shput(scope->symbols, sym, node); } -void symbol_subpackage_introduce(Package* parent, char* sym, AstPackage* subpackage) { +void symbol_subpackage_introduce(Context *context, Package* parent, char* sym, AstPackage* subpackage) { Scope *scope = parent->scope; + if (!scope->symbols) sh_new_arena(scope->symbols); i32 index = shgeti(scope->symbols, sym); if (index != -1) { @@ -189,7 +189,7 @@ void symbol_subpackage_introduce(Package* parent, char* sym, AstPackage* subpack } } -AstNode* symbol_raw_resolve_no_ascend(Scope* scope, char* sym) { +AstNode* symbol_raw_resolve_no_ascend(Context *context, Scope* scope, char* sym) { if (!scope || !scope->symbols) return NULL; i32 index = shgeti(scope->symbols, sym); @@ -204,12 +204,28 @@ AstNode* symbol_raw_resolve_no_ascend(Scope* scope, char* sym) { return NULL; } -AstNode* symbol_raw_resolve(Scope* start_scope, char* sym) { +AstNode* symbol_raw_resolve_limited(Context *context, Scope* start_scope, char* sym, i32 limit) { + Scope* scope = start_scope; + AstNode *res = NULL; + + while (scope != NULL && limit-- > 0) { + res = symbol_raw_resolve_no_ascend(context, scope, sym); + if (res) { + return res; + } + + scope = scope->parent; + } + + return NULL; +} + +AstNode* symbol_raw_resolve(Context *context, Scope* start_scope, char* sym) { Scope* scope = start_scope; AstNode *res = NULL; while (scope != NULL) { - res = symbol_raw_resolve_no_ascend(scope, sym); + res = symbol_raw_resolve_no_ascend(context, scope, sym); if (res) { return res; } @@ -220,15 +236,15 @@ AstNode* symbol_raw_resolve(Scope* start_scope, char* sym) { return NULL; } -AstNode* symbol_resolve(Scope* start_scope, OnyxToken* tkn) { +AstNode* symbol_resolve(Context *context, Scope* start_scope, OnyxToken* tkn) { token_toggle_end(tkn); - AstNode* res = symbol_raw_resolve(start_scope, tkn->text); + AstNode* res = symbol_raw_resolve(context, start_scope, tkn->text); token_toggle_end(tkn); return 
res; } -AstNode* try_symbol_raw_resolve_from_node(AstNode* node, char* symbol) { +AstNode* try_symbol_raw_resolve_from_node(Context *context, AstNode* node, char* symbol) { // CLEANUP: I think this has a lot of duplication from get_scope_from_node. // There are some additional cases handled here, but I think the majority // of this code could be rewritten in terms of get_scope_from_node. @@ -263,80 +279,50 @@ AstNode* try_symbol_raw_resolve_from_node(AstNode* node, char* symbol) { // CLEANUP if (package->package == NULL) { - package->package = package_lookup(package->package_name); + package->package = package_lookup(context, package->package_name); } if (package->package == NULL) { return NULL; } - return symbol_raw_resolve_no_ascend(package->package->scope, symbol); + return symbol_raw_resolve_no_ascend(context, package->package->scope, symbol); } - case Ast_Kind_Foreign_Block: { - AstForeignBlock* fb = (AstForeignBlock *) node; - - if (fb->scope == NULL) - return NULL; - - return symbol_raw_resolve(fb->scope, symbol); + case Ast_Kind_Foreign_Block: + case Ast_Kind_Basic_Type: + case Ast_Kind_Enum_Type: + case Ast_Kind_Poly_Union_Type: + case Ast_Kind_Distinct_Type: + case Ast_Kind_Interface: { + Scope* scope = get_scope_from_node(context, node); + return symbol_raw_resolve_no_ascend(context, scope, symbol); } - case Ast_Kind_Basic_Type: { - AstBasicType *bt = (AstBasicType *) node; + case Ast_Kind_Slice_Type: + case Ast_Kind_DynArr_Type: { + Scope* scope = get_scope_from_node(context, node); - if (bt->scope == NULL) + if (!scope) return NULL; - return symbol_raw_resolve_no_ascend(bt->scope, symbol); - } - - case Ast_Kind_Enum_Type: { - AstEnumType* etype = (AstEnumType *) node; - return symbol_raw_resolve_no_ascend(etype->scope, symbol); + return symbol_raw_resolve(context, scope, symbol); } case Ast_Kind_Struct_Type: { AstStructType* stype = (AstStructType *) node; - // HACK HACK // Temporarily disable the parent scope so that you can't access things // "above" 
the structures scope. This leads to unintended behavior, as when // you are accessing a static element on a structure, you don't expect to - // bleed to the top level scope. This code is currently very GROSS, and - // should be refactored soon. + // bleed to the top level scope. AstNode *result = NULL; AstNode *result = NULL; - if (stype->scope) { - Scope **tmp_parent; - Scope *tmp_parent_backup; - if (stype->stcache && stype->stcache->Struct.constructed_from) { - // Structs scope -> Poly Solution Scope -> Poly Struct Scope -> Enclosing Scope - tmp_parent = &stype->scope->parent->parent->parent; - } else { - tmp_parent = &stype->scope->parent; - } - - tmp_parent_backup = *tmp_parent; - *tmp_parent = NULL; - - result = symbol_raw_resolve(stype->scope, symbol); - - *tmp_parent = tmp_parent_backup; + if (stype->stcache != NULL) { + result = try_symbol_raw_resolve_from_type(context, stype->stcache, symbol); } - if (result == NULL && stype->stcache != NULL) { - Type* struct_type = stype->stcache; - assert(struct_type->kind == Type_Kind_Struct); - - bh_arr_each(AstPolySolution, sln, struct_type->Struct.poly_sln) { - if (token_text_equals(sln->poly_sym->token, symbol)) { - if (sln->kind == PSK_Type) { - result = (AstNode *) sln->type->ast_type; - } else { - result = (AstNode *) sln->value; - } - } - } + if (result == NULL && stype->scope) { + result = symbol_raw_resolve_no_ascend(context, stype->scope, symbol); } return result; @@ -346,28 +332,12 @@ AstNode* try_symbol_raw_resolve_from_node(AstNode* node, char* symbol) { AstUnionType* utype = (AstUnionType *) node; AstNode *result = NULL; - if (utype->scope) { - Scope **tmp_parent; - Scope *tmp_parent_backup; - if (utype->utcache && utype->utcache->Union.constructed_from) { - // Structs scope -> Poly Solution Scope -> Poly Struct Scope -> Enclosing Scope - tmp_parent = &utype->scope->parent->parent->parent; - } else { - tmp_parent = &utype->scope->parent; - } - - tmp_parent_backup = *tmp_parent; - *tmp_parent = NULL; - - 
result = symbol_raw_resolve(utype->scope, symbol); - - *tmp_parent = tmp_parent_backup; + if (utype->utcache != NULL) { + result = try_symbol_raw_resolve_from_type(context, utype->utcache, symbol); } - if (result == NULL && utype->utcache != NULL) { - if (!strcmp(symbol, "tag_enum")) { - result = (AstNode *) utype->utcache->Union.tag_type->ast_type; - } + if (result == NULL && utype->scope) { + result = symbol_raw_resolve_no_ascend(context, utype->scope, symbol); } return result; @@ -375,30 +345,38 @@ AstNode* try_symbol_raw_resolve_from_node(AstNode* node, char* symbol) { case Ast_Kind_Poly_Struct_Type: { AstPolyStructType* stype = ((AstPolyStructType *) node); - return symbol_raw_resolve_no_ascend(stype->scope, symbol); - } + if ((AstType *) node == context->builtins.array_type) { + // We have to ascend on the builtin Array type because it + // "extends" the Slice type. This is the only structure + // that works this way. It might be worth considering + // forcing the use the Slice functions, but then it can + // get confusing about where every function lives, ya know. + // Is "get" in Array or Slice. 
+ return symbol_raw_resolve_limited(context, stype->scope, symbol, 2); - case Ast_Kind_Poly_Union_Type: { - AstPolyUnionType* utype = ((AstPolyUnionType *) node); - return symbol_raw_resolve_no_ascend(utype->scope, symbol); + } else { + return symbol_raw_resolve_no_ascend(context, stype->scope, symbol); + } } case Ast_Kind_Poly_Call_Type: { AstPolyCallType* pctype = (AstPolyCallType *) node; if (pctype->resolved_type) { - return try_symbol_raw_resolve_from_node((AstNode*) pctype->resolved_type->ast_type, symbol); + return try_symbol_raw_resolve_from_type(context, pctype->resolved_type, symbol); } return NULL; } - case Ast_Kind_Distinct_Type: { - AstDistinctType* dtype = (AstDistinctType *) node; - return symbol_raw_resolve_no_ascend(dtype->scope, symbol); - } + case Ast_Kind_Compiler_Extension: { + AstCompilerExtension *ext = (AstCompilerExtension *) node; - case Ast_Kind_Interface: { - AstInterface* inter = (AstInterface *) node; - return symbol_raw_resolve_no_ascend(inter->scope, symbol); + bh_arr_each(AstProceduralMacro *, pmac, ext->proc_macros) { + if (token_text_equals((*pmac)->token, symbol)) { + return (AstNode *) *pmac; + } + } + + return NULL; } default: break; @@ -407,40 +385,112 @@ AstNode* try_symbol_raw_resolve_from_node(AstNode* node, char* symbol) { return NULL; } -AstNode* try_symbol_resolve_from_node(AstNode* node, OnyxToken* token) { +AstNode* try_symbol_resolve_from_node(Context *context, AstNode* node, OnyxToken* token) { token_toggle_end(token); - AstNode* result = try_symbol_raw_resolve_from_node(node, token->text); + AstNode* result = try_symbol_raw_resolve_from_node(context, node, token->text); token_toggle_end(token); return result; } -AstNode* try_symbol_raw_resolve_from_type(Type *type, char* symbol) { +static AstNode* try_symbol_raw_resolve_from_poly_sln(Context *context, bh_arr(AstPolySolution) slns, char *symbol) { + if (slns == NULL) return NULL; + + bh_arr_each(AstPolySolution, sln, slns) { + if 
(token_text_equals(sln->poly_sym->token, symbol)) { + if (sln->kind == PSK_Type) { + AstTypeRawAlias* alias = onyx_ast_node_new(context->ast_alloc, sizeof(AstTypeRawAlias), Ast_Kind_Type_Raw_Alias); + alias->type = context->types.basic[Basic_Kind_Type_Index]; + alias->to = sln->type; + return (AstNode *) alias; + + } else { + return (AstNode *) sln->value; + } + } + } + + return NULL; +} + +AstNode* try_symbol_raw_resolve_from_type(Context *context, Type *type, char* symbol) { while (type->kind == Type_Kind_Pointer) { type = type->Pointer.elem; } - if (type->kind == Type_Kind_Struct) { - if (type->Struct.poly_sln == NULL) return NULL; + switch (type->kind) { + case Type_Kind_Basic: { + return symbol_raw_resolve_no_ascend(context, ((AstBasicType *) type->ast_type)->scope, symbol); + } - bh_arr_each(AstPolySolution, sln, type->Struct.poly_sln) { - if (token_text_equals(sln->poly_sym->token, symbol)) { - if (sln->kind == PSK_Type) { - AstTypeRawAlias* alias = onyx_ast_node_new(context.ast_alloc, sizeof(AstTypeRawAlias), Ast_Kind_Type_Raw_Alias); - alias->type = &basic_types[Basic_Kind_Type_Index]; - alias->to = sln->type; - return (AstNode *) alias; + case Type_Kind_Enum: { + return symbol_raw_resolve_no_ascend(context, ((AstEnumType *) type->ast_type)->scope, symbol); + } - } else { - return (AstNode *) sln->value; - } + case Type_Kind_Slice: { + return symbol_raw_resolve(context, type->Slice.scope, symbol); + } + + case Type_Kind_DynArray: { + return symbol_raw_resolve(context, type->DynArray.scope, symbol); + } + + case Type_Kind_Struct: { + AstNode *poly_sln_res = try_symbol_raw_resolve_from_poly_sln(context, type->Struct.poly_sln, symbol); + if (poly_sln_res) return poly_sln_res; + + i32 limit = 1; + if (type->Struct.constructed_from) { + // Structs scope -> Poly Solution Scope -> Poly Struct Scope -> Enclosing Scope + limit = 3; + } + + return symbol_raw_resolve_limited(context, type->Struct.scope, symbol, limit); + } + + case Type_Kind_Union: { + AstNode 
*poly_sln_res = try_symbol_raw_resolve_from_poly_sln(context, type->Union.poly_sln, symbol); + if (poly_sln_res) return poly_sln_res; + + if (!strcmp(symbol, "tag_enum")) { + return (AstNode *) type->Union.tag_type->ast_type; + } + + i32 limit = 1; + if (type->Union.constructed_from) { + // Structs scope -> Poly Solution Scope -> Poly Struct Scope -> Enclosing Scope + limit = 3; } + + return symbol_raw_resolve_limited(context, type->Union.scope, symbol, limit); + } + + case Type_Kind_PolyStruct: { + return symbol_raw_resolve_no_ascend(context, type->PolyStruct.scope, symbol); + } + + case Type_Kind_PolyUnion: { + return symbol_raw_resolve_no_ascend(context, type->PolyUnion.scope, symbol); } + + case Type_Kind_Distinct: { + return symbol_raw_resolve(context, type->Distinct.scope, symbol); + } + + default: return NULL; } return NULL; } +AstNode* try_symbol_resolve_from_type(Context *context, Type *type, OnyxToken *token) { + token_toggle_end(token); + AstNode* result = try_symbol_raw_resolve_from_type(context, type, token->text); + token_toggle_end(token); + + return result; +} + void scope_clear(Scope* scope) { sh_new_arena(scope->symbols); } @@ -532,27 +582,27 @@ void build_all_overload_options(bh_arr(OverloadOption) overloads, bh_imap* all_o } } -AstTyped* find_matching_overload_by_arguments(bh_arr(OverloadOption) overloads, Arguments* param_args) { +AstTyped* find_matching_overload_by_arguments(Context *context, bh_arr(OverloadOption) overloads, Arguments* param_args) { Arguments args; - arguments_clone(&args, param_args); - arguments_ensure_length(&args, bh_arr_length(args.values) + bh_arr_length(args.named_values)); + arguments_clone(context, &args, param_args); + arguments_ensure_length(context, &args, bh_arr_length(args.values) + bh_arr_length(args.named_values)); // CLEANUP SPEED: This currently rebuilds the complete set of overloads every time one is looked up. // This should be cached in the AstOverloadedFunction or somewhere like that. 
bh_imap all_overloads; - bh_imap_init(&all_overloads, global_heap_allocator, bh_arr_length(overloads) * 2); + bh_imap_init(&all_overloads, context->gp_alloc, bh_arr_length(overloads) * 2); build_all_overload_options(overloads, &all_overloads); AstTyped *matched_overload = NULL; bh_arr_each(bh__imap_entry, entry, all_overloads.entries) { AstTyped* node = (AstTyped *) strip_aliases((AstNode *) entry->key); - arguments_copy(&args, param_args); + arguments_copy(context, &args, param_args); AstFunction* overload = NULL; switch (node->kind) { - case Ast_Kind_Macro: overload = macro_resolve_header((AstMacro *) node, param_args, NULL, 0); break; - case Ast_Kind_Polymorphic_Proc: overload = polymorphic_proc_build_only_header((AstFunction *) node, PPLM_By_Arguments, param_args); break; + case Ast_Kind_Macro: overload = macro_resolve_header(context, (AstMacro *) node, param_args, NULL, 0); break; + case Ast_Kind_Polymorphic_Proc: overload = polymorphic_proc_build_only_header(context, (AstFunction *) node, PPLM_By_Arguments, param_args); break; case Ast_Kind_Function: overload = (AstFunction *) node; arguments_clear_baked_flags(&args); @@ -563,7 +613,7 @@ AstTyped* find_matching_overload_by_arguments(bh_arr(OverloadOption) overloads, // NOTE: Overload is not something that is known to be overloadable. if (overload == NULL) continue; if (overload->kind != Ast_Kind_Function) continue; - if (overload == (AstFunction *) &node_that_signals_a_yield || overload->type == NULL) { + if (overload == (AstFunction *) &context->node_that_signals_a_yield || overload->type == NULL) { // If it was not possible to create the type for this procedure, tell the // caller that this should yield and try again later. @@ -571,18 +621,18 @@ AstTyped* find_matching_overload_by_arguments(bh_arr(OverloadOption) overloads, // work in the future, then it has to take precedence over the other options available. 
bh_imap_free(&all_overloads); bh_arr_free(args.values); - return (AstTyped *) &node_that_signals_a_yield; + return (AstTyped *) &context->node_that_signals_a_yield; } assert(overload->type->kind == Type_Kind_Function); arguments_remove_baked(&args); - arguments_ensure_length(&args, get_argument_buffer_size(&overload->type->Function, &args)); + arguments_ensure_length(context, &args, get_argument_buffer_size(context, &overload->type->Function, &args)); // NOTE: If the arguments cannot be placed successfully in the parameters list - if (!fill_in_arguments(&args, (AstNode *) overload, NULL, 0)) continue; + if (!fill_in_arguments(context, &args, (AstNode *) overload, NULL, 0)) continue; VarArgKind va_kind; - TypeMatch tm = check_arguments_against_type(&args, &overload->type->Function, &va_kind, NULL, NULL, NULL); + TypeMatch tm = check_arguments_against_type(context, &args, &overload->type->Function, &va_kind, NULL, NULL, NULL); if (tm == TYPE_MATCH_SUCCESS) { matched_overload = node; break; @@ -591,7 +641,7 @@ AstTyped* find_matching_overload_by_arguments(bh_arr(OverloadOption) overloads, if (tm == TYPE_MATCH_YIELD) { bh_imap_free(&all_overloads); bh_arr_free(args.values); - return (AstTyped *) &node_that_signals_a_yield; + return (AstTyped *) &context->node_that_signals_a_yield; } } @@ -600,11 +650,11 @@ AstTyped* find_matching_overload_by_arguments(bh_arr(OverloadOption) overloads, return matched_overload; } -AstTyped* find_matching_overload_by_type(bh_arr(OverloadOption) overloads, Type* type) { +AstTyped* find_matching_overload_by_type(Context *context, bh_arr(OverloadOption) overloads, Type* type) { if (type->kind != Type_Kind_Function) return NULL; bh_imap all_overloads; - bh_imap_init(&all_overloads, global_heap_allocator, bh_arr_length(overloads) * 2); + bh_imap_init(&all_overloads, context->gp_alloc, bh_arr_length(overloads) * 2); build_all_overload_options(overloads, &all_overloads); AstTyped *matched_overload = NULL; @@ -613,14 +663,14 @@ AstTyped* 
find_matching_overload_by_type(bh_arr(OverloadOption) overloads, Type* AstTyped* node = (AstTyped *) entry->key; if (node->kind == Ast_Kind_Overloaded_Function) continue; - TypeMatch tm = unify_node_and_type(&node, type); + TypeMatch tm = unify_node_and_type(context, &node, type); if (tm == TYPE_MATCH_SUCCESS) { matched_overload = node; break; } if (tm == TYPE_MATCH_YIELD) { - return (AstTyped *) &node_that_signals_a_yield; + return (AstTyped *) &context->node_that_signals_a_yield; } } @@ -628,12 +678,12 @@ AstTyped* find_matching_overload_by_type(bh_arr(OverloadOption) overloads, Type* return matched_overload; } -void report_unable_to_match_overload(AstCall* call, bh_arr(OverloadOption) overloads) { - char* arg_str = bh_alloc(global_scratch_allocator, 1024); +void report_unable_to_match_overload(Context *context, AstCall* call, bh_arr(OverloadOption) overloads) { + char* arg_str = bh_alloc(context->scratch_alloc, 1024); arg_str[0] = '\0'; bh_arr_each(AstTyped *, arg, call->args.values) { - strncat(arg_str, node_get_type_name(*arg), 1023); + strncat(arg_str, node_get_type_name(context, *arg), 1023); if (arg != &bh_arr_last(call->args.values)) strncat(arg_str, ", ", 1023); @@ -650,41 +700,41 @@ void report_unable_to_match_overload(AstCall* call, bh_arr(OverloadOption) overl token_toggle_end((*named_value)->token); strncat(arg_str, "=", 1023); - strncat(arg_str, node_get_type_name((*named_value)->value), 1023); // CHECK: this might say 'unknown'. + strncat(arg_str, node_get_type_name(context, (*named_value)->value), 1023); // CHECK: this might say 'unknown'. 
if (named_value != &bh_arr_last(call->args.named_values)) strncat(arg_str, ", ", 1023); } } - onyx_report_error(call->token->pos, Error_Critical, "Unable to match overloaded function with provided argument types: (%s)", arg_str); + ONYX_ERROR(call->token->pos, Error_Critical, "Unable to match overloaded function with provided argument types: (%s)", arg_str); - bh_free(global_scratch_allocator, arg_str); + bh_free(context->scratch_alloc, arg_str); // CLEANUP SPEED: This currently rebuilds the complete set of overloads every time one is looked up. // This should be cached in the AstOverloadedFunction or somewhere like that. bh_imap all_overloads; - bh_imap_init(&all_overloads, global_heap_allocator, bh_arr_length(overloads) * 2); + bh_imap_init(&all_overloads, context->gp_alloc, bh_arr_length(overloads) * 2); build_all_overload_options(overloads, &all_overloads); i32 i = 1; bh_arr_each(bh__imap_entry, entry, all_overloads.entries) { AstTyped* node = (AstTyped *) strip_aliases((AstNode *) entry->key); - onyx_report_error(node->token->pos, Error_Critical, "Here is one of the overloads. %d/%d", i++, bh_arr_length(all_overloads.entries)); + ONYX_ERROR(node->token->pos, Error_Critical, "Here is one of the overloads. 
%d/%d", i++, bh_arr_length(all_overloads.entries)); } bh_imap_free(&all_overloads); } -void report_incorrect_overload_expected_type(Type *given, Type *expected, OnyxToken *overload, OnyxToken *group) { - onyx_report_error(overload->pos, Error_Critical, +void report_incorrect_overload_expected_type(Context *context, Type *given, Type *expected, OnyxToken *overload, OnyxToken *group) { + ONYX_ERROR(overload->pos, Error_Critical, "Expected this overload option to return '%s', but instead it returns '%s'.", - type_get_name(expected), type_get_name(given)); + type_get_name(context, expected), type_get_name(context, given)); - onyx_report_error(group->pos, Error_Critical, "Here is where the overloaded function was defined."); + ONYX_ERROR(group->pos, Error_Critical, "Here is where the overloaded function was defined."); } -static TypeMatch ensure_overload_returns_correct_type_job(void *raw_data) { +static TypeMatch ensure_overload_returns_correct_type_job(Context *context, void *raw_data) { OverloadReturnTypeCheck *data = raw_data; Type *expected_type = data->expected_type; AstTyped *node = data->node; @@ -702,7 +752,7 @@ static TypeMatch ensure_overload_returns_correct_type_job(void *raw_data) { // were not actually used. This creates a problem here because this code // will still wait for them. As a cheap solution, if there is a cycle detected, // return success, even if the types may not match. - if (context.cycle_almost_detected > 0) { + if (context->cycle_almost_detected > 0) { return TYPE_MATCH_SUCCESS; } @@ -715,7 +765,7 @@ static TypeMatch ensure_overload_returns_correct_type_job(void *raw_data) { if (!func->type->Function.return_type) return TYPE_MATCH_YIELD; Type *return_type = func->type->Function.return_type; - if (return_type == &type_auto_return) return TYPE_MATCH_YIELD; + if (return_type == context->types.auto_return) return TYPE_MATCH_YIELD; // See the note about using Polymorphic Structures as expected return types, // in check_overloaded_function(). 
@@ -726,32 +776,32 @@ static TypeMatch ensure_overload_returns_correct_type_job(void *raw_data) { return TYPE_MATCH_SUCCESS; } - report_incorrect_overload_expected_type(return_type, expected_type, func->token, data->group); + report_incorrect_overload_expected_type(context, return_type, expected_type, func->token, data->group); return TYPE_MATCH_FAILED; } - if (!types_are_compatible(return_type, expected_type)) { - report_incorrect_overload_expected_type(return_type, expected_type, func->token, data->group); + if (!types_are_compatible(context, return_type, expected_type)) { + report_incorrect_overload_expected_type(context, return_type, expected_type, func->token, data->group); return TYPE_MATCH_FAILED; } return TYPE_MATCH_SUCCESS; } -void ensure_overload_returns_correct_type(AstTyped *overload, AstOverloadedFunction *group) { +void ensure_overload_returns_correct_type(Context *context, AstTyped *overload, AstOverloadedFunction *group) { // This might not be entirely right as the type might not have been constructed yet, I think? // // Also, as a HACK, this does not check for the correct return type when errors are disabled. // Errors are only disabled when doing something non-permantent, like checking an interface // constraint, so this is a cheap way to tell if that is where we are coming from. 
// - if (group->expected_return_type && onyx_errors_are_enabled()) { - OverloadReturnTypeCheck *data = bh_alloc_item(context.ast_alloc, OverloadReturnTypeCheck); + if (group->expected_return_type && onyx_errors_are_enabled(context)) { + OverloadReturnTypeCheck *data = bh_alloc_item(context->ast_alloc, OverloadReturnTypeCheck); data->expected_type = group->expected_return_type; data->node = overload; data->group = group->token; - entity_heap_add_job(&context.entities, ensure_overload_returns_correct_type_job, data); + entity_heap_add_job(&context->entities, ensure_overload_returns_correct_type_job, data); } } @@ -762,7 +812,7 @@ void ensure_overload_returns_correct_type(AstTyped *overload, AstOverloadedFunct // // // TODO: Write this documentation -void expand_macro(AstCall** pcall, AstFunction* template) { +void expand_macro(Context *context, AstCall** pcall, AstFunction* template) { AstCall* call = *pcall; AstMacro* macro = (AstMacro *) call->callee; assert(macro->kind == Ast_Kind_Macro); @@ -772,36 +822,50 @@ void expand_macro(AstCall** pcall, AstFunction* template) { assert(template->type->kind == Type_Kind_Function); bh_arr(AstNode *) nodes_that_need_entities=NULL; - bh_arr_new(global_heap_allocator, nodes_that_need_entities, 4); + bh_arr_new(context->gp_alloc, nodes_that_need_entities, 4); - AstBlock* expansion = (AstBlock *) ast_clone_with_captured_entities(context.ast_alloc, template->body, &nodes_that_need_entities); + AstBlock* expansion = (AstBlock *) ast_clone_with_captured_entities(context, template->body, &nodes_that_need_entities); expansion->rules = Block_Rule_Macro; expansion->scope = NULL; expansion->next = call->next; + expansion->macro_generated_from = call->token; AstNode* subst = (AstNode *) expansion; - if (template->type->Function.return_type != &basic_types[Basic_Kind_Void]) { + if (template->type->Function.return_type != context->types.basic[Basic_Kind_Void]) { expansion->rules = Block_Rule_Do_Block; - AstDoBlock* doblock = (AstDoBlock *) 
onyx_ast_node_new(context.ast_alloc, sizeof(AstDoBlock), Ast_Kind_Do_Block); + AstDoBlock* doblock = (AstDoBlock *) onyx_ast_node_new(context->ast_alloc, sizeof(AstDoBlock), Ast_Kind_Do_Block); doblock->token = expansion->token; doblock->block = expansion; doblock->type = template->type->Function.return_type; doblock->next = expansion->next; + doblock->named_return_locals = NULL; expansion->next = NULL; + if (template->named_return_locals) { + bh_arr_new(context->ast_alloc, doblock->named_return_locals, bh_arr_length(template->named_return_locals)); + + bh_arr_each(AstLocal *, named_return, template->named_return_locals) { + AstLocal *cloned = (AstLocal *) ast_clone(context, *named_return); + bh_arr_push(doblock->named_return_locals, cloned); + + cloned->next = doblock->block->body; + doblock->block->body = (AstNode *) cloned; + } + } + subst = (AstNode *) doblock; } - Scope* argument_scope = scope_create(context.ast_alloc, NULL, call->token->pos); + Scope* argument_scope = scope_create(context, NULL, call->token->pos); if (expansion->binding_scope != NULL) - scope_include(argument_scope, expansion->binding_scope, call->token->pos); + scope_include(context, argument_scope, expansion->binding_scope, call->token->pos); expansion->binding_scope = argument_scope; // HACK HACK HACK This is probably very wrong. I don't know what guarentees that // the paramters and arguments are going to be in the same order exactly. 
- Type *any_type = type_build_from_ast(context.ast_alloc, builtin_any_type); + Type *any_type = type_build_from_ast(context, context->builtins.any_type); fori (i, 0, bh_arr_length(call->args.values)) { AstNode *value = (AstNode *) ((AstArgument *) call->args.values[i])->value; assert(template->params[i].local->type); @@ -809,17 +873,17 @@ void expand_macro(AstCall** pcall, AstFunction* template) { Type *param_type = template->params[i].local->type; if (param_type == any_type || (param_type->kind == Type_Kind_VarArgs && param_type->VarArgs.elem == any_type)) { - onyx_report_error(macro->token->pos, Error_Critical, "Currently, macros do not support arguments of type 'any' or '..any'."); + ONYX_ERROR(macro->token->pos, Error_Critical, "Currently, macros do not support arguments of type 'any' or '..any'."); } - symbol_introduce(argument_scope, template->params[i].local->token, value); + symbol_introduce(context, argument_scope, template->params[i].local->token, value); } if (template->poly_scope != NULL) - scope_include(argument_scope, template->poly_scope, call->token->pos); + scope_include(context, argument_scope, template->poly_scope, call->token->pos); if (bh_arr_length(nodes_that_need_entities) > 0) { - // :CopyPaste from symres_function + // :CopyPaste from check_function bh_arr_each(AstNode *, node, nodes_that_need_entities) { // This makes a lot of assumptions about how these nodes are being processed, // and I don't want to start using this with other nodes without considering @@ -834,12 +898,12 @@ void expand_macro(AstCall** pcall, AstFunction* template) { scope = static_if->defined_in_scope; if (template->poly_scope) { - scope = scope_create(context.ast_alloc, scope, static_if->token->pos); - scope_include(scope, template->poly_scope, static_if->token->pos); + scope = scope_create(context, scope, static_if->token->pos); + scope_include(context, scope, template->poly_scope, static_if->token->pos); } } - add_entities_for_node(NULL, *node, scope, 
macro->entity->package); + add_entities_for_node(&context->entities, NULL, *node, scope, macro->entity->package); } } @@ -849,26 +913,26 @@ void expand_macro(AstCall** pcall, AstFunction* template) { return; } -AstFunction* macro_resolve_header(AstMacro* macro, Arguments* args, OnyxToken* callsite, b32 error_if_failed) { +AstFunction* macro_resolve_header(Context *context, AstMacro* macro, Arguments* args, OnyxToken* callsite, b32 error_if_failed) { switch (macro->body->kind) { case Ast_Kind_Function: return (AstFunction *) macro->body; case Ast_Kind_Polymorphic_Proc: { AstFunction* pp = (AstFunction *) macro->body; - ensure_polyproc_cache_is_created(pp); + ensure_polyproc_cache_is_created(context, pp); - bh_arr(AstPolySolution) slns = find_polymorphic_slns(pp, PPLM_By_Arguments, args, callsite, error_if_failed); + bh_arr(AstPolySolution) slns = find_polymorphic_slns(context, pp, PPLM_By_Arguments, args, callsite, error_if_failed); if (slns == NULL) { - if (flag_to_yield) { - flag_to_yield = 0; - return (AstFunction *) &node_that_signals_a_yield; + if (context->polymorph.flag_to_yield) { + context->polymorph.flag_to_yield = 0; + return (AstFunction *) &context->node_that_signals_a_yield; } return NULL; } - return polymorphic_proc_build_only_header_with_slns(pp, slns, error_if_failed); + return polymorphic_proc_build_only_header_with_slns(context, pp, slns, error_if_failed); } default: assert("Bad macro body type." 
&& 0); @@ -881,14 +945,14 @@ AstFunction* macro_resolve_header(AstMacro* macro, Arguments* args, OnyxToken* c // // Arguments resolving // -static i32 lookup_idx_by_name(AstNode* provider, char* name) { +static i32 lookup_idx_by_name(Context *context, AstNode* provider, char* name) { switch (provider->kind) { case Ast_Kind_Struct_Literal: { AstStructLiteral* sl = (AstStructLiteral *) provider; assert(sl->type); StructMember s; - if (!type_lookup_member(sl->type, name, &s)) return -1; + if (!type_lookup_member(context, sl->type, name, &s)) return -1; if (s.included_through_use) return -1; return s.idx; @@ -915,7 +979,7 @@ static i32 lookup_idx_by_name(AstNode* provider, char* name) { } } -static AstNode* lookup_default_value_by_idx(AstNode* provider, i32 idx) { +static AstNode* lookup_default_value_by_idx(Context *context, AstNode* provider, i32 idx) { switch (provider->kind) { case Ast_Kind_Struct_Literal: { AstStructLiteral* sl = (AstStructLiteral *) provider; @@ -940,7 +1004,7 @@ static AstNode* lookup_default_value_by_idx(AstNode* provider, i32 idx) { AstTyped* default_value = func->params[idx].default_value; if (default_value == NULL) return NULL; - AstArgument* arg = make_argument(context.ast_alloc, default_value); + AstArgument* arg = make_argument(context, default_value); return (AstNode *) arg; } @@ -984,10 +1048,10 @@ static i32 non_baked_argument_count(Arguments* args) { return count; } -i32 get_argument_buffer_size(TypeFunction* type, Arguments* args) { +i32 get_argument_buffer_size(Context *context, TypeFunction* type, Arguments* args) { i32 non_vararg_param_count = (i32) type->param_count; if (non_vararg_param_count > 0) { - if (type->params[type->param_count - 1] == builtin_vararg_type_type) non_vararg_param_count--; + if (type->params[type->param_count - 1] == context->builtins.vararg_type_type) non_vararg_param_count--; if (type->params[type->param_count - 1]->kind == Type_Kind_VarArgs) non_vararg_param_count--; } @@ -996,7 +1060,7 @@ i32 
get_argument_buffer_size(TypeFunction* type, Arguments* args) { // NOTE: The values array can be partially filled out, and is the resulting array. // Returns if all the values were filled in. -b32 fill_in_arguments(Arguments* args, AstNode* provider, char** err_msg, b32 insert_zero_values) { +b32 fill_in_arguments(Context *context, Arguments* args, AstNode* provider, char** err_msg, b32 insert_zero_values) { { // Delete baked arguments // :ArgumentResolvingIsComplicated @@ -1028,22 +1092,22 @@ b32 fill_in_arguments(Arguments* args, AstNode* provider, char** err_msg, b32 in } token_toggle_end(named_value->token); - i32 idx = lookup_idx_by_name(provider, named_value->token->text); + i32 idx = lookup_idx_by_name(context, provider, named_value->token->text); if (idx == -1) { - if (err_msg) *err_msg = bh_aprintf(global_scratch_allocator, "'%s' is not a valid named parameter here.", named_value->token->text); + if (err_msg) *err_msg = bh_aprintf(context->scratch_alloc, "'%s' is not a valid named parameter here.", named_value->token->text); token_toggle_end(named_value->token); return 0; } // assert(idx < bh_arr_length(args->values)); if (idx >= bh_arr_length(args->values)) { - if (err_msg) *err_msg = bh_aprintf(global_scratch_allocator, "Error placing value with name '%s' at index '%d'.", named_value->token->text, idx); + if (err_msg) *err_msg = bh_aprintf(context->scratch_alloc, "Error placing value with name '%s' at index '%d'.", named_value->token->text, idx); token_toggle_end(named_value->token); return 0; } if (args->values[idx] != NULL && args->values[idx] != named_value->value) { - if (err_msg) *err_msg = bh_aprintf(global_scratch_allocator, "Multiple values given for parameter named '%s'.", named_value->token->text); + if (err_msg) *err_msg = bh_aprintf(context->scratch_alloc, "Multiple values given for parameter named '%s'.", named_value->token->text); token_toggle_end(named_value->token); return 0; } @@ -1055,21 +1119,24 @@ b32 fill_in_arguments(Arguments* 
args, AstNode* provider, char** err_msg, b32 in b32 success = 1; fori (idx, 0, bh_arr_length(args->values)) { - if (args->values[idx] == NULL) args->values[idx] = (AstTyped *) lookup_default_value_by_idx(provider, idx); + if (args->values[idx] == NULL) { + args->values[idx] = (AstTyped *) lookup_default_value_by_idx(context, provider, idx); + } if (args->values[idx] == NULL) { if (insert_zero_values) { assert(provider->token); - args->values[idx] = (AstTyped *) make_zero_value(context.ast_alloc, provider->token, NULL); + args->values[idx] = (AstTyped *) make_zero_value(context, provider->token, NULL); } else { - if (err_msg) *err_msg = bh_aprintf(global_scratch_allocator, "No value given for %d%s argument.", idx + 1, bh_num_suffix(idx + 1)); + if (err_msg) *err_msg = bh_aprintf(context->scratch_alloc, "No value given for %d%s argument.", idx + 1, bh_num_suffix(idx + 1)); success = 0; + break; } } } i32 maximum_arguments = maximum_argument_count(provider); if (bh_arr_length(args->values) > maximum_arguments) { - if (err_msg) *err_msg = bh_aprintf(global_scratch_allocator, "Too many values provided. Expected at most %d.", maximum_arguments); + if (err_msg) *err_msg = bh_aprintf(context->scratch_alloc, "Too many values provided. Expected at most %d.", maximum_arguments); success = 0; } @@ -1087,7 +1154,7 @@ typedef enum ArgState { AS_Expecting_Untyped_VA, } ArgState; -TypeMatch check_arguments_against_type(Arguments* args, TypeFunction* func_type, VarArgKind* va_kind, +TypeMatch check_arguments_against_type(Context *context, Arguments* args, TypeFunction* func_type, VarArgKind* va_kind, OnyxToken* location, char* func_name, OnyxError* error) { // In this function, if error is not NULL, then it is assumed that permanent changes can // be made. Otherwise, permanent changes should be avoided; only detecting issues should be done. 
@@ -1098,11 +1165,11 @@ TypeMatch check_arguments_against_type(Arguments* args, TypeFunction* func_type, if (error) error->rank = Error_Critical; bh_arr(AstArgument *) arg_arr = (bh_arr(AstArgument *)) args->values; - i32 arg_count = get_argument_buffer_size(func_type, args); + i32 arg_count = get_argument_buffer_size(context, func_type, args); Type **formal_params = func_type->params; Type* variadic_type = NULL; - i64 any_type_id = type_build_from_ast(context.ast_alloc, builtin_any_type)->id; + i64 any_type_id = type_build_from_ast(context, context->builtins.any_type)->id; ArgState arg_state = AS_Expecting_Exact; u32 arg_pos = 0; @@ -1125,10 +1192,22 @@ TypeMatch check_arguments_against_type(Arguments* args, TypeFunction* func_type, if (arg_pos >= (u32) bh_arr_length(arg_arr)) goto type_checking_done; assert(arg_arr[arg_pos]->kind == Ast_Kind_Argument); - TypeMatch tm = unify_node_and_type_(&arg_arr[arg_pos]->value, formal_params[arg_pos], permanent); + + TypeMatch tm = unify_node_and_type_(context, &arg_arr[arg_pos]->value, formal_params[arg_pos], permanent); if (tm == TYPE_MATCH_YIELD) return tm; if (tm == TYPE_MATCH_SPECIAL) return tm; if (tm == TYPE_MATCH_FAILED) { + // Handle the weird case of `x: any` as an argument. + if (formal_params[arg_pos]->id == any_type_id) { + resolve_expression_type(context, arg_arr[arg_pos]->value); + if (error != NULL) { + arg_arr[arg_pos]->pass_as_any = 1; + } + + arg_arr[arg_pos]->va_kind = VA_Kind_Not_VA; + break; + } + if (error != NULL) { AstArgument *the_arg = (void *) arg_arr[arg_pos]; if (the_arg->used_as_lval_of_method_call) { @@ -1138,32 +1217,26 @@ TypeMatch check_arguments_against_type(Arguments* args, TypeFunction* func_type, // and its because it wanted a &T, but got a T. This is likely // due to the fact that the method call argument is not an lval. 
error->pos = arg_arr[arg_pos]->token->pos; - error->text = bh_aprintf(global_heap_allocator, + error->text = bh_aprintf(context->gp_alloc, "This method expects a pointer to the first argument, which normally `->` would do automatically, but in this case, the left-hand side is not an l-value, so its address cannot be taken. Try storing it in a temporary variable first, then calling the method." ); return tm; } } - error->pos = arg_arr[arg_pos]->token->pos; - error->text = bh_aprintf(global_heap_allocator, + if (arg_arr[arg_pos]->token) error->pos = arg_arr[arg_pos]->token->pos; + + error->text = bh_aprintf(context->gp_alloc, "The procedure '%s' expects a value of type '%s' for %d%s parameter, got '%s'.", func_name, - type_get_name(formal_params[arg_pos]), + type_get_name(context, formal_params[arg_pos]), arg_pos + 1, bh_num_suffix(arg_pos + 1), - node_get_type_name(arg_arr[arg_pos]->value)); + node_get_type_name(context, arg_arr[arg_pos]->value)); } return tm; } - if (arg_arr[arg_pos]->value->type && arg_arr[arg_pos]->value->type->id != any_type_id && formal_params[arg_pos]->id == any_type_id) { - resolve_expression_type(arg_arr[arg_pos]->value); - if (error != NULL) { - arg_arr[arg_pos]->pass_as_any = 1; - } - } - arg_arr[arg_pos]->va_kind = VA_Kind_Not_VA; break; } @@ -1174,7 +1247,7 @@ TypeMatch check_arguments_against_type(Arguments* args, TypeFunction* func_type, if (arg_pos >= (u32) bh_arr_length(arg_arr)) goto type_checking_done; if (variadic_type->id == any_type_id) { - resolve_expression_type(arg_arr[arg_pos]->value); + resolve_expression_type(context, arg_arr[arg_pos]->value); if (arg_arr[arg_pos]->value->type == NULL) { if (error != NULL) { error->pos = arg_arr[arg_pos]->token->pos; @@ -1190,16 +1263,16 @@ TypeMatch check_arguments_against_type(Arguments* args, TypeFunction* func_type, *va_kind = VA_Kind_Typed; assert(arg_arr[arg_pos]->kind == Ast_Kind_Argument); - TypeMatch tm = unify_node_and_type_(&arg_arr[arg_pos]->value, variadic_type, permanent); + 
TypeMatch tm = unify_node_and_type_(context, &arg_arr[arg_pos]->value, variadic_type, permanent); if (tm == TYPE_MATCH_YIELD) return tm; if (tm == TYPE_MATCH_FAILED) { if (error != NULL) { error->pos = arg_arr[arg_pos]->token->pos, - error->text = bh_aprintf(global_heap_allocator, + error->text = bh_aprintf(context->gp_alloc, "The procedure '%s' expects a value of type '%s' for the variadic parameter, got '%s'.", func_name, - type_get_name(variadic_type), - node_get_type_name(arg_arr[arg_pos]->value)); + type_get_name(context, variadic_type), + node_get_type_name(context, arg_arr[arg_pos]->value)); } return tm; } @@ -1214,7 +1287,7 @@ TypeMatch check_arguments_against_type(Arguments* args, TypeFunction* func_type, if (arg_pos >= (u32) bh_arr_length(arg_arr)) goto type_checking_done; assert(arg_arr[arg_pos]->kind == Ast_Kind_Argument); - resolve_expression_type(arg_arr[arg_pos]->value); + resolve_expression_type(context, arg_arr[arg_pos]->value); if (arg_arr[arg_pos]->value->type == NULL) { if (error != NULL) { error->pos = arg_arr[arg_pos]->token->pos; @@ -1234,8 +1307,8 @@ TypeMatch check_arguments_against_type(Arguments* args, TypeFunction* func_type, type_checking_done: if (arg_pos < func_type->needed_param_count) { if (error != NULL) { - error->pos = location->pos; - error->text = bh_aprintf(global_heap_allocator, + if (location) error->pos = location->pos; + error->text = bh_aprintf(context->gp_alloc, "Too few arguments to function call. Expected at least %d argument%s, but only got %d.", func_type->needed_param_count, bh_num_plural(func_type->needed_param_count), arg_pos); } @@ -1244,8 +1317,8 @@ TypeMatch check_arguments_against_type(Arguments* args, TypeFunction* func_type, if (arg_pos < (u32) arg_count) { if (error != NULL) { - error->pos = location->pos; - error->text = bh_aprintf(global_heap_allocator, + if (location) error->pos = location->pos; + error->text = bh_aprintf(context->gp_alloc, "Too many arguments to function call. 
Expected at most %d argument%s, but got %d.", arg_pos, bh_num_plural(arg_pos), arg_count); } @@ -1322,6 +1395,7 @@ i32 string_process_escape_seqs(char* dest, char* src, i32 len) { case 'v': *dest++ = '\v'; total_len++; break; case 'e': *dest++ = '\e'; total_len++; break; case '"': *dest++ = '"'; total_len++; break; + case '\'': *dest++ = '\''; total_len++; break; case '\\': *dest++ = '\\'; total_len++; break; case 'x': { u8 ch1 = src[i + 1]; @@ -1372,7 +1446,7 @@ i32 string_process_escape_seqs(char* dest, char* src, i32 len) { } -static Scope **get_scope_from_node_helper(AstNode *node) { +static Scope **get_scope_from_node_helper(Context *context, AstNode *node) { b32 used_pointer = 0; while (1) { @@ -1405,6 +1479,11 @@ static Scope **get_scope_from_node_helper(AstNode *node) { return &package->package->scope; } + case Ast_Kind_Foreign_Block: { + AstForeignBlock* fb = (AstForeignBlock *) node; + return &fb->scope; + } + case Ast_Kind_Basic_Type: { AstBasicType* btype = (AstBasicType *) node; return &btype->scope; @@ -1415,6 +1494,18 @@ static Scope **get_scope_from_node_helper(AstNode *node) { return &etype->scope; } + case Ast_Kind_Slice_Type: { + Type *t = type_build_from_ast(context, (AstType *) node); + if (t) return &t->Slice.scope; + return NULL; + } + + case Ast_Kind_DynArr_Type: { + Type *t = type_build_from_ast(context, (AstType *) node); + if (t) return &t->DynArray.scope; + return NULL; + } + case Ast_Kind_Struct_Type: { AstStructType* stype = (AstStructType *) node; return &stype->scope; @@ -1436,10 +1527,9 @@ static Scope **get_scope_from_node_helper(AstNode *node) { } case Ast_Kind_Poly_Call_Type: { - AstPolyCallType* pctype = (AstPolyCallType *) node; - Type *t = type_build_from_ast(context.ast_alloc, (AstType *) pctype); + Type *t = type_build_from_ast(context, (AstType *) node); if (t) { - return &((AstStructType *) t->ast_type)->scope; + return &t->Struct.scope; } return NULL; } @@ -1460,18 +1550,18 @@ static Scope 
**get_scope_from_node_helper(AstNode *node) { return NULL; } -Scope *get_scope_from_node(AstNode *node) { +Scope *get_scope_from_node(Context *context, AstNode *node) { if (!node) return NULL; - Scope **pscope = get_scope_from_node_helper(node); + Scope **pscope = get_scope_from_node_helper(context, node); if (!pscope) return NULL; return *pscope; } -Scope *get_scope_from_node_or_create(AstNode *node) { +Scope *get_scope_from_node_or_create(Context *context, AstNode *node) { if (!node) return NULL; - Scope **pscope = get_scope_from_node_helper(node); + Scope **pscope = get_scope_from_node_helper(context, node); if (!pscope) return NULL; // Create the scope if it does not exist. @@ -1482,17 +1572,17 @@ Scope *get_scope_from_node_or_create(AstNode *node) { OnyxFilePos pos = {0}; if (node->token) pos = node->token->pos; - *pscope = scope_create(context.ast_alloc, NULL, pos); + *pscope = scope_create(context, NULL, pos); } return *pscope; } -u32 levenshtein_distance(const char *str1, const char *str2) { +u32 levenshtein_distance(Context *context, const char *str1, const char *str2) { i32 m = strlen(str1) + 1; i32 n = strlen(str2) + 1; - i32 *d = bh_alloc_array(global_scratch_allocator, i32, m * n); + i32 *d = bh_alloc_array(context->scratch_alloc, i32, m * n); fori (i, 0, m * n) d[i] = 0; fori (i, 0, m) d[i * n + 0] = i; @@ -1513,7 +1603,7 @@ u32 levenshtein_distance(const char *str1, const char *str2) { return d[m * n - 1]; } -char *find_closest_symbol_in_scope(Scope *scope, char *sym, u32 *out_distance) { +char *find_closest_symbol_in_scope(Context *context, Scope *scope, char *sym, u32 *out_distance) { *out_distance = 0x7fffffff; if (scope == NULL) return NULL; @@ -1523,7 +1613,7 @@ char *find_closest_symbol_in_scope(Scope *scope, char *sym, u32 *out_distance) { if (scope->symbols[i].value && scope->symbols[i].value->flags & Ast_Flag_Symbol_Invisible) continue; char *key = scope->symbols[i].key; - u32 d = levenshtein_distance(key, sym); + u32 d = 
levenshtein_distance(context, key, sym); if (d < *out_distance) { *out_distance = d; closest = (char *) key; @@ -1533,13 +1623,13 @@ char *find_closest_symbol_in_scope(Scope *scope, char *sym, u32 *out_distance) { return closest; } -char *find_closest_symbol_in_scope_and_parents(Scope *scope, char *sym) { +char *find_closest_symbol_in_scope_and_parents(Context *context, Scope *scope, char *sym) { u32 min_dist = 0x7fffffff; u32 tmp_dist; char *closest = NULL; while (scope != NULL) { - char *tmp_closest = find_closest_symbol_in_scope(scope, sym, &tmp_dist); + char *tmp_closest = find_closest_symbol_in_scope(context, scope, sym, &tmp_dist); if (tmp_dist < min_dist) { min_dist = tmp_dist; closest = tmp_closest; @@ -1551,30 +1641,23 @@ char *find_closest_symbol_in_scope_and_parents(Scope *scope, char *sym) { return closest; } -char *find_closest_symbol_in_node(AstNode* node, char *sym) { - Scope *scope = get_scope_from_node(node); +char *find_closest_symbol_in_node(Context *context, AstNode* node, char *sym) { + Scope *scope = get_scope_from_node(context, node); if (!scope) { if (node && node->kind == Ast_Kind_Poly_Call_Type) { AstPolyCallType* pcall = (AstPolyCallType *) node; - return find_closest_symbol_in_node((AstNode *) pcall->callee, sym); + return find_closest_symbol_in_node(context, (AstNode *) pcall->callee, sym); } return NULL; } u32 dist; - return find_closest_symbol_in_scope(scope, sym, &dist); + return find_closest_symbol_in_scope(context, scope, sym, &dist); } -void track_declaration_for_tags(AstNode *node) { - if (context.options->generate_tag_file) { - bh_arr_push(context.tag_locations, node); - } -} - - static u32 symbol_info_get_file_id(SymbolInfoTable *syminfo, const char *filename) { u32 file_id; if (shgeti(syminfo->files, filename) == -1) { @@ -1588,11 +1671,11 @@ static u32 symbol_info_get_file_id(SymbolInfoTable *syminfo, const char *filenam return file_id; } -void track_declaration_for_symbol_info(OnyxFilePos pos, AstNode *node) { - if 
(!context.options->generate_symbol_info_file) return; +void track_declaration_for_symbol_info(Context *context, OnyxFilePos pos, AstNode *node) { + if (!context->options->generate_symbol_info_file) return; if (pos.filename == NULL) return; - SymbolInfoTable *syminfo = context.symbol_info; + SymbolInfoTable *syminfo = context->symbol_info; assert(syminfo); if (bh_imap_has(&syminfo->node_to_id, (u64) node)) return; @@ -1606,29 +1689,39 @@ void track_declaration_for_symbol_info(OnyxFilePos pos, AstNode *node) { symbol.line = pos.line; symbol.column = pos.column; symbol.documentation = NULL; + symbol.documentation_length = 0; bh_arr_push(syminfo->symbols, symbol); bh_imap_put(&syminfo->node_to_id, (u64) node, (u64) symbol_id); } -void track_documentation_for_symbol_info(AstNode *node, OnyxToken *documentation) { - if (!context.options->generate_lsp_info_file) return; - if (!context.options->generate_symbol_info_file) return; +void track_documentation_for_symbol_info(Context *context, AstNode *node, AstBinding *binding) { + if (!context->options->generate_lsp_info_file) return; + if (!context->options->generate_symbol_info_file) return; - SymbolInfoTable *syminfo = context.symbol_info; + SymbolInfoTable *syminfo = context->symbol_info; assert(syminfo); if (!bh_imap_has(&syminfo->node_to_id, (u64) node)) return; u64 symbol_id = bh_imap_get(&syminfo->node_to_id, (u64) node); - syminfo->symbols[symbol_id].documentation = documentation; + if (binding->documentation_token_old) { + syminfo->symbols[symbol_id].documentation = binding->documentation_token_old->text; + syminfo->symbols[symbol_id].documentation_length = binding->documentation_token_old->length; + } else if (binding->documentation_string) { + syminfo->symbols[symbol_id].documentation = binding->documentation_string; + syminfo->symbols[symbol_id].documentation_length = strlen(binding->documentation_string); + } else { + syminfo->symbols[symbol_id].documentation = ""; + 
syminfo->symbols[symbol_id].documentation_length = 0; + } } -void track_resolution_for_symbol_info(AstNode *original, AstNode *resolved) { - if (!context.options->generate_symbol_info_file) return; +void track_resolution_for_symbol_info(Context *context, AstNode *original, AstNode *resolved) { + if (!context->options->generate_symbol_info_file) return; if (!resolved) return; - SymbolInfoTable *syminfo = context.symbol_info; + SymbolInfoTable *syminfo = context->symbol_info; assert(syminfo); if (!bh_imap_has(&syminfo->node_to_id, (u64) resolved)) return; @@ -1648,3 +1741,59 @@ void track_resolution_for_symbol_info(AstNode *original, AstNode *resolved) { } + +// +// Compiler Events +// + +void compiler_events_init(Context *context) { + bh_arena_init(&context->events.event_arena, context->gp_alloc, 1024 * 1024); + context->events.event_alloc = bh_arena_allocator(&context->events.event_arena); + + // All other fields should be already set to 0/NULL. +} + +void compiler_events_clear(Context *context) { + bh_arena_clear(&context->events.event_arena); + context->events.first = NULL; + context->events.last = NULL; + context->events.event_count = 0; +} + +CompilerEvent *compiler_event_add(Context *context, u32 event_type) { + CompilerEvent *new_event = bh_alloc_item(context->events.event_alloc, CompilerEvent); + new_event->type = event_type; + new_event->first_field = NULL; + + new_event->next = context->events.last; + if (context->events.last) context->events.last->next = new_event; + context->events.last = new_event; + if (!context->events.first) context->events.first = new_event; + + context->events.event_count++; + + return new_event; +} + +void compiler_event_add_field_str(Context *context, CompilerEvent *event, char *field, char *value) { + if (!value) return; + + CompilerEventField *new_field = bh_alloc_item(context->events.event_alloc, CompilerEventField); + new_field->type = 0; // 0 for string + new_field->field = bh_strdup(context->events.event_alloc, field); + 
new_field->s = bh_strdup(context->events.event_alloc, value); + + new_field->next = event->first_field; + event->first_field = new_field; +} + +void compiler_event_add_field_int(Context *context, CompilerEvent *event, char *field, i32 value) { + CompilerEventField *new_field = bh_alloc_item(context->events.event_alloc, CompilerEventField); + new_field->type = 1; // 1 for int + new_field->field = bh_strdup(context->events.event_alloc, field); + new_field->i = value; + + new_field->next = event->first_field; + event->first_field = new_field; +} + diff --git a/compiler/src/wasm_emit.c b/compiler/src/wasm_emit.c index 0c067fe28..87f917ce7 100644 --- a/compiler/src/wasm_emit.c +++ b/compiler/src/wasm_emit.c @@ -18,6 +18,14 @@ #include "wasm_emit.h" #include "utils.h" +#undef ONYX_ERROR +#undef ONYX_WARNING +#define ONYX_ERROR(pos, rank, ...) (onyx_report_error(mod->context, (pos), (rank), __VA_ARGS__)) +#define ONYX_WARNING(pos, ...) (onyx_report_warning(mod->context, (pos), __VA_ARGS__)) + +#undef BH_INTERNAL_ALLOCATOR +#define BH_INTERNAL_ALLOCATOR (mod->context->gp_alloc) + #define WASM_TYPE_INT32 0x7F #define WASM_TYPE_INT64 0x7E #define WASM_TYPE_FLOAT32 0x7D @@ -35,6 +43,7 @@ static b32 onyx_type_is_stored_in_memory(Type *type) { if (type_struct_is_just_one_basic_value(type)) return 0; return type->kind == Type_Kind_Struct + || type->kind == Type_Kind_Array || type->kind == Type_Kind_DynArray || type->kind == Type_Kind_Union; } @@ -223,7 +232,7 @@ static u32 debug_introduce_symbol(OnyxWasmModule *mod, OnyxToken *token, DebugSy if (token) { token_toggle_end(token); - sym_info.name = bh_strdup(context.ast_alloc, token->text); + sym_info.name = bh_strdup(mod->context->ast_alloc, token->text); token_toggle_end(token); } else { sym_info.name = NULL; @@ -261,7 +270,7 @@ static u32 debug_get_file_id(OnyxWasmModule *mod, const char *name) { DebugFileInfo file_info; file_info.file_id = id; - bh_arr_each(bh_file_contents, fc, context.loaded_files) { + 
bh_arr_each(bh_file_contents, fc, mod->context->loaded_files) { if (!strcmp(fc->filename, name)) { file_info.line_count = fc->line_count; } @@ -297,7 +306,7 @@ static void debug_set_position(OnyxWasmModule *mod, OnyxToken *token) { // - REP // - SET, REP 0 static void debug_emit_instruction(OnyxWasmModule *mod, OnyxToken *token) { - if (!context.options->debug_info_enabled) { + if (!mod->context->options->debug_info_enabled) { return; } @@ -519,7 +528,6 @@ EMIT_FUNC(compound_store, Type* type, u64 offset, b32 location_ EMIT_FUNC(struct_store, Type* type, u32 offset); EMIT_FUNC(struct_literal, AstStructLiteral* sl); EMIT_FUNC(struct_as_separate_values, Type *type, u32 offset); -EMIT_FUNC(array_store, Type* type, u32 offset); EMIT_FUNC(array_literal, AstArrayLiteral* al); EMIT_FUNC(range_literal, AstRangeLiteral* range); EMIT_FUNC_NO_ARGS(load_slice); @@ -532,7 +540,7 @@ EMIT_FUNC(stack_enter, u64 stacksize); EMIT_FUNC(zero_value, WasmType wt); EMIT_FUNC(zero_value_for_type, Type* type, OnyxToken* where, AstTyped *alloc_node); EMIT_FUNC(stack_address, u32 offset, OnyxToken *token); -EMIT_FUNC(values_into_contiguous_memory, u64 base_ptr_local, Type *type, u32 offset, i32 value_count, AstTyped **values); +EMIT_FUNC(values_into_contiguous_memory, u64 base_ptr_local, u32 offset, u32 value_count, ValueWithOffset *values); EMIT_FUNC(struct_literal_into_contiguous_memory, AstStructLiteral* sl, u64 base_ptr_local, u32 offset); EMIT_FUNC(wasm_copy, OnyxToken *token); EMIT_FUNC(wasm_fill, OnyxToken *token); @@ -547,7 +555,7 @@ static void emit_raw_string(OnyxWasmModule* mod, char *data, i32 len, u64 *out_d static void emit_constexpr(ConstExprContext *ctx, AstTyped *node, u32 offset); static b32 emit_constexpr_(ConstExprContext *ctx, AstTyped *node, u32 offset); -static void ensure_node_has_been_submitted_for_emission(AstNode *node) { +static void ensure_node_has_been_submitted_for_emission(Context *context, AstNode *node) { assert(node->entity); if (node->flags & 
Ast_Flag_Has_Been_Scheduled_For_Emit) return; @@ -565,21 +573,34 @@ static void ensure_node_has_been_submitted_for_emission(AstNode *node) { func->entity_header->macro_attempts = 0; func->entity_body->macro_attempts = 0; - entity_change_state(&context.entities, func->entity_header, Entity_State_Code_Gen); - entity_change_state(&context.entities, func->entity_body, Entity_State_Code_Gen); - entity_heap_insert_existing(&context.entities, func->entity_header); - entity_heap_insert_existing(&context.entities, func->entity_body); + entity_change_state(&context->entities, func->entity_header, Entity_State_Code_Gen); + entity_change_state(&context->entities, func->entity_body, Entity_State_Code_Gen); + entity_heap_insert_existing(&context->entities, func->entity_header); + entity_heap_insert_existing(&context->entities, func->entity_body); return; } submit_normal_node: - entity_change_state(&context.entities, node->entity, Entity_State_Code_Gen); - entity_heap_insert_existing(&context.entities, node->entity); + entity_change_state(&context->entities, node->entity, Entity_State_Code_Gen); + entity_heap_insert_existing(&context->entities, node->entity); +} + +static void ensure_type_has_been_submitted_for_emission(OnyxWasmModule *mod, Type *type) { + assert(type); + + if (type->flags & Ast_Flag_Has_Been_Scheduled_For_Emit) return; + type->flags |= Ast_Flag_Has_Been_Scheduled_For_Emit; + + bh_arr_push(mod->types_enqueued_for_info, type->id); } #include "wasm_intrinsics.h" #include "wasm_type_table.h" +// Need to reset the allocator because it is changed in wasm_type_table.h +#undef BH_INTERNAL_ALLOCATOR +#define BH_INTERNAL_ALLOCATOR (mod->context->gp_alloc) + EMIT_FUNC(function_body, AstFunction* fd) { if (fd->body == NULL) return; @@ -715,7 +736,7 @@ EMIT_FUNC(structured_jump, AstJump* jump) { if (bh_arr_last(code).type != WI_JUMP) WID(jump->token, WI_JUMP, labelidx); } else { - onyx_report_error(jump->token->pos, Error_Critical, "Invalid structured jump."); + 
ONYX_ERROR(jump->token->pos, Error_Critical, "Invalid structured jump."); } *pcode = code; @@ -757,7 +778,7 @@ EMIT_FUNC_RETURNING(u64, local_allocation, AstTyped* stmt) { // never used, therefore never declaring its type. if (stmt->type == NULL) { assert(stmt->kind == Ast_Kind_Local); - onyx_report_warning(stmt->token->pos, "Unused local variable with unassigned type."); + ONYX_WARNING(stmt->token->pos, "Unused local variable with unassigned type."); return 0; } @@ -851,7 +872,7 @@ EMIT_FUNC(stack_address, u32 offset, OnyxToken *token) { WIL(token, WI_LOCAL_GET, mod->stack_base_idx); - if (offset > 0) { + if (offset != 0) { WIL(token, WI_PTR_CONST, offset); WI(token, WI_PTR_ADD); } @@ -937,13 +958,13 @@ static void flatten_nested_array_literals_for_emit_helper(bh_arr(AstTyped *) *po } else { bh_arr(AstTyped *) output = *poutput; fori (i, 0, elem_count) { - bh_arr_push(output, al->values[i]); + bh_arr_push_unsafe(output, al->values[i]); } *poutput = output; } } -static bh_arr(AstTyped *) flatten_nested_array_literals_for_emit(AstArrayLiteral *al, Type **elem_type) { +static bh_arr(AstTyped *) flatten_nested_array_literals_for_emit(Context *context, AstArrayLiteral *al, Type **elem_type) { u32 ec = 1; Type *et = al->type; @@ -955,7 +976,7 @@ static bh_arr(AstTyped *) flatten_nested_array_literals_for_emit(AstArrayLiteral *elem_type = et; bh_arr(AstTyped *) result = NULL; - bh_arr_new(global_heap_allocator, result, ec); + bh_arr_new(context->gp_alloc, result, ec); flatten_nested_array_literals_for_emit_helper(&result, al); return result; @@ -971,7 +992,7 @@ EMIT_FUNC(assignment_of_array, AstTyped* left, AstTyped* right) { AstArrayLiteral* al = (AstArrayLiteral *) right; Type* elem_type; - bh_arr(AstTyped *) values = flatten_nested_array_literals_for_emit(al, &elem_type); + bh_arr(AstTyped *) values = flatten_nested_array_literals_for_emit(mod->context, al, &elem_type); u32 elem_count = bh_arr_length(values); u32 elem_size = type_size_of(elem_type); @@ -997,7 +1018,7 
@@ EMIT_FUNC(assignment_of_array, AstTyped* left, AstTyped* right) { u64 offset = 0; emit_location_return_offset(mod, &code, left, &offset); emit_expression(mod, &code, right); - emit_array_store(mod, &code, rtype, offset); + emit_struct_store(mod, &code, rtype, offset); } *pcode = code; @@ -1040,11 +1061,6 @@ EMIT_FUNC(store_instruction, Type* type, u32 offset) { return; } - if (type->kind == Type_Kind_Array) { - emit_array_store(mod, pcode, type, offset); - return; - } - if (type_is_compound(type)) { emit_compound_store(mod, pcode, type, offset, 0); return; @@ -1062,9 +1078,9 @@ EMIT_FUNC(store_instruction, Type* type, u32 offset) { i32 is_basic = type->kind == Type_Kind_Basic || type->kind == Type_Kind_Pointer || type->kind == Type_Kind_MultiPointer; if (!is_basic) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "Failed to generate store instruction for type '%s'. (compiler bug)", - type_get_name(type)); + type_get_name(mod->context, type)); } if (type->Basic.flags & Basic_Flag_Pointer) { @@ -1209,9 +1225,9 @@ EMIT_FUNC(load_instruction, Type* type, u32 offset) { WID(NULL, instr, ((WasmInstructionData) { alignment, offset })); if (instr == WI_NOP) { - onyx_report_error((OnyxFilePos) { 0 }, Error_Critical, + ONYX_ERROR((OnyxFilePos) { 0 }, Error_Critical, "Failed to generate load instruction for type '%s'.", - type_get_name(type)); + type_get_name(mod->context, type)); } *pcode = code; @@ -1229,7 +1245,7 @@ EMIT_FUNC(if, AstIfWhile* if_node) { } if (if_node->kind == Ast_Kind_Static_If) { - if (static_if_resolution(if_node)) { + if (static_if_resolution(mod->context, if_node)) { if (if_node->true_stmt) emit_block(mod, &code, if_node->true_stmt, 1); } else { if (if_node->false_stmt) emit_block(mod, &code, if_node->false_stmt, 1); @@ -1331,6 +1347,19 @@ EMIT_FUNC(for__prologue, AstFor* for_node, u64 iter_local, i64 index_local) { } WIL(for_node->token, WI_LOCAL_SET, index_local); + + WasmInstruction* 
increment_instructions = bh_alloc_array(mod->allocator, WasmInstruction, 4); + increment_instructions[0] = (WasmInstruction) { WI_LOCAL_GET, { .l = index_local } }; + if (type_is_small_integer(for_node->index_var->type)) { + increment_instructions[1] = (WasmInstruction) { WI_I32_CONST, { .l = 1 } }; + increment_instructions[2] = (WasmInstruction) { WI_I32_ADD, { .l = 0x00 } }; + } else { + increment_instructions[1] = (WasmInstruction) { WI_I64_CONST, { .l = 1 } }; + increment_instructions[2] = (WasmInstruction) { WI_I64_ADD, { .l = 0x00 } }; + } + increment_instructions[3] = (WasmInstruction) { WI_LOCAL_SET, { .l = index_local } }; + + emit_defer_code(mod, &code, increment_instructions, 4); } *pcode = code; @@ -1373,21 +1402,34 @@ EMIT_FUNC(for_range, AstFor* for_node, u64 iter_local, i64 index_local) { AstStructLiteral *range = (AstStructLiteral *) for_node->iter; u64 offset = 0; + assert(for_node->iter->type); + StructMember high_mem, step_mem; - type_lookup_member(builtin_range_type_type, "high", &high_mem); - type_lookup_member(builtin_range_type_type, "step", &step_mem); + type_lookup_member(mod->context, for_node->iter->type, "high", &high_mem); + type_lookup_member(mod->context, for_node->iter->type, "step", &step_mem); u64 high_local = local_raw_allocate(mod->local_alloc, onyx_type_to_wasm_type(high_mem.type)); u64 step_local = local_raw_allocate(mod->local_alloc, onyx_type_to_wasm_type(step_mem.type)); - emit_struct_as_separate_values(mod, &code, builtin_range_type_type, 0); + emit_struct_as_separate_values(mod, &code, for_node->iter->type, 0); WIL(for_node->token, WI_LOCAL_SET, step_local); WIL(for_node->token, WI_LOCAL_SET, high_local); WIL(for_node->token, WI_LOCAL_SET, iter_local); - emit_for__prologue(mod, &code, for_node, iter_local, index_local); + u64 INT_GE = WI_I32_GE_S; + u64 INT_LT = WI_I32_LT_S; + u64 INT_CONST = WI_I32_CONST; + u64 INT_ADD = WI_I32_ADD; + + if (high_mem.type == mod->context->types.basic[Basic_Kind_I64]) { + INT_GE = 
WI_I64_GE_S; + INT_LT = WI_I64_LT_S; + INT_CONST = WI_I64_CONST; + INT_ADD = WI_I64_ADD; + } emit_enter_structured_block(mod, &code, SBT_Breakable_Block, for_node->token); + emit_for__prologue(mod, &code, for_node, iter_local, index_local); emit_enter_structured_block(mod, &code, SBT_Basic_Loop, for_node->token); emit_enter_structured_block(mod, &code, SBT_Continue_Block, for_node->token); @@ -1398,28 +1440,28 @@ EMIT_FUNC(for_range, AstFor* for_node, u64 iter_local, i64 index_local) { if (step_value->value.l >= 0) { WIL(for_node->token, WI_LOCAL_GET, iter_local); WIL(for_node->token, WI_LOCAL_GET, high_local); - WI(for_node->token, WI_I32_GE_S); + WI(for_node->token, INT_GE); WID(for_node->token, WI_COND_JUMP, 0x02); } else { WIL(for_node->token, WI_LOCAL_GET, iter_local); WIL(for_node->token, WI_LOCAL_GET, high_local); - WI(for_node->token, WI_I32_LT_S); + WI(for_node->token, INT_LT); WID(for_node->token, WI_COND_JUMP, 0x02); } } else { WIL(for_node->token, WI_LOCAL_GET, step_local); - WID(for_node->token, WI_I32_CONST, 0); - WI(for_node->token, WI_I32_GE_S); + WID(for_node->token, INT_CONST, 0); + WI(for_node->token, INT_GE); WID(for_node->token, WI_IF_START, 0x40); WIL(for_node->token, WI_LOCAL_GET, iter_local); WIL(for_node->token, WI_LOCAL_GET, high_local); - WI(for_node->token, WI_I32_GE_S); + WI(for_node->token, INT_GE); WID(for_node->token, WI_COND_JUMP, 0x03); WI(for_node->token, WI_ELSE); WIL(for_node->token, WI_LOCAL_GET, iter_local); WIL(for_node->token, WI_LOCAL_GET, high_local); - WI(for_node->token, WI_I32_LT_S); + WI(for_node->token, INT_LT); WID(for_node->token, WI_COND_JUMP, 0x03); WI(for_node->token, WI_IF_END); } @@ -1431,7 +1473,7 @@ EMIT_FUNC(for_range, AstFor* for_node, u64 iter_local, i64 index_local) { WIL(for_node->token, WI_LOCAL_GET, iter_local); WIL(for_node->token, WI_LOCAL_GET, step_local); - WI(for_node->token, WI_I32_ADD); + WI(for_node->token, INT_ADD); WIL(for_node->token, WI_LOCAL_SET, iter_local); emit_for__epilogue(mod, &code, 
for_node, iter_local, index_local); @@ -1479,9 +1521,8 @@ EMIT_FUNC(for_slice, AstFor* for_node, u64 iter_local, i64 index_local) { WI(for_node->token, WI_PTR_ADD); WIL(for_node->token, WI_LOCAL_SET, end_ptr_local); - emit_for__prologue(mod, &code, for_node, iter_local, index_local); - emit_enter_structured_block(mod, &code, SBT_Breakable_Block, for_node->token); + emit_for__prologue(mod, &code, for_node, iter_local, index_local); emit_enter_structured_block(mod, &code, SBT_Basic_Loop, for_node->token); emit_enter_structured_block(mod, &code, SBT_Continue_Block, for_node->token); @@ -1534,8 +1575,8 @@ EMIT_FUNC(for_iterator, AstFor* for_node, u64 iter_local, i64 index_local) { u64 iterator_next_func = local_raw_allocate(mod->local_alloc, WASM_TYPE_FUNC); u64 iterator_close_func = local_raw_allocate(mod->local_alloc, WASM_TYPE_FUNC); u64 iterator_remove_func = local_raw_allocate(mod->local_alloc, WASM_TYPE_FUNC); - u64 iterator_done_bool = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32); - WI(for_node->token, WI_DROP); + u64 iterator_done_res = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); + WI(for_node->token, WI_DROP); // TODO: These need to not be dropped but actually used because they are the closure pointers! 
WIL(for_node->token, WI_LOCAL_SET, iterator_remove_func); WI(for_node->token, WI_DROP); WIL(for_node->token, WI_LOCAL_SET, iterator_close_func); @@ -1553,27 +1594,26 @@ EMIT_FUNC(for_iterator, AstFor* for_node, u64 iter_local, i64 index_local) { remove_info.iterator_remove_func = iterator_remove_func; StructMember remove_func_type; - type_lookup_member_by_idx(for_node->iter->type, 3, &remove_func_type); + type_lookup_member_by_idx(mod->context, for_node->iter->type, 3, &remove_func_type); remove_info.remove_func_type_idx = generate_type_idx(mod, remove_func_type.type); bh_arr_push(mod->for_remove_info, remove_info); } - emit_for__prologue(mod, &code, for_node, iter_local, index_local); - AstLocal* var = for_node->var; - b32 it_is_local = (b32) ((iter_local & LOCAL_IS_WASM) != 0); - u64 offset = 0; + assert((iter_local & LOCAL_IS_WASM) == 0); // Enter a deferred statement for the auto-close emit_enter_structured_block(mod, &code, SBT_Basic_Block, for_node->token); + emit_for__prologue(mod, &code, for_node, iter_local, index_local); + if (!for_node->no_close) { StructMember close_func_type; - type_lookup_member_by_idx(for_node->iter->type, 2, &close_func_type); + type_lookup_member_by_idx(mod->context, for_node->iter->type, 2, &close_func_type); i32 close_type_idx = generate_type_idx(mod, close_func_type.type); - WasmInstruction* close_instructions = bh_alloc_array(global_heap_allocator, WasmInstruction, 8); + WasmInstruction* close_instructions = bh_alloc_array(mod->context->gp_alloc, WasmInstruction, 8); close_instructions[0] = (WasmInstruction) { WI_LOCAL_GET, { .l = iterator_close_func } }; close_instructions[1] = (WasmInstruction) { WI_I32_CONST, { .l = mod->null_proc_func_idx } }; close_instructions[2] = (WasmInstruction) { WI_I32_NE, { .l = 0x00 } }; @@ -1587,19 +1627,14 @@ EMIT_FUNC(for_iterator, AstFor* for_node, u64 iter_local, i64 index_local) { } emit_enter_structured_block(mod, &code, SBT_Breakable_Block, for_node->token); - 
emit_enter_structured_block(mod, &code, SBT_Continue_Loop, for_node->token); - - if (!it_is_local) emit_local_location(mod, &code, var, &offset); - - { - WIL(for_node->token, WI_LOCAL_GET, iterator_data_ptr); - WIL(for_node->token, WI_LOCAL_GET, iterator_next_func); + emit_enter_structured_block(mod, &code, SBT_Basic_Loop, for_node->token); + emit_enter_structured_block(mod, &code, SBT_Continue_Block, for_node->token); // CLEANUP: Calling a function is way too f-ing complicated. FACTOR IT!! - u64 stack_top_idx = bh_imap_get(&mod->index_map, (u64) &builtin_stack_top); + u64 stack_top_idx = bh_imap_get(&mod->index_map, (u64) &mod->context->builtins.stack_top); StructMember next_func_type; - type_lookup_member_by_idx(for_node->iter->type, 1, &next_func_type); + type_lookup_member_by_idx(mod->context, for_node->iter->type, 1, &next_func_type); Type* return_type = next_func_type.type->Function.return_type; u32 return_size = type_size_of(return_type); @@ -1611,41 +1646,44 @@ EMIT_FUNC(for_iterator, AstFor* for_node, u64 iter_local, i64 index_local) { WID(for_node->token, WI_GLOBAL_GET, stack_top_idx); WID(for_node->token, WI_PTR_CONST, reserve_size); - WI(for_node->token, WI_PTR_ADD); + WI(for_node->token, WI_PTR_SUB); WID(for_node->token, WI_GLOBAL_SET, stack_top_idx); + WIL(for_node->token, WI_LOCAL_GET, iterator_data_ptr); + WIL(for_node->token, WI_LOCAL_GET, iterator_next_func); i32 type_idx = generate_type_idx(mod, next_func_type.type); WID(for_node->token, WI_CALL_INDIRECT, ((WasmInstructionData) { type_idx, 0x00 })); WID(for_node->token, WI_GLOBAL_GET, stack_top_idx); WID(for_node->token, WI_PTR_CONST, reserve_size); - WI(for_node->token, WI_PTR_SUB); + WI(for_node->token, WI_PTR_ADD); WID(for_node->token, WI_GLOBAL_SET, stack_top_idx); - WID(for_node->token, WI_GLOBAL_GET, stack_top_idx); - emit_load_instruction(mod, &code, return_type, reserve_size - return_size); - } - - WIL(for_node->token, WI_LOCAL_SET, iterator_done_bool); + WIL(for_node->token, 
WI_LOCAL_TEE, iterator_done_res); - if (!it_is_local) emit_store_instruction(mod, &code, var->type, offset); - else WIL(for_node->token, WI_LOCAL_SET, iter_local); - - WIL(for_node->token, WI_LOCAL_GET, iterator_done_bool); + emit_load_instruction(mod, &code, mod->context->types.basic[Basic_Kind_U8], 0); WI(for_node->token, WI_I32_EQZ); - WID(for_node->token, WI_COND_JUMP, 0x01); + WID(for_node->token, WI_COND_JUMP, 0x02); + + u64 offset = 0; + emit_local_location(mod, &code, var, &offset); + WIL(for_node->token, WI_LOCAL_GET, iterator_done_res); + emit_load_instruction(mod, &code, var->type, type_alignment_of(return_type)); + emit_store_instruction(mod, &code, var->type, offset); emit_block(mod, &code, for_node->stmt, 0); + emit_leave_structured_block(mod, &code); // CONTINUE_BLOCK + emit_for__epilogue(mod, &code, for_node, iter_local, index_local); WID(for_node->token, WI_JUMP, 0x00); - emit_leave_structured_block(mod, &code); - emit_leave_structured_block(mod, &code); + emit_leave_structured_block(mod, &code); // BASIC_LOOP + emit_leave_structured_block(mod, &code); // BREAKABLE_BLOCK emit_deferred_stmts(mod, &code); - emit_leave_structured_block(mod, &code); + emit_leave_structured_block(mod, &code); // BASIC_BLOCK bh_arr_pop(mod->for_remove_info); @@ -1654,7 +1692,7 @@ EMIT_FUNC(for_iterator, AstFor* for_node, u64 iter_local, i64 index_local) { local_raw_free(mod->local_alloc, WASM_TYPE_FUNC); local_raw_free(mod->local_alloc, WASM_TYPE_FUNC); local_raw_free(mod->local_alloc, WASM_TYPE_FUNC); - local_raw_free(mod->local_alloc, WASM_TYPE_INT32); + local_raw_free(mod->local_alloc, WASM_TYPE_PTR); *pcode = code; } @@ -1704,7 +1742,7 @@ EMIT_FUNC(for, AstFor* for_node) { case For_Loop_Slice: emit_for_slice(mod, &code, for_node, iter_local, index_local); break; case For_Loop_Iterator: emit_for_iterator(mod, &code, for_node, iter_local, index_local); break; - default: onyx_report_error(for_node->token->pos, Error_Critical, "Invalid for loop type. 
You should probably not be seeing this..."); + default: ONYX_ERROR(for_node->token->pos, Error_Critical, "Invalid for loop type. You should probably not be seeing this..."); } local_free(mod->local_alloc, (AstTyped *) var); @@ -1717,7 +1755,7 @@ EMIT_FUNC(switch, AstSwitch* switch_node) { bh_arr(WasmInstruction) code = *pcode; bh_imap block_map; - bh_imap_init(&block_map, global_heap_allocator, bh_arr_length(switch_node->cases)); + bh_imap_init(&block_map, mod->context->gp_alloc, bh_arr_length(switch_node->cases)); u64 expr_result_local = 0; if (switch_node->is_expr) { @@ -2011,6 +2049,63 @@ EMIT_FUNC(binop, AstBinaryOp* binop) { return; } + if (binop->operation == Binary_Op_Bool_And) { + emit_enter_structured_block(mod, &code, SBT_Basic_Block, binop->token); + + u64 tmp_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32); + + emit_expression(mod, &code, binop->left); + if (binop->left->type->kind == Type_Kind_Function) { + WI(NULL, WI_DROP); + } + + WIL(NULL, WI_LOCAL_TEE, tmp_local); + WI(NULL, WI_I32_EQZ); + WIL(NULL, WI_COND_JUMP, 0); + + emit_expression(mod, &code, binop->right); + if (binop->right->type->kind == Type_Kind_Function) { + WI(NULL, WI_DROP); + } + + WIL(NULL, WI_LOCAL_SET, tmp_local); + emit_leave_structured_block(mod, &code); + + WIL(NULL, WI_LOCAL_GET, tmp_local); + local_raw_free(mod->local_alloc, WASM_TYPE_INT32); + + *pcode = code; + return; + } + + if (binop->operation == Binary_Op_Bool_Or) { + emit_enter_structured_block(mod, &code, SBT_Basic_Block, binop->token); + + u64 tmp_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32); + + emit_expression(mod, &code, binop->left); + if (binop->left->type->kind == Type_Kind_Function) { + WI(NULL, WI_DROP); + } + + WIL(NULL, WI_LOCAL_TEE, tmp_local); + WIL(NULL, WI_COND_JUMP, 0); + + emit_expression(mod, &code, binop->right); + if (binop->right->type->kind == Type_Kind_Function) { + WI(NULL, WI_DROP); + } + + WIL(NULL, WI_LOCAL_SET, tmp_local); + emit_leave_structured_block(mod, 
&code); + + WIL(NULL, WI_LOCAL_GET, tmp_local); + local_raw_free(mod->local_alloc, WASM_TYPE_INT32); + + *pcode = code; + return; + } + b32 is_sign_significant = 0; switch (binop->operation) { case Binary_Op_Divide: case Binary_Op_Modulus: @@ -2121,6 +2216,7 @@ EMIT_FUNC(unaryop, AstUnaryOp* unop) { case Unary_Op_Cast: emit_cast(mod, &code, unop); break; case Unary_Op_Try: // Should be handled in operator overload + case Unary_Op_Unwrap: case Unary_Op_Count: break; } @@ -2160,7 +2256,8 @@ EMIT_FUNC(unaryop, AstUnaryOp* unop) { EMIT_FUNC(call, AstCall* call) { bh_arr(WasmInstruction) code = *pcode; - u64 stack_top_idx = bh_imap_get(&mod->index_map, (u64) &builtin_stack_top); + u64 stack_top_idx = bh_imap_get(&mod->index_map, (u64) &mod->context->builtins.stack_top); + u64 stack_top_restore_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); u64 stack_top_store_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); OnyxToken* call_token = call->token; @@ -2171,9 +2268,10 @@ EMIT_FUNC(call, AstCall* call) { // in later. u32 reserve_space_patch = bh_arr_length(code); WID(call_token, WI_GLOBAL_GET, stack_top_idx); - WIL(call_token, WI_LOCAL_TEE, stack_top_store_local); + WIL(call_token, WI_LOCAL_TEE, stack_top_restore_local); WID(call_token, WI_PTR_CONST, 0); // This will be filled in later. 
- WI(call_token, WI_PTR_ADD); + WI(call_token, WI_PTR_SUB); + WIL(call_token, WI_LOCAL_TEE, stack_top_store_local); WID(call_token, WI_GLOBAL_SET, stack_top_idx); u32 reserve_size = 0; @@ -2183,8 +2281,8 @@ EMIT_FUNC(call, AstCall* call) { u32* vararg_any_offsets=NULL; u32* vararg_any_types=NULL; if (call->va_kind == VA_Kind_Any) { - vararg_any_offsets = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(call->args.values)); - vararg_any_types = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(call->args.values)); + vararg_any_offsets = bh_alloc_array(mod->context->scratch_alloc, u32, bh_arr_length(call->args.values)); + vararg_any_types = bh_alloc_array(mod->context->scratch_alloc, u32, bh_arr_length(call->args.values)); } bh_arr_each(AstTyped *, parg, call->args.values) { @@ -2198,19 +2296,17 @@ EMIT_FUNC(call, AstCall* call) { place_on_stack = 1; } + if (arg->pass_as_any || arg->va_kind != VA_Kind_Not_VA) { + place_on_stack = 1; + } + if (arg->va_kind != VA_Kind_Not_VA) { // This is a variadic argument and needs to be written to the stack. If the starting // location of the vararg array hasn't been noted, note it. 
if (vararg_offset < 0) vararg_offset = reserve_size; - - place_on_stack = 1; vararg_count += 1; } - if (arg->pass_as_any) { - place_on_stack = 1; - } - if (arg->value->kind == Ast_Kind_Struct_Literal && onyx_type_is_stored_in_memory(arg->value->type)) { emit_struct_literal_into_contiguous_memory(mod, &code, (AstStructLiteral *) arg->value, stack_top_store_local, reserve_size); @@ -2233,34 +2329,34 @@ EMIT_FUNC(call, AstCall* call) { if (arg->va_kind == VA_Kind_Any) { vararg_any_offsets[vararg_count - 1] = reserve_size; vararg_any_types[vararg_count - 1] = arg->value->type->id; + ensure_type_has_been_submitted_for_emission(mod, arg->value->type); } reserve_size += type_size_of(arg->value->type); if (arg->pass_as_any) { - Type *any_type = type_build_from_ast(context.ast_alloc, builtin_any_type); + Type *any_type = type_build_from_ast(mod->context, mod->context->builtins.any_type); assert(any_type); - u32 arg_size = type_size_of(any_type); - u64 ugly_temporary = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); WIL(call_token, WI_LOCAL_SET, ugly_temporary); WIL(call_token, WI_LOCAL_GET, stack_top_store_local); WIL(call_token, WI_LOCAL_GET, ugly_temporary); - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_Rawptr], reserve_size + 0); + emit_store_instruction(mod, &code, mod->context->types.basic[Basic_Kind_Rawptr], reserve_size + 0); WIL(call_token, WI_LOCAL_GET, stack_top_store_local); WID(call_token, WI_I32_CONST, arg->value->type->id); - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_Type_Index], reserve_size + 4); + emit_store_instruction(mod, &code, mod->context->types.basic[Basic_Kind_Type_Index], reserve_size + 4); + ensure_type_has_been_submitted_for_emission(mod, arg->value->type); local_raw_free(mod->local_alloc, WASM_TYPE_PTR); WIL(call_token, WI_LOCAL_GET, stack_top_store_local); - WIL(call_token, WI_I32_CONST, reserve_size); - WI(call_token, WI_I32_ADD); + WIL(call_token, WI_PTR_CONST, reserve_size); + WI(call_token, WI_PTR_ADD); 
- reserve_size += arg_size; + reserve_size += type_size_of(any_type); } } } @@ -2269,18 +2365,18 @@ EMIT_FUNC(call, AstCall* call) { case VA_Kind_Any: { vararg_offset = reserve_size; - i32 any_size = type_size_of(type_build_from_ast(context.ast_alloc, builtin_any_type)); + i32 any_size = type_size_of(type_build_from_ast(mod->context, mod->context->builtins.any_type)); fori (i, 0, vararg_count) { WIL(call_token, WI_LOCAL_GET, stack_top_store_local); WIL(call_token, WI_LOCAL_GET, stack_top_store_local); WID(call_token, WI_PTR_CONST, vararg_any_offsets[i]); WI(call_token, WI_PTR_ADD); - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_Rawptr], vararg_offset + i * any_size); + emit_store_instruction(mod, &code, mod->context->types.basic[Basic_Kind_Rawptr], reserve_size); WIL(call_token, WI_LOCAL_GET, stack_top_store_local); WID(call_token, WI_I32_CONST, vararg_any_types[i]); - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_Type_Index], vararg_offset + i * any_size + POINTER_SIZE); + emit_store_instruction(mod, &code, mod->context->types.basic[Basic_Kind_Type_Index], reserve_size + POINTER_SIZE); reserve_size += any_size; } @@ -2305,13 +2401,13 @@ EMIT_FUNC(call, AstCall* call) { WID(call_token, WI_PTR_CONST, vararg_offset); WI(call_token, WI_PTR_ADD); } - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_Rawptr], reserve_size); + emit_store_instruction(mod, &code, mod->context->types.basic[Basic_Kind_Rawptr], reserve_size); // NOTE: There may be 4 uninitialized bytes here, because pointers are only 4 bytes in WASM. 
WIL(call_token, WI_LOCAL_GET, stack_top_store_local); WID(call_token, WI_I32_CONST, vararg_count); - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_I32], reserve_size + POINTER_SIZE); + emit_store_instruction(mod, &code, mod->context->types.basic[Basic_Kind_I32], reserve_size + POINTER_SIZE); WIL(call_token, WI_LOCAL_GET, stack_top_store_local); if (reserve_size > 0) { @@ -2334,14 +2430,23 @@ EMIT_FUNC(call, AstCall* call) { u32 return_size = type_size_of(return_type); assert(return_size % type_alignment_of(return_type) == 0); - if (cc == CC_Return_Stack) reserve_size += return_size; + u32 return_location = 0; + if (cc == CC_Return_Stack) { + return_location = reserve_size; + + WIL(call_token, WI_LOCAL_GET, stack_top_store_local); + WID(call_token, WI_PTR_CONST, return_location); + WI(call_token, WI_PTR_ADD); - if (context.options->stack_trace_enabled) { + reserve_size += return_size; + } + + if (mod->context->options->stack_trace_enabled) { emit_stack_address(mod, &code, mod->stack_trace_idx, NULL); WIL(NULL, WI_I32_CONST, call->token->pos.line); - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_U32], 8); + emit_store_instruction(mod, &code, mod->context->types.basic[Basic_Kind_U32], 8); - u64 stack_trace_pass_global = bh_imap_get(&mod->index_map, (u64) &builtin_stack_trace); + u64 stack_trace_pass_global = bh_imap_get(&mod->index_map, (u64) &mod->context->builtins.stack_trace); emit_stack_address(mod, &code, mod->stack_trace_idx, NULL); WIL(NULL, WI_GLOBAL_SET, stack_trace_pass_global); } @@ -2356,12 +2461,12 @@ EMIT_FUNC(call, AstCall* call) { WIL(NULL, WI_CALL, 0); // This will be patched later. 
- ensure_node_has_been_submitted_for_emission((AstNode *) call->callee); + ensure_node_has_been_submitted_for_emission(mod->context, (AstNode *) call->callee); } else { emit_expression(mod, &code, call->callee); - u64 global_closure_base_idx = bh_imap_get(&mod->index_map, (u64) &builtin_closure_base); + u64 global_closure_base_idx = bh_imap_get(&mod->index_map, (u64) &mod->context->builtins.closure_base); WIL(NULL, WI_GLOBAL_SET, global_closure_base_idx); i32 type_idx = generate_type_idx(mod, call->callee->type); @@ -2369,21 +2474,22 @@ EMIT_FUNC(call, AstCall* call) { } if (reserve_size > 0) { - WIL(call_token, WI_LOCAL_GET, stack_top_store_local); + WIL(call_token, WI_LOCAL_GET, stack_top_restore_local); WID(call_token, WI_GLOBAL_SET, stack_top_idx); bh_align(reserve_size, 16); code[reserve_space_patch + 2].data.l = reserve_size; } else { - fori (i, 0, 5) code[reserve_space_patch + i].type = WI_NOP; + fori (i, 0, 6) code[reserve_space_patch + i].type = WI_NOP; } if (cc == CC_Return_Stack) { - WID(call_token, WI_GLOBAL_GET, stack_top_idx); - emit_load_with_ignored_instruction(mod, &code, return_type, reserve_size - return_size, call->ignored_return_value_count); + WIL(call_token, WI_LOCAL_GET, stack_top_store_local); + emit_load_with_ignored_instruction(mod, &code, return_type, return_location, call->ignored_return_value_count); } + local_raw_free(mod->local_alloc, WASM_TYPE_PTR); local_raw_free(mod->local_alloc, WASM_TYPE_PTR); *pcode = code; } @@ -2417,7 +2523,7 @@ EMIT_FUNC(method_call, AstBinaryOp *mcall) { // // Create a local variable to store the result of the lookup. 
- AstLocal *tmp_local = make_local_with_type(context.ast_alloc, NULL, (*object)->type); + AstLocal *tmp_local = make_local_with_type(mod->context, NULL, (*object)->type); tmp_local->flags |= Ast_Flag_Decl_Followed_By_Init; u64 tmp_local_idx = emit_local_allocation(mod, &code, (AstTyped *) tmp_local); b32 tmp_is_wasm_local = (b32) ((tmp_local_idx & LOCAL_IS_WASM) != 0); @@ -2442,7 +2548,7 @@ EMIT_FUNC(method_call, AstBinaryOp *mcall) { // of the local variable. AstArgument *first_arg = (AstArgument *) call_node->args.values[0]; if (first_arg->value->kind == Ast_Kind_Address_Of && ((AstAddressOf *) first_arg->value)->can_be_removed) { - first_arg->value = (AstTyped *) make_address_of(context.ast_alloc, (AstTyped *) tmp_local); + first_arg->value = (AstTyped *) make_address_of(mod->context, (AstTyped *) tmp_local); } else { first_arg->value = (AstTyped *) tmp_local; } @@ -2463,7 +2569,7 @@ EMIT_FUNC(method_call, AstBinaryOp *mcall) { bh_arr(AstArgument *) arg_arr = (bh_arr(AstArgument *)) call->args.values; \ fori (i, 0, count) { \ if (arg_arr[i]->value->kind != Ast_Kind_NumLit) { \ - onyx_report_error(arg_arr[i]->token->pos, Error_Critical, \ + ONYX_ERROR(arg_arr[i]->token->pos, Error_Critical, \ "SIMD constants expect compile time constants as parameters. 
The %d%s parameter was not.", \ i, bh_num_suffix(i)); \ *pcode = code; \ @@ -2477,7 +2583,7 @@ EMIT_FUNC(method_call, AstBinaryOp *mcall) { #define SIMD_EXTRACT_LANE_INSTR(instr, arg_arr) \ emit_expression(mod, &code, arg_arr[0]->value);\ if (arg_arr[1]->value->kind != Ast_Kind_NumLit) { \ - onyx_report_error(arg_arr[1]->token->pos, Error_Critical, "SIMD lane instructions expect a compile time lane number."); \ + ONYX_ERROR(arg_arr[1]->token->pos, Error_Critical, "SIMD lane instructions expect a compile time lane number."); \ *pcode = code; \ return; \ } \ @@ -2486,7 +2592,7 @@ EMIT_FUNC(method_call, AstBinaryOp *mcall) { #define SIMD_REPLACE_LANE_INSTR(instr, arg_arr) { \ emit_expression(mod, &code, arg_arr[0]->value);\ if (arg_arr[1]->value->kind != Ast_Kind_NumLit) { \ - onyx_report_error(arg_arr[1]->token->pos, Error_Critical, "SIMD lane instructions expect a compile time lane number."); \ + ONYX_ERROR(arg_arr[1]->token->pos, Error_Critical, "SIMD lane instructions expect a compile time lane number."); \ *pcode = code; \ return; \ } \ @@ -2605,7 +2711,7 @@ EMIT_FUNC(intrinsic_call, AstCall* call) { bh_arr(AstArgument *) arg_arr = (bh_arr(AstArgument *)) call->args.values; fori (i, 0, 4) { if (arg_arr[i]->value->kind != Ast_Kind_NumLit) { - onyx_report_error(arg_arr[i]->token->pos, Error_Critical, + ONYX_ERROR(arg_arr[i]->token->pos, Error_Critical, "SIMD constants expect compile time constants as parameters. The %d%s parameter was not.", i, bh_num_suffix(i)); *pcode = code; @@ -2622,7 +2728,7 @@ EMIT_FUNC(intrinsic_call, AstCall* call) { bh_arr(AstArgument *) arg_arr = (bh_arr(AstArgument *)) call->args.values; fori (i, 0, 2) { if (arg_arr[i]->value->kind != Ast_Kind_NumLit) { - onyx_report_error(arg_arr[i]->token->pos, Error_Critical, + ONYX_ERROR(arg_arr[i]->token->pos, Error_Critical, "SIMD constants expect compile time constants as parameters. 
The %d%s parameter was not.", i, bh_num_suffix(i)); *pcode = code; @@ -2645,7 +2751,7 @@ EMIT_FUNC(intrinsic_call, AstCall* call) { fori (i, 0, 16) { if (arg_arr[i + 2]->value->kind != Ast_Kind_NumLit) { - onyx_report_error(arg_arr[i + 2]->token->pos, Error_Critical, + ONYX_ERROR(arg_arr[i + 2]->token->pos, Error_Critical, "SIMD constants expect compile time constants as parameters. The %d%s parameter was not.", i, bh_num_suffix(i)); *pcode = code; @@ -2958,20 +3064,24 @@ EMIT_FUNC(field_access_location, AstFieldAccess* field, u64* offset_return) { source_expr = (AstTyped *) ((AstFieldAccess *) source_expr)->expr; } - if (source_expr->kind == Ast_Kind_Subscript - && source_expr->type->kind != Type_Kind_Pointer && source_expr->type->kind != Type_Kind_MultiPointer) { + b32 is_pointer = source_expr->type->kind == Type_Kind_Pointer || source_expr->type->kind == Type_Kind_MultiPointer; + + if (source_expr->kind == Ast_Kind_Subscript && !is_pointer) { u64 o2 = 0; emit_subscript_location(mod, &code, (AstSubscript *) source_expr, &o2); offset += o2; - } else if ((source_expr->kind == Ast_Kind_Local || source_expr->kind == Ast_Kind_Param) - && source_expr->type->kind != Type_Kind_Pointer && source_expr->type->kind != Type_Kind_MultiPointer) { + } else if ((source_expr->kind == Ast_Kind_Local || source_expr->kind == Ast_Kind_Param) && !is_pointer) { u64 o2 = 0; emit_local_location(mod, &code, (AstLocal *) source_expr, &o2); offset += o2; - } else if (source_expr->kind == Ast_Kind_Memres - && source_expr->type->kind != Type_Kind_Pointer && source_expr->type->kind != Type_Kind_MultiPointer) { + } else if (source_expr->kind == Ast_Kind_Capture_Local && !is_pointer) { + u64 o2 = 0; + emit_capture_local_location(mod, &code, (AstCaptureLocal *) source_expr, &o2); + offset += o2; + + } else if (source_expr->kind == Ast_Kind_Memres && !is_pointer) { emit_memory_reservation_location(mod, &code, (AstMemRes *) source_expr); } else { @@ -2986,10 +3096,10 @@ 
EMIT_FUNC(field_access_location, AstFieldAccess* field, u64* offset_return) { EMIT_FUNC(memory_reservation_location, AstMemRes* memres) { bh_arr(WasmInstruction) code = *pcode; - ensure_node_has_been_submitted_for_emission((AstNode *) memres); + ensure_node_has_been_submitted_for_emission(mod->context, (AstNode *) memres); if (memres->threadlocal) { - u64 tls_base_idx = bh_imap_get(&mod->index_map, (u64) &builtin_tls_base); + u64 tls_base_idx = bh_imap_get(&mod->index_map, (u64) &mod->context->builtins.tls_base); CodePatchInfo code_patch; code_patch.kind = Code_Patch_Tls_Offset; @@ -3048,7 +3158,7 @@ EMIT_FUNC(compound_load, Type* type, u64 offset, i32 ignored_value_count) { mem_count -= ignored_value_count; if (mem_count == 1) { - type_linear_member_lookup(type, 0, &two); + type_linear_member_lookup(mod->context, type, 0, &two); emit_load_instruction(mod, &code, two.type, offset + two.offset); // two.offset should be 0 } else { @@ -3056,7 +3166,7 @@ EMIT_FUNC(compound_load, Type* type, u64 offset, i32 ignored_value_count) { WIL(NULL, WI_LOCAL_TEE, tmp_idx); fori (i, 0, mem_count) { - type_linear_member_lookup(type, i, &two); + type_linear_member_lookup(mod->context, type, i, &two); if (i != 0) WIL(NULL, WI_LOCAL_GET, tmp_idx); emit_load_instruction(mod, &code, two.type, offset + two.offset); } @@ -3077,10 +3187,10 @@ EMIT_FUNC(compound_store, Type* type, u64 offset, b32 location_first) { if (location_first) WIL(NULL, WI_LOCAL_SET, loc_idx); i32 elem_count = type_linear_member_count(type); - u64 *temp_locals = bh_alloc_array(global_scratch_allocator, u64, elem_count); + u64 *temp_locals = bh_alloc_array(mod->context->scratch_alloc, u64, elem_count); forir (i, elem_count - 1, 0) { - type_linear_member_lookup(type, i, &two); + type_linear_member_lookup(mod->context, type, i, &two); WasmType wt = onyx_type_to_wasm_type(two.type); if (wt != WASM_TYPE_VOID) { @@ -3092,7 +3202,7 @@ EMIT_FUNC(compound_store, Type* type, u64 offset, b32 location_first) { if 
(!location_first) WIL(NULL, WI_LOCAL_SET, loc_idx); fori (i, 0, elem_count) { - type_linear_member_lookup(type, i, &two); + type_linear_member_lookup(mod->context, type, i, &two); WasmType wt = onyx_type_to_wasm_type(two.type); if (wt != WASM_TYPE_VOID) { @@ -3108,7 +3218,7 @@ EMIT_FUNC(compound_store, Type* type, u64 offset, b32 location_first) { local_raw_free(mod->local_alloc, WASM_TYPE_PTR); // This shouldn't be necessary because the scratch allocator doesn't free. - bh_free(global_scratch_allocator, temp_locals); + bh_free(mod->context->scratch_alloc, temp_locals); *pcode = code; } @@ -3116,7 +3226,7 @@ EMIT_FUNC(compound_store, Type* type, u64 offset, b32 location_first) { EMIT_FUNC(wasm_copy, OnyxToken *token) { bh_arr(WasmInstruction) code = *pcode; - if (context.options->use_post_mvp_features) { + if (mod->context->options->use_post_mvp_features) { WIL(token, WI_MEMORY_COPY, 0x00); } else { emit_intrinsic_memory_copy(mod, &code); @@ -3128,7 +3238,7 @@ EMIT_FUNC(wasm_copy, OnyxToken *token) { EMIT_FUNC(wasm_fill, OnyxToken *token) { bh_arr(WasmInstruction) code = *pcode; - if (context.options->use_post_mvp_features) { + if (mod->context->options->use_post_mvp_features) { WID(token, WI_MEMORY_FILL, 0x00); } else { emit_intrinsic_memory_fill(mod, &code); @@ -3143,38 +3253,35 @@ EMIT_FUNC(wasm_memory_equal, OnyxToken *token) { *pcode = code; } -EMIT_FUNC(values_into_contiguous_memory, u64 base_ptr_local, Type *type, u32 offset, i32 value_count, AstTyped **values) { +EMIT_FUNC(values_into_contiguous_memory, u64 base_ptr_local, u32 offset, u32 value_count, ValueWithOffset *values) { bh_arr(WasmInstruction) code = *pcode; - assert(onyx_type_is_stored_in_memory(type)); - assert(value_count == (i32) type_structlike_mem_count(type)); - - StructMember smem; fori (i, 0, value_count) { - type_lookup_member_by_idx(type, i, &smem); + AstTyped *value = values[i].value; + u32 value_offset = values[i].offset; // When emitting a structure literal into memory, simply place it 
directly into the memory. // Otherwise, the structure literal would be placed somewhere in memory, and then needlessly // copied to its final destination. - if (values[i]->kind == Ast_Kind_Struct_Literal && onyx_type_is_stored_in_memory(values[i]->type)) { - emit_struct_literal_into_contiguous_memory(mod, &code, (AstStructLiteral *) values[i], base_ptr_local, smem.offset + offset); + if (value->kind == Ast_Kind_Struct_Literal && onyx_type_is_stored_in_memory(value->type)) { + emit_struct_literal_into_contiguous_memory(mod, &code, (AstStructLiteral *) value, base_ptr_local, value_offset + offset); // When emitting a zero-value, simple zero the bytes in memory. Otherwise you run into the // same problem described above. - } else if (values[i]->kind == Ast_Kind_Zero_Value && onyx_type_is_stored_in_memory(values[i]->type)) { + } else if (value->kind == Ast_Kind_Zero_Value && onyx_type_is_stored_in_memory(value->type)) { WIL(NULL, WI_LOCAL_GET, base_ptr_local); - WIL(NULL, WI_PTR_CONST, smem.offset + offset); + WIL(NULL, WI_PTR_CONST, value_offset + offset); WI(NULL, WI_PTR_ADD); WIL(NULL, WI_I32_CONST, 0); - WIL(NULL, WI_I32_CONST, type_size_of(values[i]->type)); + WIL(NULL, WI_I32_CONST, type_size_of(value->type)); emit_wasm_fill(mod, &code, NULL); } else { WIL(NULL, WI_LOCAL_GET, base_ptr_local); - emit_expression(mod, &code, values[i]); - emit_store_instruction(mod, &code, values[i]->type, smem.offset + offset); + emit_expression(mod, &code, value); + emit_store_instruction(mod, &code, value->type, value_offset + offset); } } @@ -3182,15 +3289,15 @@ EMIT_FUNC(values_into_contiguous_memory, u64 base_ptr_local, Type *type, u32 off } EMIT_FUNC(struct_literal_into_contiguous_memory, AstStructLiteral* sl, u64 base_ptr_local, u32 offset) { - emit_values_into_contiguous_memory(mod, pcode, base_ptr_local, sl->type, offset, bh_arr_length(sl->args.values), sl->args.values); + emit_values_into_contiguous_memory(mod, pcode, base_ptr_local, offset, 
bh_arr_length(sl->values_to_initialize), sl->values_to_initialize); } EMIT_FUNC(struct_literal, AstStructLiteral* sl) { bh_arr(WasmInstruction) code = *pcode; if (!onyx_type_is_stored_in_memory(sl->type)) { - bh_arr_each(AstTyped *, val, sl->args.values) { - emit_expression(mod, &code, *val); + bh_arr_each(ValueWithOffset, val, sl->values_to_initialize) { + emit_expression(mod, &code, val->value); } *pcode = code; @@ -3244,7 +3351,7 @@ EMIT_FUNC(struct_as_separate_values, Type *type, u32 offset) { StructMember smem; fori (i, 0, mem_count) { - type_lookup_member_by_idx(type, i, &smem); + type_lookup_member_by_idx(mod->context, type, i, &smem); WIL(NULL, WI_LOCAL_GET, value_location); emit_load_instruction(mod, &code, smem.type, offset + smem.offset); @@ -3260,62 +3367,6 @@ EMIT_FUNC(struct_as_separate_values, Type *type, u32 offset) { return; } -EMIT_FUNC(array_store, Type* type, u32 offset) { - assert(type->kind == Type_Kind_Array); - bh_arr(WasmInstruction) code = *pcode; - - Type* elem_type = type; - u32 elem_count = 1; - while (elem_type->kind == Type_Kind_Array) { - elem_count *= elem_type->Array.count; - elem_type = elem_type->Array.elem; - } - u32 elem_size = type_size_of(elem_type); - - u64 lptr_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); - u64 rptr_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); - WIL(NULL, WI_LOCAL_SET, rptr_local); - WIL(NULL, WI_LOCAL_SET, lptr_local); - - WIL(NULL, WI_LOCAL_GET, rptr_local); - WID(NULL, WI_I32_CONST, 0); - WI(NULL, WI_I32_NE); - emit_enter_structured_block(mod, &code, SBT_Basic_If, NULL); - - { - WIL(NULL, WI_LOCAL_GET, lptr_local); - if (offset != 0) { - WIL(NULL, WI_PTR_CONST, offset); - WI(NULL, WI_PTR_ADD); - } - - WIL(NULL, WI_LOCAL_GET, rptr_local); - WIL(NULL, WI_I32_CONST, elem_count * elem_size); - emit_wasm_copy(mod, &code, NULL); - } - - WI(NULL, WI_ELSE); - - { // If the source ptr is null (0), then just copy in 0 bytes. 
- WIL(NULL, WI_LOCAL_GET, lptr_local); - if (offset != 0) { - WIL(NULL, WI_PTR_CONST, offset); - WI(NULL, WI_PTR_ADD); - } - - WIL(NULL, WI_I32_CONST, 0); - WIL(NULL, WI_I32_CONST, elem_count * elem_size); - emit_wasm_fill(mod, &code, NULL); - } - - local_raw_free(mod->local_alloc, WASM_TYPE_PTR); - local_raw_free(mod->local_alloc, WASM_TYPE_PTR); - - emit_leave_structured_block(mod, &code); - *pcode = code; - return; -} - EMIT_FUNC(array_literal, AstArrayLiteral* al) { bh_arr(WasmInstruction) code = *pcode; @@ -3332,12 +3383,7 @@ EMIT_FUNC(array_literal, AstArrayLiteral* al) { emit_store_instruction(mod, &code, al->type->Array.elem, local_offset + i * elem_size); } - WIL(al->token, WI_LOCAL_GET, mod->stack_base_idx); - if (local_offset > 0) { - WIL(al->token, WI_PTR_CONST, local_offset); - WI(al->token, WI_PTR_ADD); - } - + emit_stack_address(mod, &code, local_offset, al->token); *pcode = code; } @@ -3347,17 +3393,17 @@ EMIT_FUNC(range_literal, AstRangeLiteral* range) { u64 local_offset = emit_local_allocation(mod, &code, (AstTyped *) range); assert((local_offset & LOCAL_IS_WASM) == 0); - AstTyped *values[] = { range->low, range->high, range->step }; - emit_values_into_contiguous_memory(mod, &code, mod->stack_base_idx, - range->type, local_offset, 3, values); + i32 elem_size = range->type->Struct.alignment; - WIL(range->token, WI_LOCAL_GET, mod->stack_base_idx); + ValueWithOffset values[] = { + range->low, elem_size * 0, + range->high, elem_size * 1, + range->step, elem_size * 2, + }; - if (local_offset > 0) { - WIL(NULL, WI_PTR_CONST, local_offset); - WI(NULL, WI_PTR_ADD); - } + emit_values_into_contiguous_memory(mod, &code, mod->stack_base_idx, local_offset, 3, values); + emit_stack_address(mod, &code, local_offset, range->token); *pcode = code; } @@ -3366,10 +3412,10 @@ EMIT_FUNC_NO_ARGS(load_slice) { u64 ugly_temporary = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); WIL(NULL, WI_LOCAL_TEE, ugly_temporary); - emit_load_instruction(mod, &code, 
&basic_types[Basic_Kind_Rawptr], 0); + emit_load_instruction(mod, &code, mod->context->types.basic[Basic_Kind_Rawptr], 0); WIL(NULL, WI_LOCAL_GET, ugly_temporary); - emit_load_instruction(mod, &code, &basic_types[Basic_Kind_I32], POINTER_SIZE); + emit_load_instruction(mod, &code, mod->context->types.basic[Basic_Kind_I32], POINTER_SIZE); local_raw_free(mod->local_alloc, WASM_TYPE_PTR); @@ -3487,10 +3533,10 @@ EMIT_FUNC(location_return_offset, AstTyped* expr, u64* offset_return) { default: { if (expr->token) { - onyx_report_error(expr->token->pos, Error_Critical, "Unable to generate location for '%s'.", onyx_ast_node_kind_string(expr->kind)); + ONYX_ERROR(expr->token->pos, Error_Critical, "Unable to generate location for '%s'.", onyx_ast_node_kind_string(expr->kind)); } else { OnyxFilePos pos = {0}; - onyx_report_error(pos, Error_Critical, "Unable to generate location for '%s'.", onyx_ast_node_kind_string(expr->kind)); + ONYX_ERROR(pos, Error_Critical, "Unable to generate location for '%s'.", onyx_ast_node_kind_string(expr->kind)); } break; } @@ -3520,12 +3566,17 @@ EMIT_FUNC(expression, AstTyped* expr) { if (type->flags & Ast_Flag_Expr_Ignored) return; if (type->type_id != 0) { - WID(NULL, WI_I32_CONST, ((AstType *) expr)->type_id); + WID(NULL, WI_I32_CONST, type->type_id); + + Type *t = type_lookup_by_id(mod->context, type->type_id); + assert(t); + ensure_type_has_been_submitted_for_emission(mod, t); } else { - Type* t = type_build_from_ast(context.ast_alloc, type); + Type *t = type_build_from_ast(mod->context, type); WID(NULL, WI_I32_CONST, t->id); - } + ensure_type_has_been_submitted_for_emission(mod, t); + } *pcode = code; return; @@ -3620,7 +3671,7 @@ EMIT_FUNC(expression, AstTyped* expr) { case Ast_Kind_StrLit: { // :ProperLinking AstStrLit *strlit = (AstStrLit *) expr; - ensure_node_has_been_submitted_for_emission((AstNode *) strlit); + ensure_node_has_been_submitted_for_emission(mod->context, (AstNode *) strlit); emit_data_relocation_for_node(mod, &code, 
(AstNode *) strlit); if (strlit->is_cstr == 0) { @@ -3670,18 +3721,18 @@ EMIT_FUNC(expression, AstTyped* expr) { code_patch.kind = Code_Patch_Callee; code_patch.func_idx = mod->current_func_idx; code_patch.instr = bh_arr_length(code); - code_patch.node_related_to_patch = (AstNode *) builtin_closure_block_allocate; + code_patch.node_related_to_patch = (AstNode *) mod->context->builtins.closure_block_allocate; bh_arr_push(mod->code_patches, code_patch); WIL(NULL, WI_CALL, 0); - ensure_node_has_been_submitted_for_emission((AstNode *) builtin_closure_block_allocate); + ensure_node_has_been_submitted_for_emission(mod->context, (AstNode *) mod->context->builtins.closure_block_allocate); u64 capture_block_ptr = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); WIL(NULL, WI_LOCAL_TEE, capture_block_ptr); WIL(NULL, WI_LOCAL_GET, capture_block_ptr); WIL(NULL, WI_I32_CONST, func->captures->total_size_in_bytes); - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_U32], 0); + emit_store_instruction(mod, &code, mod->context->types.basic[Basic_Kind_U32], 0); // Populate the block bh_arr_each(AstCaptureLocal *, capture, func->captures->captures) { @@ -3762,13 +3813,13 @@ EMIT_FUNC(expression, AstTyped* expr) { emit_expression(mod, &code, field->expr); u64 source_base_ptr = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); WIL(NULL, WI_LOCAL_TEE, source_base_ptr); - emit_load_instruction(mod, &code, &basic_types[Basic_Kind_U32], 0); + emit_load_instruction(mod, &code, field->type->Union.tag_type, 0); WIL(NULL, WI_I32_CONST, field->idx); WI(NULL, WI_I32_EQ); emit_enter_structured_block(mod, &code, SBT_Basic_If, field->token); emit_stack_address(mod, &code, intermediate_local, field->token); WIL(NULL, WI_I32_CONST, 1); // 1 is Some - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_I32], 0); + emit_store_instruction(mod, &code, field->type->Union.tag_type, 0); emit_stack_address(mod, &code, intermediate_local + type_alignment_of(field->type), field->token); 
WIL(NULL, WI_LOCAL_GET, source_base_ptr); @@ -3787,7 +3838,7 @@ EMIT_FUNC(expression, AstTyped* expr) { WI(NULL, WI_ELSE); emit_stack_address(mod, &code, intermediate_local, field->token); WIL(NULL, WI_I32_CONST, 0); // 0 is None - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_I32], 0); + emit_store_instruction(mod, &code, field->type->Union.tag_type, 0); emit_leave_structured_block(mod, &code); local_raw_free(mod->local_alloc, WASM_TYPE_PTR); @@ -3822,12 +3873,12 @@ EMIT_FUNC(expression, AstTyped* expr) { // that I cannot find a good way to factor them all without just introducing a ton of complexity. fori (i, 0, total_linear_members - idx - field_linear_members) WI(NULL, WI_DROP); - u64 *temporaries = bh_alloc_array(global_scratch_allocator, u64, field_linear_members); + u64 *temporaries = bh_alloc_array(mod->context->scratch_alloc, u64, field_linear_members); fori (i, 0, field_linear_members) temporaries[i] = 0; TypeWithOffset two = { 0 }; forir (i, field_linear_members - 1, 0) { - type_linear_member_lookup(field->type, i, &two); + type_linear_member_lookup(mod->context, field->type, i, &two); WasmType wt = onyx_type_to_wasm_type(two.type); temporaries[i] = local_raw_allocate(mod->local_alloc, wt); @@ -3837,7 +3888,7 @@ EMIT_FUNC(expression, AstTyped* expr) { fori (i, 0, idx) WI(NULL, WI_DROP); fori (i, 0, field_linear_members) { - type_linear_member_lookup(field->type, i, &two); + type_linear_member_lookup(mod->context, field->type, i, &two); WIL(NULL, WI_LOCAL_GET, temporaries[i]); @@ -3845,7 +3896,7 @@ EMIT_FUNC(expression, AstTyped* expr) { local_raw_free(mod->local_alloc, wt); } - bh_free(global_scratch_allocator, temporaries); + bh_free(mod->context->scratch_alloc, temporaries); } } @@ -3899,7 +3950,7 @@ EMIT_FUNC(expression, AstTyped* expr) { WasmType backing_type = onyx_type_to_wasm_type(ev->type); if (backing_type == WASM_TYPE_INT32) WID(NULL, WI_I32_CONST, num->value.i); else if (backing_type == WASM_TYPE_INT64) WID(NULL, WI_I64_CONST, 
num->value.l); - else onyx_report_error(ev->token->pos, Error_Critical, "Invalid backing type for enum."); + else ONYX_ERROR(ev->token->pos, Error_Critical, "Invalid backing type for enum."); break; } @@ -3914,7 +3965,7 @@ EMIT_FUNC(expression, AstTyped* expr) { AstFileContents* fc = (AstFileContents *) expr; // :ProperLinking - ensure_node_has_been_submitted_for_emission((AstNode *) fc); + ensure_node_has_been_submitted_for_emission(mod->context, (AstNode *) fc); emit_data_relocation_for_node(mod, &code, (AstNode *) fc); CodePatchInfo code_patch; @@ -3943,17 +3994,14 @@ EMIT_FUNC(expression, AstTyped* expr) { u64 local_offset = emit_local_allocation(mod, &code, (AstTyped *) callsite); assert((local_offset & LOCAL_IS_WASM) == 0); - AstTyped *values[] = { (AstTyped *) callsite->filename, (AstTyped *) callsite->line, (AstTyped *) callsite->column }; - emit_values_into_contiguous_memory(mod, &code, mod->stack_base_idx, - callsite->type, local_offset, 3, values); - - WIL(NULL, WI_LOCAL_GET, mod->stack_base_idx); - - if (local_offset > 0) { - WIL(NULL, WI_PTR_CONST, local_offset); - WI(NULL, WI_PTR_ADD); - } + ValueWithOffset values[] = { + (AstTyped *) callsite->filename, 0, + (AstTyped *) callsite->line, 2 * POINTER_SIZE, + (AstTyped *) callsite->column, 3 * POINTER_SIZE + }; + emit_values_into_contiguous_memory(mod, &code, mod->stack_base_idx, local_offset, 3, values); + emit_stack_address(mod, &code, local_offset, NULL); break; } @@ -3966,14 +4014,14 @@ EMIT_FUNC(expression, AstTyped* expr) { case Ast_Kind_Switch_Case: { // This error message should be moved to checking, but this is the // best place to do it right now. 
- onyx_report_error(expr->token->pos, Error_Critical, "'case' statements are only allowed in a 'switch' statement."); + ONYX_ERROR(expr->token->pos, Error_Critical, "'case' statements are only allowed in a 'switch' statement."); break; } case Ast_Kind_Code_Block: { // Like above, this error message should be moved to checking, but // this is the best place to do it right now. - onyx_report_error(expr->token->pos, Error_Critical, "'#quote' blocks are only to be used at compile-time. Using them as a runtime value is not allowed."); + ONYX_ERROR(expr->token->pos, Error_Critical, "'#quote' blocks are only to be used at compile-time. Using them as a runtime value is not allowed."); break; } @@ -4176,8 +4224,12 @@ EMIT_FUNC(return, AstReturn* ret) { AstLocal* result_destination = NULL; i64 jump_label = get_structured_jump_label(mod, Jump_Type_Return, ret->count + 1); + if (ret->from_proc) { + jump_label = -1; + } + // - // If this is return statement if an inner return of a `do` block, + // If this is a return statement of an inner return of a `do` block, // we have to get the result destination out of the return location stack. // This can be computed as the -ret->count element.
// @@ -4197,10 +4249,8 @@ EMIT_FUNC(return, AstReturn* ret) { emit_generic_store_instruction(mod, &code, (AstTyped *) result_destination, NULL); } else if (mod->curr_cc == CC_Return_Stack) { - WIL(NULL, WI_LOCAL_GET, mod->stack_base_idx); - WID(NULL, WI_I32_CONST, type_size_of(ret->expr->type)); - WI(NULL, WI_I32_SUB); - + assert(mod->stack_return_location_idx); + WIL(NULL, WI_LOCAL_GET, mod->stack_return_location_idx); emit_expression(mod, &code, ret->expr); emit_store_instruction(mod, &code, ret->expr->type, 0); @@ -4222,7 +4272,6 @@ EMIT_FUNC(return, AstReturn* ret) { b32 need_to_copy_to_separate_buffer_to_avoid_corrupted_from_deferred_calls = 0; u64 return_value_buffer; if (onyx_type_is_stored_in_memory(ret->expr->type) - && !is_lval((AstNode *) ret->expr) && bh_arr_length(mod->deferred_stmts) > 0) { need_to_copy_to_separate_buffer_to_avoid_corrupted_from_deferred_calls = 1; @@ -4272,21 +4321,23 @@ EMIT_FUNC(stack_enter, u64 stacksize) { bh_align(stacksize, 16); - u64 stack_top_idx = bh_imap_get(&mod->index_map, (u64) &builtin_stack_top); + u64 stack_top_idx = bh_imap_get(&mod->index_map, (u64) &mod->context->builtins.stack_top); - // HACK: slightly... There will be space for 5 instructions + // HACK: slightly... 
There will be space for 6 instructions if (stacksize == 0) { code[0] = (WasmInstruction) { WI_GLOBAL_GET, { .l = stack_top_idx } }; - code[1] = (WasmInstruction) { WI_LOCAL_SET, { .l = mod->stack_base_idx} }; + code[1] = (WasmInstruction) { WI_LOCAL_SET, { .l = mod->stack_restore_idx} }; code[2] = (WasmInstruction) { WI_NOP, 0 }; code[3] = (WasmInstruction) { WI_NOP, 0 }; code[4] = (WasmInstruction) { WI_NOP, 0 }; + code[5] = (WasmInstruction) { WI_NOP, 0 }; } else { code[0] = (WasmInstruction) { WI_GLOBAL_GET, { .l = stack_top_idx } }; - code[1] = (WasmInstruction) { WI_LOCAL_TEE, { .l = mod->stack_base_idx} }; + code[1] = (WasmInstruction) { WI_LOCAL_TEE, { .l = mod->stack_restore_idx} }; code[2] = (WasmInstruction) { WI_I32_CONST, { .l = stacksize } }; - code[3] = (WasmInstruction) { WI_I32_ADD, 0 }; - code[4] = (WasmInstruction) { WI_GLOBAL_SET, { .l = stack_top_idx } }; + code[3] = (WasmInstruction) { WI_I32_SUB, 0 }; + code[4] = (WasmInstruction) { WI_LOCAL_TEE, { .l = mod->stack_base_idx} }; + code[5] = (WasmInstruction) { WI_GLOBAL_SET, { .l = stack_top_idx } }; } *pcode = code; @@ -4318,7 +4369,7 @@ EMIT_FUNC(zero_value_for_type, Type* type, OnyxToken* where, AstTyped *alloc_nod TypeWithOffset two; fori (i, 0, mem_count) { - type_linear_member_lookup(type, i, &two); + type_linear_member_lookup(mod->context, type, i, &two); emit_zero_value_for_type(mod, &code, two.type, where, NULL); } @@ -4340,13 +4391,13 @@ EMIT_FUNC(zero_value_for_type, Type* type, OnyxToken* where, AstTyped *alloc_nod emit_zero_value_for_type(mod, &code, type->Distinct.base_type, where, alloc_node); } else { - if (type == &basic_types[Basic_Kind_Void]) { + if (type == mod->context->types.basic[Basic_Kind_Void]) { return; } WasmType wt = onyx_type_to_wasm_type(type); if (wt == WASM_TYPE_VOID) { - onyx_report_error(where->pos, Error_Critical, "Cannot produce a zero-value for this type."); + ONYX_ERROR(where->pos, Error_Critical, "Cannot produce a zero-value for this type."); } 
emit_zero_value(mod, &code, wt); } @@ -4367,14 +4418,14 @@ static i32 generate_type_idx(OnyxWasmModule* mod, Type* ft) { while (params_left-- > 0) { switch (type_get_param_pass(*param_type)) { case Param_Pass_By_Value: *(t++) = (char) onyx_type_to_wasm_type(*param_type); break; - case Param_Pass_By_Implicit_Pointer: *(t++) = (char) onyx_type_to_wasm_type(&basic_types[Basic_Kind_Rawptr]); break; + case Param_Pass_By_Implicit_Pointer: *(t++) = (char) onyx_type_to_wasm_type(mod->context->types.basic[Basic_Kind_Rawptr]); break; case Param_Pass_By_Multiple_Values: { u32 mem_count = type_structlike_mem_count(*param_type); StructMember smem; fori (i, 0, mem_count) { - type_lookup_member_by_idx(*param_type, i, &smem); + type_lookup_member_by_idx(mod->context, *param_type, i, &smem); *(t++) = (char) onyx_type_to_wasm_type(smem.type); } @@ -4387,39 +4438,41 @@ static i32 generate_type_idx(OnyxWasmModule* mod, Type* ft) { param_type++; } + + if (type_function_get_cc(ft) == CC_Return_Stack) { + *(t++) = onyx_type_to_wasm_type(mod->context->types.basic[Basic_Kind_Rawptr]); + param_count += 1; + } + *(t++) = ':'; WasmType return_type = onyx_type_to_wasm_type(ft->Function.return_type); - *(t++) = (char) return_type; + *t = '\0'; - i32 type_idx = 0; i32 index = shgeti(mod->type_map, type_repr_buf); if (index != -1) { - type_idx = mod->type_map[index].value; - } else { - // NOTE: Make a new type - WasmFuncType* type = (WasmFuncType*) bh_alloc(mod->allocator, sizeof(WasmFuncType) + sizeof(WasmType) * param_count); - type->return_type = return_type; - type->param_count = param_count; - - fori (i, 0, type->param_count) { - type->param_types[i] = type_repr_buf[i]; - } + return mod->type_map[index].value; + } - bh_arr_push(mod->types, type); + // NOTE: Make a new type + WasmFuncType* type = (WasmFuncType*) bh_alloc(mod->allocator, sizeof(WasmFuncType) + sizeof(WasmType) * param_count); + type->return_type = return_type; + type->param_count = param_count; - shput(mod->type_map, 
type_repr_buf, mod->next_type_idx); - type_idx = mod->next_type_idx; - mod->next_type_idx++; + fori (i, 0, type->param_count) { + type->param_types[i] = type_repr_buf[i]; } - return type_idx; + bh_arr_push(mod->types, type); + + shput(mod->type_map, type_repr_buf, mod->next_type_idx); + return mod->next_type_idx++; } static i32 get_element_idx(OnyxWasmModule* mod, AstFunction* func) { - ensure_node_has_been_submitted_for_emission((AstNode *) func); + ensure_node_has_been_submitted_for_emission(mod->context, (AstNode *) func); if (bh_imap_has(&mod->elem_map, (u64) func)) { return bh_imap_get(&mod->elem_map, (u64) func); @@ -4449,17 +4502,16 @@ EMIT_FUNC(stack_trace_blob, AstFunction *fd) { assert(!(mod->stack_trace_idx & LOCAL_IS_WASM)); u64 file_name_id, func_name_id; - u8* node_data = bh_alloc_array(context.ast_alloc, u8, 6 * POINTER_SIZE); + u8* node_data = bh_alloc_array(mod->context->ast_alloc, u8, 5 * POINTER_SIZE); - char *name = get_function_name(fd); + char *name = get_function_name(mod->context, fd); emit_raw_string(mod, (char *) fd->token->pos.filename, strlen(fd->token->pos.filename), &file_name_id, (u64 *) &node_data[4]); emit_raw_string(mod, name, strlen(name), &func_name_id, (u64 *) &node_data[16]); *((u32 *) &node_data[8]) = fd->token->pos.line; - *((u32 *) &node_data[20]) = fd->type->id; WasmDatum stack_node_data = ((WasmDatum) { .data = node_data, - .length = 6 * POINTER_SIZE, + .length = 5 * POINTER_SIZE, .alignment = POINTER_SIZE, }); u32 stack_node_data_id = emit_data_entry(mod, &stack_node_data); @@ -4477,16 +4529,16 @@ EMIT_FUNC(stack_trace_blob, AstFunction *fd) { bh_arr_push(mod->data_patches, patch); u64 offset = 0; - u64 stack_trace_pass_global = bh_imap_get(&mod->index_map, (u64) &builtin_stack_trace); + u64 stack_trace_pass_global = bh_imap_get(&mod->index_map, (u64) &mod->context->builtins.stack_trace); emit_location_return_offset(mod, &code, (AstTyped *) fd->stack_trace_local, &offset); WIL(NULL, WI_GLOBAL_GET, 
stack_trace_pass_global); - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_Rawptr], offset); + emit_store_instruction(mod, &code, mod->context->types.basic[Basic_Kind_Rawptr], offset); offset = 0; emit_location_return_offset(mod, &code, (AstTyped *) fd->stack_trace_local, &offset); emit_data_relocation(mod, &code, stack_node_data_id); - emit_store_instruction(mod, &code, &basic_types[Basic_Kind_Rawptr], offset + 4); + emit_store_instruction(mod, &code, mod->context->types.basic[Basic_Kind_Rawptr], offset + 4); *pcode = code; } @@ -4506,7 +4558,7 @@ static i32 assign_function_index(OnyxWasmModule *mod, AstFunction *fd) { static void emit_function(OnyxWasmModule* mod, AstFunction* fd) { i32 func_idx = assign_function_index(mod, fd); - if (fd == builtin_initialize_data_segments && !mod->doing_linking) { + if (fd == mod->context->builtins.initialize_data_segments && !mod->doing_linking) { // This is a large hack, but is necessary. // This particular function (__initialize_data_segments) should not be generated // until the module is in its linking phase. 
This is because we have to wait @@ -4521,14 +4573,16 @@ static void emit_function(OnyxWasmModule* mod, AstFunction* fd) { WasmFunc wasm_func = { 0 }; wasm_func.type_idx = type_idx; wasm_func.location = fd->token; + wasm_func.name = get_function_assembly_name(mod->context, fd); bh_arr_new(mod->allocator, wasm_func.code, 16); mod->current_func_idx = func_idx; + mod->stack_return_location_idx = 0; - debug_begin_function(mod, func_idx, fd->token, get_function_name(fd)); + debug_begin_function(mod, func_idx, fd->token, get_function_name(mod->context, fd)); - if (fd == builtin_initialize_data_segments && context.options->use_post_mvp_features) { + if (fd == mod->context->builtins.initialize_data_segments && mod->context->options->use_post_mvp_features) { emit_initialize_data_segments_body(mod, &wasm_func.code); debug_emit_instruction(mod, NULL); @@ -4541,7 +4595,7 @@ static void emit_function(OnyxWasmModule* mod, AstFunction* fd) { return; } - if (fd == builtin_run_init_procedures) { + if (fd == mod->context->builtins.run_init_procedures) { emit_run_init_procedures(mod, &wasm_func.code); debug_emit_instruction(mod, NULL); @@ -4577,21 +4631,28 @@ static void emit_function(OnyxWasmModule* mod, AstFunction* fd) { } } - mod->local_alloc->param_count = localidx; - mod->curr_cc = type_function_get_cc(fd->type); assert(mod->curr_cc != CC_Undefined); + if (mod->curr_cc == CC_Return_Stack) { + // When returning on the stack, the location to write + // the result to is passed as the last parameter. + mod->stack_return_location_idx = localidx | LOCAL_IS_WASM; + localidx += 1; + + // TODO: Make this next line work. 
+ // debug_introduce_symbol_by_name(mod, "$return", DSL_REGISTER, mod->stack_return_location_idx, mod->context->types.basic[Basic_Kind_Rawptr]); + } + + mod->local_alloc->param_count = localidx; + bh_arr_clear(mod->stack_leave_patches); - debug_emit_instruction(mod, fd->token); - debug_emit_instruction(mod, fd->token); - debug_emit_instruction(mod, fd->token); - debug_emit_instruction(mod, fd->token); - debug_emit_instruction(mod, fd->token); - bh_arr_insert_end(wasm_func.code, 5); - fori (i, 0, 5) wasm_func.code[i] = (WasmInstruction) { WI_NOP, 0 }; + fori (i, 0, 6) debug_emit_instruction(mod, fd->token); + bh_arr_insert_end(wasm_func.code, 6); + fori (i, 0, 6) wasm_func.code[i] = (WasmInstruction) { WI_NOP, 0 }; + mod->stack_restore_idx = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); mod->stack_base_idx = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR); debug_function_set_ptr_idx(mod, func_idx, mod->stack_base_idx); @@ -4601,7 +4662,7 @@ static void emit_function(OnyxWasmModule* mod, AstFunction* fd) { debug_emit_instruction(mod, NULL); debug_emit_instruction(mod, NULL); - u64 global_closure_base_idx = bh_imap_get(&mod->index_map, (u64) &builtin_closure_base); + u64 global_closure_base_idx = bh_imap_get(&mod->index_map, (u64) &mod->context->builtins.closure_base); bh_arr_push(wasm_func.code, ((WasmInstruction) { WI_GLOBAL_GET, { .l = global_closure_base_idx } })); bh_arr_push(wasm_func.code, ((WasmInstruction) { WI_LOCAL_SET, { .l = mod->closure_base_idx } })); } @@ -4621,12 +4682,12 @@ static void emit_function(OnyxWasmModule* mod, AstFunction* fd) { debug_emit_instruction(mod, NULL); debug_emit_instruction(mod, NULL); - u64 stack_top_idx = bh_imap_get(&mod->index_map, (u64) &builtin_stack_top); - bh_arr_push(wasm_func.code, ((WasmInstruction) { WI_LOCAL_GET, { .l = mod->stack_base_idx } })); + u64 stack_top_idx = bh_imap_get(&mod->index_map, (u64) &mod->context->builtins.stack_top); + bh_arr_push(wasm_func.code, ((WasmInstruction) { WI_LOCAL_GET, { 
.l = mod->stack_restore_idx } })); bh_arr_push(wasm_func.code, ((WasmInstruction) { WI_GLOBAL_SET, { .l = stack_top_idx } })); bh_arr_each(PatchInfo, patch, mod->stack_leave_patches) { - wasm_func.code[patch->instruction_index + 0] = (WasmInstruction) { WI_LOCAL_GET, { .l = mod->stack_base_idx } }; + wasm_func.code[patch->instruction_index + 0] = (WasmInstruction) { WI_LOCAL_GET, { .l = mod->stack_restore_idx } }; wasm_func.code[patch->instruction_index + 1] = (WasmInstruction) { WI_GLOBAL_SET, { .l = stack_top_idx } }; } } @@ -4692,7 +4753,7 @@ static void emit_foreign_function(OnyxWasmModule* mod, AstFunction* fd) { OnyxToken *foreign_import = fd->foreign.import_name->token; if (fd->is_foreign_dyncall) { - module = bh_aprintf(global_heap_allocator, "dyncall:%b", foreign_module->text, foreign_module->length); + module = bh_aprintf(mod->context->gp_alloc, "dyncall:%b", foreign_module->text, foreign_module->length); char type_encoding[65] = {0}; encode_type_as_dyncall_symbol(type_encoding, fd->type->Function.return_type); @@ -4701,11 +4762,11 @@ static void emit_foreign_function(OnyxWasmModule* mod, AstFunction* fd) { encode_type_as_dyncall_symbol(type_encoding, param->local->type); } - name = bh_aprintf(global_heap_allocator, "%b:%s", foreign_import->text, foreign_import->length, type_encoding); + name = bh_aprintf(mod->context->gp_alloc, "%b:%s", foreign_import->text, foreign_import->length, type_encoding); } else { - module = bh_aprintf(global_heap_allocator, "%b", foreign_module->text, foreign_module->length); - name = bh_aprintf(global_heap_allocator, "%b", foreign_import->text, foreign_import->length); + module = bh_aprintf(mod->context->gp_alloc, "%b", foreign_module->text, foreign_module->length); + name = bh_aprintf(mod->context->gp_alloc, "%b", foreign_import->text, foreign_import->length); } WasmImport import = { @@ -4726,7 +4787,7 @@ static void emit_export_directive(OnyxWasmModule* mod, AstDirectiveExport* expor token_toggle_end(export->export_name); if 
(shgeti(mod->exports, export->export_name->text) != -1) { - onyx_report_error(export->token->pos, Error_Critical, "Duplicate export name, '%s'.", export->export_name->text); + ONYX_ERROR(export->token->pos, Error_Critical, "Duplicate export name, '%s'.", export->export_name->text); token_toggle_end(export->export_name); return; } @@ -4735,7 +4796,7 @@ static void emit_export_directive(OnyxWasmModule* mod, AstDirectiveExport* expor AstTyped *the_export = (AstTyped *) strip_aliases((AstNode *) export->export); assert(the_export); - ensure_node_has_been_submitted_for_emission((AstNode *) the_export); + ensure_node_has_been_submitted_for_emission(mod->context, (AstNode *) the_export); CodePatchInfo code_patch; code_patch.kind = Code_Patch_Export; @@ -4763,7 +4824,7 @@ static void emit_export_directive(OnyxWasmModule* mod, AstDirectiveExport* expor return; } -static void emit_global(OnyxWasmModule* module, AstGlobal* global) { +static void emit_global(OnyxWasmModule* mod, AstGlobal* global) { WasmType global_type = onyx_type_to_wasm_type(global->type); WasmGlobal glob = { @@ -4772,9 +4833,9 @@ static void emit_global(OnyxWasmModule* module, AstGlobal* global) { .initial_value = NULL, }; - i32 global_idx = (i32) bh_imap_get(&module->index_map, (u64) global); + i32 global_idx = (i32) bh_imap_get(&mod->index_map, (u64) global); - bh_arr_new(global_heap_allocator, glob.initial_value, 1); + bh_arr_new(mod->context->gp_alloc, glob.initial_value, 1); switch (global_type) { case WASM_TYPE_INT32: bh_arr_push(glob.initial_value, ((WasmInstruction) { WI_I32_CONST, 0 })); break; @@ -4785,23 +4846,23 @@ static void emit_global(OnyxWasmModule* module, AstGlobal* global) { default: assert("Invalid global type" && 0); break; } - bh_arr_set_at(module->globals, global_idx, glob); + bh_arr_set_at(mod->globals, global_idx, glob); - if (global == &builtin_stack_top) - module->stack_top_ptr = &module->globals[global_idx].initial_value[0].data.i1; + if (global == 
&mod->context->builtins.stack_top) + mod->stack_top_ptr = &mod->globals[global_idx].initial_value[0].data.i1; - if (global == &builtin_heap_start) - module->heap_start_ptr = &module->globals[global_idx].initial_value[0].data.i1; + if (global == &mod->context->builtins.heap_start) + mod->heap_start_ptr = &mod->globals[global_idx].initial_value[0].data.i1; - if (global == &builtin_tls_size) - module->tls_size_ptr = &module->globals[global_idx].initial_value[0].data.i1; + if (global == &mod->context->builtins.tls_size) + mod->tls_size_ptr = &mod->globals[global_idx].initial_value[0].data.i1; } static void emit_raw_string(OnyxWasmModule* mod, char *data, i32 len, u64 *out_data_id, u64 *out_len) { // NOTE: Allocating more than necessary, but there are no cases // in a string literal that create more bytes than already // existed. You can create less however ('\n' => 0x0a). - char* strdata = bh_alloc_array(global_heap_allocator, char, len + 1); + char* strdata = bh_alloc_array(mod->context->gp_alloc, char, len + 1); i32 length = string_process_escape_seqs(strdata, data, len); i32 index = shgeti(mod->string_literals, (char *) strdata); @@ -4810,7 +4871,7 @@ static void emit_raw_string(OnyxWasmModule* mod, char *data, i32 len, u64 *out_d *out_data_id = sti.data_id; *out_len = sti.len; - bh_free(global_heap_allocator, strdata); + bh_free(mod->context->gp_alloc, strdata); return; } @@ -4844,7 +4905,9 @@ static u32 emit_data_entry(OnyxWasmModule *mod, WasmDatum *datum) { static void emit_constexpr(ConstExprContext *ctx, AstTyped *node, u32 offset) { if (!emit_constexpr_(ctx, node, offset)) { - onyx_report_error(node->token->pos, Error_Critical, + onyx_report_error( + ctx->module->context, + node->token->pos, Error_Critical, "Cannot generate constant data for '%s'.", onyx_ast_node_kind_string(node->kind)); } @@ -4859,8 +4922,9 @@ static b32 emit_constexpr_(ConstExprContext *ctx, AstTyped *node, u32 offset) { node = (AstTyped *) strip_aliases((AstNode *) node); if 
(node_is_type((AstNode *) node)) { - Type* constructed_type = type_build_from_ast(context.ast_alloc, (AstType *) node); + Type* constructed_type = type_build_from_ast(ctx->module->context, (AstType *) node); CE(i32, 0) = constructed_type->id; + ensure_type_has_been_submitted_for_emission(ctx->module, constructed_type); return 1; } @@ -4893,7 +4957,7 @@ static b32 emit_constexpr_(ConstExprContext *ctx, AstTyped *node, u32 offset) { StructMember smem; fori (i, 0, mem_count) { - type_lookup_member_by_idx(sl_type, i, &smem); + type_lookup_member_by_idx(ctx->module->context, sl_type, i, &smem); retval &= emit_constexpr_(ctx, sl->args.values[i], smem.offset + offset); } @@ -4912,7 +4976,7 @@ static b32 emit_constexpr_(ConstExprContext *ctx, AstTyped *node, u32 offset) { case Ast_Kind_StrLit: { AstStrLit* sl = (AstStrLit *) node; - ensure_node_has_been_submitted_for_emission((AstNode *) sl); + ensure_node_has_been_submitted_for_emission(ctx->module->context, (AstNode *) sl); DatumPatchInfo patch; patch.kind = Datum_Patch_Data; @@ -4921,14 +4985,14 @@ static b32 emit_constexpr_(ConstExprContext *ctx, AstTyped *node, u32 offset) { patch.data_id = 0; patch.offset = 0; patch.node_to_use_if_data_id_is_null = (AstNode *) sl; - bh_arr_push(ctx->module->data_patches, patch); + bh_arr_push_unsafe(ctx->module->data_patches, patch); CodePatchInfo code_patch; code_patch.kind = Code_Patch_String_Length_In_Data; code_patch.func_idx = ctx->data_id; // Repurposing func_idx for this. code_patch.instr = offset + POINTER_SIZE; // Repurposing instr for offset into section code_patch.node_related_to_patch = (AstNode *) sl; - bh_arr_push(ctx->module->code_patches, code_patch); + bh_arr_push_unsafe(ctx->module->code_patches, code_patch); break; } @@ -4989,7 +5053,7 @@ static b32 emit_constexpr_(ConstExprContext *ctx, AstTyped *node, u32 offset) { // to get the actual data id of the addressed node. 
patch.node_to_use_if_data_id_is_null = expr; - bh_arr_push(ctx->module->data_patches, patch); + bh_arr_push_unsafe(ctx->module->data_patches, patch); break; } @@ -5059,27 +5123,27 @@ static void emit_memory_reservation(OnyxWasmModule* mod, AstMemRes* memres) { u64 alignment = type_alignment_of(effective_type); u64 size = type_size_of(effective_type); - if (context.options->generate_type_info) { - if (type_table_node != NULL && (AstMemRes *) type_table_node == memres) { - u64 table_location = build_type_table(mod); + if (mod->context->options->generate_type_info) { + if (mod->context->builtins.type_table_node != NULL && (AstMemRes *) mod->context->builtins.type_table_node == memres) { + u64 table_location = prepare_type_table(mod); memres->data_id = table_location; return; } - if (tagged_procedures_node != NULL && (AstMemRes *) tagged_procedures_node == memres) { + if (mod->context->builtins.tagged_procedures_node != NULL && (AstMemRes *) mod->context->builtins.tagged_procedures_node == memres) { u64 tagged_procedures_location = build_tagged_procedures(mod); memres->data_id = tagged_procedures_location; return; } - if (tagged_globals_node != NULL && (AstMemRes *) tagged_globals_node == memres) { + if (mod->context->builtins.tagged_globals_node != NULL && (AstMemRes *) mod->context->builtins.tagged_globals_node == memres) { u64 tagged_globals_location = build_tagged_globals(mod); memres->data_id = tagged_globals_location; return; } } - if (foreign_blocks_node != NULL && (AstMemRes *) foreign_blocks_node == memres) { + if (mod->context->builtins.foreign_blocks_node != NULL && (AstMemRes *) mod->context->builtins.foreign_blocks_node == memres) { u64 foreign_blocks_location = build_foreign_blocks(mod); memres->data_id = foreign_blocks_location; return; @@ -5095,7 +5159,7 @@ static void emit_memory_reservation(OnyxWasmModule* mod, AstMemRes* memres) { u8* data = NULL; if (memres->initial_value != NULL) { assert(!memres->threadlocal); - data = 
bh_alloc(global_heap_allocator, size); + data = bh_alloc(mod->context->gp_alloc, size); } WasmDatum datum = { @@ -5124,15 +5188,15 @@ static void emit_file_contents(OnyxWasmModule* mod, AstFileContents* fc) { const char* parent_file = fc->token->pos.filename; if (parent_file == NULL) parent_file = "."; - char* parent_folder = bh_path_get_parent(parent_file, global_scratch_allocator); + char* parent_folder = bh_path_get_parent(parent_file, mod->context->scratch_alloc); OnyxToken *filename_token = fc->filename_expr->token; token_toggle_end(filename_token); - char* temp_fn = bh_alloc_array(global_scratch_allocator, char, filename_token->length); + char* temp_fn = bh_alloc_array(mod->context->scratch_alloc, char, filename_token->length); i32 temp_fn_len = string_process_escape_seqs(temp_fn, filename_token->text, filename_token->length); - char* filename = bh_lookup_file(temp_fn, parent_folder, "", 0, NULL, 0); - fc->filename = bh_strdup(global_heap_allocator, filename); + char* filename = bh_lookup_file(temp_fn, parent_folder, NULL, NULL, NULL, mod->context->scratch_alloc); + fc->filename = bh_strdup(mod->context->gp_alloc, filename); token_toggle_end(filename_token); } @@ -5145,7 +5209,7 @@ static void emit_file_contents(OnyxWasmModule* mod, AstFileContents* fc) { } if (!bh_file_exists(fc->filename)) { - onyx_report_error(fc->token->pos, Error_Critical, + ONYX_ERROR(fc->token->pos, Error_Critical, "Unable to open file for reading, '%s'.", fc->filename); return; @@ -5155,8 +5219,8 @@ static void emit_file_contents(OnyxWasmModule* mod, AstFileContents* fc) { // if the filename is prefixed with a './' or '.\\' then it should be relative to the // file in which is was inclded. The loaded file info above should probably use the full // file path in order to avoid duplicates. 
- bh_file_contents contents = bh_file_read_contents(global_heap_allocator, fc->filename); - u8* actual_data = bh_alloc(global_heap_allocator, contents.length + 1); + bh_file_contents contents = bh_file_read_contents(mod->context->gp_alloc, fc->filename); + u8* actual_data = bh_alloc(mod->context->gp_alloc, contents.length + 1); u32 length = contents.length + 1; memcpy(actual_data, contents.data, contents.length); actual_data[contents.length] = 0; @@ -5176,9 +5240,67 @@ static void emit_file_contents(OnyxWasmModule* mod, AstFileContents* fc) { })); } -OnyxWasmModule onyx_wasm_module_create(bh_allocator alloc) { - OnyxWasmModule module = { - .allocator = alloc, +static void emit_js_node(OnyxWasmModule* mod, AstJsNode *js) { + char *contents = NULL; + + if (js->filepath) { + const char* parent_file = js->token->pos.filename; + if (parent_file == NULL) parent_file = "."; + + char* parent_folder = bh_path_get_parent(parent_file, mod->context->scratch_alloc); + + OnyxToken *filename_token = js->filepath->token; + token_toggle_end(filename_token); + + char* temp_fn = bh_alloc_array(mod->context->scratch_alloc, char, filename_token->length); + i32 temp_fn_len = string_process_escape_seqs(temp_fn, filename_token->text, filename_token->length); + char* filename = bh_strdup( + mod->context->gp_alloc, + bh_lookup_file(temp_fn, parent_folder, NULL, NULL, NULL, mod->context->scratch_alloc) + ); + + token_toggle_end(filename_token); + + if (!bh_file_exists(filename)) { + ONYX_ERROR(js->token->pos, Error_Critical, + "Unable to open file for reading, '%s'.", + filename); + return; + } + + bh_file_contents file_contents = bh_file_read_contents(mod->context->gp_alloc, filename); + contents = bh_alloc(mod->context->gp_alloc, file_contents.length + 1); + memcpy(contents, file_contents.data, file_contents.length); + contents[file_contents.length] = 0; + bh_file_contents_free(&file_contents); + + } else { + contents = get_expression_string_value(mod->context, js->code, NULL); + } + + 
JsPartial partial; + partial.order = js->order; + partial.code = contents; + + bh_arr_push(mod->js_partials, partial); +} + +static void flush_enqueued_types_for_info(OnyxWasmModule *mod) { + if (mod->global_type_table_data_id < 0) return; + + while (bh_arr_length(mod->types_enqueued_for_info) > 0) { + i32 type_id = bh_arr_pop(mod->types_enqueued_for_info); + + Type *type = type_lookup_by_id(mod->context, type_id); + build_type_info_for_type(mod, type); + } +} + + +void onyx_wasm_module_initialize(Context *context, OnyxWasmModule *module) { + *module = ((OnyxWasmModule) { + .allocator = context->gp_alloc, + .context = context, .type_map = NULL, .next_type_idx = 0, @@ -5219,6 +5341,8 @@ OnyxWasmModule onyx_wasm_module_create(bh_allocator alloc) { .stack_top_ptr = NULL, .stack_base_idx = 0, + .stack_restore_idx = 0, + .stack_return_location_idx = 0, .closure_base_idx = 0, @@ -5230,146 +5354,181 @@ OnyxWasmModule onyx_wasm_module_create(bh_allocator alloc) { .foreign_blocks = NULL, .next_foreign_block_idx = 0, - .procedures_with_tags = NULL - }; + .procedures_with_tags = NULL, - bh_arena* eid = bh_alloc(global_heap_allocator, sizeof(bh_arena)); - bh_arena_init(eid, global_heap_allocator, 16 * 1024 * 1024); - module.extended_instr_data = eid; - module.extended_instr_alloc = bh_arena_allocator(eid); - - bh_arr_new(alloc, module.types, 4); - bh_arr_new(alloc, module.funcs, 4); - bh_arr_new(alloc, module.imports, 4); - bh_arr_new(alloc, module.globals, 4); - bh_arr_new(alloc, module.data, 4); - bh_arr_new(alloc, module.elems, 4); - bh_arr_new(alloc, module.libraries, 4); - bh_arr_new(alloc, module.library_paths, 4); - bh_arr_new(alloc, module.for_remove_info, 4); - - bh_arr_new(global_heap_allocator, module.return_location_stack, 4); - bh_arr_new(global_heap_allocator, module.structured_jump_target, 16); - bh_arr_set_length(module.structured_jump_target, 0); - - sh_new_arena(module.type_map); - sh_new_arena(module.exports); - sh_new_arena(module.loaded_file_info); - 
sh_new_arena(module.string_literals); - - bh_imap_init(&module.index_map, global_heap_allocator, 128); - bh_imap_init(&module.local_map, global_heap_allocator, 16); - bh_imap_init(&module.elem_map, global_heap_allocator, 16); - - bh_arr_new(global_heap_allocator, module.deferred_stmts, 4); - bh_arr_new(global_heap_allocator, module.local_allocations, 4); - bh_arr_new(global_heap_allocator, module.stack_leave_patches, 4); - bh_arr_new(global_heap_allocator, module.foreign_blocks, 4); - bh_arr_new(global_heap_allocator, module.procedures_with_tags, 4); - bh_arr_new(global_heap_allocator, module.globals_with_tags, 4); - bh_arr_new(global_heap_allocator, module.all_procedures, 4); - bh_arr_new(global_heap_allocator, module.data_patches, 4); - bh_arr_new(global_heap_allocator, module.code_patches, 4); + .types_enqueued_for_info = NULL, + .global_type_table_data_id = -1, + .type_info_size = 0, + }); + + bh_arena* eid = bh_alloc(context->gp_alloc, sizeof(bh_arena)); + bh_arena_init(eid, context->gp_alloc, 16 * 1024 * 1024); + module->extended_instr_data = eid; + module->extended_instr_alloc = bh_arena_allocator(eid); + + bh_arr_new(context->gp_alloc, module->types, 4); + bh_arr_new(context->gp_alloc, module->funcs, 4); + bh_arr_new(context->gp_alloc, module->imports, 4); + bh_arr_new(context->gp_alloc, module->globals, 4); + bh_arr_new(context->gp_alloc, module->data, 4); + bh_arr_new(context->gp_alloc, module->elems, 4); + bh_arr_new(context->gp_alloc, module->libraries, 4); + bh_arr_new(context->gp_alloc, module->library_paths, 4); + bh_arr_new(context->gp_alloc, module->js_partials, 4); + bh_arr_new(context->gp_alloc, module->for_remove_info, 4); + + bh_arr_new(context->gp_alloc, module->return_location_stack, 4); + bh_arr_new(context->gp_alloc, module->structured_jump_target, 16); + bh_arr_set_length(module->structured_jump_target, 0); + + sh_new_arena(module->type_map); + sh_new_arena(module->exports); + sh_new_arena(module->loaded_file_info); + 
sh_new_arena(module->string_literals); + sh_new_arena(module->custom_sections); + + bh_imap_init(&module->index_map, context->gp_alloc, 128); + bh_imap_init(&module->local_map, context->gp_alloc, 16); + bh_imap_init(&module->elem_map, context->gp_alloc, 16); + + bh_arr_new(context->gp_alloc, module->deferred_stmts, 4); + bh_arr_new(context->gp_alloc, module->local_allocations, 4); + bh_arr_new(context->gp_alloc, module->stack_leave_patches, 4); + bh_arr_new(context->gp_alloc, module->foreign_blocks, 4); + bh_arr_new(context->gp_alloc, module->procedures_with_tags, 4); + bh_arr_new(context->gp_alloc, module->globals_with_tags, 4); + bh_arr_new(context->gp_alloc, module->all_procedures, 4); + bh_arr_new(context->gp_alloc, module->data_patches, 4); + bh_arr_new(context->gp_alloc, module->code_patches, 4); + + bh_arr_new(context->gp_alloc, module->types_enqueued_for_info, 32); #ifdef ENABLE_DEBUG_INFO - module.debug_context = bh_alloc_item(context.ast_alloc, DebugContext); - module.debug_context->allocator = global_heap_allocator; - module.debug_context->next_file_id = 0; - module.debug_context->next_sym_id = 0; - module.debug_context->last_token = NULL; - module.debug_context->sym_info = NULL; - module.debug_context->sym_patches = NULL; - module.debug_context->funcs = NULL; - - sh_new_arena(module.debug_context->file_info); - bh_arr_new(global_heap_allocator, module.debug_context->sym_info, 32); - bh_arr_new(global_heap_allocator, module.debug_context->sym_patches, 32); - bh_arr_new(global_heap_allocator, module.debug_context->funcs, 16); - - bh_buffer_init(&module.debug_context->op_buffer, global_heap_allocator, 1024); + module->debug_context = bh_alloc_item(context->ast_alloc, DebugContext); + module->debug_context->allocator = context->gp_alloc; + module->debug_context->next_file_id = 0; + module->debug_context->next_sym_id = 0; + module->debug_context->last_token = NULL; + module->debug_context->sym_info = NULL; + module->debug_context->sym_patches = NULL; + 
module->debug_context->funcs = NULL; + + sh_new_arena(module->debug_context->file_info); + bh_arr_new(context->gp_alloc, module->debug_context->sym_info, 32); + bh_arr_new(context->gp_alloc, module->debug_context->sym_patches, 32); + bh_arr_new(context->gp_alloc, module->debug_context->funcs, 16); + + bh_buffer_init(&module->debug_context->op_buffer, context->gp_alloc, 1024); #endif - - return module; } -void emit_entity(Entity* ent) { - OnyxWasmModule* module = context.wasm_module; - module->current_func_idx = -1; +void emit_entity(Context *context, Entity* ent) { + OnyxWasmModule* mod = context->wasm_module; + mod->current_func_idx = -1; switch (ent->type) { case Entity_Type_Foreign_Function_Header: - emit_foreign_function(module, ent->function); - bh_imap_put(&module->index_map, (u64) ent->function, module->next_foreign_func_idx++); - bh_arr_push(module->all_procedures, ent->function); + emit_foreign_function(mod, ent->function); + bh_imap_put(&mod->index_map, (u64) ent->function, mod->next_foreign_func_idx++); + bh_arr_push(mod->all_procedures, ent->function); if (ent->function->tags != NULL) { - bh_arr_push(module->procedures_with_tags, ent->function); + bh_arr_push(mod->procedures_with_tags, ent->function); } break; case Entity_Type_Function_Header: if (ent->function->flags & Ast_Flag_Proc_Is_Null) { - if (module->null_proc_func_idx == -1) module->null_proc_func_idx = get_element_idx(module, ent->function); + if (mod->null_proc_func_idx == -1) mod->null_proc_func_idx = get_element_idx(mod, ent->function); } if (ent->function->tags != NULL) { - bh_arr_push(module->procedures_with_tags, ent->function); + bh_arr_push(mod->procedures_with_tags, ent->function); } break; case Entity_Type_Global_Header: - bh_imap_put(&module->index_map, (u64) ent->global, module->next_global_idx++); + bh_imap_put(&mod->index_map, (u64) ent->global, mod->next_global_idx++); break; case Entity_Type_String_Literal: { - emit_string_literal(module, (AstStrLit *) ent->strlit); + 
emit_string_literal(mod, (AstStrLit *) ent->strlit); break; } case Entity_Type_File_Contents: { - emit_file_contents(module, (AstFileContents *) ent->file_contents); + emit_file_contents(mod, (AstFileContents *) ent->file_contents); break; } case Entity_Type_Memory_Reservation_Type: { if (ent->mem_res->tags != NULL) { - bh_arr_push(module->globals_with_tags, ent->mem_res); + bh_arr_push(mod->globals_with_tags, ent->mem_res); } break; } case Entity_Type_Memory_Reservation: { - emit_memory_reservation(module, (AstMemRes *) ent->mem_res); + emit_memory_reservation(mod, (AstMemRes *) ent->mem_res); break; } case Entity_Type_Process_Directive: { if (ent->expr->kind == Ast_Kind_Directive_Export) { - emit_export_directive(module, (AstDirectiveExport *) ent->expr); + emit_export_directive(mod, (AstDirectiveExport *) ent->expr); } if (ent->expr->kind == Ast_Kind_Directive_Library) { - bh_arr_push(module->libraries, ent->library->library_name); + bh_arr_push(mod->libraries, ent->library->library_name); + } + + if (ent->expr->kind == Ast_Kind_Directive_Wasm_Section) { + AstDirectiveWasmSection *section = (AstDirectiveWasmSection *) ent->expr; + + WasmCustomSection custom; + custom.name = section->name; + custom.contents = section->contents; + custom.len = section->length; + + if (shgeti(mod->custom_sections, section->name) >= 0) { + onyx_report_warning(mod->context, section->token->pos, "Duplicate definitions for custom section '%s'.", section->name); + } + + shput(mod->custom_sections, section->name, custom); } break; } case Entity_Type_Foreign_Block: { - ent->foreign_block->foreign_block_number = module->next_foreign_block_idx++; - bh_arr_push(module->foreign_blocks, (AstForeignBlock *) ent->foreign_block); + ent->foreign_block->foreign_block_number = mod->next_foreign_block_idx++; + bh_arr_push(mod->foreign_blocks, (AstForeignBlock *) ent->foreign_block); break; } - case Entity_Type_Function: emit_function(module, ent->function); break; - case Entity_Type_Global: 
emit_global(module, ent->global); break; + case Entity_Type_Function: emit_function(mod, ent->function); break; + case Entity_Type_Global: emit_global(mod, ent->global); break; + + case Entity_Type_JS: emit_js_node(mod, ent->js); break; default: break; } ent->state = Entity_State_Finalized; + + // HACK + flush_enqueued_types_for_info(mod); +} + + +static i32 cmp_type_info(const void *a, const void *b) { + return *(i32 *) a - *(i32 *) b; } -void onyx_wasm_module_link(OnyxWasmModule *module, OnyxWasmLinkOptions *options) { + +#undef BH_INTERNAL_ALLOCATOR +#define BH_INTERNAL_ALLOCATOR (context->gp_alloc) + +void onyx_wasm_module_link(Context *context, OnyxWasmModule *module, OnyxWasmLinkOptions *options) { // If the pointer size is going to change, // the code will probably need to be altered. assert(POINTER_SIZE == 4); @@ -5460,14 +5619,14 @@ void onyx_wasm_module_link(OnyxWasmModule *module, OnyxWasmLinkOptions *options) module->memory_min_size = options->memory_min_size; module->memory_max_size = options->memory_max_size; - if (context.options->use_multi_threading || options->import_memory) { + if (context->options->use_multi_threading || options->import_memory) { module->needs_memory_section = 0; WasmImport mem_import = { .kind = WASM_FOREIGN_MEMORY, .min = options->memory_min_size, - .max = options->memory_max_size, // NOTE: Why not use all 4 Gigs of memory? 
- .shared = context.options->use_multi_threading && context.options->runtime != Runtime_Onyx, + .max = options->memory_max_size, + .shared = context->options->use_multi_threading && context->options->runtime != Runtime_Onyx, .mod = options->import_memory_module_name, .name = options->import_memory_import_name, @@ -5497,9 +5656,20 @@ void onyx_wasm_module_link(OnyxWasmModule *module, OnyxWasmLinkOptions *options) shput(module->exports, options->export_func_table_name, func_table_export); module->export_count++; + + WasmExport closure_export = { + .kind = WASM_FOREIGN_GLOBAL, + .idx = bh_imap_get(&module->index_map, (u64) &context->builtins.closure_base), + }; + + shput(module->exports, "__closure_base", closure_export); + module->export_count++; } - u32 datum_offset = options->null_reserve_size; + i32 datum_offset = options->null_reserve_size; + + datum_offset += options->stack_size; + bh_arr_each(WasmDatum, datum, module->data) { assert(datum->id > 0); @@ -5511,7 +5681,7 @@ void onyx_wasm_module_link(OnyxWasmModule *module, OnyxWasmLinkOptions *options) // Now that we know where the data elements will go (and to avoid a lot of patches), // we can emit the __initialize_data_segments function. 
- emit_function(module, builtin_initialize_data_segments); + emit_function(module, context->builtins.initialize_data_segments); #ifdef ENABLE_DEBUG_INFO if (module->debug_context) { @@ -5540,7 +5710,7 @@ void onyx_wasm_module_link(OnyxWasmModule *module, OnyxWasmLinkOptions *options) WasmFunc *func = &module->funcs[patch->index]; assert(func->code[patch->location].type == WI_PTR_CONST); - func->code[patch->location].data.l = (u64) datum->offset_ + patch->offset; + func->code[patch->location].data.l = (i64) datum->offset_ + patch->offset; break; } @@ -5557,9 +5727,7 @@ void onyx_wasm_module_link(OnyxWasmModule *module, OnyxWasmLinkOptions *options) assert(datum_to_alter->id == patch->index); u32 *addr = (u32 *) bh_pointer_add(datum_to_alter->data, patch->location); - if (*addr != 0) { - *addr += (u32) datum->offset_ + patch->offset; - } + *addr += (u32) datum->offset_ + patch->offset; break; } @@ -5568,12 +5736,15 @@ void onyx_wasm_module_link(OnyxWasmModule *module, OnyxWasmLinkOptions *options) } } + WasmDatum *type_table_data = &module->data[module->global_type_table_data_id - 1]; + qsort(type_table_data->data, *module->type_info_entry_count, 2 * POINTER_SIZE, cmp_type_info); + assert(module->stack_top_ptr && module->heap_start_ptr); - *module->stack_top_ptr = datum_offset; - bh_align(*module->stack_top_ptr, options->stack_alignment); + *module->stack_top_ptr = options->null_reserve_size + options->stack_size; + *module->heap_start_ptr = datum_offset; - *module->heap_start_ptr = *module->stack_top_ptr + options->stack_size; + bh_align(*module->stack_top_ptr, options->stack_alignment); bh_align(*module->heap_start_ptr, 16); if (module->tls_size_ptr) { @@ -5582,23 +5753,23 @@ void onyx_wasm_module_link(OnyxWasmModule *module, OnyxWasmLinkOptions *options) } - if (context.options->print_function_mappings) { - bh_arr_each(AstFunction *, pfunc, module->all_procedures) { - AstFunction *func = *pfunc; + // if (context->options->print_function_mappings) { + // 
bh_arr_each(AstFunction *, pfunc, module->all_procedures) { + // AstFunction *func = *pfunc; - u64 func_idx = (u64) bh_imap_get(&module->index_map, (u64) func); + // u64 func_idx = (u64) bh_imap_get(&module->index_map, (u64) func); - if (!func->is_foreign) { - func_idx += module->next_foreign_func_idx; - } + // if (!func->is_foreign) { + // func_idx += module->next_foreign_func_idx; + // } - bh_printf("%d -> %s:%d:%d\n", - func_idx, - func->token->pos.filename, - func->token->pos.line, - func->token->pos.column); - } - } + // bh_printf("%d -> %s:%d:%d\n", + // func_idx, + // func->token->pos.filename, + // func->token->pos.line, + // func->token->pos.column); + // } + // } } void onyx_wasm_module_free(OnyxWasmModule* module) { @@ -5611,16 +5782,19 @@ void onyx_wasm_module_free(OnyxWasmModule* module) { bh_imap_free(&module->index_map); shfree(module->type_map); shfree(module->exports); + shfree(module->loaded_file_info); + shfree(module->string_literals); + shfree(module->custom_sections); } -b32 onyx_wasm_build_link_options_from_node(OnyxWasmLinkOptions *opts, AstTyped *node) { +b32 onyx_wasm_build_link_options_from_node(Context *context, OnyxWasmLinkOptions *opts, AstTyped *node) { node = (AstTyped *) strip_aliases((AstNode *) node); assert(node && node->kind == Ast_Kind_Struct_Literal); - assert(builtin_link_options_type); + assert(context->builtins.link_options_type); - Type *link_options_type = type_build_from_ast(context.ast_alloc, builtin_link_options_type); + Type *link_options_type = type_build_from_ast(context, context->builtins.link_options_type); AstStructLiteral *input = (AstStructLiteral *) node; @@ -5628,56 +5802,52 @@ b32 onyx_wasm_build_link_options_from_node(OnyxWasmLinkOptions *opts, AstTyped * b32 out_is_valid; // TODO: These should be properly error handled. 
- assert(type_lookup_member(link_options_type, "stack_first", &smem)); - opts->stack_first = get_expression_integer_value(input->args.values[smem.idx], &out_is_valid) != 0; - if (!out_is_valid) return 0; - - assert(type_lookup_member(link_options_type, "stack_size", &smem)); - opts->stack_size = get_expression_integer_value(input->args.values[smem.idx], &out_is_valid); + assert(type_lookup_member(context, link_options_type, "stack_size", &smem)); + opts->stack_size = get_expression_integer_value(context, input->args.values[smem.idx], &out_is_valid); if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "stack_alignment", &smem)); - opts->stack_alignment = get_expression_integer_value(input->args.values[smem.idx], &out_is_valid); + assert(type_lookup_member(context, link_options_type, "stack_alignment", &smem)); + opts->stack_alignment = get_expression_integer_value(context, input->args.values[smem.idx], &out_is_valid); if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "null_reserve_size", &smem)); - opts->null_reserve_size = get_expression_integer_value(input->args.values[smem.idx], &out_is_valid); + assert(type_lookup_member(context, link_options_type, "null_reserve_size", &smem)); + opts->null_reserve_size = get_expression_integer_value(context, input->args.values[smem.idx], &out_is_valid); if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "import_memory", &smem)); - opts->import_memory = get_expression_integer_value(input->args.values[smem.idx], &out_is_valid) != 0; + assert(type_lookup_member(context, link_options_type, "import_memory", &smem)); + opts->import_memory = get_expression_integer_value(context, input->args.values[smem.idx], &out_is_valid) != 0; if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "import_memory_module_name", &smem)); - opts->import_memory_module_name = get_expression_string_value(input->args.values[smem.idx], &out_is_valid); + 
assert(type_lookup_member(context, link_options_type, "import_memory_module_name", &smem)); + opts->import_memory_module_name = get_expression_string_value(context, input->args.values[smem.idx], &out_is_valid); if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "import_memory_import_name", &smem)); - opts->import_memory_import_name = get_expression_string_value(input->args.values[smem.idx], &out_is_valid); + assert(type_lookup_member(context, link_options_type, "import_memory_import_name", &smem)); + opts->import_memory_import_name = get_expression_string_value(context, input->args.values[smem.idx], &out_is_valid); if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "export_memory", &smem)); - opts->export_memory = get_expression_integer_value(input->args.values[smem.idx], &out_is_valid) != 0; + assert(type_lookup_member(context, link_options_type, "export_memory", &smem)); + opts->export_memory = get_expression_integer_value(context, input->args.values[smem.idx], &out_is_valid) != 0; if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "export_memory_name", &smem)); - opts->export_memory_name = get_expression_string_value(input->args.values[smem.idx], &out_is_valid); + assert(type_lookup_member(context, link_options_type, "export_memory_name", &smem)); + opts->export_memory_name = get_expression_string_value(context, input->args.values[smem.idx], &out_is_valid); if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "export_func_table", &smem)); - opts->export_func_table = get_expression_integer_value(input->args.values[smem.idx], &out_is_valid) != 0; + assert(type_lookup_member(context, link_options_type, "export_func_table", &smem)); + opts->export_func_table = get_expression_integer_value(context, input->args.values[smem.idx], &out_is_valid) != 0; if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "export_func_table_name", &smem)); - 
opts->export_func_table_name = get_expression_string_value(input->args.values[smem.idx], &out_is_valid); + assert(type_lookup_member(context, link_options_type, "export_func_table_name", &smem)); + opts->export_func_table_name = get_expression_string_value(context, input->args.values[smem.idx], &out_is_valid); if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "memory_min_size", &smem)); - opts->memory_min_size = get_expression_integer_value(input->args.values[smem.idx], &out_is_valid); + assert(type_lookup_member(context, link_options_type, "memory_min_size", &smem)); + opts->memory_min_size = get_expression_integer_value(context, input->args.values[smem.idx], &out_is_valid); if (!out_is_valid) return 0; - assert(type_lookup_member(link_options_type, "memory_max_size", &smem)); - opts->memory_max_size = get_expression_integer_value(input->args.values[smem.idx], &out_is_valid); + assert(type_lookup_member(context, link_options_type, "memory_max_size", &smem)); + opts->memory_max_size = get_expression_integer_value(context, input->args.values[smem.idx], &out_is_valid); if (!out_is_valid) return 0; return 1; diff --git a/compiler/src/wasm_intrinsics.h b/compiler/src/wasm_intrinsics.h index 55e221f3d..23ced5d7f 100644 --- a/compiler/src/wasm_intrinsics.h +++ b/compiler/src/wasm_intrinsics.h @@ -253,7 +253,7 @@ EMIT_FUNC(intrinsic_atomic_wait, Type* type, OnyxToken* where) { return; bad_type: - onyx_report_error(where->pos, Error_Critical, "Bad type for atomic wait, '%s'. Only i32 and i64 are supported.", type_get_name(type)); + ONYX_ERROR(where->pos, Error_Critical, "Bad type for atomic wait, '%s'. Only i32 and i64 are supported.", type_get_name(mod->context, type)); } EMIT_FUNC_NO_ARGS(intrinsic_atomic_notify) { @@ -290,7 +290,7 @@ EMIT_FUNC(intrinsic_atomic_load, Type* type, OnyxToken* where) { return; bad_type: - onyx_report_error(where->pos, Error_Critical, "Bad type for atomic load, '%s'. 
Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type)); + ONYX_ERROR(where->pos, Error_Critical, "Bad type for atomic load, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(mod->context, type)); } EMIT_FUNC(intrinsic_atomic_store, Type* type, OnyxToken* where) { @@ -315,7 +315,7 @@ EMIT_FUNC(intrinsic_atomic_store, Type* type, OnyxToken* where) { return; bad_type: - onyx_report_error(where->pos, Error_Critical, "Bad type for atomic store, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type)); + ONYX_ERROR(where->pos, Error_Critical, "Bad type for atomic store, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(mod->context, type)); } EMIT_FUNC(intrinsic_atomic_add, Type* type, OnyxToken* where) { @@ -340,7 +340,7 @@ EMIT_FUNC(intrinsic_atomic_add, Type* type, OnyxToken* where) { return; bad_type: - onyx_report_error(where->pos, Error_Critical, "Bad type for atomic add, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type)); + ONYX_ERROR(where->pos, Error_Critical, "Bad type for atomic add, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(mod->context, type)); } EMIT_FUNC(intrinsic_atomic_sub, Type* type, OnyxToken* where) { @@ -365,7 +365,7 @@ EMIT_FUNC(intrinsic_atomic_sub, Type* type, OnyxToken* where) { return; bad_type: - onyx_report_error(where->pos, Error_Critical, "Bad type for atomic sub, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type)); + ONYX_ERROR(where->pos, Error_Critical, "Bad type for atomic sub, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(mod->context, type)); } EMIT_FUNC(intrinsic_atomic_and, Type* type, OnyxToken* where) { @@ -390,7 +390,7 @@ EMIT_FUNC(intrinsic_atomic_and, Type* type, OnyxToken* where) { return; bad_type: - onyx_report_error(where->pos, Error_Critical, "Bad type for atomic and, '%s'. 
Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type)); + ONYX_ERROR(where->pos, Error_Critical, "Bad type for atomic and, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(mod->context, type)); } EMIT_FUNC(intrinsic_atomic_or, Type* type, OnyxToken* where) { @@ -415,7 +415,7 @@ EMIT_FUNC(intrinsic_atomic_or, Type* type, OnyxToken* where) { return; bad_type: - onyx_report_error(where->pos, Error_Critical, "Bad type for atomic or, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type)); + ONYX_ERROR(where->pos, Error_Critical, "Bad type for atomic or, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(mod->context, type)); } EMIT_FUNC(intrinsic_atomic_xor, Type* type, OnyxToken* where) { @@ -440,7 +440,7 @@ EMIT_FUNC(intrinsic_atomic_xor, Type* type, OnyxToken* where) { return; bad_type: - onyx_report_error(where->pos, Error_Critical, "Bad type for atomic xor, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type)); + ONYX_ERROR(where->pos, Error_Critical, "Bad type for atomic xor, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(mod->context, type)); } EMIT_FUNC(intrinsic_atomic_xchg, Type* type, OnyxToken* where) { @@ -465,7 +465,7 @@ EMIT_FUNC(intrinsic_atomic_xchg, Type* type, OnyxToken* where) { return; bad_type: - onyx_report_error(where->pos, Error_Critical, "Bad type for atomic xchg, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type)); + ONYX_ERROR(where->pos, Error_Critical, "Bad type for atomic xchg, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(mod->context, type)); } EMIT_FUNC(intrinsic_atomic_cmpxchg, Type* type, OnyxToken* where) { @@ -490,21 +490,15 @@ EMIT_FUNC(intrinsic_atomic_cmpxchg, Type* type, OnyxToken* where) { return; bad_type: - onyx_report_error(where->pos, Error_Critical, "Bad type for atomic cmpxchg, '%s'. 
Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type)); + ONYX_ERROR(where->pos, Error_Critical, "Bad type for atomic cmpxchg, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(mod->context, type)); } EMIT_FUNC_NO_ARGS(initialize_data_segments_body) { // :ProperLinking - if (!context.options->use_multi_threading || !context.options->use_post_mvp_features) return; + if (!mod->context->options->use_multi_threading || !mod->context->options->use_post_mvp_features) return; bh_arr(WasmInstruction) code = *pcode; - // - // Because this code is generated direction in the function - // it is assumed that EVERY data entry will be entered by - // this point. If data section entries can be entered after - // function body generation starts, this code will have to - // move to a link phase thing. i32 index = 0; bh_arr_each(WasmDatum, datum, mod->data) { assert(datum->id > 0); @@ -524,7 +518,7 @@ EMIT_FUNC_NO_ARGS(initialize_data_segments_body) { EMIT_FUNC_NO_ARGS(run_init_procedures) { bh_arr(WasmInstruction) code = *pcode; - bh_arr_each(AstFunction *, func, init_procedures) { + bh_arr_each(AstFunction *, func, mod->context->builtins.init_procedures) { CodePatchInfo code_patch; code_patch.kind = Code_Patch_Callee; code_patch.func_idx = mod->current_func_idx; @@ -532,7 +526,7 @@ EMIT_FUNC_NO_ARGS(run_init_procedures) { code_patch.node_related_to_patch = (AstNode *) *func; bh_arr_push(mod->code_patches, code_patch); - ensure_node_has_been_submitted_for_emission((AstNode *) *func); + ensure_node_has_been_submitted_for_emission(mod->context, (AstNode *) *func); debug_emit_instruction(mod, NULL); bh_arr_push(code, ((WasmInstruction){ WI_CALL, 0 })); diff --git a/compiler/src/wasm_output.h b/compiler/src/wasm_output.h index 8320ad788..5eff0b02f 100644 --- a/compiler/src/wasm_output.h +++ b/compiler/src/wasm_output.h @@ -38,7 +38,7 @@ static inline i32 output_custom_section_name(char *name, bh_buffer *buff) { return len_len + len; } -static 
void output_instruction(WasmFunc* func, WasmInstruction* instr, bh_buffer* buff); +static void output_instruction(WasmFunc* func, WasmInstruction* instr, b32 debug_enabled, bh_buffer* buff); static i32 output_vector(void** arr, i32 stride, i32 arrlen, vector_func elem, bh_buffer* vec_buff) { i32 len; @@ -156,8 +156,6 @@ static i32 output_funcsection(OnyxWasmModule* module, bh_buffer* buff) { } static i32 output_tablesection(OnyxWasmModule* module, bh_buffer* buff) { - // if (bh_arr_length(module->elems) == 0) return 0; - i32 prev_len = buff->length; bh_buffer_write_byte(buff, WASM_SECTION_ID_TABLE); @@ -183,7 +181,6 @@ static i32 output_tablesection(OnyxWasmModule* module, bh_buffer* buff) { static i32 output_memorysection(OnyxWasmModule* module, bh_buffer* buff) { // :ProperLinking - // if (context.options->use_multi_threading) return 0; if (!module->needs_memory_section) return 0; i32 prev_len = buff->length; @@ -223,7 +220,7 @@ static i32 output_globalsection(OnyxWasmModule* module, bh_buffer* buff) { bh_buffer_write_byte(&vec_buff, 0x01); bh_arr_each(WasmInstruction, instr, global->initial_value) - output_instruction(NULL, instr, &vec_buff); + output_instruction(NULL, instr, module->context->options->debug_info_enabled, &vec_buff); // NOTE: Initial value expression terminator bh_buffer_write_byte(&vec_buff, (u8) WI_BLOCK_END); @@ -426,11 +423,11 @@ static i32 output_locals(WasmFunc* func, bh_buffer* buff) { return buff->length - prev_len; } -static void output_instruction(WasmFunc* func, WasmInstruction* instr, bh_buffer* buff) { +static void output_instruction(WasmFunc* func, WasmInstruction* instr, b32 debug_enabled, bh_buffer* buff) { i32 leb_len; u8* leb; - if (instr->type == WI_NOP && !context.options->debug_info_enabled) return; + if (instr->type == WI_NOP && !debug_enabled) return; if (instr->type & SIMD_INSTR_MASK) { bh_buffer_write_byte(buff, 0xFD); @@ -568,7 +565,7 @@ static void output_instruction(WasmFunc* func, WasmInstruction* instr, bh_buffer } } 
-static i32 output_code(WasmFunc* func, bh_buffer* buff) { +static i32 output_code(WasmFunc* func, b32 debug_enabled, bh_buffer* buff) { bh_buffer code_buff; bh_buffer_init(&code_buff, buff->allocator, 128); @@ -579,7 +576,7 @@ static i32 output_code(WasmFunc* func, bh_buffer* buff) { assert(func->code); // Output code - bh_arr_each(WasmInstruction, instr, func->code) output_instruction(func, instr, &code_buff); + bh_arr_each(WasmInstruction, instr, func->code) output_instruction(func, instr, debug_enabled, &code_buff); i32 leb_len; u8* leb = uint_to_uleb128((u64) code_buff.length, &leb_len); @@ -605,7 +602,7 @@ static i32 output_codesection(OnyxWasmModule* module, bh_buffer* buff) { bh_arr_each(WasmFunc, func, module->funcs) { assert(func->code); - output_code(func, &vec_buff); + output_code(func, module->context->options->debug_info_enabled, &vec_buff); } leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len); @@ -618,7 +615,7 @@ static i32 output_codesection(OnyxWasmModule* module, bh_buffer* buff) { } static i32 output_datacountsection(OnyxWasmModule* module, bh_buffer* buff) { - if (!context.options->use_post_mvp_features) return 0; + if (!module->context->options->use_post_mvp_features) return 0; i32 prev_len = buff->length; @@ -655,12 +652,12 @@ static i32 output_datasection(OnyxWasmModule* module, bh_buffer* buff) { bh_arr_each(WasmDatum, datum, module->data) { i32 memory_flags = 0x00; // :ProperLinking - if (context.options->use_multi_threading) memory_flags |= 0x01; + if (module->context->options->use_multi_threading) memory_flags |= 0x01; bh_buffer_write_byte(&vec_buff, memory_flags); // :ProperLinking - if (!context.options->use_multi_threading) { + if (!module->context->options->use_multi_threading) { bh_buffer_write_byte(&vec_buff, WI_I32_CONST); leb = int_to_leb128((i64) datum->offset_, &leb_len); bh_buffer_append(&vec_buff, leb, leb_len); @@ -727,7 +724,7 @@ static i32 output_onyx_libraries_section(OnyxWasmModule* module, bh_buffer* buff #ifdef 
ENABLE_DEBUG_INFO static i32 output_ovm_debug_sections(OnyxWasmModule* module, bh_buffer* buff) { - if (!module->debug_context || !context.options->debug_info_enabled) return 0; + if (!module->debug_context || !module->context->options->debug_info_enabled) return 0; DebugContext *ctx = module->debug_context; @@ -838,13 +835,13 @@ static i32 output_ovm_debug_sections(OnyxWasmModule* module, bh_buffer* buff) { output_custom_section_name("ovm_debug_types", §ion_buff); - i32 type_count = bh_arr_length(type_map.entries); + i32 type_count = bh_arr_length(module->context->types.type_map.entries); output_unsigned_integer(type_count, §ion_buff); - bh_arr_each(bh__imap_entry, entry, type_map.entries) { + bh_arr_each(bh__imap_entry, entry, module->context->types.type_map.entries) { u32 id = entry->key; Type *type = (Type *) entry->value; - const char *name = type_get_name(type); + const char *name = type_get_name(module->context, type); output_unsigned_integer(id, §ion_buff); output_name(name, strlen(name), §ion_buff); @@ -861,7 +858,7 @@ static i32 output_ovm_debug_sections(OnyxWasmModule* module, bh_buffer* buff) { if (type->Basic.kind == Basic_Kind_Type_Index) { output_unsigned_integer(5, §ion_buff); output_unsigned_integer(2, §ion_buff); - output_unsigned_integer(basic_types[Basic_Kind_U32].id, §ion_buff); + output_unsigned_integer(module->context->types.basic[Basic_Kind_U32]->id, §ion_buff); continue; } @@ -869,7 +866,7 @@ static i32 output_ovm_debug_sections(OnyxWasmModule* module, bh_buffer* buff) { // rawptr -> ^void output_unsigned_integer(2, §ion_buff); output_unsigned_integer(1, §ion_buff); - output_unsigned_integer(basic_types[Basic_Kind_Void].id, §ion_buff); + output_unsigned_integer(module->context->types.basic[Basic_Kind_Void]->id, §ion_buff); continue; } @@ -921,7 +918,7 @@ static i32 output_ovm_debug_sections(OnyxWasmModule* module, bh_buffer* buff) { bh_arr_each(AstEnumValue *, pev, e_type->values) { AstEnumValue *ev = *pev; - 
output_unsigned_integer(get_expression_integer_value(ev->value, NULL), §ion_buff); + output_unsigned_integer(get_expression_integer_value(module->context, ev->value, NULL), §ion_buff); output_name(ev->token->text, ev->token->length, §ion_buff); } continue; @@ -987,7 +984,7 @@ static i32 output_ovm_debug_sections(OnyxWasmModule* module, bh_buffer* buff) { fori (i, 0, mem_count) { StructMember smem; - type_lookup_member_by_idx(type, i, &smem); + type_lookup_member_by_idx(module->context, type, i, &smem); output_unsigned_integer(smem.offset, §ion_buff); output_unsigned_integer(smem.type->id, §ion_buff); @@ -1042,13 +1039,103 @@ static i32 output_ovm_debug_sections(OnyxWasmModule* module, bh_buffer* buff) { } #endif +static i32 output_name_section(OnyxWasmModule* module, bh_buffer* buff) { + i32 prev_len = buff->length; + + bh_buffer_write_byte(buff, WASM_SECTION_ID_CUSTOM); + + bh_buffer name_buff; + bh_buffer_init(&name_buff, buff->allocator, 128); + + output_custom_section_name("name", &name_buff); + + output_unsigned_integer(1, &name_buff); // 1 for function names + + bh_buffer func_name_buff; + bh_buffer_init(&func_name_buff, buff->allocator, 128); + + output_unsigned_integer(bh_arr_length(module->funcs), &func_name_buff); + bh_arr_each(WasmFunc, func, module->funcs) { + if (func->name == NULL) continue; + + u64 func_idx = func - module->funcs; + func_idx += module->next_foreign_func_idx; + + output_unsigned_integer(func_idx, &func_name_buff); + + output_name(func->name, strlen(func->name), &func_name_buff); + } + + output_unsigned_integer(func_name_buff.length, &name_buff); + bh_buffer_concat(&name_buff, func_name_buff); + bh_buffer_free(&func_name_buff); + + output_unsigned_integer(name_buff.length, buff); + bh_buffer_concat(buff, name_buff); + bh_buffer_free(&name_buff); + + return buff->length - prev_len; +} + +#define VERSION__(m,i,p) #m "." #i "." 
#p +#define VERSION_(m,i,p) VERSION__(m,i,p) +#define VERSION VERSION_(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) + +static i32 output_producer_section(OnyxWasmModule* module, bh_buffer *buff) { + i32 prev_len = buff->length; + + bh_buffer_write_byte(buff, WASM_SECTION_ID_CUSTOM); + + bh_buffer prod_buff; + bh_buffer_init(&prod_buff, buff->allocator, 128); + + output_custom_section_name("producers", &prod_buff); + + output_unsigned_integer(2, &prod_buff); + + output_name("language", 8, &prod_buff); + output_unsigned_integer(1, &prod_buff); + output_name("onyx", 4, &prod_buff); + output_name(VERSION, strlen(VERSION), &prod_buff); + + output_name("processed-by", 12, &prod_buff); + output_unsigned_integer(1, &prod_buff); + output_name("onyx", 4, &prod_buff); + output_name(VERSION, strlen(VERSION), &prod_buff); + + output_unsigned_integer(prod_buff.length, buff); + bh_buffer_concat(buff, prod_buff); + bh_buffer_free(&prod_buff); + + return buff->length - prev_len; +} + +i32 output_custom_section(OnyxWasmModule *module, bh_buffer *buff, WasmCustomSection *section) { + i32 prev_len = buff->length; + + bh_buffer_write_byte(buff, WASM_SECTION_ID_CUSTOM); + + bh_buffer inner_buff; + bh_buffer_init(&inner_buff, buff->allocator, 128); + + output_custom_section_name(section->name, &inner_buff); + + bh_buffer_append(&inner_buff, section->contents, section->len); + + output_unsigned_integer(inner_buff.length, buff); + bh_buffer_concat(buff, inner_buff); + bh_buffer_free(&inner_buff); + + return buff->length - prev_len; +} + void onyx_wasm_module_write_to_buffer(OnyxWasmModule* module, bh_buffer* buffer) { - bh_buffer_init(buffer, global_heap_allocator, 128); + bh_buffer_init(buffer, module->context->gp_alloc, 128); bh_buffer_append(buffer, WASM_MAGIC_STRING, 4); bh_buffer_append(buffer, WASM_VERSION, 4); #ifdef ENABLE_DEBUG_INFO - if (context.options->debug_info_enabled) { + if (module->context->options->debug_info_enabled) { output_ovm_debug_sections(module, buffer); } #endif 
@@ -1066,14 +1153,41 @@ void onyx_wasm_module_write_to_buffer(OnyxWasmModule* module, bh_buffer* buffer) output_datasection(module, buffer); output_onyx_libraries_section(module, buffer); + if (module->context->options->generate_name_section) { + output_name_section(module, buffer); + } + + output_producer_section(module, buffer); + + fori (i, 0, shlen(module->custom_sections)) { + output_custom_section(module, buffer, &module->custom_sections[i].value); + } + // TODO: Consider if this should always be included? // It can amount to a lot of extra data. // output_onyx_func_offset_section(module, buffer); } -void onyx_wasm_module_write_to_file(OnyxWasmModule* module, bh_file file) { - bh_buffer master_buffer; - onyx_wasm_module_write_to_buffer(module, &master_buffer); - bh_file_write(&file, master_buffer.data, master_buffer.length); + + +// +// JS File +// + +static i32 compare_js_partials(const void *p1, const void *p2) { + return ((JsPartial *) p2)->order - ((JsPartial *) p1)->order; } + +void onyx_wasm_module_write_js_partials_to_buffer(OnyxWasmModule* module, bh_buffer* buffer) { + bh_buffer_init(buffer, module->context->gp_alloc, 128); + + qsort(module->js_partials, + bh_arr_length(module->js_partials), + sizeof(JsPartial), + compare_js_partials); + + bh_arr_each(JsPartial, partial, module->js_partials) { + bh_buffer_write_string(buffer, partial->code); + } +} \ No newline at end of file diff --git a/compiler/src/wasm_runtime.c b/compiler/src/wasm_runtime.c index a58326559..c6d7c73de 100644 --- a/compiler/src/wasm_runtime.c +++ b/compiler/src/wasm_runtime.c @@ -103,17 +103,18 @@ static void *locate_symbol_in_dynamic_library_raw(char *libname, char *sym) { static void *locate_symbol_in_dynamic_library(LinkLibraryContext *ctx, char *libname, char *sym) { char *library_name; + bh_allocator alloc = bh_heap_allocator(); #ifdef _BH_LINUX - library_name = bh_lookup_file(libname, ".", ".so", 1, (const char **) ctx->library_paths, 1); + library_name = 
bh_lookup_file(libname, ".", ".so", (const char **) ctx->library_paths, NULL, alloc); #endif #ifdef _BH_DARWIN - library_name = bh_lookup_file(libname, ".", ".dylib", 1, (const char **) ctx->library_paths, 1); + library_name = bh_lookup_file(libname, ".", ".dylib", (const char **) ctx->library_paths, NULL, alloc); #endif #ifdef _BH_WINDOWS - library_name = bh_lookup_file(libname, ".", ".dll", 1, (const char **) ctx->library_paths, 1); + library_name = bh_lookup_file(libname, ".", ".dll", (const char **) ctx->library_paths, NULL, alloc); #endif return locate_symbol_in_dynamic_library_raw(library_name, sym); @@ -436,7 +437,7 @@ static void cleanup_wasm_objects() { } static wasm_trap_t *__error_on_call(void *env, const wasm_val_vec_t *args, wasm_val_vec_t *results) { - printf("Attempted to invoke imported function with no defintion, '%s'\n", (char *) env); + printf("[ERROR] Attempted to invoke imported function with no definition, '%s'\n", (char *) env); exit(1); return NULL; } @@ -546,14 +547,14 @@ static b32 link_wasm_imports( continue; bad_import: - bh_printf("Couldn't find import %b.%b.\n", module_name->data, module_name->size, import_name->data, import_name->size); + bh_printf("Unable to find import '%b.%b'.\n", module_name->data, module_name->size, import_name->data, import_name->size); return 0; } return 1; } -void onyx_run_initialize(b32 debug_enabled) { +void onyx_run_initialize(b32 debug_enabled, const char *debug_socket) { wasm_config = wasm_config_new(); if (!wasm_config) { cleanup_wasm_objects(); @@ -568,10 +569,16 @@ void onyx_run_initialize(b32 debug_enabled) { i32 getpid(); i32 pid = getpid(); - char *env_path = getenv("ONYX_PATH"); - char *socket_path = bh_aprintf(bh_heap_allocator(), "%s/debug.%d", env_path, pid); + const char *socket_path = NULL; + if (debug_socket != NULL) { + socket_path = debug_socket; - void wasm_config_set_listen_path(wasm_config_t *config, char *listen_path); + } else { + char *env_path = getenv("ONYX_PATH"); + socket_path = 
bh_aprintf(bh_heap_allocator(), "%s/debug.%d", env_path, pid); + } + + void wasm_config_set_listen_path(wasm_config_t *config, const char *listen_path); wasm_config_set_listen_path(wasm_config, socket_path); #endif #endif @@ -612,7 +619,7 @@ void onyx_run_initialize(b32 debug_enabled) { wasm_runtime.onyx_print_trap = &onyx_print_trap; } -b32 onyx_run_wasm(bh_buffer wasm_bytes, int argc, char *argv[]) { +b32 onyx_run_wasm_code(bh_buffer wasm_bytes, int argc, char *argv[]) { runtime = &wasm_runtime; wasm_raw_bytes = wasm_bytes; diff --git a/compiler/src/wasm_type_table.h b/compiler/src/wasm_type_table.h index 9ed96a3d1..adcf3f7b3 100644 --- a/compiler/src/wasm_type_table.h +++ b/compiler/src/wasm_type_table.h @@ -1,13 +1,76 @@ // This file is directly included in src/onxywasm.c // It is here purely to decrease the amount of clutter in the main file. +#undef BH_INTERNAL_ALLOCATOR +#define BH_INTERNAL_ALLOCATOR (ctx->context->gp_alloc) + typedef struct StructMethodData { u32 name_loc; u32 name_len; - u32 type; u32 data_loc; + Type *type; } StructMethodData; +typedef struct MethodDataInfo { + u32 base; + u32 count; +} MethodDataInfo; + +struct TypeBuilderContext { + bh_buffer buffer; + bh_arr(u32) patches; + + OnyxWasmModule *module; + Context *context; + ConstExprContext constexpr_ctx; +}; + +#define PATCH (bh_arr_push(ctx->patches, ctx->buffer.length)) + +#define WRITE_PTR(val) \ + bh_buffer_align(&ctx->buffer, POINTER_SIZE); \ + PATCH; \ + bh_buffer_write_u32(&ctx->buffer, val); + +#define WRITE_SLICE(ptr, count) \ + WRITE_PTR(ptr); \ + bh_buffer_write_u32(&ctx->buffer, count); + + +static void write_type_idx(struct TypeBuilderContext *ctx, Type *type) { + bh_buffer_write_u32(&ctx->buffer, type->id); + ensure_type_has_been_submitted_for_emission(ctx->module, type); +} + +static u32 build_constexpr( + AstTyped *value, + bh_buffer *buffer, + ConstExprContext *constexpr_ctx +) { + if ((value->flags & Ast_Flag_Comptime) == 0) { + return 0; + } + + u32 size = 
type_size_of(value->type); + bh_buffer_align(buffer, type_alignment_of(value->type)); + + bh_buffer_grow(buffer, buffer->length + size); + constexpr_ctx->data = buffer->data; + if (!emit_constexpr_(constexpr_ctx, value, buffer->length)) { + return 0; + + } else { + buffer->length += size; + return buffer->length - size; + } +} + +#if (POINTER_SIZE == 4) + #define Table_Info_Type i32 +#else + #error "Expected POINTER_SIZE to be 4" +#endif + static void build_polymorphic_solutions_array( bh_arr(AstPolySolution) slns, bh_buffer *table_buffer, @@ -23,6 +86,7 @@ static void build_polymorphic_solutions_array( case PSK_Type: { // NOTE: This assumes a little endian compiler (which is assumed in other part of the code too) bh_buffer_append(table_buffer, &sln->type->id, 4); + ensure_type_has_been_submitted_for_emission(constexpr_ctx->module, sln->type); break; } @@ -49,758 +113,645 @@ static void build_polymorphic_solutions_array( } } -static u32 build_constexpr( - AstTyped *value, - bh_buffer *table_buffer, - ConstExprContext *constexpr_ctx +static void write_polymorphic_solutions_array( + bh_arr(AstPolySolution) slns, + struct TypeBuilderContext *ctx, + u32 *param_locations ) { - if ((value->flags & Ast_Flag_Comptime) == 0) { - return 0; + u32 i = 0; + bh_arr_each(AstPolySolution, sln, slns) { + WRITE_PTR(param_locations[i++]); + + if (sln->kind == PSK_Type) { + write_type_idx(ctx, ctx->context->types.basic[Basic_Kind_Type_Index]); + } else { + write_type_idx(ctx, sln->value->type); + } } +} - u32 size = type_size_of(value->type); - bh_buffer_align(table_buffer, type_alignment_of(value->type)); +static void build_tag_array( + bh_arr(AstTyped *) tags, + struct TypeBuilderContext *ctx, + u32 *tag_locations +) { + u32 i = 0; + bh_arr_each(AstTyped *, tag, tags) { + AstTyped* value = *tag; + assert(value->flags & Ast_Flag_Comptime); + assert(value->type); - bh_buffer_grow(table_buffer, table_buffer->length + size); - constexpr_ctx->data = table_buffer->data; - if 
(!emit_constexpr_(constexpr_ctx, value, table_buffer->length)) { - return 0; + tag_locations[i++] = build_constexpr(value, &ctx->buffer, &ctx->constexpr_ctx); + } +} - } else { - table_buffer->length += size; - return table_buffer->length - size; +static void write_tag_array( + bh_arr(AstTyped *) tags, + struct TypeBuilderContext *ctx, + u32 *tag_locations +) { + fori (i, 0, bh_arr_length(tags)) { + WRITE_PTR(tag_locations[i]); + write_type_idx(ctx, tags[i]->type); } } -static u64 build_type_table(OnyxWasmModule* module) { +static MethodDataInfo write_method_data(struct TypeBuilderContext *ctx, Type *type) { + bh_arr(StructMethodData) method_data=NULL; - bh_arr(u32) base_patch_locations=NULL; - bh_arr_new(global_heap_allocator, base_patch_locations, 256); + if (!ctx->context->options->generate_method_info) { + goto no_methods; + } -#define PATCH (bh_arr_push(base_patch_locations, table_buffer.length)) -#define WRITE_PTR(val) \ - bh_buffer_align(&table_buffer, POINTER_SIZE); \ - PATCH; \ - if (POINTER_SIZE == 4) bh_buffer_write_u32(&table_buffer, val); \ - if (POINTER_SIZE == 8) bh_buffer_write_u64(&table_buffer, val); -#define WRITE_SLICE(ptr, count) \ - WRITE_PTR(ptr); \ - if (POINTER_SIZE == 4) bh_buffer_write_u32(&table_buffer, count); \ - if (POINTER_SIZE == 8) bh_buffer_write_u64(&table_buffer, count); + AstType *ast_type = type->ast_type; + Scope *scope = get_scope_from_node(ctx->context, (AstNode *) ast_type); + if (!scope) goto no_methods; + + fori (i, 0, shlen(scope->symbols)) { + AstFunction* node = (AstFunction *) strip_aliases(scope->symbols[i].value); + if (node->kind != Ast_Kind_Function) continue; + assert(node->entity); + assert(node->entity->function == node); + + // Name + char *name = scope->symbols[i].key; + u32 name_loc = ctx->buffer.length; + u32 name_len = strlen(name); + bh_buffer_append(&ctx->buffer, name, name_len); + + // any data member + bh_buffer_align(&ctx->buffer, 4); + u32 data_loc = ctx->buffer.length; + u32 func_idx = 
get_element_idx(ctx->module, node); + bh_buffer_write_u32(&ctx->buffer, func_idx); + bh_buffer_write_u32(&ctx->buffer, 0); + + bh_arr_push(method_data, ((StructMethodData) { + .name_loc = name_loc, + .name_len = name_len, + .data_loc = data_loc, + .type = node->type, + })); + } - // This is the data behind the "type_table" slice in runtime/info/types.onyx - #if (POINTER_SIZE == 4) - #define Table_Info_Type u32 - #else - #define Table_Info_Type u64 - #endif - u32 type_count = bh_arr_length(type_map.entries) + 1; - Table_Info_Type* table_info = bh_alloc_array(global_heap_allocator, Table_Info_Type, type_count); // HACK - memset(table_info, 0, type_count * sizeof(Table_Info_Type)); + no_methods: - bh_buffer table_buffer; - bh_buffer_init(&table_buffer, global_heap_allocator, 4096); + bh_buffer_align(&ctx->buffer, 4); + u32 method_data_base = ctx->buffer.length; + u32 method_data_count = bh_arr_length(method_data); - u32 type_table_info_data_id = NEXT_DATA_ID(module); + bh_arr_each(StructMethodData, method, method_data) { + WRITE_SLICE(method->name_loc, method->name_len); + WRITE_PTR(method->data_loc); + write_type_idx(ctx, method->type); + } - ConstExprContext constexpr_ctx; - constexpr_ctx.module = module; - constexpr_ctx.data_id = type_table_info_data_id; - - // Write a "NULL" at the beginning so nothing will have to point to the first byte of the buffer. 
- bh_buffer_write_u64(&table_buffer, 0); - - bh_arr_each(bh__imap_entry, type_entry, type_map.entries) { - u64 type_idx = type_entry->key; - Type* type = (Type *) type_entry->value; - - switch (type->kind) { - case Type_Kind_Basic: { - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - bh_buffer_write_u32(&table_buffer, type->Basic.kind); - break; - } + bh_arr_free(method_data); + + return ((MethodDataInfo) { method_data_base, method_data_count }); +} - case Type_Kind_Pointer: { - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - bh_buffer_write_u32(&table_buffer, type->Pointer.elem->id); - break; - } - case Type_Kind_MultiPointer: { - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - bh_buffer_write_u32(&table_buffer, type->MultiPointer.elem->id); - break; - } - case Type_Kind_Array: { - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - bh_buffer_write_u32(&table_buffer, type->Array.elem->id); - bh_buffer_write_u32(&table_buffer, type->Array.count); - break; - } +static void write_type_info_header(struct TypeBuilderContext *ctx, Type *type) { + bh_buffer_write_u32(&ctx->buffer, type->kind); + bh_buffer_write_u32(&ctx->buffer, type_size_of(type)); + bh_buffer_write_u32(&ctx->buffer, type_alignment_of(type)); +} - case Type_Kind_Slice: { - table_info[type_idx] = table_buffer.length; - 
bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - bh_buffer_write_u32(&table_buffer, type->Slice.elem->id); - break; - } - case Type_Kind_DynArray: { - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - bh_buffer_write_u32(&table_buffer, type->DynArray.elem->id); - break; - } - case Type_Kind_VarArgs: { - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - bh_buffer_write_u32(&table_buffer, type->VarArgs.elem->id); - break; - } +static i32 build_type_info_for_basic(struct TypeBuilderContext *ctx, Type *type) { + write_type_info_header(ctx, type); + bh_buffer_write_u32(&ctx->buffer, type->Basic.kind); + return 0; +} - case Type_Kind_Compound: { - u32 components_base = table_buffer.length; +static i32 build_type_info_for_pointer(struct TypeBuilderContext *ctx, Type *type) { + write_type_info_header(ctx, type); + write_type_idx(ctx, type->Pointer.elem); + return 0; +} - u32 components_count = type->Compound.count; - fori (i, 0, components_count) { - u32 type_idx = type->Compound.types[i]->id; - bh_buffer_write_u32(&table_buffer, type_idx); - } +static i32 build_type_info_for_multipointer(struct TypeBuilderContext *ctx, Type *type) { + write_type_info_header(ctx, type); + write_type_idx(ctx, type->MultiPointer.elem); + return 0; +} - bh_buffer_align(&table_buffer, 8); - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - WRITE_SLICE(components_base, 
components_count); - break; - } +static i32 build_type_info_for_array(struct TypeBuilderContext *ctx, Type *type) { + write_type_info_header(ctx, type); + write_type_idx(ctx, type->Array.elem); + bh_buffer_write_u32(&ctx->buffer, type->Array.count); + return 0; +} - case Type_Kind_Function: { - u32 parameters_base = table_buffer.length; +static i32 build_type_info_for_slice(struct TypeBuilderContext *ctx, Type *type) { + write_type_info_header(ctx, type); + write_type_idx(ctx, type->Slice.elem); + return 0; +} - u32 parameters_count = type->Function.param_count; - fori (i, 0, parameters_count) { - u32 type_idx = type->Function.params[i]->id; - bh_buffer_write_u32(&table_buffer, type_idx); - } +static i32 build_type_info_for_dynarray(struct TypeBuilderContext *ctx, Type *type) { + write_type_info_header(ctx, type); + write_type_idx(ctx, type->DynArray.elem); + return 0; +} - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - bh_buffer_write_u32(&table_buffer, type->Function.return_type->id); +static i32 build_type_info_for_varargs(struct TypeBuilderContext *ctx, Type *type) { + write_type_info_header(ctx, type); + write_type_idx(ctx, type->VarArgs.elem); + return 0; +} - WRITE_SLICE(parameters_base, parameters_count); +static i32 build_type_info_for_compound(struct TypeBuilderContext *ctx, Type *type) { + fori (i, 0, type->Compound.count) { + write_type_idx(ctx, type->Compound.types[i]); + } - bh_buffer_write_u32(&table_buffer, type->Function.vararg_arg_pos > 0 ? 
1 : 0); - break; - } + i32 offset = ctx->buffer.length; - case Type_Kind_Enum: { - AstEnumType* ast_enum = (AstEnumType *) type->ast_type; - u32 member_count = bh_arr_length(ast_enum->values); - u32* name_locations = bh_alloc_array(global_scratch_allocator, u32, member_count); + write_type_info_header(ctx, type); + WRITE_SLICE(0, type->Compound.count); - u32 i = 0; - bh_arr_each(AstEnumValue *, value, ast_enum->values) { - name_locations[i++] = table_buffer.length; + return offset; +} - bh_buffer_append(&table_buffer, (*value)->token->text, (*value)->token->length); - } - bh_buffer_align(&table_buffer, 8); +static i32 build_type_info_for_function(struct TypeBuilderContext *ctx, Type *type) { + fori (i, 0, type->Function.param_count) { + write_type_idx(ctx, type->Function.params[i]); + } - u32 member_base = table_buffer.length; - i = 0; - bh_arr_each(AstEnumValue *, value, ast_enum->values) { - u32 name_loc = name_locations[i++]; + i32 offset = ctx->buffer.length; + write_type_info_header(ctx, type); + write_type_idx(ctx, type->Function.return_type); - bh_buffer_align(&table_buffer, 8); - WRITE_SLICE(name_loc, (*value)->token->length); + WRITE_SLICE(0, type->Function.param_count); - assert((*value)->value->kind == Ast_Kind_NumLit); - AstNumLit *num = (AstNumLit *) (*value)->value; - bh_buffer_write_u64(&table_buffer, num->value.l); - } + bh_buffer_write_u32(&ctx->buffer, type->Function.vararg_arg_pos > 0 ? 
1 : 0); - u32 name_base = table_buffer.length; - u32 name_length = strlen(type->Enum.name); - bh_buffer_append(&table_buffer, type->Enum.name, name_length); - bh_buffer_align(&table_buffer, 8); - - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - bh_buffer_write_u32(&table_buffer, type->Enum.backing->id); - WRITE_SLICE(name_base, name_length); - WRITE_SLICE(member_base, member_count); - bh_buffer_write_u32(&table_buffer, type->Enum.is_flags ? 1 : 0); - break; - } + return offset; +} - case Type_Kind_Struct: { - TypeStruct* s = &type->Struct; - u32* name_locations = bh_alloc_array(global_scratch_allocator, u32, s->mem_count); - u32* param_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(s->poly_sln)); - u32* value_locations = bh_alloc_array(global_scratch_allocator, u32, s->mem_count); - u32* meta_locations = bh_alloc_array(global_scratch_allocator, u32, s->mem_count); - u32* struct_tag_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(s->meta_tags)); - memset(value_locations, 0, s->mem_count * sizeof(u32)); - memset(meta_locations, 0, s->mem_count * sizeof(u32)); - memset(struct_tag_locations, 0, bh_arr_length(s->meta_tags) * sizeof(u32)); - - // Member names - u32 i = 0; - bh_arr_each(StructMember*, pmem, s->memarr) { - StructMember* mem = *pmem; - - name_locations[i++] = table_buffer.length; - bh_buffer_append(&table_buffer, mem->name, strlen(mem->name)); - } +static i32 build_type_info_for_enum(struct TypeBuilderContext *ctx, Type *type) { + AstEnumType* ast_enum = (AstEnumType *) type->ast_type; + u32 member_count = bh_arr_length(ast_enum->values); + u32* name_locations = bh_alloc_array(ctx->context->gp_alloc, u32, member_count); + + u32 i = 0; + bh_arr_each(AstEnumValue *, value, ast_enum->values) { + name_locations[i++] = ctx->buffer.length; + + 
bh_buffer_append(&ctx->buffer, (*value)->token->text, (*value)->token->length); + } + bh_buffer_align(&ctx->buffer, 8); - bh_buffer_align(&table_buffer, 8); + u32 member_base = ctx->buffer.length; + i = 0; + bh_arr_each(AstEnumValue *, value, ast_enum->values) { + u32 name_loc = name_locations[i++]; - // Polymorphic solutions - build_polymorphic_solutions_array(s->poly_sln, &table_buffer, &constexpr_ctx, param_locations); + assert((*value)->value->kind == Ast_Kind_NumLit); + AstNumLit *num = (AstNumLit *) (*value)->value; - bh_buffer_align(&table_buffer, 8); + WRITE_SLICE(name_loc, (*value)->token->length); + bh_buffer_write_u64(&ctx->buffer, num->value.l); + } - // Member default values - i = 0; - bh_arr_each(StructMember*, pmem, s->memarr) { - StructMember* mem = *pmem; + u32 name_base = ctx->buffer.length; + u32 name_length = strlen(type->Enum.name); + bh_buffer_append(&ctx->buffer, type->Enum.name, name_length); - if (mem->initial_value == NULL || *mem->initial_value == NULL) { - i++; - continue; - } + bh_buffer_align(&ctx->buffer, 4); + i32 offset = ctx->buffer.length; - AstTyped* value = *mem->initial_value; - assert(value->type); + write_type_info_header(ctx, type); + write_type_idx(ctx, type->Enum.backing); + WRITE_SLICE(name_base, name_length); + WRITE_SLICE(member_base, member_count); + bh_buffer_write_u32(&ctx->buffer, type->Enum.is_flags ? 
1 : 0); - value_locations[i++] = build_constexpr(value, &table_buffer, &constexpr_ctx); - } + return offset; +} - // Member tags - i = 0; - bh_arr_each(StructMember*, pmem, s->memarr) { - StructMember* mem = *pmem; +static i32 build_type_info_for_struct(struct TypeBuilderContext *ctx, Type *type) { + TypeStruct* s = &type->Struct; + u32* name_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, s->mem_count); + u32* param_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, bh_arr_length(s->poly_sln)); + u32* value_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, s->mem_count); + u32* meta_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, s->mem_count); + u32* struct_tag_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, bh_arr_length(s->meta_tags)); + memset(value_locations, 0, s->mem_count * sizeof(u32)); + memset(meta_locations, 0, s->mem_count * sizeof(u32)); + memset(struct_tag_locations, 0, bh_arr_length(s->meta_tags) * sizeof(u32)); + + // Member names + u32 i = 0; + bh_arr_each(StructMember*, pmem, s->memarr) { + StructMember* mem = *pmem; - if (mem->meta_tags == NULL) { - i += 1; - continue; - } + name_locations[i++] = ctx->buffer.length; + bh_buffer_append(&ctx->buffer, mem->name, strlen(mem->name)); + } - bh_arr(AstTyped *) meta_tags = mem->meta_tags; - assert(meta_tags); + bh_buffer_align(&ctx->buffer, 8); - bh_arr(u64) meta_tag_locations=NULL; - bh_arr_new(global_heap_allocator, meta_tag_locations, bh_arr_length(meta_tags)); + // Polymorphic solutions + build_polymorphic_solutions_array(s->poly_sln, &ctx->buffer, &ctx->constexpr_ctx, param_locations); - int j = 0; - bh_arr_each(AstTyped *, meta, meta_tags) { - AstTyped* value = *meta; - assert(value->flags & Ast_Flag_Comptime); - assert(value->type); + bh_buffer_align(&ctx->buffer, 8); - meta_tag_locations[j++] = build_constexpr(value, &table_buffer, &constexpr_ctx); - } + // Member default values + i = 0; + bh_arr_each(StructMember*, pmem, s->memarr) { 
+ StructMember* mem = *pmem; - bh_buffer_align(&table_buffer, 8); - meta_locations[i] = table_buffer.length; + if (mem->initial_value == NULL || *mem->initial_value == NULL) { + i++; + continue; + } - fori (k, 0, bh_arr_length(meta_tags)) { - WRITE_SLICE(meta_tag_locations[k], meta_tags[k]->type->id); - } + AstTyped* value = *mem->initial_value; + assert(value->type); - bh_arr_free(meta_tag_locations); - i += 1; - } + value_locations[i++] = build_constexpr(value, &ctx->buffer, &ctx->constexpr_ctx); + } - bh_buffer_align(&table_buffer, 8); - u32 members_base = table_buffer.length; + // Member tags + i = 0; + bh_arr_each(StructMember*, pmem, s->memarr) { + StructMember* mem = *pmem; - // Member array - i = 0; - bh_arr_each(StructMember*, pmem, s->memarr) { - StructMember* mem = *pmem; + if (mem->meta_tags == NULL) { + i += 1; + continue; + } - u32 name_loc = name_locations[i]; - u32 value_loc = value_locations[i]; - u32 meta_loc = meta_locations[i++]; + bh_arr(AstTyped *) meta_tags = mem->meta_tags; - WRITE_SLICE(name_loc, strlen(mem->name)); - bh_buffer_write_u32(&table_buffer, mem->offset); - bh_buffer_write_u32(&table_buffer, mem->type->id); - bh_buffer_write_byte(&table_buffer, mem->used ? 
1 : 0); - - WRITE_PTR(value_loc); + bh_arr(u32) meta_tag_locations=NULL; + bh_arr_new(ctx->context->gp_alloc, meta_tag_locations, bh_arr_length(meta_tags)); - WRITE_SLICE(meta_loc, bh_arr_length(mem->meta_tags)); - } + build_tag_array(meta_tags, ctx, meta_tag_locations); - bh_buffer_align(&table_buffer, 8); - u32 params_base = table_buffer.length; + bh_buffer_align(&ctx->buffer, 8); + meta_locations[i++] = ctx->buffer.length; + write_tag_array(meta_tags, ctx, meta_tag_locations); - // Polymorphic solution any array - i = 0; - bh_arr_each(AstPolySolution, sln, s->poly_sln) { - WRITE_PTR(param_locations[i++]); + bh_arr_free(meta_tag_locations); + } - if (sln->kind == PSK_Type) bh_buffer_write_u32(&table_buffer, basic_types[Basic_Kind_Type_Index].id); - else bh_buffer_write_u32(&table_buffer, sln->value->type->id); - } + bh_buffer_align(&ctx->buffer, 8); + u32 members_base = ctx->buffer.length; - // Struct tag array - i = 0; - bh_arr_each(AstTyped *, tag, s->meta_tags) { - AstTyped* value = *tag; - assert(value->flags & Ast_Flag_Comptime); - assert(value->type); + // Member array + i = 0; + bh_arr_each(StructMember*, pmem, s->memarr) { + StructMember* mem = *pmem; - struct_tag_locations[i++] = build_constexpr(value, &table_buffer, &constexpr_ctx); - } + u32 name_loc = name_locations[i]; + u32 value_loc = value_locations[i]; + u32 meta_loc = meta_locations[i++]; - // Struct methods - bh_arr(StructMethodData) method_data=NULL; + WRITE_SLICE(name_loc, strlen(mem->name)); + bh_buffer_write_u32(&ctx->buffer, mem->offset); + write_type_idx(ctx, mem->type); + bh_buffer_write_byte(&ctx->buffer, mem->used ? 
1 : 0); + + if (value_loc == 0) { + bh_buffer_write_u32(&ctx->buffer, 0); + } else { + WRITE_PTR(value_loc); + } - AstType *ast_type = type->ast_type; - if (!context.options->generate_method_info) { - goto no_methods; - } + WRITE_SLICE(meta_loc, bh_arr_length(mem->meta_tags)); + } - if (ast_type && ast_type->kind == Ast_Kind_Struct_Type) { - AstStructType *struct_type = (AstStructType *) ast_type; - Scope* struct_scope = struct_type->scope; - - if (struct_scope == NULL) goto no_methods; - - fori (i, 0, shlen(struct_scope->symbols)) { - AstFunction* node = (AstFunction *) strip_aliases(struct_scope->symbols[i].value); - if (node->kind != Ast_Kind_Function) continue; - assert(node->entity); - assert(node->entity->function == node); - - // Name - char *name = struct_scope->symbols[i].key; - u32 name_loc = table_buffer.length; - u32 name_len = strlen(name); - bh_buffer_append(&table_buffer, name, name_len); - - // any data member - bh_buffer_align(&table_buffer, 4); - u32 data_loc = table_buffer.length; - u32 func_idx = get_element_idx(module, node); - bh_buffer_write_u32(&table_buffer, func_idx); - bh_buffer_write_u32(&table_buffer, 0); - - bh_arr_push(method_data, ((StructMethodData) { - .name_loc = name_loc, - .name_len = name_len, - .type = node->type->id, - .data_loc = data_loc, - })); - } - } + bh_buffer_align(&ctx->buffer, 8); + u32 params_base = ctx->buffer.length; - no_methods: + // Polymorphic solution any array + write_polymorphic_solutions_array(s->poly_sln, ctx, param_locations); - bh_buffer_align(&table_buffer, 4); - u32 method_data_base = table_buffer.length; + // Struct tag array + build_tag_array(s->meta_tags, ctx, struct_tag_locations); - i = 0; - bh_arr_each(StructMethodData, method, method_data) { - WRITE_SLICE(method->name_loc, method->name_len); - WRITE_PTR(method->data_loc); - bh_buffer_write_u32(&table_buffer, method->type); - } + // Struct methods + MethodDataInfo method_data_info = write_method_data(ctx, type); - bh_buffer_align(&table_buffer, 
8); - u32 struct_tag_base = table_buffer.length; + bh_buffer_align(&ctx->buffer, 8); + u32 struct_tag_base = ctx->buffer.length; + write_tag_array(s->meta_tags, ctx, struct_tag_locations); - fori (i, 0, bh_arr_length(s->meta_tags)) { - WRITE_SLICE(struct_tag_locations[i], s->meta_tags[i]->type->id); - } + // Struct name + u32 name_base = 0; + u32 name_length = 0; + if (s->name) { + name_length = strlen(s->name); + name_base = ctx->buffer.length; + bh_buffer_append(&ctx->buffer, s->name, name_length); + } - // Struct name - u32 name_base = 0; - u32 name_length = 0; - if (s->name) { - name_length = strlen(s->name); - name_base = table_buffer.length; - bh_buffer_append(&table_buffer, s->name, name_length); - } + bh_buffer_align(&ctx->buffer, 8); - bh_buffer_align(&table_buffer, 8); - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); + i32 offset = ctx->buffer.length; + write_type_info_header(ctx, type); - if (type->Struct.constructed_from != NULL) { - bh_buffer_write_u32(&table_buffer, type->Struct.constructed_from->type_id); - } else { - bh_buffer_write_u32(&table_buffer, 0); - } + if (type->Struct.constructed_from != NULL) { + bh_buffer_write_u32(&ctx->buffer, type->Struct.constructed_from->type_id); - WRITE_SLICE(name_base, name_length); - WRITE_SLICE(members_base, s->mem_count); - WRITE_SLICE(params_base, bh_arr_length(s->poly_sln)); - WRITE_SLICE(struct_tag_base, bh_arr_length(s->meta_tags)); - WRITE_SLICE(method_data_base, bh_arr_length(method_data)); + Type *constructed_from = type_lookup_by_id(ctx->context, type->Struct.constructed_from->type_id); + ensure_type_has_been_submitted_for_emission(ctx->module, constructed_from); + } else { + bh_buffer_write_u32(&ctx->buffer, 0); + } - bh_arr_free(method_data); - break; - } + WRITE_SLICE(name_base, name_length); + WRITE_SLICE(members_base, s->mem_count); 
+ WRITE_SLICE(params_base, bh_arr_length(s->poly_sln)); + WRITE_SLICE(struct_tag_base, bh_arr_length(s->meta_tags)); + WRITE_SLICE(method_data_info.base, method_data_info.count); - case Type_Kind_PolyStruct: { - u32* tag_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(type->PolyStruct.meta_tags)); - memset(tag_locations, 0, sizeof(u32) * bh_arr_length(type->PolyStruct.meta_tags)); + return offset; +} - u32 name_base = table_buffer.length; - u32 name_length = strlen(type->PolyStruct.name); - bh_buffer_append(&table_buffer, type->PolyStruct.name, name_length); +static i32 build_type_info_for_polystruct(struct TypeBuilderContext *ctx, Type *type) { + u32* tag_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, bh_arr_length(type->PolyStruct.meta_tags)); + memset(tag_locations, 0, sizeof(u32) * bh_arr_length(type->PolyStruct.meta_tags)); + + u32 name_base = ctx->buffer.length; + u32 name_length = strlen(type->PolyStruct.name); + bh_buffer_append(&ctx->buffer, type->PolyStruct.name, name_length); + + u32 tags_count = bh_arr_length(type->PolyStruct.meta_tags); + i32 i = 0; + bh_arr_each(AstTyped *, tag, type->PolyStruct.meta_tags) { + AstTyped* value = *tag; + + // Polymorphic structs are weird in this case, because the tag might not be constructed generically for + // the polymorphic structure so it should only be constructed for actual solidified structures. + // See core/containers/map.onyx with Custom_Format for an example. + if (!(value->flags & Ast_Flag_Comptime)) { + tags_count--; + continue; + } - u32 tags_count = bh_arr_length(type->PolyStruct.meta_tags); - i32 i = 0; - bh_arr_each(AstTyped *, tag, type->PolyStruct.meta_tags) { - AstTyped* value = *tag; + assert(value->type); - // Polymorphic structs are weird in this case, because the tag might not be constructed generically for - // the polymorphic structure so it should only be constructed for actual solidified structures. 
- // See core/containers/map.onyx with Custom_Format for an example. - if (!(value->flags & Ast_Flag_Comptime)) { - tags_count--; - continue; - } + u32 size = type_size_of(value->type); + bh_buffer_align(&ctx->buffer, type_alignment_of(value->type)); + tag_locations[i] = ctx->buffer.length; - assert(value->type); + bh_buffer_grow(&ctx->buffer, ctx->buffer.length + size); - u32 size = type_size_of(value->type); - bh_buffer_align(&table_buffer, type_alignment_of(value->type)); - tag_locations[i] = table_buffer.length; + ctx->constexpr_ctx.data = ctx->buffer.data; + assert(emit_constexpr_(&ctx->constexpr_ctx, value, ctx->buffer.length)); + ctx->buffer.length += size; - bh_buffer_grow(&table_buffer, table_buffer.length + size); + i += 1; + } - constexpr_ctx.data = table_buffer.data; - assert(emit_constexpr_(&constexpr_ctx, value, table_buffer.length)); - table_buffer.length += size; + bh_buffer_align(&ctx->buffer, 8); + u32 tags_base = ctx->buffer.length; - i += 1; - } + fori (i, 0, tags_count) { + WRITE_PTR(tag_locations[i]); + write_type_idx(ctx, type->PolyStruct.meta_tags[i]->type); + } - bh_buffer_align(&table_buffer, 8); - u32 tags_base = table_buffer.length; + bh_buffer_align(&ctx->buffer, 8); - fori (i, 0, tags_count) { - WRITE_SLICE(tag_locations[i], type->PolyStruct.meta_tags[i]->type->id); - } + i32 offset = ctx->buffer.length; + write_type_info_header(ctx, type); + WRITE_SLICE(name_base, name_length); + WRITE_SLICE(tags_base, tags_count); - bh_buffer_align(&table_buffer, 8); - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, 0); - bh_buffer_write_u32(&table_buffer, 0); - WRITE_SLICE(name_base, name_length); - WRITE_SLICE(tags_base, tags_count); + return offset; +} - break; - } - - case Type_Kind_Distinct: { - u32 name_base = table_buffer.length; - u32 name_length = strlen(type->Distinct.name); - bh_buffer_append(&table_buffer, type->Distinct.name, name_length); - 
bh_buffer_align(&table_buffer, 8); - - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); - bh_buffer_write_u32(&table_buffer, type->Distinct.base_type->id); - WRITE_SLICE(name_base, name_length); - break; - } +static i32 build_type_info_for_distinct(struct TypeBuilderContext *ctx, Type *type) { + u32 name_length = strlen(type->Distinct.name); + bh_buffer_append(&ctx->buffer, type->Distinct.name, name_length); + bh_buffer_align(&ctx->buffer, 8); - case Type_Kind_Union: { - TypeUnion* u = &type->Union; - u32 variant_count = bh_arr_length(u->variants_ordered); - u32* name_locations = bh_alloc_array(global_scratch_allocator, u32, variant_count); - u32* param_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(u->poly_sln)); - u32* meta_locations = bh_alloc_array(global_scratch_allocator, u32, variant_count); - u32* struct_tag_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(u->meta_tags)); - memset(meta_locations, 0, variant_count * sizeof(u32)); - memset(struct_tag_locations, 0, bh_arr_length(u->meta_tags) * sizeof(u32)); - - // Member names - u32 i = 0; - bh_arr_each(UnionVariant*, puv, u->variants_ordered) { - UnionVariant* uv = *puv; - - name_locations[i++] = table_buffer.length; - bh_buffer_append(&table_buffer, uv->name, strlen(uv->name)); - } + i32 offset = ctx->buffer.length; + write_type_info_header(ctx, type); + write_type_idx(ctx, type->Distinct.base_type); + WRITE_SLICE(0, name_length); - bh_buffer_align(&table_buffer, 8); + return offset; +} - // Polymorphic solutions - build_polymorphic_solutions_array(u->poly_sln, &table_buffer, &constexpr_ctx, param_locations); +static i32 build_type_info_for_union(struct TypeBuilderContext *ctx, Type *type) { + TypeUnion* u = &type->Union; + u32 variant_count = bh_arr_length(u->variants_ordered); + u32* 
name_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, variant_count); + u32* param_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, bh_arr_length(u->poly_sln)); + u32* meta_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, variant_count); + u32* struct_tag_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, bh_arr_length(u->meta_tags)); + memset(meta_locations, 0, variant_count * sizeof(u32)); + memset(struct_tag_locations, 0, bh_arr_length(u->meta_tags) * sizeof(u32)); + + // Member names + u32 i = 0; + bh_arr_each(UnionVariant*, puv, u->variants_ordered) { + UnionVariant* uv = *puv; - bh_buffer_align(&table_buffer, 8); + name_locations[i++] = ctx->buffer.length; + bh_buffer_append(&ctx->buffer, uv->name, strlen(uv->name)); + } - // Variant tags - i = 0; - bh_arr_each(UnionVariant*, puv, u->variants_ordered) { - UnionVariant* uv = *puv; + bh_buffer_align(&ctx->buffer, 8); - if (uv->meta_tags == NULL) { - i += 1; - continue; - } + // Polymorphic solutions + build_polymorphic_solutions_array(u->poly_sln, &ctx->buffer, &ctx->constexpr_ctx, param_locations); - bh_arr(AstTyped *) meta_tags = uv->meta_tags; - assert(meta_tags); + bh_buffer_align(&ctx->buffer, 8); - bh_arr(u64) meta_tag_locations=NULL; - bh_arr_new(global_heap_allocator, meta_tag_locations, bh_arr_length(meta_tags)); + // Variant tags + i = 0; + bh_arr_each(UnionVariant*, puv, u->variants_ordered) { + UnionVariant* uv = *puv; - int j = 0; - bh_arr_each(AstTyped *, meta, meta_tags) { - AstTyped* value = *meta; - assert(value->flags & Ast_Flag_Comptime); - assert(value->type); + if (uv->meta_tags == NULL) { + i += 1; + continue; + } - meta_tag_locations[j++] = build_constexpr(value, &table_buffer, &constexpr_ctx); - } + bh_arr(AstTyped *) meta_tags = uv->meta_tags; - bh_buffer_align(&table_buffer, 8); - meta_locations[i] = table_buffer.length; + bh_arr(u32) meta_tag_locations=NULL; + bh_arr_new(ctx->context->gp_alloc, meta_tag_locations, 
bh_arr_length(meta_tags)); - fori (k, 0, bh_arr_length(meta_tags)) { - WRITE_SLICE(meta_tag_locations[k], meta_tags[k]->type->id); - } + build_tag_array(meta_tags, ctx, meta_tag_locations); - bh_arr_free(meta_tag_locations); - i += 1; - } + bh_buffer_align(&ctx->buffer, 8); + meta_locations[i++] = ctx->buffer.length; + write_tag_array(meta_tags, ctx, meta_tag_locations); - bh_buffer_align(&table_buffer, 8); - u32 variants_base = table_buffer.length; + bh_arr_free(meta_tag_locations); + } - // Variants array - i = 0; - bh_arr_each(UnionVariant*, puv, u->variants_ordered) { - UnionVariant* uv = *puv; + bh_buffer_align(&ctx->buffer, 8); + u32 variants_base = ctx->buffer.length; - u32 name_loc = name_locations[i]; - u32 meta_loc = meta_locations[i++]; + // Variants array + i = 0; + bh_arr_each(UnionVariant*, puv, u->variants_ordered) { + UnionVariant* uv = *puv; - WRITE_SLICE(name_loc, strlen(uv->name)); - bh_buffer_write_u32(&table_buffer, uv->tag_value); - bh_buffer_write_u32(&table_buffer, uv->type->id); + u32 name_loc = name_locations[i]; + u32 meta_loc = meta_locations[i++]; - WRITE_SLICE(meta_loc, bh_arr_length(uv->meta_tags)); - } + WRITE_SLICE(name_loc, strlen(uv->name)); + bh_buffer_write_u32(&ctx->buffer, uv->tag_value); + write_type_idx(ctx, uv->type); - bh_buffer_align(&table_buffer, 8); - u32 params_base = table_buffer.length; + WRITE_SLICE(meta_loc, bh_arr_length(uv->meta_tags)); + } - // Polymorphic solution any array - i = 0; - bh_arr_each(AstPolySolution, sln, u->poly_sln) { - WRITE_PTR(param_locations[i++]); - if (sln->kind == PSK_Type) bh_buffer_write_u32(&table_buffer, basic_types[Basic_Kind_Type_Index].id); - else bh_buffer_write_u32(&table_buffer, sln->value->type->id); - } + // Polymorphic solution any array + bh_buffer_align(&ctx->buffer, 8); + u32 params_base = ctx->buffer.length; + write_polymorphic_solutions_array(u->poly_sln, ctx, param_locations); - // Union tag array - i = 0; - bh_arr_each(AstTyped *, tag, u->meta_tags) { - AstTyped* value 
= *tag; - assert(value->flags & Ast_Flag_Comptime); - assert(value->type); + // Union tag array + build_tag_array(u->meta_tags, ctx, struct_tag_locations); - struct_tag_locations[i++] = build_constexpr(value, &table_buffer, &constexpr_ctx); - } + // Union methods + MethodDataInfo method_data_info = write_method_data(ctx, type); - // Union methods - bh_arr(StructMethodData) method_data=NULL; + bh_buffer_align(&ctx->buffer, 8); + u32 union_tag_base = ctx->buffer.length; + write_tag_array(u->meta_tags, ctx, struct_tag_locations); - AstType *ast_type = type->ast_type; - if (!context.options->generate_method_info) { - goto no_union_methods; - } + // Union name + u32 name_base = 0; + u32 name_length = 0; + if (u->name) { + name_length = strlen(u->name); + name_base = ctx->buffer.length; + bh_buffer_append(&ctx->buffer, u->name, name_length); + } - if (ast_type && ast_type->kind == Ast_Kind_Union_Type) { - AstUnionType *union_type = (AstUnionType *) ast_type; - Scope* union_scope = union_type->scope; - - if (union_scope == NULL) goto no_union_methods; - - fori (i, 0, shlen(union_scope->symbols)) { - AstFunction* node = (AstFunction *) strip_aliases(union_scope->symbols[i].value); - if (node->kind != Ast_Kind_Function) continue; - assert(node->entity); - assert(node->entity->function == node); - - // Name - char *name = union_scope->symbols[i].key; - u32 name_loc = table_buffer.length; - u32 name_len = strlen(name); - bh_buffer_append(&table_buffer, name, name_len); - - // any data member - bh_buffer_align(&table_buffer, 4); - u32 data_loc = table_buffer.length; - u32 func_idx = 0; // get_element_idx(module, node); - bh_buffer_write_u32(&table_buffer, func_idx); - bh_buffer_write_u32(&table_buffer, 0); - - bh_arr_push(method_data, ((StructMethodData) { - .name_loc = name_loc, - .name_len = name_len, - .type = node->type->id, - .data_loc = data_loc, - })); - } - } + bh_buffer_align(&ctx->buffer, 4); + i32 offset = ctx->buffer.length; + write_type_info_header(ctx, type); - 
no_union_methods: + if (type->Union.constructed_from != NULL) { + bh_buffer_write_u32(&ctx->buffer, type->Union.constructed_from->type_id); - bh_buffer_align(&table_buffer, 4); - u32 method_data_base = table_buffer.length; + Type *constructed_from = type_lookup_by_id(ctx->context, type->Union.constructed_from->type_id); + ensure_type_has_been_submitted_for_emission(ctx->module, constructed_from); + } else { + bh_buffer_write_u32(&ctx->buffer, 0); + } - i = 0; - bh_arr_each(StructMethodData, method, method_data) { - WRITE_SLICE(method->name_loc, method->name_len); - WRITE_PTR(method->data_loc); - bh_buffer_write_u32(&table_buffer, method->type); - } + write_type_idx(ctx, type->Union.tag_type); - bh_buffer_align(&table_buffer, 8); - - u32 union_tag_base = table_buffer.length; - fori (i, 0, bh_arr_length(u->meta_tags)) { - WRITE_SLICE(struct_tag_locations[i], u->meta_tags[i]->type->id); - } + WRITE_SLICE(name_base, name_length); + WRITE_SLICE(variants_base, variant_count); + WRITE_SLICE(params_base, bh_arr_length(u->poly_sln)); + WRITE_SLICE(union_tag_base, bh_arr_length(u->meta_tags)); + WRITE_SLICE(method_data_info.base, method_data_info.count); - // Union name - u32 name_base = 0; - u32 name_length = 0; - if (u->name) { - name_length = strlen(u->name); - name_base = table_buffer.length; - bh_buffer_append(&table_buffer, u->name, name_length); - } + return offset; +} - bh_buffer_align(&table_buffer, 8); - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, type_size_of(type)); - bh_buffer_write_u32(&table_buffer, type_alignment_of(type)); +static i32 build_type_info_for_polyunion(struct TypeBuilderContext *ctx, Type *type) { + u32* tag_locations = bh_alloc_array(ctx->context->scratch_alloc, u32, bh_arr_length(type->PolyUnion.meta_tags)); + memset(tag_locations, 0, sizeof(u32) * bh_arr_length(type->PolyUnion.meta_tags)); + + u32 name_base = ctx->buffer.length; + u32 name_length = 
strlen(type->PolyUnion.name); + bh_buffer_append(&ctx->buffer, type->PolyUnion.name, name_length); + + u32 tags_count = bh_arr_length(type->PolyUnion.meta_tags); + i32 i = 0; + bh_arr_each(AstTyped *, tag, type->PolyUnion.meta_tags) { + AstTyped* value = *tag; + + tag_locations[i] = build_constexpr(value, &ctx->buffer, &ctx->constexpr_ctx); + if (tag_locations[i] == 0) { + // Polymorphic structs are weird in this case, because the tag might not be constructed generically for + // the polymorphic structure so it should only be constructed for actual solidified structures. + // See core/containers/map.onyx with Custom_Format for an example. + tags_count--; + } else { + i++; + } + } - if (type->Union.constructed_from != NULL) { - bh_buffer_write_u32(&table_buffer, type->Union.constructed_from->type_id); - } else { - bh_buffer_write_u32(&table_buffer, 0); - } + bh_buffer_align(&ctx->buffer, 8); + u32 tags_base = ctx->buffer.length; - bh_buffer_write_u32(&table_buffer, type->Union.tag_type->id); + fori (i, 0, tags_count) { + WRITE_PTR(tag_locations[i]); + write_type_idx(ctx, type->PolyUnion.meta_tags[i]->type); + } - WRITE_SLICE(name_base, name_length); - WRITE_SLICE(variants_base, variant_count); - WRITE_SLICE(params_base, bh_arr_length(u->poly_sln)); - WRITE_SLICE(union_tag_base, bh_arr_length(u->meta_tags)); - WRITE_SLICE(method_data_base, bh_arr_length(method_data)); + bh_buffer_align(&ctx->buffer, 8); - bh_arr_free(method_data); - break; - } + i32 offset = ctx->buffer.length; + write_type_info_header(ctx, type); + WRITE_SLICE(name_base, name_length); + WRITE_SLICE(tags_base, tags_count); - case Type_Kind_PolyUnion: { - u32* tag_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(type->PolyUnion.meta_tags)); - memset(tag_locations, 0, sizeof(u32) * bh_arr_length(type->PolyUnion.meta_tags)); - - u32 name_base = table_buffer.length; - u32 name_length = strlen(type->PolyUnion.name); - bh_buffer_append(&table_buffer, type->PolyUnion.name, 
name_length); - - u32 tags_count = bh_arr_length(type->PolyUnion.meta_tags); - i32 i = 0; - bh_arr_each(AstTyped *, tag, type->PolyUnion.meta_tags) { - AstTyped* value = *tag; - - tag_locations[i] = build_constexpr(value, &table_buffer, &constexpr_ctx); - if (tag_locations[i] == 0) { - // Polymorphic structs are weird in this case, because the tag might not be constructed generically for - // the polymorphic structure so it should only be constructed for actual solidified structures. - // See core/containers/map.onyx with Custom_Format for an example. - tags_count--; - } else { - i++; - } - } + return offset; +} - bh_buffer_align(&table_buffer, 8); - u32 tags_base = table_buffer.length; +#undef BH_INTERNAL_ALLOCATOR +#define BH_INTERNAL_ALLOCATOR (ctx.context->gp_alloc) - fori (i, 0, tags_count) { - WRITE_SLICE(tag_locations[i], type->PolyUnion.meta_tags[i]->type->id); - } +static void build_type_info_for_type(OnyxWasmModule *module, Type *type) { + bh_buffer buffer; - bh_buffer_align(&table_buffer, 8); - table_info[type_idx] = table_buffer.length; - bh_buffer_write_u32(&table_buffer, type->kind); - bh_buffer_write_u32(&table_buffer, 0); - bh_buffer_write_u32(&table_buffer, 0); - WRITE_SLICE(name_base, name_length); - WRITE_SLICE(tags_base, tags_count); + struct TypeBuilderContext ctx = {0}; + ctx.module = module; + ctx.context = module->context; - break; - } - - case Type_Kind_Invalid: - case Type_Kind_Count: - break; - } - } + bh_buffer_init(&ctx.buffer, ctx.context->gp_alloc, 512); + bh_arr_new(ctx.context->gp_alloc, ctx.patches, 16); - if (context.options->verbose_output == 1) { - bh_printf("Type table size: %d bytes.\n", table_buffer.length); + u32 type_table_info_data_id = NEXT_DATA_ID(module); + + ctx.constexpr_ctx.module = module; + ctx.constexpr_ctx.data_id = type_table_info_data_id; + + i32 offset = 0; + + switch (type->kind) { + case Type_Kind_Basic: offset = build_type_info_for_basic(&ctx, type); break; + case Type_Kind_Pointer: offset = 
build_type_info_for_pointer(&ctx, type); break; + case Type_Kind_MultiPointer: offset = build_type_info_for_multipointer(&ctx, type); break; + case Type_Kind_Array: offset = build_type_info_for_array(&ctx, type); break; + case Type_Kind_Slice: offset = build_type_info_for_slice(&ctx, type); break; + case Type_Kind_DynArray: offset = build_type_info_for_dynarray(&ctx, type); break; + case Type_Kind_VarArgs: offset = build_type_info_for_varargs(&ctx, type); break; + case Type_Kind_Compound: offset = build_type_info_for_compound(&ctx, type); break; + case Type_Kind_Function: offset = build_type_info_for_function(&ctx, type); break; + case Type_Kind_Enum: offset = build_type_info_for_enum(&ctx, type); break; + case Type_Kind_Struct: offset = build_type_info_for_struct(&ctx, type); break; + case Type_Kind_PolyStruct: offset = build_type_info_for_polystruct(&ctx, type); break; + case Type_Kind_Distinct: offset = build_type_info_for_distinct(&ctx, type); break; + case Type_Kind_Union: offset = build_type_info_for_union(&ctx, type); break; + case Type_Kind_PolyUnion: offset = build_type_info_for_polyunion(&ctx, type); break; + + case Type_Kind_Invalid: + case Type_Kind_Count: + break; } WasmDatum type_info_data = { .alignment = 8, - .length = table_buffer.length, - .data = table_buffer.data, + .length = ctx.buffer.length, + .data = ctx.buffer.data, }; emit_data_entry(module, &type_info_data); assert(type_info_data.id == type_table_info_data_id); - bh_arr_each(u32, patch_loc, base_patch_locations) { + bh_arr_each(u32, patch_loc, ctx.patches) { DatumPatchInfo patch; patch.kind = Datum_Patch_Relative; patch.data_id = type_info_data.id; @@ -810,26 +761,49 @@ static u64 build_type_table(OnyxWasmModule* module) { bh_arr_push(module->data_patches, patch); } + WasmDatum *type_table_data = &module->data[module->global_type_table_data_id - 1]; + i32 next_location = type_table_data->length; + type_table_data->length += 2 * POINTER_SIZE; + *(i32 *) &((u8 *) 
type_table_data->data)[next_location] = type->id; + + DatumPatchInfo patch; + patch.kind = Datum_Patch_Data; + patch.data_id = type_info_data.id; + patch.offset = offset; + patch.index = module->global_type_table_data_id; + patch.location = next_location + POINTER_SIZE; + bh_arr_push(module->data_patches, patch); + + *module->type_info_entry_count += 1; + module->type_info_size += type_info_data.length; + module->type_info_size += 8; + + bh_arr_free(ctx.patches); +} + +#undef BH_INTERNAL_ALLOCATOR +#define BH_INTERNAL_ALLOCATOR (module->context->gp_alloc) + +static u64 prepare_type_table(OnyxWasmModule* module) { + // This is the data behind the "type_table" slice in runtime/info/types.onyx + u32 type_count = bh_arr_length(module->context->types.type_map.entries) + 1; + void* table_info = bh_alloc_array(module->context->gp_alloc, u8, 2 * type_count * POINTER_SIZE); + memset(table_info, 0, 2 * type_count * POINTER_SIZE); + WasmDatum type_table_data = { .alignment = POINTER_SIZE, - .length = type_count * POINTER_SIZE, + .length = 0, .data = table_info, }; emit_data_entry(module, &type_table_data); + module->global_type_table_data_id = type_table_data.id; - fori (i, 0, type_count) { - DatumPatchInfo patch; - patch.kind = Datum_Patch_Data; - patch.data_id = type_info_data.id; - patch.offset = table_info[i]; - patch.index = type_table_data.id; - patch.location = i * POINTER_SIZE; - bh_arr_push(module->data_patches, patch); - } + Table_Info_Type* tmp_data = bh_alloc(module->context->gp_alloc, 2 * POINTER_SIZE); - Table_Info_Type* tmp_data = bh_alloc(global_heap_allocator, 2 * POINTER_SIZE); tmp_data[0] = 0; - tmp_data[1] = type_count; + tmp_data[1] = 0; + module->type_info_entry_count = &tmp_data[1]; + WasmDatum type_table_global_data = { .alignment = POINTER_SIZE, .length = 2 * POINTER_SIZE, @@ -837,15 +811,13 @@ static u64 build_type_table(OnyxWasmModule* module) { }; emit_data_entry(module, &type_table_global_data); - { - DatumPatchInfo patch; - patch.kind = 
Datum_Patch_Data; - patch.data_id = type_table_data.id; - patch.offset = 0; - patch.index = type_table_global_data.id; - patch.location = 0; - bh_arr_push(module->data_patches, patch); - } + DatumPatchInfo patch; + patch.kind = Datum_Patch_Data; + patch.data_id = type_table_data.id; + patch.offset = 0; + patch.index = type_table_global_data.id; + patch.location = 0; + bh_arr_push(module->data_patches, patch); return type_table_global_data.id; @@ -856,9 +828,12 @@ static u64 build_type_table(OnyxWasmModule* module) { +#undef BH_INTERNAL_ALLOCATOR +#define BH_INTERNAL_ALLOCATOR (module->context->gp_alloc) + static u64 build_foreign_blocks(OnyxWasmModule* module) { bh_arr(u32) base_patch_locations=NULL; - bh_arr_new(global_heap_allocator, base_patch_locations, 256); + bh_arr_new(module->context->gp_alloc, base_patch_locations, 256); #define PATCH (bh_arr_push(base_patch_locations, foreign_buffer.length)) #define PATCH_AT(x) (bh_arr_push(base_patch_locations, (x))) @@ -879,11 +854,11 @@ static u64 build_foreign_blocks(OnyxWasmModule* module) { #define Foreign_Block_Type u64 #endif u32 block_count = bh_arr_length(module->foreign_blocks); - Foreign_Block_Type* foreign_info = bh_alloc_array(global_heap_allocator, Foreign_Block_Type, block_count); // HACK + Foreign_Block_Type* foreign_info = bh_alloc_array(module->context->gp_alloc, Foreign_Block_Type, block_count); // HACK memset(foreign_info, 0, block_count * sizeof(Foreign_Block_Type)); bh_buffer foreign_buffer; - bh_buffer_init(&foreign_buffer, global_heap_allocator, 4096); + bh_buffer_init(&foreign_buffer, module->context->gp_alloc, 4096); u32 foreign_info_data_id = NEXT_DATA_ID(module); @@ -903,11 +878,11 @@ static u64 build_foreign_blocks(OnyxWasmModule* module) { u32 funcs_length = 0; - u32 *name_offsets = bh_alloc_array(global_scratch_allocator, u32, shlen(fb->scope->symbols)); - u32 *name_lengths = bh_alloc_array(global_scratch_allocator, u32, shlen(fb->scope->symbols)); - u32 *func_types = 
bh_alloc_array(global_scratch_allocator, u32, shlen(fb->scope->symbols)); - u32 *tag_offsets = bh_alloc_array(global_scratch_allocator, u32, shlen(fb->scope->symbols)); - u32 *tag_lengths = bh_alloc_array(global_scratch_allocator, u32, shlen(fb->scope->symbols)); + u32 *name_offsets = bh_alloc_array(module->context->scratch_alloc, u32, shlen(fb->scope->symbols)); + u32 *name_lengths = bh_alloc_array(module->context->scratch_alloc, u32, shlen(fb->scope->symbols)); + u32 *func_types = bh_alloc_array(module->context->scratch_alloc, u32, shlen(fb->scope->symbols)); + u32 *tag_offsets = bh_alloc_array(module->context->scratch_alloc, u32, shlen(fb->scope->symbols)); + u32 *tag_lengths = bh_alloc_array(module->context->scratch_alloc, u32, shlen(fb->scope->symbols)); fori (i, 0, shlen(fb->scope->symbols)) { AstFunction *func = (AstFunction *) fb->scope->symbols[i].value; @@ -938,6 +913,7 @@ static u64 build_foreign_blocks(OnyxWasmModule* module) { bh_buffer_align(&foreign_buffer, type_alignment_of(tag->type)); tag_array[i * 2 + 0] = foreign_buffer.length; tag_array[i * 2 + 1] = tag->type->id; + ensure_type_has_been_submitted_for_emission(module, tag->type); PATCH_AT(tag_array_offset + i * POINTER_SIZE * 2); bh_buffer_grow(&foreign_buffer, foreign_buffer.length + size); @@ -955,6 +931,7 @@ static u64 build_foreign_blocks(OnyxWasmModule* module) { name_offsets[funcs_length] = func_name_base; name_lengths[funcs_length] = func_name_length; func_types[funcs_length] = func->type->id; + ensure_type_has_been_submitted_for_emission(module, func->type); funcs_length++; } @@ -980,11 +957,6 @@ static u64 build_foreign_blocks(OnyxWasmModule* module) { index++; } - - if (context.options->verbose_output == 1) { - bh_printf("Foreign blocks size: %d bytes.\n", foreign_buffer.length); - } - WasmDatum foreign_info_data = { .alignment = 8, .length = foreign_buffer.length, @@ -1020,7 +992,7 @@ static u64 build_foreign_blocks(OnyxWasmModule* module) { bh_arr_push(module->data_patches, patch); } 
- Foreign_Block_Type* tmp_data = bh_alloc(global_heap_allocator, 2 * POINTER_SIZE); + Foreign_Block_Type* tmp_data = bh_alloc(module->context->gp_alloc, 2 * POINTER_SIZE); tmp_data[0] = 0; tmp_data[1] = block_count; WasmDatum foreign_table_global_data = { @@ -1051,7 +1023,7 @@ static u64 build_foreign_blocks(OnyxWasmModule* module) { static u64 build_tagged_procedures(OnyxWasmModule *module) { bh_arr(u32) base_patch_locations=NULL; - bh_arr_new(global_heap_allocator, base_patch_locations, 256); + bh_arr_new(module->context->gp_alloc, base_patch_locations, 256); #define PATCH (bh_arr_push(base_patch_locations, tag_proc_buffer.length)) #define WRITE_PTR(val) \ @@ -1070,11 +1042,11 @@ static u64 build_tagged_procedures(OnyxWasmModule *module) { #define Tagged_Procedure_Type u64 #endif u32 proc_count = bh_arr_length(module->procedures_with_tags); - Tagged_Procedure_Type* tag_proc_info = bh_alloc_array(global_heap_allocator, Tagged_Procedure_Type, proc_count); // HACK + Tagged_Procedure_Type* tag_proc_info = bh_alloc_array(module->context->gp_alloc, Tagged_Procedure_Type, proc_count); // HACK memset(tag_proc_info, 0, proc_count * sizeof(Tagged_Procedure_Type)); bh_buffer tag_proc_buffer; - bh_buffer_init(&tag_proc_buffer, global_heap_allocator, 4096); + bh_buffer_init(&tag_proc_buffer, module->context->gp_alloc, 4096); u32 proc_info_data_id = NEXT_DATA_ID(module); @@ -1093,8 +1065,8 @@ static u64 build_tagged_procedures(OnyxWasmModule *module) { AstFunction *func = *pfunc; u32 tag_count = bh_arr_length(func->tags); - u32 *tag_data_offsets = bh_alloc_array(global_scratch_allocator, u32, tag_count); - u32 *tag_data_types = bh_alloc_array(global_scratch_allocator, u32, tag_count); + u32 *tag_data_offsets = bh_alloc_array(module->context->scratch_alloc, u32, tag_count); + u32 *tag_data_types = bh_alloc_array(module->context->scratch_alloc, u32, tag_count); u32 tag_index = 0; bh_arr_each(AstTyped *, ptag, func->tags) { @@ -1103,6 +1075,7 @@ static u64 
build_tagged_procedures(OnyxWasmModule *module) { tag_data_offsets[tag_index ] = tag_proc_buffer.length; tag_data_types [tag_index++] = tag->type->id; + ensure_type_has_been_submitted_for_emission(module, tag->type); u32 size = type_size_of(tag->type); bh_buffer_grow(&tag_proc_buffer, tag_proc_buffer.length + size); @@ -1128,14 +1101,11 @@ static u64 build_tagged_procedures(OnyxWasmModule *module) { bh_buffer_write_u32(&tag_proc_buffer, get_element_idx(module, func)); bh_buffer_write_u32(&tag_proc_buffer, 0); bh_buffer_write_u32(&tag_proc_buffer, func->type->id); + ensure_type_has_been_submitted_for_emission(module, func->type); WRITE_SLICE(tag_array_base, tag_count); bh_buffer_write_u32(&tag_proc_buffer, func->entity->package->id); } - if (context.options->verbose_output == 1) { - bh_printf("Tagged procedure size: %d bytes.\n", tag_proc_buffer.length); - } - WasmDatum proc_info_data = { .alignment = 8, .length = tag_proc_buffer.length, @@ -1171,7 +1141,7 @@ static u64 build_tagged_procedures(OnyxWasmModule *module) { bh_arr_push(module->data_patches, patch); } - Tagged_Procedure_Type* tmp_data = bh_alloc(global_heap_allocator, 2 * POINTER_SIZE); + Tagged_Procedure_Type* tmp_data = bh_alloc(module->context->gp_alloc, 2 * POINTER_SIZE); tmp_data[0] = 0; tmp_data[1] = proc_count; WasmDatum proc_table_global_data = { @@ -1201,7 +1171,7 @@ static u64 build_tagged_procedures(OnyxWasmModule *module) { static u64 build_tagged_globals(OnyxWasmModule *module) { bh_arr(u32) base_patch_locations=NULL; - bh_arr_new(global_heap_allocator, base_patch_locations, 256); + bh_arr_new(module->context->gp_alloc, base_patch_locations, 256); #define PATCH (bh_arr_push(base_patch_locations, tag_global_buffer.length)) #define WRITE_PTR(val) \ @@ -1220,11 +1190,11 @@ static u64 build_tagged_globals(OnyxWasmModule *module) { #define Tagged_Global_Type u64 #endif u32 global_count = bh_arr_length(module->globals_with_tags); - Tagged_Global_Type* tag_global_info = 
bh_alloc_array(global_heap_allocator, Tagged_Global_Type, global_count); // HACK + Tagged_Global_Type* tag_global_info = bh_alloc_array(module->context->gp_alloc, Tagged_Global_Type, global_count); // HACK memset(tag_global_info, 0, global_count * sizeof(Tagged_Global_Type)); bh_buffer tag_global_buffer; - bh_buffer_init(&tag_global_buffer, global_heap_allocator, 4096); + bh_buffer_init(&tag_global_buffer, module->context->gp_alloc, 4096); u32 global_info_data_id = NEXT_DATA_ID(module); @@ -1243,8 +1213,8 @@ static u64 build_tagged_globals(OnyxWasmModule *module) { AstMemRes *memres = *pmemres; u32 tag_count = bh_arr_length(memres->tags); - u32 *tag_data_offsets = bh_alloc_array(global_scratch_allocator, u32, tag_count); - u32 *tag_data_types = bh_alloc_array(global_scratch_allocator, u32, tag_count); + u32 *tag_data_offsets = bh_alloc_array(module->context->scratch_alloc, u32, tag_count); + u32 *tag_data_types = bh_alloc_array(module->context->scratch_alloc, u32, tag_count); u32 tag_index = 0; bh_arr_each(AstTyped *, ptag, memres->tags) { @@ -1253,6 +1223,7 @@ static u64 build_tagged_globals(OnyxWasmModule *module) { tag_data_offsets[tag_index ] = tag_global_buffer.length; tag_data_types [tag_index++] = tag->type->id; + ensure_type_has_been_submitted_for_emission(module, tag->type); u32 size = type_size_of(tag->type); bh_buffer_grow(&tag_global_buffer, tag_global_buffer.length + size); @@ -1286,14 +1257,11 @@ static u64 build_tagged_globals(OnyxWasmModule *module) { bh_buffer_write_u32(&tag_global_buffer, 0); bh_buffer_write_u32(&tag_global_buffer, memres->type->id); + ensure_type_has_been_submitted_for_emission(module, memres->type); WRITE_SLICE(tag_array_base, tag_count); bh_buffer_write_u32(&tag_global_buffer, memres->entity->package->id); } - if (context.options->verbose_output == 1) { - bh_printf("Tagged global size: %d bytes.\n", tag_global_buffer.length); - } - WasmDatum global_info_data = { .alignment = 8, .length = tag_global_buffer.length, @@ -1329,7 
+1297,7 @@ static u64 build_tagged_globals(OnyxWasmModule *module) { bh_arr_push(module->data_patches, patch); } - Tagged_Procedure_Type* tmp_data = bh_alloc(global_heap_allocator, 2 * POINTER_SIZE); + Tagged_Procedure_Type* tmp_data = bh_alloc(module->context->gp_alloc, 2 * POINTER_SIZE); tmp_data[0] = 0; tmp_data[1] = global_count; WasmDatum global_table_global_data = { diff --git a/core/alloc/alloc.onyx b/core/alloc/alloc.onyx index 0e0908c4a..cef37a1a8 100644 --- a/core/alloc/alloc.onyx +++ b/core/alloc/alloc.onyx @@ -8,48 +8,47 @@ package core.alloc #load "./pool" #load "./logging" #load "./gc" -#load "./memdebug" +#load "./debug" + +use runtime +#if runtime.runtime == .Onyx { + #load "./memwatch" +} use core.memory use core.intrinsics.types {type_is_function} -#doc "Overloaded procedure for converting something to an Allocator." +/// Overloaded procedure for converting something to an Allocator. as_allocator :: #match { macro (a: Allocator) => a } -#doc """ - Allocates memory from the stack. This is similar to `alloca` in C. - - **DO NOT USE THIS IN A LOOP! You cannot free memory allocated off the stack.** -""" +/// Allocates memory from the stack. This is similar to `alloca` in C. +/// +/// **DO NOT USE THIS IN A LOOP! You cannot free memory allocated off the stack.** from_stack :: macro (size: u32) -> rawptr { // This should do something about the alignment... // Everything so far has assume that the stack is aligned to 16 bytes. - defer __stack_top = cast([&]u8, __stack_top) + size; + __stack_top = cast([&]u8, __stack_top) - size; return __stack_top; } -#doc """ - Allocates memory from the stack to form a slice of `T` with length `size`. - - **DO NOT USE THIS IN A LOOP! You cannot free memory allocated off the stack.** -""" +/// Allocates memory from the stack to form a slice of `T` with length `size`. +/// +/// **DO NOT USE THIS IN A LOOP! 
You cannot free memory allocated off the stack.** array_from_stack :: macro ($T: type_expr, size: u32) -> [] T { - defer __stack_top = cast([&]u8, __stack_top) + size * sizeof T; + __stack_top = cast([&]u8, __stack_top) - size * sizeof T; return (cast([&]T) __stack_top)[0 .. size]; } -#doc """ - Moves a value on to the heap. Useful for cases like this in - - f :: () -> &Foo { - return alloc.on_heap(Foo.{ - name = "...", - age = 42 - }); - } -""" +/// Moves a value on to the heap. Useful for cases like this in +/// +/// f :: () -> &Foo { +/// return alloc.on_heap(Foo.{ +/// name = "...", +/// age = 42 +/// }); +/// } on_heap :: macro (v: $V) -> &V { use core @@ -59,9 +58,7 @@ on_heap :: macro (v: $V) -> &V { return out; } -#doc """ - Like `alloc.on_heap`, but allocates on the temporary allocator. -""" +/// Like `alloc.on_heap`, but allocates on the temporary allocator. on_temp :: macro (v: $V) -> &V { use core @@ -71,10 +68,8 @@ on_temp :: macro (v: $V) -> &V { return out; } -#doc """ - Copies the internal closure data of a function to the provided allocator, - and returns the new closed function. -""" +/// Copies the internal closure data of a function to the provided allocator, +/// and returns the new closed function. copy_closure :: (f: $F/type_is_function, a: Allocator) -> F { if !f.closure do return f; @@ -99,19 +94,26 @@ temp_state : arena.ArenaState; #thread_local temp_allocator : Allocator; -#doc """ - Initializes the thread-local temporary allocator. - - You do not need to call this. It is called automatically on thread initialization. -""" +/// Initializes the thread-local temporary allocator. +/// +/// You do not need to call this. It is called automatically on thread initialization. init_temp_allocator :: () { temp_state = arena.make(heap_allocator, TEMPORARY_ALLOCATOR_SIZE); temp_allocator = as_allocator(&temp_state); } -#doc """ - Resets the temporary allocator, effectively freeing all allocations made in the temporary allocator. 
-""" +/// Resets the temporary allocator, effectively freeing all allocations made in the temporary allocator. clear_temp_allocator :: () { arena.clear(&temp_state); } + + +report_leaks_in_scope :: macro () { + use core.alloc + + use __ha := alloc.debug.make(context.allocator) + + __old_allocator := context.allocator + context.allocator = alloc.as_allocator(&__ha) + defer context.allocator = __old_allocator +} diff --git a/core/alloc/arena.onyx b/core/alloc/arena.onyx index edfb86428..b0ff2493f 100644 --- a/core/alloc/arena.onyx +++ b/core/alloc/arena.onyx @@ -1,27 +1,23 @@ +/// This allocator is mostly used for making many fixed-size +/// allocation (i.e. allocations that will not need to change +/// in size, such as game entities or position structs). The +/// power of this allocator over the heap allocator for this +/// purpose is that it is much faster, since the logic is +/// simpler. Another power of this allocator over something +/// such as a dynamic array is that the dynamic array could +/// relocate and cause any pointers to the data inside to +/// become invalidated; this is definitely not behaviour you +/// want. This arena allocator can grow as large as needed, +/// while guaranteeing that the memory inside of it will +/// never move. package core.alloc.arena -#package_doc """ - This allocator is mostly used for making many fixed-size - allocation (i.e. allocations that will not need to change - in size, such as game entities or position structs). The - power of this allocator over the heap allocator for this - purpose is that it is much faster, since the logic is - simpler. Another power of this allocator over something - such as a dynamic array is that the dynamic array could - relocate and cause any pointers to the data inside to - become invalidated; this is definitely not behaviour you - want. This arena allocator can grow as large as needed, - while guaranteeing that the memory inside of it will - never move. 
-""" use core // Deprecated struct 'ArenaState'. Use 'Arena' instead. ArenaState :: Arena -#doc """ - Stores internal details used during arena allocations. -""" +/// Stores internal details used during arena allocations. Arena :: struct { backing_allocator : Allocator; @@ -94,12 +90,10 @@ arena_alloc_proc :: (data: rawptr, aa: AllocationAction, size: u32, align: u32, return null; } -#doc """ - Makes a new arena. - - `arena_size` specifies the size of each individual arena page, which must be at least 4 bytes - in size (but should be quite a bit large). -""" +/// Makes a new arena. +/// +/// `arena_size` specifies the size of each individual arena page, which must be at least 4 bytes +/// in size (but should be quite a bit large). make :: (backing: Allocator, arena_size: u32) -> Arena { assert(arena_size >= 4, "Arena size was expected to be at least 4 bytes."); @@ -124,9 +118,7 @@ make_allocator :: (rs: &Arena) -> Allocator { }; } -#doc """ - Frees all pages in an arena. -""" +/// Frees all pages in an arena. free :: (arena: &Arena) { walker := arena.first_arena; trailer := walker; @@ -141,9 +133,7 @@ free :: (arena: &Arena) { arena.size = 0; } -#doc """ - Clears and frees every page, except for first page. -""" +/// Clears and frees every page, except for first page. clear :: (arena: &Arena) { walker := arena.first_arena.next; @@ -157,9 +147,7 @@ clear :: (arena: &Arena) { arena.size = sizeof rawptr; } -#doc """ - Returns the number of pages in the arena. -""" +/// Returns the number of pages in the arena. get_allocated_arenas :: (arena: &Arena) -> u32 { arenas := 0; walker := arena.first_arena; @@ -171,27 +159,23 @@ get_allocated_arenas :: (arena: &Arena) -> u32 { return arenas; } -#doc """ - Returns the number of bytes used by the arena. -""" +/// Returns the number of bytes used by the arena. 
get_allocated_bytes :: (arena: &Arena) -> u32 { return get_allocated_arenas(arena) * (arena.arena_size - 1) + arena.size; } -#doc """ - Creates an arena allocator and automatically applies it to the context's allocator - in the current scope. - - foo :: () { - alloc.arena.auto(); - - // Lazily allocate everything, knowing that it will - // be freed when this function returns. - for 100 { - s := string.alloc_copy("Make a copy of me!"); - } - } -""" +/// Creates an arena allocator and automatically applies it to the context's allocator +/// in the current scope. +/// +/// foo :: () { +/// alloc.arena.auto(); +/// +/// // Lazily allocate everything, knowing that it will +/// // be freed when this function returns. +/// for 100 { +/// s := string.copy("Make a copy of me!"); +/// } +/// } auto :: #match { macro (size := 32 * 1024, $dest: Code = [](context.allocator)) { use core.alloc {arena, heap_allocator} @@ -217,19 +201,17 @@ auto :: #match { } } -#doc """ - Creates an arena allocator to be used as the temporary allocator - in the code block. - - foo :: () { - alloc.arena.auto_temp() { - for 1000 { - // Will be automatically freed - x := new_temp(i32); - } - } - } -""" +/// Creates an arena allocator to be used as the temporary allocator +/// in the code block. +/// +/// foo :: () { +/// alloc.arena.auto_temp() { +/// for 1000 { +/// // Will be automatically freed +/// x := new_temp(i32); +/// } +/// } +/// } auto_temp :: macro (body: Code) -> i32 { use core.alloc {arena, heap_allocator} a := arena.make(heap_allocator, 32 * 1024); diff --git a/core/alloc/atomic.onyx b/core/alloc/atomic.onyx index 874cc1407..8fc8bbec0 100644 --- a/core/alloc/atomic.onyx +++ b/core/alloc/atomic.onyx @@ -1,10 +1,8 @@ +/// AtomicAllocator wraps another allocator in a mutex, +/// ensuring that every allocation is thread-safe. This +/// is not needed for the general purpose heap allocator, +/// as that already has a thread-safe implementation. 
package core.alloc.atomic -#package_doc """ - AtomicAllocator wraps another allocator in a mutex, - ensuring that every allocation is thread-safe. This - is not needed for the general purpose heap allocator, - as that already has a thread-safe implementation. -""" // This can only be used when the core.sync package exists. #if #defined(package core.sync) { @@ -13,19 +11,15 @@ package core.alloc.atomic use core.alloc use core.sync -#doc """ - Stores internal details used by the atomic allocator. - - Simply the wrapped allocator and the mutex. -""" +/// Stores internal details used by the atomic allocator. +/// +/// Simply the wrapped allocator and the mutex. AtomicAllocator :: struct { a: Allocator; m: sync.Mutex; } -#doc """ - Creates a new AtomicAllocator over an existing allocator. -""" +/// Creates a new AtomicAllocator over an existing allocator. make :: (a: Allocator) -> AtomicAllocator { atomic: AtomicAllocator = .{ a = a }; @@ -34,9 +28,7 @@ make :: (a: Allocator) -> AtomicAllocator { return atomic; } -#doc """ - Makes an allocator out of the atomic allocator state. -""" +/// Makes an allocator out of the atomic allocator state. 
make_allocator :: (atomic: &AtomicAllocator) => Allocator.{ atomic, atomic_alloc }; diff --git a/core/alloc/debug.onyx b/core/alloc/debug.onyx new file mode 100644 index 000000000..f2182011b --- /dev/null +++ b/core/alloc/debug.onyx @@ -0,0 +1,177 @@ +package core.alloc.debug +#allow_stale_code + +use core.alloc +use core.memory +use runtime.info { Stack_Frame, get_stack_trace } + +Debug_Max_Stack_Frames :: 6 + +Debug_Header_Magic_Number :: 0x7e577e50 +Debug_Header_Magic_Number_Mask :: 0xfffffff0 + +Debug_Allocation_State :: enum { + Bad :: 0 + Allocated :: 2 + Freed :: 4 +} + +Debug_Header :: struct #align 16 { + next: &Debug_Header + prev: &Debug_Header + + frames: [Debug_Max_Stack_Frames] Stack_Frame + frame_count: u32 + + size: u32 + + magic_number: u32 +} + +Debug_State :: struct { + backing: Allocator + first: &Debug_Header +} + +make :: (backing_allocator: Allocator) -> Debug_State { + return .{ backing_allocator, null } +} + +#overload +alloc.as_allocator :: (s: &Debug_State) -> Allocator { + return .{ s, debug_alloc } +} + +Debug_State.destroy :: (s: &Debug_State) { + walker := s.first + while walker { + n := walker.next + if get_state(walker) == .Allocated { + logf( + .Warning, + "Memory leaked at {} ({} bytes)", + cast([&] Debug_Header) walker + 1, + walker.size + ) + log_stack_trace(walker.frames[0 .. walker.frame_count], .Warning) + + s.backing.func(s.backing.data, .Free, 0, 0, walker) + } + walker = n + } + + s.first = null +} + + +#local +debug_alloc :: (state: &Debug_State, action: AllocationAction, size, align: u32, oldptr: rawptr) -> rawptr { + old: &Debug_Header + if oldptr { + old = cast([&] Debug_Header) oldptr - 1 + + if old.magic_number & Debug_Header_Magic_Number_Mask != Debug_Header_Magic_Number { + log(.Debug, "Debug allocator got something that doesn't look like it was allocated from it. 
Maybe corrupt memory?") + return state.backing.func(state.backing.data, action, size, align, oldptr) + } + } + + if action == .Resize || action == .Free { + remove_entry_from_list(state, old) + } + + ret: & Debug_Header + + switch action { + case .Alloc { + debug_header: &Debug_Header = state.backing.func(state.backing.data, action, size + sizeof Debug_Header, align, null) + + debug_header.magic_number = Debug_Header_Magic_Number + ~~Debug_Allocation_State.Allocated + debug_header.frame_count = get_stack_trace(debug_header.frames, 1).count + debug_header.size = size + + ret = debug_header + } + + case .Free { + if get_state(old) == .Freed { + report_double_free(old) + return null + } + + old.magic_number = Debug_Header_Magic_Number + ~~Debug_Allocation_State.Freed + old.frame_count = get_stack_trace(old.frames, 1).count + + state.backing.func(state.backing.data, action, size + sizeof Debug_Header, align, old) + } + + case .Resize { + if get_state(old) == .Freed { + report_double_free(old) + } + + old.magic_number = Debug_Header_Magic_Number + ~~Debug_Allocation_State.Freed + + debug_header: &Debug_Header = state.backing.func(state.backing.data, action, size + sizeof Debug_Header, align, old) + + debug_header.magic_number = Debug_Header_Magic_Number + ~~Debug_Allocation_State.Allocated + debug_header.frame_count = get_stack_trace(debug_header.frames, 1).count + debug_header.size = size + + ret = debug_header + } + } + + if ret != null { + ret.next = state.first + ret.prev = null + + if state.first != null { + state.first.prev = ret + } + + state.first = ret + return cast([&] Debug_Header) ret + 1 + } + + return null +} + +#local +log_stack_trace :: (trace: [] Stack_Frame, severity: Log_Level) { + for &t in trace { + logf(severity, " {} at {}:{}", t.info.func_name, t.info.file, t.current_line) + } +} + +#local +get_state :: (h: &Debug_Header) -> Debug_Allocation_State { + if (h.magic_number & Debug_Header_Magic_Number_Mask) != Debug_Header_Magic_Number { + return 
.Bad + } + + return ~~(h.magic_number & ~Debug_Header_Magic_Number_Mask) +} + +#local +remove_entry_from_list :: (state: &Debug_State, header: &Debug_Header) { + if header.prev { + header.prev.next = header.next + } else { + state.first = header.next + } + + if header.next { + header.next.prev = header.prev + } +} + +#local +report_double_free :: (header: &Debug_Header) { + logf(.Warning, "Double free detected on {}. Tried to free here:", cast([&] Debug_Header) header + 1) + use trace := get_stack_trace(2) + log_stack_trace(trace, .Warning) + + log(.Warning, "Was already freed here:") + log_stack_trace(header.frames[0 .. header.frame_count], .Warning) +} diff --git a/core/alloc/fixed.onyx b/core/alloc/fixed.onyx index 2cbd98a91..0d80e58cc 100644 --- a/core/alloc/fixed.onyx +++ b/core/alloc/fixed.onyx @@ -1,16 +1,14 @@ +/// This allocator is very simple. It is simply a bump allocator from +/// a fixed size buffer. It cannot free or resize, and will return null +/// when it has used all memory in the buffer given to it. +/// +/// This kind of allocator is useful for temporary string building or +/// similar circumstances, where you know that the needed memory size +/// will not be exceeded, but you don't what to deal with potential +/// slowness of a general heap allocator. By using this allocator, you +/// can continue to use the same code that does allocations like normal, +/// but can get the speed increase of a simple allocation strategy. package core.alloc.fixed -#package_doc """ - This allocator is very simple. It is simply a bump allocator from - a fixed size buffer. It cannot free or resize, and will return null - when it has used all memory in the buffer given to it. - - This kind of allocator is useful for temporary string building or - similar circumstances, where you know that the needed memory size - will not be exceeded, but you don't what to deal with potential - slowness of a general heap allocator. 
By using this allocator, you - can continue to use the same code that does allocations like normal, - but can get the speed increase of a simple allocation strategy. -""" use core diff --git a/core/alloc/gc.onyx b/core/alloc/gc.onyx index 7fea7ed6a..81371291e 100644 --- a/core/alloc/gc.onyx +++ b/core/alloc/gc.onyx @@ -1,19 +1,17 @@ +/// "Garbage collection" is not somthing Onyx has. Even things +/// like reference counted pointers is not something Onyx can +/// do, because of Onyx's simpler semantics. That being said, +/// with custom allocators and some careful design, GC is +/// "achievable". This allocator wraps another allocator. With +/// each allocation, a little extra space is allocated to build +/// a linked list of all allocations made. This way, when the +/// memory is done being used, everything can be freed automatically. +/// +/// The `auto` macro makes this allocator very easy to use: +/// core.alloc.gc.auto() { +/// // Every allocation here will automatically be freed +/// } package core.alloc.gc -#package_doc """ - "Garbage collection" is not somthing Onyx has. Even things - like reference counted pointers is not something Onyx can - do, because of Onyx's simpler semantics. That being said, - with custom allocators and some careful design, GC is - "achievable". This allocator wraps another allocator. With - each allocation, a little extra space is allocated to build - a linked list of all allocations made. This way, when the - memory is done being used, everything can be freed automatically. - - The `auto` macro makes this allocator very easy to use: - core.alloc.gc.auto() { - // Every allocation here will automatically be freed - } -""" use runtime @@ -157,10 +155,8 @@ GC_Manually_Free_Magic_Number :: 0xface1337 return cast([&] GCLink, newptr) + 1; } -#doc """ - Removes an allocation from the garbage collectors tracking list, - so it will not be freed automatically. 
-""" +/// Removes an allocation from the garbage collectors tracking list, +/// so it will not be freed automatically. untrack :: (ptr: rawptr) -> bool { link: &GCLink = (cast([&] GCLink) ptr) - 1; diff --git a/core/alloc/heap.onyx b/core/alloc/heap.onyx index f967098b0..b4025fbe8 100644 --- a/core/alloc/heap.onyx +++ b/core/alloc/heap.onyx @@ -19,7 +19,7 @@ use core #local Enable_Clear_Freed_Memory :: #defined(runtime.vars.Enable_Heap_Clear_Freed_Memory) #local Enable_Stack_Trace :: runtime.Stack_Trace_Enabled -#load "core/intrinsics/wasm" +#load "core:intrinsics/wasm" #if runtime.Multi_Threading_Enabled { use core {sync} diff --git a/core/alloc/logging.onyx b/core/alloc/logging.onyx index 6fcf972f5..7992edf2d 100644 --- a/core/alloc/logging.onyx +++ b/core/alloc/logging.onyx @@ -1,9 +1,7 @@ +/// This allocator simply wraps another allocator and +/// prints every allocation/deallocation made by that +/// allocator. package core.alloc.log -#package_doc """ - This allocator simply wraps another allocator and - prints every allocation/deallocation made by that - allocator. -""" use core diff --git a/core/alloc/memdebug.onyx b/core/alloc/memwatch.onyx similarity index 63% rename from core/alloc/memdebug.onyx rename to core/alloc/memwatch.onyx index 98f97eeb0..ace051842 100644 --- a/core/alloc/memdebug.onyx +++ b/core/alloc/memwatch.onyx @@ -1,15 +1,13 @@ -package core.alloc.memdebug +/// The memory debugger allocator wraps an existing allocator (normally the heap allocator), +/// and reports on a TCP socket all of the allocation operations done to the underlying +/// allocator. This listener on this socket can use this information to show useful information +/// about the memory usage in the program. +/// +/// This is best used when it starts at the very beginning of the program. +/// The easiest way to use this is to define MEMWATCH in runtime.vars, +/// or pass -DMEMWATCH on the command line. 
+package core.alloc.memwatch #allow_stale_code -#package_doc """ - The memory debugger allocator wraps an existing allocator (normally the heap allocator), - and reports on a TCP socket all of the allocation operations done to the underlying - allocator. This listener on this socket can use this information to show useful information - about the memory usage in the program. - - This is best used when it starts at the very beginning of the program. - The easiest way to use this is to define MEMDEBUG in runtime.vars, - or pass -DMEMDEBUG on the command line. -""" use core {Result} use core.alloc @@ -22,7 +20,7 @@ use runtime VERSION :: 1 DEFAULT_PORT :: 4004 -MemDebugState :: struct { +MemWatchState :: struct { wrapped_allocator: Allocator; listen_addr: net.SocketAddress; @@ -30,7 +28,7 @@ MemDebugState :: struct { writer: ? io.Writer; } -MemDebugMsg :: union { +MemWatchMsg :: union { Start: struct { version: u32; heap_base_address: u32; @@ -42,11 +40,11 @@ MemDebugMsg :: union { newptr: u32; size: u32; align: u32; - trace: [] MemDebugStackNode; + trace: [] MemWatchStackNode; }; } -MemDebugStackNode :: struct { +MemWatchStackNode :: struct { file: str; line: u32; current_line: u32; @@ -54,7 +52,7 @@ MemDebugStackNode :: struct { } -make :: (a: Allocator, listen_addr: &net.SocketAddress) -> MemDebugState { +make :: (a: Allocator, listen_addr: &net.SocketAddress) -> MemWatchState { return .{ a, *listen_addr, @@ -63,12 +61,12 @@ make :: (a: Allocator, listen_addr: &net.SocketAddress) -> MemDebugState { }; } -free :: (m: &MemDebugState) { +free :: (m: &MemWatchState) { io.writer_free(m.writer->unwrap_ptr()); m.socket->unwrap_ptr()->close(); } -wait_for_connection :: (m: &MemDebugState) -> Result(void, io.Error) { +wait_for_connection :: (m: &MemWatchState) -> Result(void, io.Error) { listen_socket := net.socket_create(.Inet, .Stream, .ANY)?; listen_socket->option(.ReuseAddress, true); listen_socket->bind(&m.listen_addr); @@ -78,34 +76,36 @@ wait_for_connection :: (m: 
&MemDebugState) -> Result(void, io.Error) { m.socket = .{ Some = result.socket }; m.writer = .{ Some = io.writer_make(m.socket->unwrap_ptr(), 0) }; - memdebug_send_message(m, .{ Start = .{ version = VERSION, heap_base_address = cast(u32) __heap_start } }); + memwatch_send_message(m, .{ Start = .{ version = VERSION, heap_base_address = cast(u32) __heap_start } }); + + return .{ Ok = .{} }; } enable_in_scope :: macro (a: Allocator, port := DEFAULT_PORT) { - use core.alloc.memdebug + use core.alloc.memwatch addr: net.SocketAddress; net.make_ipv4_address(&addr, "0.0.0.0", ~~port); old_allocator := a; - dbg := memdebug.make(old_allocator, &addr); + dbg := memwatch.make(old_allocator, &addr); a = alloc.as_allocator(&dbg); - memdebug.wait_for_connection(&dbg); + memwatch.wait_for_connection(&dbg); - defer memdebug.free(&dbg); + defer memwatch.free(&dbg); } #overload -alloc.as_allocator :: (memdebug: &MemDebugState) => Allocator.{ memdebug, memdebug_proc } +alloc.as_allocator :: (memwatch: &MemWatchState) => Allocator.{ memwatch, memwatch_proc } #local -memdebug_proc :: (m: &MemDebugState, action: AllocationAction, size: u32, align: u32, oldptr: rawptr) -> rawptr { +memwatch_proc :: (m: &MemWatchState, action: AllocationAction, size: u32, align: u32, oldptr: rawptr) -> rawptr { newptr := m.wrapped_allocator.func(m.wrapped_allocator.data, action, size, align, oldptr); - trace: [] MemDebugStackNode = .[]; + trace: [] MemWatchStackNode = .[]; stack_trace := runtime.info.get_stack_trace(); if stack_trace { slice.init(&trace, stack_trace.count, context.temp_allocator); @@ -117,7 +117,7 @@ memdebug_proc :: (m: &MemDebugState, action: AllocationAction, size: u32, align: } } - memdebug_send_message(m, .{ + memwatch_send_message(m, .{ Action = .{ action, ~~oldptr, @@ -133,9 +133,9 @@ memdebug_proc :: (m: &MemDebugState, action: AllocationAction, size: u32, align: #local -memdebug_send_message :: (m: &MemDebugState, msg: MemDebugMsg) { +memwatch_send_message :: (m: &MemWatchState, 
msg: MemWatchMsg) { success := osad.serialize(msg, m.writer->unwrap_ptr()); if !success { - logf(.Warning, "MemDebug logging failed when sending."); + logf(.Warning, "MemWatch logging failed when sending."); } } diff --git a/core/alloc/pool.onyx b/core/alloc/pool.onyx index b69e95eb3..c5e7d0904 100644 --- a/core/alloc/pool.onyx +++ b/core/alloc/pool.onyx @@ -1,16 +1,14 @@ +/// A pool allocator is an O(1) allocator that is capable of allocating and freeing. +/// It is able to do both in constant time because it maintains a linked list of all +/// the free elements in the pool. When an element is requested the first element of +/// linked list is returned and the list is updated. When an element is freed, it +/// becomes the first element. The catch with this strategy however, is that all of +/// the allocations must be of the same size. This would not be an allocator to use +/// when dealing with heterogenous data, but when doing homogenous data, such as +/// game entities, this allocator is great. It allows you to allocate and free as +/// many times as you want, without worrying about fragmentation or slow allocators. +/// Just make sure you don't allocate more than the pool can provide. package core.alloc.pool -#package_doc """ - A pool allocator is an O(1) allocator that is capable of allocating and freeing. - It is able to do both in constant time because it maintains a linked list of all - the free elements in the pool. When an element is requested the first element of - linked list is returned and the list is updated. When an element is freed, it - becomes the first element. The catch with this strategy however, is that all of - the allocations must be of the same size. This would not be an allocator to use - when dealing with heterogenous data, but when doing homogenous data, such as - game entities, this allocator is great. It allows you to allocate and free as - many times as you want, without worrying about fragmentation or slow allocators. 
- Just make sure you don't allocate more than the pool can provide. -""" use core @@ -31,7 +29,7 @@ pool_allocator_proc :: (pool: &PoolAllocator($Elem), aa: AllocationAction, size: } case .Resize { - assert(false, "Cannot resize in a pool allocator!"); + panic("Cannot resize in a pool allocator!"); return null; } diff --git a/core/alloc/ring.onyx b/core/alloc/ring.onyx index bb61a540d..fdfe9fc5a 100644 --- a/core/alloc/ring.onyx +++ b/core/alloc/ring.onyx @@ -1,15 +1,13 @@ +/// This allocator is great for temporary memory, such as returning +/// a pointer from a function, or storing a formatted string. The +/// memory allocated using this allocator does not need to be freed. +/// The idea is that as you keep allocating you will "wrap around" +/// and start writing over memory that was allocated before. For this +/// reason, it is not safe to use this for any kind of permanent +/// allocation. Also, be wary that you provide this allocator with +/// a buffer big enough to store as much data as you are going to need +/// at any given time. package core.alloc.ring -#package_doc """ - This allocator is great for temporary memory, such as returning - a pointer from a function, or storing a formatted string. The - memory allocated using this allocator does not need to be freed. - The idea is that as you keep allocating you will "wrap around" - and start writing over memory that was allocated before. For this - reason, it is not safe to use this for any kind of permanent - allocation. Also, be wary that you provide this allocator with - a buffer big enough to store as much data as you are going to need - at any given time. -""" use core diff --git a/core/builtin.onyx b/core/builtin.onyx index 9bc40c2e4..a69b0a990 100644 --- a/core/builtin.onyx +++ b/core/builtin.onyx @@ -35,23 +35,33 @@ dyn_str :: #type [..] u8; -// -// This is the type of a range literal (i.e. 1 .. 5). -// This is a special type that the compiler knows how to iterator through. 
-// So, one can simply write: -// -// for x in 1 .. 5 { ... } -// -// Although not controllable from the literal syntax, there is a `step` -// member that allows you control how many numbers to advance each iteration. -// For example, range.{ 0, 100, 2 } would iterate over the even numbers, and -// range.{ 100, 0, -1 } would count backwards from 100 to 0 (and including 0). +/// This is the type of a range literal (i.e. 1 .. 5). +/// This is a special type that the compiler knows how to iterator through. +/// So, one can simply write: +/// +/// for x in 1 .. 5 { ... } +/// +/// Although not controllable from the literal syntax, there is a `step` +/// member that allows you control how many numbers to advance each iteration. +/// For example, range.{ 0, 100, 2 } would iterate over the even numbers, and +/// range.{ 100, 0, -1 } would count backwards from 100 to 0 (and including 0). range :: struct { low : i32; high : i32; step : i32 = 1; } +/// To have parity between range32 and range64 +range32 :: range + + +/// This is the same as the `range` type, except with 64-bit integers. +range64 :: struct { + low : i64; + high : i64; + step : i64 = 1; +} + @@ -60,35 +70,33 @@ range :: struct { // casts to all other pointer types. null :: cast(rawptr) 0 -#doc """ - `null_proc` is a special function that breaks the normal rules of type - checking. `null_proc`, or any procedure marked with `#null`, is assignable - to any function type, regardless of if the types match. For example, - - f: (i32) -> i32 = null_proc; - - Even though `null_proc` is a `() -> void` function, it bypasses that check - and gets assigned to `f`. If f is called, there will be a runtime exception. - This is by design. -""" +/// `null_proc` is a special function that breaks the normal rules of type +/// checking. `null_proc`, or any procedure marked with `#null`, is assignable +/// to any function type, regardless of if the types match. 
For example, +/// +/// f: (i32) -> i32 = null_proc; +/// +/// Even though `null_proc` is a `() -> void` function, it bypasses that check +/// and gets assigned to `f`. If f is called, there will be a runtime exception. +/// This is by design. null_proc :: () -> void #null --- -// -// I find myself wanting to return a completely nullified string like the -// one below that I decided to added a builtin binding for it. This might -// go away at some point and would just need to be defined in every file. +/// +/// I find myself wanting to return a completely nullified string like the +/// one below that I decided to added a builtin binding for it. This might +/// go away at some point and would just need to be defined in every file. null_str :: str.{ null, 0 } -// -// The 'context' is used to store thread-local configuration for things like -// allocators, loggers, exception handles, and other things. It is thread -// local so every threads gets its own copy. +/// +/// The 'context' is used to store thread-local configuration for things like +/// allocators, loggers, exception handles, and other things. It is thread +/// local so every threads gets its own copy. #thread_local context : OnyxContext; -// -// This is the type of the 'context' global variable. +/// +/// This is the type of the 'context' global variable. OnyxContext :: struct { // The allocator used by default by the standard library. It is by // default the global heap allocator. @@ -127,31 +135,34 @@ OnyxContext :: struct { // // Define helper methods for setting and retrieving the user_data // stored on the context. 
-#inject OnyxContext { - set_user_data :: macro (c: &OnyxContext, data: &$T) { - c.user_data = data; - c.user_data_type = T; - } +OnyxContext.set_user_data :: macro (c: &OnyxContext, data: &$T) { + c.user_data = data; + c.user_data_type = T; +} - get_user_data :: macro (c: &OnyxContext, $T: type_expr) -> &T { - if c.user_data_type != T do return null; - return ~~ c.user_data; - } +OnyxContext.get_user_data :: macro (c: &OnyxContext, $T: type_expr) -> &T { + if c.user_data_type != T do return null; + return ~~ c.user_data; } // CLEANUP: Does assert() need to be in the builtin file? -// It uses context.assert_handler, but does it needt to be here? -// -// Checks if the condition is true. If not, invoke the context's -// assert handler. +// It uses context.assert_handler, but does it need to be here? + +/// Checks if the condition is true. If not, invoke the context's +/// assert handler. assert :: (cond: bool, msg: str, site := #callsite) { if !cond { context.assert_handler(msg, site); } } +/// Causes a runtime panic with the specified error message. +panic :: (msg: str, site := #callsite) { + context.assert_handler(msg, site); +} + // // Basic logging @@ -251,34 +262,31 @@ raw_alloc :: (use a: Allocator, size: u32, alignment := Default_Allocation_Align return func(data, AllocationAction.Alloc, size, alignment, null); } -// -// Helper procedure to resize an allocation from an allocator. +/// Helper procedure to resize an allocation from an allocator. raw_resize :: (use a: Allocator, ptr: rawptr, size: u32, alignment := Default_Allocation_Alignment) -> rawptr { return func(data, AllocationAction.Resize, size, alignment, ptr); } -// -// Helper procedure to free an allocation from an allocator. +/// Helper procedure to free an allocation from an allocator. 
raw_free :: (use a: Allocator, ptr: rawptr) { func(data, AllocationAction.Free, 0, 0, ptr); } -#inject Allocator { - alloc :: raw_alloc - resize :: raw_resize - free :: raw_free +Allocator.alloc :: raw_alloc +Allocator.resize :: raw_resize +Allocator.free :: raw_free - move :: macro (use a: Allocator, v: $V) -> &V { - out := cast(&V) a->alloc(sizeof V); - *out = v; - return out; - } +Allocator.move :: macro (use a: Allocator, v: $V) -> &V { + out := cast(&V) a->alloc(sizeof V); + *out = v; + return out; } -// -// Helper function to allocate/free using allocator in the context structure. +/// Helper function to allocate using the allocator in the context structure. calloc :: (size: u32) => raw_alloc(context.allocator, size); +/// Helper function to resize using the allocator in the context structure. cresize :: (ptr: rawptr, size: u32) => raw_resize(context.allocator, ptr, size); +/// Helper function to free using the allocator in the context structure. cfree :: (ptr: rawptr) => raw_free(context.allocator, ptr); @@ -326,7 +334,17 @@ cfree :: (ptr: rawptr) => raw_free(context.allocator, ptr); return res; } - new_temp :: macro (T: type_expr) => { + #overload + new :: macro (v: $T, allocator := context.allocator) -> &T { + use core + + out := cast(&T) raw_alloc(allocator, sizeof T); + core.memory.set(out, 0, sizeof T); + *out = v; + return out; + } + + new_temp :: macro (T) => { return new(T, allocator=context.temp_allocator); } @@ -354,24 +372,24 @@ cfree :: (ptr: rawptr) => raw_free(context.allocator, ptr); return make(T, n, allocator=context.temp_allocator); } - // - // This is a rather unique way of using the type matching system - // to select an overload. What is desired here is that when you say: - // - // make(Foo) - // - // You match the overload for make that is designed for making a Foo. - // However, you cannot use the type matching system to match by value. 
- // In order to get around this, `make` will pass a null pointer to this - // match procedure, that is casted to be a *pointer* to the desired type. - // Therefore, if you want to add your own make overload, you have to add - // a match to `__make_overload` that takes a *pointer* to the desired - // type as the first argument, and then an allocator as the second. - // Optionally, you can take a parameter between them that is an integer, - // useful when constructing things like arrays. - // - // See core/container/array.onyx for an example. - // + /// + /// This is a rather unique way of using the type matching system + /// to select an overload. What is desired here is that when you say: + /// + /// make(Foo) + /// + /// You match the overload for make that is designed for making a Foo. + /// However, you cannot use the type matching system to match by value. + /// In order to get around this, `make` will pass a null pointer to this + /// match procedure, that is casted to be a *pointer* to the desired type. + /// Therefore, if you want to add your own make overload, you have to add + /// a match to `__make_overload` that takes a *pointer* to the desired + /// type as the first argument, and then an allocator as the second. + /// Optionally, you can take a parameter between them that is an integer, + /// useful when constructing things like arrays. + /// + /// See core/container/array.onyx for an example. + /// __make_overload :: #match {} delete :: #match {} @@ -392,58 +410,73 @@ cfree :: (ptr: rawptr) => raw_free(context.allocator, ptr); } -#doc """ - Represents a generic "iterator" or "generator" of a specific - type. Can be used in a for-loop natively. - `data` is used for contextual information and is passed to - the `next`, `close`, and `remove` procedures. - - `next` is used to extract the next value out of the iterator. - It returns the next value, and a continuation flag. If the - flag is false, the value should be ignored and iteration should - stop. 
- - `close` should called when the iterator has ended. This is - done automatically in for-loops, and in the `core.iter` library. - In for-loops, `close` is called no matter which way the for-loop - exits (`break`, `return`, etc). Using this rule, iterator can - be used to create "resources" that automatically close when you - are done with them. - - `remove` is used to tell the iterator to remove the last value - returned from some underlying data store. Invoked automatically - using the `#remove` directive in a for-loop. -""" +/// Represents a generic "iterator" or "generator" of a specific +/// type. Can be used in a for-loop natively. +/// +/// `data` is used for contextual information and is passed to +/// the `next`, `close`, and `remove` procedures. +/// +/// `next` is used to extract the next value out of the iterator. +/// It returns the next value, and a continuation flag. If the +/// flag is false, the value should be ignored and iteration should +/// stop. +/// +/// `close` should called when the iterator has ended. This is +/// done automatically in for-loops, and in the `core.iter` library. +/// In for-loops, `close` is called no matter which way the for-loop +/// exits (`break`, `return`, etc). Using this rule, iterator can +/// be used to create "resources" that automatically close when you +/// are done with them. +/// +/// `remove` is used to tell the iterator to remove the last value +/// returned from some underlying data store. Invoked automatically +/// using the `#remove` directive in a for-loop. Iterator :: struct (Iter_Type: type_expr) { data: rawptr; - next: (data: rawptr) -> (Iter_Type, bool); + next: (data: rawptr) -> Optional(Iter_Type); close: (data: rawptr) -> void = null_proc; remove: (data: rawptr) -> void = null_proc; } -#doc """ - Optional represents the possibility of a value being empty, without - resorting to pointers and null-pointers. Most of the functionality - for Optional is defined in core/containers/optional.onyx. 
This - definition exists here because the compiler use it as the template - for types like '? i32'. In other words, '? i32' is equivalent to - 'Optional(i32)'. -""" +/// Optional represents the possibility of a value being empty, without +/// resorting to pointers and null-pointers. Most of the functionality +/// for Optional is defined in core/containers/optional.onyx. This +/// definition exists here because the compiler use it as the template +/// for types like '? i32'. In other words, '? i32' is equivalent to +/// 'Optional(i32)'. Optional :: union (Value_Type: type_expr) { None: void; Some: Value_Type; } +/// This structure represents the slice types, `[] T`. +/// While slices are a special type in Onyx, and therefore need extra +/// compiler support, this structure exists to allow for placing +/// methods onto slices. See `core/container/slice.onyx` for examples. +Slice :: struct (T: type_expr) { + data: [&] T; + count: i32; +} + + +/// This structure represents the dynamic array types, `[..] T`. +/// This structure exists to allow for placing methods onto dynamic arrays. +/// See `core/container/array.onyx` for examples. +Array :: struct (T: type_expr) { + data: [&] T; + count: i32; + capacity: i32; + allocator: Allocator; +} + -#doc """ - This structure represents the result of a '#callsite' expression. Currently, #callsite - is only valid (and parsed) as a default value for a procedure parameter. It allows - the function to get the address of the calling site, which can be used for error - printing, unique hashes, and much more. -""" +/// This structure represents the result of a '#callsite' expression. Currently, #callsite +/// is only valid (and parsed) as a default value for a procedure parameter. It allows +/// the function to get the address of the calling site, which can be used for error +/// printing, unique hashes, and much more. 
CallSite :: struct { file : str; line : u32; @@ -451,66 +484,52 @@ CallSite :: struct { } -#doc """ - This structure is used to represent any value in the language. - It contains a pointer to the data, and the type of the value. - Using the `core.misc` library, you can easily manipulate `any`s - and build runtime polymorphism. -""" +/// This structure is used to represent any value in the language. +/// It contains a pointer to the data, and the type of the value. +/// Using the `core.misc` library, you can easily manipulate `any`s +/// and build runtime polymorphism. any :: struct { data: rawptr; type: type_expr; } -#doc """ - Represents a code block that can be passed around at compile-time. - This is commonly used with macros or polymorphic procedures to create - very power extensions to the syntax. -""" +/// Represents a code block that can be passed around at compile-time. +/// This is commonly used with macros or polymorphic procedures to create +/// very power extensions to the syntax. Code :: struct {_:i32;} -#doc """ - This procedure is a special compiler generated procedure that initializes all the data segments - in the program. It should only be called once, by the main thread, at the start of execution. It - is undefined behaviour if it is called more than once. -""" +/// This procedure is a special compiler generated procedure that initializes all the data segments +/// in the program. It should only be called once, by the main thread, at the start of execution. It +/// is undefined behaviour if it is called more than once. __initialize_data_segments :: () -> void --- -#doc """ - This is a special compiler generated procedure that calls all procedures specified with `#init` - in the specified order. It should theoretically only be called once on the main thread. -""" +/// This is a special compiler generated procedure that calls all procedures specified with `#init` +/// in the specified order. 
It should theoretically only be called once on the main thread. __run_init_procedures :: () -> void --- -#doc """ - This overloaded procedure allow you to define an implicit rule for how to convert any value - into a boolean. A default is provided for ALL pointer types and array types, but this can - be used for structures or distinct types. -""" +/// This overloaded procedure allows you to define an implicit rule for how to convert any value +/// into a boolean. A default is provided for ALL pointer types and array types, but this can +/// be used for structures or distinct types. __implicit_bool_cast :: #match -> bool {} -#doc """ - Internal procedure to allocate space for the captures in a closure. This will be soon - changed to a configurable way, but for now it simply allocates out of the heap allocator. -""" +/// Internal procedure to allocate space for the captures in a closure. This will be soon +/// changed to a configurable way, but for now it simply allocates out of the heap allocator. __closure_block_allocate :: (size: i32) -> rawptr { return context.closure_allocate(size); } -#doc """ - Defines all options for changing the memory layout, imports and exports, - and more of an Onyx binary. -""" -Link_Options :: struct { - // By default the memory layout of a binary is: - // reserved | static-data | stack | heap - // But when stack_first is true, it is: - // reserved | stack | static-data | heap - stack_first := false; +/// +__dispose_used_local :: #match -> void { + #order 10000 delete +} + +/// Defines all options for changing the memory layout, imports and exports, +/// and more of an Onyx binary. +Link_Options :: struct { // The size, in bytes of the stack. stack_size := 16 * 65536; // 16 pages * 65536 bytes per page = 1 MiB stack @@ -562,20 +581,18 @@ Link_Options :: struct { } -#doc """ - Special type used to represent a package at runtime. 
- For example, - - x: package_id = package main - - Currently, there is not much you can do with this; it is - only used by the runtime.info library if you want to filter - tags based on which package they are coming from. -""" +/// Special type used to represent a package at runtime. +/// For example, +/// +/// x: package_id = package main +/// +/// Currently, there is not much you can do with this; it is +/// only used by the runtime.info library if you want to filter +/// tags based on which package they are coming from. package_id :: #distinct u32 -// -// Special value used to represents any package. +/// +/// Special value used to represents any package. any_package :: cast(package_id) 0 // @@ -586,12 +603,11 @@ any_package :: cast(package_id) 0 // DEPRECATED THINGS // -// -// This is the special type of a paramter that was declared to have the type '...'. -// This is an old feature of the language now called 'untyped varargs'. It had -// a similar construction to varargs in C/C++. Because it is incredibly unsafe -// and not programmer friendly, this way of doing it has been deprecated in -// favor of using '..any', which provides type information along with the data. +/// This is the special type of a paramter that was declared to have the type '...'. +/// This is an old feature of the language now called 'untyped varargs'. It had +/// a similar construction to varargs in C/C++. Because it is incredibly unsafe +/// and not programmer friendly, this way of doing it has been deprecated in +/// favor of using '..any', which provides type information along with the data. vararg :: #type &struct { data: rawptr; count: i32; diff --git a/core/container/array.onyx b/core/container/array.onyx index 17cb97b5e..1f60dc0f1 100644 --- a/core/container/array.onyx +++ b/core/container/array.onyx @@ -15,53 +15,55 @@ use core // Dynamic Arrays // --------------------------------- -#doc """ - Creates a new dynamic array. 
-""" -make :: #match #local {} - -#doc """ - Creates a dynamic array of type `T` with an initial capacity of `capacity`, - from the `allocator`. -""" +/// Creates a new dynamic array. +Array.make :: #match #local {} + +/// Creates a dynamic array of type `T` with an initial capacity of `capacity`, +/// from the `allocator`. #overload -make :: ($T: type_expr, capacity := 4, allocator := context.allocator) -> [..] T { +Array.make :: ($T: type_expr, capacity := 4, allocator := context.allocator) -> [..] T { arr : [..] T; - init(&arr, capacity, allocator); + Array.init(&arr, capacity, allocator); return arr; } -#doc """ - Creates a new dynamic array as a *copy* of the provided array. -""" +/// Creates a new dynamic array as a *copy* of the provided array. #overload -make :: (base: [] $T, allocator := context.allocator) -> [..] T { +Array.make :: (base: [] $T, allocator := context.allocator) -> [..] T { arr: [..] T; - init(&arr, base.count, allocator); + Array.init(&arr, base.count, allocator); for& base do arr << *it; return arr; } #overload __make_overload :: macro (_: &[..] $T, allocator := context.allocator) -> [..] T { - return #this_package.make(T, allocator=allocator); + return Array.make(T, allocator=allocator); } #overload __make_overload :: macro (_: &[..] $T, capacity: u32, allocator := context.allocator) -> [..] T { - return #this_package.make(T, capacity, allocator); + return Array.make(T, capacity, allocator); } -#doc "Initializes a dynamic array." -init :: (arr: &[..] $T, capacity := 4, allocator := context.allocator) { +/// Initializes a dynamic array. +Array.init :: (arr: &[..] $T, capacity := 4, allocator := context.allocator) { arr.count = 0; arr.capacity = capacity; arr.allocator = allocator; arr.data = raw_alloc(allocator, sizeof T * arr.capacity); } -#doc "Frees a dynamic array." -free :: (arr: &[..] $T) { +Array.raw_from_slice :: (sl: [] $T, allocator: Allocator) -> (arr: [..] 
T) { + arr.data = sl.data + arr.count = sl.count + arr.capacity = sl.count + arr.allocator = allocator + return +} + +/// Frees a dynamic array. +Array.free :: (arr: &[..] $T) { arr.count = 0; arr.capacity = 0; @@ -71,13 +73,13 @@ free :: (arr: &[..] $T) { #overload builtin.delete :: macro (x: &[..] $T) { - #this_package.free(x); + Array.free(x); } -copy :: #match #locked { +Array.copy :: #match #locked { (arr: &[..] $T, allocator := context.allocator) -> [..] T { new_arr : [..] T; - init(&new_arr, arr.count, allocator); + Array.init(&new_arr, arr.count, allocator); new_arr.count = arr.count; for i in 0 .. arr.count do new_arr.data[i] = arr.data[i]; @@ -91,61 +93,57 @@ copy :: #match #locked { } } -#doc """ - Copies a sub-array of a dynamic-array. - - arr := array.make(.[ 2, 3, 5, 7, 11 ]); - sub := array.copy_range(&arr, 2 .. 5); - println(sub); // 5, 7, 11 -""" -copy_range :: (arr: &[..] $T, r: range, allocator := context.allocator) -> [..] T { +/// Copies a sub-array of a dynamic-array. +/// +/// arr := array.make(.[ 2, 3, 5, 7, 11 ]); +/// sub := array.copy_range(&arr, 2 .. 5); +/// println(sub); // 5, 7, 11 +Array.copy_range :: (arr: &[..] $T, r: range, allocator := context.allocator) -> [..] T { new_arr : [..] T; - init(&new_arr, r.high - r.low, allocator); + Array.init(&new_arr, r.high - r.low, allocator); new_arr.count = r.high - r.low; for i in r do new_arr.data[i] = arr.data[i]; return new_arr; } -#doc """ - Clears a dynamic array. - - Note: This does not clear or free the memory for the dynamic array. -""" -clear :: (arr: &[..] $T) { +/// Clears a dynamic array. +/// +/// Note: This does not clear or free the memory for the dynamic array. +Array.clear :: (arr: &[..] $T) { arr.count = 0; } -#doc """ - Resizes a dynamic array if it does not have enough capacity. - - If this procedure returns `true`, `arr.capacity` will be greater than or equal to `capacity`. -""" -ensure_capacity :: (arr: &[..] 
$T, capacity: u32) -> bool { +/// Resizes a dynamic array if it does not have enough capacity. +/// +/// If this procedure returns `true`, `arr.capacity` will be greater than or equal to `capacity`. +Array.ensure_capacity :: (arr: &[..] $T, capacity: u32) -> bool { if arr.capacity >= capacity do return true; - if arr.data == null do init(arr); + if arr.data == null do Array.init(arr, capacity); while capacity > arr.capacity do arr.capacity <<= 1; new_data := raw_resize(arr.allocator, arr.data, sizeof T * arr.capacity); if new_data == null do return false; arr.data = new_data; + + core.memory.set( + core.memory.ptr_add(arr.data, sizeof T * arr.count), + 0, + sizeof T * (arr.capacity - arr.count) + ) return true; } -#doc """ - Appends a zeroed-element to the end of the array, and returns a pointer to it. -""" -alloc_one :: (arr: &[..] $T) -> &T { - if !ensure_capacity(arr, arr.count + 1) do return null; +/// Appends a zeroed-element to the end of the array, and returns a pointer to it. +Array.alloc_one :: (arr: &[..] $T) -> &T { + if !Array.ensure_capacity(arr, arr.count + 1) do return null; arr.count += 1; return &arr.data[arr.count - 1]; } -#doc """ - Appends `x` to the end of the array. -""" -push :: (arr: &[..] $T, x: T) -> bool { - if !ensure_capacity(arr, arr.count + 1) do return false; +/// Appends `x` to the end of the array. +Array.push :: (arr: &[..] $T, x: T) -> bool { + if !Array.ensure_capacity(arr, arr.count + 1) do return false; arr.data[arr.count] = x; arr.count += 1; return true; @@ -153,39 +151,37 @@ push :: (arr: &[..] $T, x: T) -> bool { // Semi-useful shortcut for adding something to an array. #operator << macro (arr: [..] $T, v: T) { - #this_package.push(&arr, v); + Array.push(&arr, v); } -#doc """ - Inserts element(s) into the middle of the array at `idx`. - - If `idx >= arr.count`, nothing happens. -""" -insert :: #match #local {} +/// Inserts element(s) into the middle of the array at `idx`. +/// +/// If `idx >= arr.count`, nothing happens. 
+Array.insert :: #match #local {} #overload -insert :: (arr: &[..] $T, idx: u32, x: T) -> bool { +Array.insert :: (arr: &[..] $T, idx: u32, x: T) -> bool { if idx > arr.count do return false; - if !ensure_capacity(arr, arr.count + 1) do return false; + if !Array.ensure_capacity(arr, arr.count + 1) do return false; - arr.count += 1; while i := arr.count; i > idx { arr.data[i] = arr.data[i - 1]; i -= 1; } + arr.count += 1; arr.data[idx] = x; return true; } #overload -insert :: (arr: &[..] $T, idx: u32, new_arr: [] T) -> bool { +Array.insert :: (arr: &[..] $T, idx: u32, new_arr: [] T) -> bool { if idx > arr.count do return false; - if !ensure_capacity(arr, arr.count + new_arr.count) do return false; + if !Array.ensure_capacity(arr, arr.count + new_arr.count) do return false; arr.count += new_arr.count; - while i := arr.count; i > idx { + while i := arr.count - 1; i > idx { arr.data[i] = arr.data[i - new_arr.count]; i -= 1; } @@ -196,15 +192,13 @@ insert :: (arr: &[..] $T, idx: u32, new_arr: [] T) -> bool { return true; } -#doc """ - Inserts a zeroed-element at `idx`. -""" -insert_empty :: (arr: &[..] $T, idx: u32) -> bool { +/// Inserts a zeroed-element at `idx`. +Array.insert_empty :: (arr: &[..] $T, idx: u32) -> bool { if idx > arr.count do return false; - if !ensure_capacity(arr, arr.count + 1) do return false; + if !Array.ensure_capacity(arr, arr.count + 1) do return false; arr.count += 1; - while i := arr.count; i > idx { + while i := arr.count - 1; i > idx { arr.data[i] = arr.data[i - 1]; i -= 1; } @@ -212,12 +206,10 @@ insert_empty :: (arr: &[..] $T, idx: u32) -> bool { return true; } -#doc """ - Removes all instances of `elem` from the array. - - Uses `==` to test for equality. -""" -remove :: (arr: &[..] $T, elem: T) { +/// Removes all instances of `elem` from the array. +/// +/// Uses `==` to test for equality. +Array.remove :: (arr: &[..] $T, elem: T) { move := 0; while i := 0; i < arr.count - move { @@ -233,12 +225,10 @@ remove :: (arr: &[..] 
$T, elem: T) { arr.count -= move; } -#doc """ - Removes the element at index `idx` from the array and returns it. - - Maintains order of the array. -""" -delete :: (arr: &[..] $T, idx: u32) -> T { +/// Removes the element at index `idx` from the array and returns it. +/// +/// Maintains order of the array. +Array.delete :: (arr: &[..] $T, idx: u32) -> T { if idx >= arr.count do return .{}; to_return := arr.data[idx]; @@ -250,12 +240,10 @@ delete :: (arr: &[..] $T, idx: u32) -> T { return to_return; } -#doc """ - Removes the element at index `idx` from the array and returns it. - - Order is not guaranteed to be preserved. -""" -fast_delete :: (arr: &[..] $T, idx: u32) -> T { +/// Removes the element at index `idx` from the array and returns it. +/// +/// Order is not guaranteed to be preserved. +Array.fast_delete :: (arr: &[..] $T, idx: u32) -> T { if idx >= arr.count do return .{}; to_return := arr.data[idx]; @@ -265,12 +253,8 @@ fast_delete :: (arr: &[..] $T, idx: u32) -> T { return to_return; } -#doc """ - Removes `n` elements from the end of the array. - - `n` by default is 1. -""" -pop :: (arr: &[..] $T, n := 1) -> T { +/// Removes `n` elements from the end of the array. +Array.pop :: (arr: &[..] $T, n := 1) -> T { if arr.count == 0 do return .{}; c := core.math.min(n, arr.count); @@ -279,36 +263,30 @@ pop :: (arr: &[..] $T, n := 1) -> T { } -#doc """ - Appends elements from another array or iterator to the end of the array. -""" -concat :: #match #local {} +/// Appends elements from another array or iterator to the end of the array. +Array.concat :: #match #local {} #overload -concat :: (arr: &[..] $T, other: [] T) { - if !ensure_capacity(arr, arr.count + other.count) do return; +Array.concat :: (arr: &[..] $T, other: [] T) { + if !Array.ensure_capacity(arr, arr.count + other.count) do return; core.memory.copy(arr.data + arr.count, other.data, other.count * sizeof T); arr.count += other.count; } #overload -concat :: (arr: &[..] 
$T, other: Iterator(T)) { +Array.concat :: (arr: &[..] $T, other: Iterator(T)) { for other { - push(arr, it); + Array.push(arr, it); } } -#doc """ - Removes all elements for which the given predicate does not hold. - - Use `it` to refer to the current element being tested. - - arr := array.make(.[ 1, 2, 3, 4, 5 ]); - array.filter(&arr, [v](v % 2 == 0)); - println(arr); // 2, 4 -""" -filter :: macro (arr: &[..] $T, body: Code) { +/// Removes all elements for which the given predicate does not hold. +/// +/// arr := array.make(.[ 1, 2, 3, 4, 5 ]); +/// array.filter(&arr, [v](v % 2 == 0)); +/// println(arr); // 2, 4 +Array.filter :: macro (arr: &[..] $T, body: Code) { move := 0; while i := 0; i < arr.count - move { @@ -327,8 +305,8 @@ filter :: macro (arr: &[..] $T, body: Code) { } -// Useful structure when talking about dynamic arrays where you don't know of what -// type they store. For example, when passing a dynamic array as an 'any' argument. +/// Useful structure when talking about dynamic arrays where you don't know of what +/// type they store. For example, when passing a dynamic array as an 'any' argument. Untyped_Array :: struct { data: rawptr; count: u32; @@ -338,35 +316,59 @@ Untyped_Array :: struct { + + +// +// Everything below here only exists for backwards compatibility. 
+// + +make :: Array.make +init :: Array.init +free :: Array.free +copy :: Array.copy +copy_range :: Array.copy_range +clear :: Array.clear +ensure_capacity :: Array.ensure_capacity +alloc_one :: Array.alloc_one +push :: Array.push +insert :: Array.insert +insert_empty :: Array.insert_empty +remove :: Array.remove +delete :: Array.delete +fast_delete :: Array.fast_delete +pop :: Array.pop +concat :: Array.concat +filter :: Array.filter + + + // Things that work with slices and arrays -use core.slice - -transplant :: slice.transplant -get :: slice.get -get_ptr :: slice.get_ptr -set :: slice.set -contains :: slice.contains -empty :: slice.empty -sum :: slice.sum -product :: slice.product -average :: slice.average -reverse :: slice.reverse -sort :: slice.sort -quicksort :: slice.quicksort -unique :: slice.unique -fold :: slice.fold -every :: slice.every -some :: slice.some -fill :: slice.fill -fill_range :: slice.fill_range -to_list :: slice.to_list -find :: slice.find -find_ptr :: slice.find_ptr -find_opt :: slice.find_opt -first :: slice.first -count_where :: slice.count_where -windows :: slice.windows -chunks :: slice.chunks -greatest :: slice.greatest -least :: slice.least +transplant :: Slice.transplant +get :: Slice.get +get_ptr :: Slice.get_ptr +set :: Slice.set +contains :: Slice.contains +empty :: Slice.empty +sum :: Slice.sum +product :: Slice.product +average :: Slice.average +reverse :: Slice.reverse +sort :: Slice.sort +quicksort :: Slice.quicksort +unique :: Slice.unique +fold :: Slice.fold +every :: Slice.every +some :: Slice.some +fill :: Slice.fill +fill_range :: Slice.fill_range +to_list :: Slice.to_list +find :: Slice.find +find_ptr :: Slice.find_ptr +find_opt :: Slice.find_opt +first :: Slice.first +count_where :: Slice.count_where +windows :: Slice.windows +chunks :: Slice.chunks +greatest :: Slice.greatest +least :: Slice.least diff --git a/core/container/bucket_array.onyx b/core/container/bucket_array.onyx index 74876a93c..3f0c60097 100644 --- 
a/core/container/bucket_array.onyx +++ b/core/container/bucket_array.onyx @@ -70,7 +70,7 @@ get_ptr :: (use b: &Bucket_Array($T), idx: i32) -> &T { return &buckets[bucket_index].data[elem_index]; } -push :: (use b: &Bucket_Array($T), elem: T) -> bool { +push :: (use b: &Bucket_Array($T), elem: T) { last_bucket := &buckets[buckets.count - 1]; if last_bucket.count < elements_per_bucket { last_bucket.data[last_bucket.count] = elem; @@ -119,20 +119,20 @@ as_iter :: (b: &Bucket_Array($T)) -> Iterator(T) { c.bucket_idx = 0; c.elem_idx = 0; - next :: (use c: &Context($T)) -> (T, bool) { + next :: (use c: &Context($T)) -> ? T { use core.intrinsics.onyx bucket := &ba.buckets[bucket_idx]; while elem_idx == bucket.count { bucket_idx += 1; - if bucket_idx == ba.buckets.count do return .{}, false; + if bucket_idx == ba.buckets.count do return .None; bucket = &ba.buckets[bucket_idx]; elem_idx = 0; } defer elem_idx += 1; - return bucket.data[elem_idx], true; + return bucket.data[elem_idx]; } return .{ diff --git a/core/container/heap.onyx b/core/container/heap.onyx index f4d9adce9..c9f3e9e02 100644 --- a/core/container/heap.onyx +++ b/core/container/heap.onyx @@ -1,39 +1,82 @@ package core.heap -use core.array - Heap :: struct (T: type_expr) { data: [..] 
T; compare: (T, T) -> i32 = null_proc; } -make :: ($T: type_expr, cmp: (T, T) -> i32 = null_proc) -> Heap(T) { +#overload +__make_overload :: macro (_: &Heap($T), allocator: Allocator) -> Heap(T) { + return #this_package.Heap.make(T); +} + +#overload +delete :: (h: &Heap) { + delete(&h.data); +} + +Heap.make :: ($T: type_expr, cmp: (T, T) -> i32 = null_proc) -> Heap(T) { h: Heap(T); - init(&h, cmp); + Heap.init(&h, cmp); return h; } -init :: (use heap: &Heap, cmp: (heap.T, heap.T) -> i32 = null_proc) { - array.init(&data); +Heap.init :: (use heap: &Heap, cmp: (heap.T, heap.T) -> i32 = null_proc) { + Array.init(&data); compare = cmp; } -insert :: (use heap: &Heap, v: heap.T) { +Heap.insert :: (use heap: &Heap, v: heap.T) { data << v; shift_up(heap, data.count - 1); } #operator << macro (heap: Heap($T), v: T) { - #this_package.insert(&heap, v); + #this_package.Heap.insert(&heap, v); } -remove_top :: (use heap: &Heap) -> heap.T { +Heap.empty :: macro (heap: &Heap) => heap.data.count == 0; + +Heap.peek_top :: (use heap: &Heap) -> ? heap.T { + if data.count == 0 do return .None; + return data[0]; +} + +Heap.remove_top :: (use heap: &Heap) -> ? heap.T { + if data.count == 0 do return .None; + x := data[0]; - array.fast_delete(&data, 0); + data->fast_delete(0); shift_down(heap, 0); return x; } +Heap.remove :: macro (heap: &Heap, cond: Code) -> ? 
heap.T { + shift_down :: shift_down + + for e, i in heap.data { + if #unquote cond(e) { + x := heap.data->fast_delete(i); + shift_down(heap, i); + return x; + } + } + + return .None; +} + + +// These definitions only exist for backwards compatibility + +make :: Heap.make +init :: Heap.init +insert :: Heap.insert +empty :: Heap.empty +peek_top :: Heap.peek_top +remove_top :: Heap.remove_top +remove :: Heap.remove + + #local { heap_parent :: macro (index) => (index - 1) / 2 heap_lchild :: macro (index) => (index * 2) + 1 diff --git a/core/container/iter.onyx b/core/container/iter.onyx index 0212b6e38..9557440d9 100644 --- a/core/container/iter.onyx +++ b/core/container/iter.onyx @@ -13,70 +13,75 @@ use core.intrinsics.types {type_is_struct} // // Iterator is a builtin type known by the compiler, as Iterators // can be used in for-loops natively without any translation. -#inject Iterator { - filter :: filter; - map :: map; - flat_map :: flat_map; - zip :: zip; - - take_one :: take_one; - take :: take; - take_while :: take_while; - skip :: skip; - skip_while :: skip_while; - - flatten :: flatten; - enumerate :: enumerate; - - find :: find; - fold :: fold; - fold1 :: fold1; - count :: count; - some :: some; - every :: every; - sum :: sum; - collect :: to_array; - collect_map :: to_map; -} - - -#doc """ - The standard function to convert something to an Iterator. - For-loops currently do not use this function to determine - how to iterate over something unknown, but that could be - a feature down the line. 
-""" +Iterator.from :: as_iter +Iterator.next :: next +Iterator.close :: close +Iterator.next_opt :: next_opt +Iterator.empty :: empty +Iterator.counter :: counter + +Iterator.filter :: filter; +Iterator.map :: map; +Iterator.flat_map :: flat_map; +Iterator.zip :: zip; + +Iterator.take :: take; +Iterator.take_while :: take_while; +Iterator.skip :: skip; +Iterator.skip_while :: skip_while; + +Iterator.flatten :: flatten; +Iterator.enumerate :: enumerate; +Iterator.group_by :: group_by; + +Iterator.find :: find; +Iterator.fold :: fold; +Iterator.fold1 :: fold1; +Iterator.scan :: scan +Iterator.scan1 :: scan1 +Iterator.count :: count; +Iterator.some :: some; +Iterator.every :: every; +Iterator.sum :: sum; +Iterator.collect :: to_array; +Iterator.collect_map :: to_map; + +Iterator.generator :: generator +Iterator.generator_no_copy :: generator_no_copy +Iterator.comp :: comp +Iterator.prod :: prod + +Iterator.single :: single +Iterator.const :: const + + + +/// The standard function to convert something to an Iterator. +/// For-loops currently do not use this function to determine +/// how to iterate over something unknown, but that could be +/// a feature down the line. as_iter :: #match -> Iterator {} -#doc """ - Helper interface to test if something can be passed to - as_iter successfully. -""" +/// Helper interface to test if something can be passed to +/// as_iter successfully. Iterable :: interface (T: type_expr) { t as T; { as_iter(t) } -> Iterator; } -#doc "Helper function to get the next value out of an iterator." -next :: (it: Iterator) -> (it.Iter_Type, bool) { +/// Helper function to get the next value out of an iterator. +next :: (it: Iterator) -> ? it.Iter_Type { return it.next(it.data); } -#doc """ - Helper function to get the next value out of an iterator, but translated to an optional. - Returns `None` if the iterator was empty, `Some(value)` otherwise. -""" +/// Helper function to get the next value out of an iterator, but translated to an optional. 
+/// Returns `None` if the iterator was empty, `Some(value)` otherwise. next_opt :: (it: Iterator) -> ? it.Iter_Type { - v, exists := it.next(it.data); - if exists do return v; - return .{}; + return it.next(it.data); } -#doc """ - Helper function to close an iterator, if a close function - is defined. -""" +/// Helper function to close an iterator, if a close function is defined. close :: (it: Iterator) { if it.close != null_proc { it.close(it.data); @@ -84,32 +89,28 @@ close :: (it: Iterator) { } -#doc """ - Helper function to create an iterator of a type that does not produce an values. -""" +/// Helper function to create an iterator of a type that does not produce an values. empty :: ($T: type_expr) -> Iterator(T) { return .{ // CLEANUP: Fix the compiler bug that makes this not able to a closure. - next = #solidify ($T: type_expr, _: rawptr) -> (T, bool) { - return .{}, false; + next = #solidify ($T: type_expr, _: rawptr) -> ? T { + return .None; } { T = T } }; } -#doc """ - Helper function to create an infinite counting iterator. - - Use `start` to configure the starting value. - - Use `type` to configure the type used for the iterator. -""" +/// Helper function to create an infinite counting iterator. +/// +/// Use `start` to configure the starting value. +/// +/// Use `type` to configure the type used for the iterator. counter :: (start: type = 0, $type: type_expr = i32) -> Iterator(type) { return generator( &.{ i = start }, ctx => { defer ctx.i += 1; - return ctx.i, true; + return Optional.make(ctx.i); } ); } @@ -136,13 +137,6 @@ ImplicitIterator :: interface (T: type_expr) { { t->iter_open() } -> void; t->iter_next(); { t->iter_close() } -> void; - - { - do { - value, success := t->iter_next(); - return success; - } - } -> bool; } @@ -164,7 +158,7 @@ HasAsIter :: interface (T: type_expr) { // Most of these procedures come in two variants, // one that takes a context paramter, and one that does not. 
-#doc "Only yields the values for which the predicate is true." +/// Only yields the values for which the predicate is true. filter :: #match #local {} #overload @@ -173,17 +167,15 @@ filter :: (it: Iterator($T), predicate: (T) -> bool) => &.{ iterator = it, predicate = predicate }, fi => { - value, cont := next(fi.iterator); - if cont { - while !fi.predicate(value) { - value, cont = next(fi.iterator); - if !cont do return value, false; + value := next(fi.iterator); + if value { + while !fi.predicate(value->unwrap()) { + value = next(fi.iterator); + if !value do break; } - - return value, true; + return value; } - - return value, false; + return value; }, fi => { close(fi.iterator); }); @@ -193,27 +185,23 @@ filter :: (it: Iterator($T), ctx: $Ctx, predicate: (T, Ctx) -> bool) => generator( &.{ iterator = it, predicate = predicate, ctx = ctx }, - fi => { - value, cont := next(fi.iterator); - if cont { - while !fi.predicate(value, fi.ctx) { - value, cont = next(fi.iterator); - if !cont do return value, false; + (fi: $C) -> ? T { + value := next(fi.iterator); + if value { + while !fi.predicate(value->unwrap(), fi.ctx) { + value = next(fi.iterator); + if !value do return .None; } - - return value, true; + return value; } - - return value, false; + return .None; }, fi => { close(fi.iterator); }); -#doc """ - Transforms every value that comes out of an iterator - using the transform function. -""" +/// Transforms every value that comes out of an iterator +/// using the transform function. map :: #match #local {} #overload @@ -221,13 +209,12 @@ map :: (it: Iterator($T), transform: (T) -> $R) => generator( &.{ iterator = it, transform = transform }, - mi => { - value, cont := next(mi.iterator); - if cont { - return mi.transform(value), true; - } - - return .{}, false; + (mi: $C) -> ? 
R { + v := next(mi.iterator); + return switch v { + case .Some as v => Optional.make(mi.transform(v)); + case .None => .None; + }; }, mi => { close(mi.iterator); }) @@ -237,26 +224,23 @@ map :: (it: Iterator($T), ctx: $Ctx, transform: (T, Ctx) -> $R) => generator( &.{ iterator = it, transform = transform, ctx = ctx }, - mi => { - value, cont := next(mi.iterator); - if cont { - return mi.transform(value, mi.ctx), true; - } - - return .{}, false; + (mi: $C) -> ? R { + v := next(mi.iterator); + return switch v { + case .Some as v => Optional.make(mi.transform(v, mi.ctx)); + case .None => .None; + }; }, mi => { close(mi.iterator); }) -#doc """ - Transforms every value that comes out of an iterator - using the transform function into a new iterator, from - which subsequent values will be output. - - iter.flat_map(iter.as_iter(1 .. 5), x => iter.as_iter(1 .. x+1)) - // 1, 1, 2, 1, 2, 3, 1, 2, 3, 4 -""" +/// Transforms every value that comes out of an iterator +/// using the transform function into a new iterator, from +/// which subsequent values will be output. +/// +/// iter.flat_map(iter.as_iter(1 .. 5), x => iter.as_iter(1 .. 
x+1)) +/// // 1, 1, 2, 1, 2, 3, 1, 2, 3, 4 flat_map :: #match #local {} #overload @@ -268,21 +252,21 @@ flat_map :: (it: Iterator($T), transform: (T) -> Iterator($R)) => while true { if mi.get_new_inner { mi.get_new_inner = false; - t, outer := next(mi.iterator); - if !outer do break; - - mi.inner_iter = mi.transform(t); + switch next(mi.iterator) { + case .None do break break; + case .Some as t { + mi.inner_iter = mi.transform(t); + } + } } - value, cont := next(mi.inner_iter); - if cont { - return value, true; - } + value := next(mi.inner_iter); + if value do return value; mi.get_new_inner = true; } - return .{}, false; + return .None; }, mi => { close(mi.iterator); }) @@ -296,21 +280,21 @@ flat_map :: (it: Iterator($T), ctx: $Ctx, transform: (T, Ctx) -> Iterator($R)) = while true { if mi.get_new_inner { mi.get_new_inner = false; - t, outer := next(mi.iterator); - if !outer do break; - - mi.inner_iter = mi.transform(t, mi.ctx); + switch next(mi.iterator) { + case .None do break break; + case .Some as t { + mi.inner_iter = mi.transform(t, mi.ctx); + } + } } - value, cont := next(mi.inner_iter); - if cont { - return value, true; - } + value := next(mi.inner_iter); + if value do return value; mi.get_new_inner = true; } - return .{}, false; + return .None; }, mi => { close(mi.iterator); }) @@ -318,7 +302,7 @@ flat_map :: (it: Iterator($T), ctx: $Ctx, transform: (T, Ctx) -> Iterator($R)) = -#doc "Only yields the first `count` values, then closes." +/// Only yields the first `count` values, then closes. take :: (it: Iterator($T), count: u32) -> Iterator(T) { return generator( &.{ iterator = it, remaining = count }, @@ -329,42 +313,46 @@ take :: (it: Iterator($T), count: u32) -> Iterator(T) { return next(ti.iterator); } - return .{}, false; + return .None; }, ti => { close(ti.iterator); }); } -#doc "Yields values while the predicate returns true." +/// Yields values while the predicate returns true. 
take_while :: (it: Iterator($T), predicate: (T) -> bool) -> Iterator(T) { return generator( &.{ iterator = it, predicate = predicate }, ti => { - value, cont := next(ti.iterator); - if !cont do return value, false; + value := next(ti.iterator); + if value { + if ti.predicate(value->unwrap()) { + return value; + } + } - return value, ti.predicate(value); + return .None; }, ti => { close(ti.iterator); }); } -#doc "Discards the first `count` values and yields all remaining values." +/// Discards the first `count` values and yields all remaining values. skip :: (it: Iterator($T), count: u32) -> Iterator(T) { return generator( &.{ iterator = it, to_skip = count, skipped = false }, - si => { + (si: $C) -> ? T { while !si.skipped && si.to_skip > 0 { si.to_skip -= 1; - value, cont := next(si.iterator); + value := next(si.iterator); - if !cont { + if !value { si.skipped = true; - return value, false; + return .None; } } @@ -375,7 +363,7 @@ skip :: (it: Iterator($T), count: u32) -> Iterator(T) { } -#doc "Discards values while the predicate is true, then yields all values." +/// Discards values while the predicate is true, then yields all values. skip_while :: #match #local {} #overload @@ -383,18 +371,18 @@ skip_while :: (it: Iterator($T), predicate: (T) -> bool) -> Iterator(T) { return generator( &.{ iterator = it, predicate = predicate, skipped = false }, - si => { + (si: $C) -> ? 
T { while !si.skipped { - value, cont := next(si.iterator); + value := next(si.iterator); - if !cont { + if !value { si.skipped = true; - return value, false; + return .None; } - if !si.predicate(value) { + if !si.predicate(value->unwrap()) { si.skipped = true; - return value, true; + return value; } } @@ -411,16 +399,16 @@ skip_while :: (it: Iterator($T), ctx: $Ctx, predicate: (T, Ctx) -> bool) -> Iter si => { while !si.skipped { - value, cont := next(si.iterator); + value := next(si.iterator); - if !cont { + if !value { si.skipped = true; - return value, false; + return .None; } - if !si.predicate(value, si.ctx) { + if !si.predicate(value->unwrap(), si.ctx) { si.skipped = true; - return value, true; + return value; } } @@ -431,48 +419,48 @@ skip_while :: (it: Iterator($T), ctx: $Ctx, predicate: (T, Ctx) -> bool) -> Iter } -#doc """ - Combines two iterators into one by yielding a Pair of - the value from each of the iterators. -""" +/// Combines two iterators into one by yielding a Pair of +/// the value from each of the iterators. zip :: (left_iterator: Iterator($T), right_iterator: Iterator($R)) -> Iterator(Pair(T, R)) { return generator( &.{ left_iter = left_iterator, right_iter = right_iterator }, zi => { - v1, cont1 := next(zi.left_iter); - v2, cont2 := next(zi.right_iter); + v1 := next(zi.left_iter); + v2 := next(zi.right_iter); - return Pair.make(v1, v2), cont1 && cont2; + if v1 && v2 { + return Optional.make(Pair.make(v1->unwrap(), v2->unwrap())); + } + + return .None; }, zi => { close(zi.left_iter); close(zi.right_iter); }); } -#doc """ - Filters and maps at the same time. - - If the provided function returns a None variant of Optional, - then the entry is discarded. - - If the provided function returns `Some(x)`, then `x` is yielded. -""" +/// Filters and maps at the same time. +/// +/// If the provided function returns a None variant of Optional, +/// then the entry is discarded. +/// +/// If the provided function returns `Some(x)`, then `x` is yielded. 
flatten :: (i: Iterator($T), f: (T) -> ? $R) -> Iterator(R) { return generator( &.{ i = i, f = f }, fi => { while true { - v, cont := next(fi.i); - if !cont do break; + v := next(fi.i); + if !v do break; - v2 := fi.f(v); + v2 := v->and_then(fi.f); if v2 { - return v2->unwrap(), true; + return v2; } } - return .{}, false; + return .None; }, fi => { close(fi.i); } @@ -480,10 +468,8 @@ flatten :: (i: Iterator($T), f: (T) -> ? $R) -> Iterator(R) { } -#doc """ - Combines iterators by first yielding all values from - one, then yielding all values from the next, and so on. -""" +/// Combines iterators by first yielding all values from +/// one, then yielding all values from the next, and so on. concat :: (iters: ..Iterator($T)) -> Iterator(T) { return generator( &.{ @@ -494,15 +480,13 @@ concat :: (iters: ..Iterator($T)) -> Iterator(T) { c => { while c.idx < c.iters.count { curr_iter := c.iters[c.idx]; - value, valid := next(curr_iter); - if valid { - return value, true; - } + value := next(curr_iter); + if value do return value; c.idx += 1; } - return .{}, false; + return .None; }, c => { @@ -512,20 +496,20 @@ concat :: (iters: ..Iterator($T)) -> Iterator(T) { }); } -#doc "Yields the same value indefinitely. Useful with `iter.zip`." +/// Yields the same value indefinitely. Useful with `iter.zip`. const :: (value: $T) -> Iterator(T) { - return generator(&.{ v = value }, c => (c.v, true)); + return generator(&.{ v = value }, c => Optional.make(c.v)); } -#doc "Yields a single value, then stops." +/// Yields a single value, then stops. 
single :: (value: $T, dispose: (T) -> void = null_proc) -> Iterator(T) { return generator(&.{ v = value, yielded = false, dispose = dispose }, c => { if !c.yielded { c.yielded = true; - return c.v, true; + return Optional.make(c.v); } - return .{}, false; + return .None; }, c => { if c.dispose != null_proc { c.dispose(c.v); @@ -534,11 +518,9 @@ single :: (value: $T, dispose: (T) -> void = null_proc) -> Iterator(T) { } -#doc """ - Yields a value that contains: - 1) the value from the iterator, - 2) an incrementing integer. -""" +/// Yields a value that contains: +/// 1) the value from the iterator, +/// 2) an incrementing integer. enumerate :: #match #local {} #overload @@ -551,13 +533,14 @@ enumerate :: (it: Iterator($T), start_index: i32 = 0) -> Iterator(Enumeration_Va &.{ iterator = it, current_index = start_index }, ec => { - value, cont := next(ec.iterator); - if cont { + value := next(ec.iterator); + if value { defer ec.current_index += 1; - return Enumeration_Value(typeof value).{ ec.current_index, value }, true; + return Enumeration_Value(T).{ ec.current_index, value->unwrap() } + |> Optional.make(); } - return .{}, false; + return .None; }, ec => { close(ec.iterator); }); @@ -570,34 +553,6 @@ enumerate :: (it: Iterator($T), start_index: i32 = 0) -> Iterator(Enumeration_Va -#doc """ - Extract the next value out of an iterator. Closes it when - the iterator is out of values, if no_close is false. -""" -take_one :: (it: Iterator($T), no_close := false) -> (T, bool) { - ret, cont := next(it); - if !cont && !no_close { close(it); } - return ret, cont; -} - -#doc """ - Macro that allows you to extract elements from an iterator in a simple way: - - value: i32; - iterator: Iterator(i32) = ...; - - if [](value) << iterator { - ...iterater closed... 
- } -""" -#operator << macro (dest: Code, it: Iterator($T)) -> bool { - take_one :: take_one - - cont: bool; - (#unquote dest), cont = take_one(it); - return !cont; -} - // // Iterator creations @@ -608,16 +563,14 @@ take_one :: (it: Iterator($T), no_close := false) -> (T, bool) { #overload as_iter :: from_array -#doc """ - `from_array` has two almost identical implementations, - but the details are important here. Normally, `from_array` - returns an iterator by value, unless the array is of - structures, then it returns an iterator by pointer. - This seems weird, but in practice it is closer to what - you want, as you don't want to have to copy every structure - out of the array. While for primitives, you don't want to - dereference it everywhere. -""" +/// `from_array` has two almost identical implementations, +/// but the details are important here. Normally, `from_array` +/// returns an iterator by value, unless the array is of +/// structures, then it returns an iterator by pointer. +/// This seems weird, but in practice it is closer to what +/// you want, as you don't want to have to copy every structure +/// out of the array. While for primitives, you don't want to +/// dereference it everywhere. 
from_array :: #match #local {} #overload @@ -627,10 +580,10 @@ from_array :: (arr: [] $T/type_is_struct) => generator( ctx => { if ctx.current < ctx.count { defer ctx.current += 1; - return &ctx.data[ctx.current], true; + return &ctx.data[ctx.current] |> Optional.make(); } - return null, false; + return .None; } ); @@ -641,10 +594,10 @@ from_array :: (arr: [] $T, by_pointer: bool) => generator( ctx => { if ctx.current < ctx.count { defer ctx.current += 1; - return &ctx.data[ctx.current], true; + return &ctx.data[ctx.current] |> Optional.make(); } - return null, false; + return .None; } ); @@ -655,18 +608,16 @@ from_array :: (arr: [] $T) => generator( ctx => { if ctx.current < ctx.count { defer ctx.current += 1; - return ctx.data[ctx.current], true; + return ctx.data[ctx.current] |> Optional.make(); } - return .{}, false; + return .None; } ); -#doc """ - Iterators created from pointers to dynamic arrays are - special, because they support the #remove directive. -""" +/// Iterators created from pointers to dynamic arrays are +/// special, because they support the #remove directive. #local generic_dynamic_array_as_iter :: (x: &[..] $T, $access: Code, $return_type: type_expr) => { Context :: struct (T: type_expr) { @@ -680,10 +631,10 @@ generic_dynamic_array_as_iter :: (x: &[..] $T, $access: Code, $return_type: type next :: (use _: &Context($T), $access: Code) => { if current < arr.count { defer current += 1; - return (#unquote access), true; + return (#unquote access) |> Optional.make(); } else { - return .{}, false; + return .None; } } @@ -715,26 +666,47 @@ as_iter :: macro (x: &[..] $T, by_pointer: bool) => { return G(x, [](&arr.data[current]), Iterator(&T)); } - - #overload as_iter :: (r: range) => generator( &.{ r = r, v = r.low }, - ctx => { + (ctx: $C) -> ? 
i32 { if ctx.r.step > 0 { if ctx.v >= ctx.r.high { - return 0, false; + return .None; } else { defer ctx.v += ctx.r.step; - return ctx.v, true; + return ctx.v; } } else { if ctx.v < ctx.r.high { - return 0, false; + return .None; + } else { + defer ctx.v += ctx.r.step; + return ctx.v; + } + } + }); + + +#overload +as_iter :: (r: range64) => generator( + &.{ r = r, v = r.low }, + (ctx: $C) -> ? i64 { + if ctx.r.step > 0 { + if ctx.v < ctx.r.high { + defer ctx.v += ctx.r.step; + return ctx.v; } else { + return .None; + } + + } else { + if ctx.v >= ctx.r.high { defer ctx.v += ctx.r.step; - return ctx.v, true; + return ctx.v; + } else { + return .None; } } }); @@ -762,11 +734,9 @@ find :: (it: Iterator($T), predicate: (T) -> bool) -> ? T { } -#doc """ - Incremently calls `combine` on the yielded value and the - accumulated value, producing a new accumulated value. Returns - the final accumulated value. -""" +/// Incremently calls `combine` on the yielded value and the +/// accumulated value, producing a new accumulated value. Returns +/// the final accumulated value. fold :: #match #local {} #overload @@ -784,11 +754,9 @@ fold :: (it: Iterator($T), initial_value: $R, combine: (T, R) -> R) -> R { return result; } -#doc """ - Incremently calls `combine` on the yielded value and the - accumulated value, producing a new accumulated value. Returns - the final accumulated value. -""" +/// Incremently calls `combine` on the yielded value and the +/// accumulated value, producing a new accumulated value. Returns +/// the final accumulated value. fold1 :: #match #local {} #overload @@ -797,9 +765,10 @@ fold1 :: macro (it: $T/Iterable, combine: $S) => #overload fold1 :: (it: Iterator($T), combine: (T, T) -> T) -> ? 
T { - result, valid := next(it); - if !valid do return .None; + maybe_result := next(it); + if !maybe_result do return .None; + result := maybe_result->unwrap(); for value in it { result = combine(value, result); } @@ -808,7 +777,69 @@ fold1 :: (it: Iterator($T), combine: (T, T) -> T) -> ? T { } -#doc "Returns how many times the `cond` was true." + +/// +scan :: #match #local {} + +#overload +scan :: macro (it: $T/Iterable, init: $R, combine: $S) => + #this_package.scan(#this_package.as_iter(it), init, combine) + +#overload +scan :: (it: Iterator($T), initial_value: $R, combine: (T, R) -> R) -> Iterator(R) { + return generator( + &.{ value = initial_value, combine = combine, iterator = it } + + (ctx: &$C) -> ? R { + switch next(ctx.iterator) { + case .None do return .None + case .Some as yielded { + ctx.value = ctx.combine(yielded, ctx.value) + return ctx.value + } + } + } + + (ctx: &$C) { + close(ctx.iterator) + } + ) +} + + +/// +scan1 :: #match #local {} + +#overload +scan1 :: macro (it: $T/Iterable, combine: $S) => + #this_package.scan1(#this_package.as_iter(it), combine) + +#overload +scan1 :: (it: Iterator($T), combine: (T, T) -> T) -> Iterator(T) { + return generator( + &.{ value = next(it), combine = combine, iterator = it } + + (ctx: &$C) -> ? T { + if !ctx.value do return .None + + defer { + ctx.value = switch next(ctx.iterator) { + case .None => (? T).{ None = .{} } + case .Some as yielded => ctx.combine(yielded, ctx.value!) + } + } + + return ctx.value + } + + (ctx: &$C) { + close(ctx.iterator) + } + ) +} + + +/// Returns how many times the `cond` was true. count :: #match #local {} #overload @@ -824,7 +855,7 @@ count :: (it: Iterator($T), cond: (T) -> bool) -> i32 { -#doc "Returns if `cond` returned true for *any* yielded value." +/// Returns if `cond` returned true for *any* yielded value. 
some :: #match #local {} #overload @@ -838,7 +869,7 @@ some :: (it: Iterator($T), cond: (T) -> bool) -> bool { } -#doc "Returns if `cond` returned true for *all* yielded values." +/// Returns if `cond` returned true for *all* yielded values. every :: #match #local {} #overload @@ -851,7 +882,7 @@ every :: (it: Iterator($T), cond: (T) -> bool) -> bool { return true; } -#doc "Returns the sum of all yield values, using the `+` operator." +/// Returns the sum of all yield values, using the `+` operator. sum :: #match #local {} #overload @@ -870,10 +901,8 @@ sum :: (it: Iterator($T)) -> T { } -#doc """ - Places all yielded values into a dynamically allocated array, - using the allocator provided (context.allocator by default). -""" +/// Places all yielded values into a dynamically allocated array, +/// using the allocator provided (context.allocator by default). to_array :: (it: Iterator($T), allocator := context.allocator) -> [..] T { arr := array.make(T, allocator=allocator); for v in it do array.push(&arr, v); @@ -881,8 +910,8 @@ to_array :: (it: Iterator($T), allocator := context.allocator) -> [..] T { return arr; } -#doc """ -""" +/// Places all yielded values into a Map, with the `first` member +/// being the key, and the `second` member being the value. to_map :: (it: Iterator(Pair($K, $V)), allocator := context.allocator) -> Map(K, V) { m := builtin.make(Map(K, V), allocator=allocator); for p in it { @@ -891,27 +920,23 @@ to_map :: (it: Iterator(Pair($K, $V)), allocator := context.allocator) -> Map(K, return m; } -#doc """ - Collects elements into an array, or a map, depending on if the - iterator produces a Pair(K, V) or not. -""" +/// Collects elements into an array, or a map, depending on if the +/// iterator produces a Pair(K, V) or not. collect :: #match { to_array } -#doc """ - Produces an iterator that first yields all values from the - first iterable, combined with the first yield value from the - second iterable. 
Then, steps the second iterable, and repeats. - - For example, - - iter.prod(1 .. 4, 1 .. 3) - - Would yield: - (1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2) -""" +/// Produces an iterator that first yields all values from the +/// first iterable, combined with the first yield value from the +/// second iterable. Then, steps the second iterable, and repeats. +/// +/// For example, +/// +/// iter.prod(1 .. 4, 1 .. 3) +/// +/// Would yield: +/// (1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2) prod :: #match #local {} #overload @@ -921,7 +946,7 @@ prod :: macro (x: $I/Iterable, y: $I2/Iterable) => { #overload prod :: (x: $I1/Iterable, y_iter: Iterator($Y)) => { - y_val, _ := take_one(y_iter); + y_val := next(y_iter) return generator( &.{ @@ -933,39 +958,48 @@ prod :: (x: $I1/Iterable, y_iter: Iterator($Y)) => { }, ctx => { - x_val, cont := take_one(ctx.x_iter); - if cont { - return Pair.make(x_val, ctx.y_val), true; + switch ctx.y_val { + case .Some as y { + next(ctx.x_iter)->with([x] { + return Optional.make(Pair.make(x, y)) + }) + } + + case .None do return .None } - ctx.y_val, cont = take_one(ctx.y_iter); - if !cont do return .{}, false; + switch next(ctx.y_iter) { + case .None do return .None - ctx.x_iter = as_iter(ctx.x); - x_val, cont = take_one(ctx.x_iter); - if !cont do return .{}, false; + case .Some as new_y_val { + ctx.y_val = new_y_val - return Pair.make(x_val, ctx.y_val), true; + close(ctx.x_iter) + ctx.x_iter = as_iter(ctx.x) + x_val := next(ctx.x_iter) + if !x_val do return .None + + return Optional.make(Pair.make(x_val!, ctx.y_val!)) + } + } } - ); + ) } -#doc """ - Simple iterator comprehensions, in the same vein - as Pythons comprehension syntax. - - Python: - results = [it * 2 for it in [1, 2, 3, 4, 5]] - Onyx: - results := iter.comp(u32.[1, 2, 3, 4, 5], [it](it * 2)); -""" +/// Simple iterator comprehensions, in the same vein +/// as Pythons comprehension syntax. 
+/// +/// Python: +/// results = [it * 2 for it in [1, 2, 3, 4, 5]] +/// Onyx: +/// results := iter.comp(u32.[1, 2, 3, 4, 5], [it](it * 2)); comp :: #match #local {} #overload comp :: macro (i: Iterator(&$V), value: Code) => { it: V; - a := make([..] typeof #unquote value); + a := make([..] typeof #unquote value(it)); for __it in i { it := *__it; @@ -977,7 +1011,7 @@ comp :: macro (i: Iterator(&$V), value: Code) => { #overload comp :: macro (i: Iterator($V), value: Code) => { it: V; - a := make([..] typeof #unquote value); + a := make([..] typeof #unquote value(it)); for i do a << (#unquote value(it)); return a; @@ -988,18 +1022,16 @@ comp :: macro (i: $I/Iterable, value: Code) => #this_package.comp(#this_package.as_iter(i), value); -#doc """ - Using the polymorph solving system, you can write type - free versions of arbitrary iterators. This is used - heavily by many of the functions defined above. - - Maybe at some point an alternate allocator would be good - for this? For now, I think the temporary allocator is sufficient. -""" +/// Using the polymorph solving system, you can write type +/// free versions of arbitrary iterators. This is used +/// heavily by many of the functions defined above. +/// +/// Maybe at some point an alternate allocator would be good +/// for this? For now, I think the temporary allocator is sufficient. generator :: #match #local {} #overload -generator :: (ctx: &$Ctx, gen: (&Ctx) -> ($T, bool)) -> Iterator(T) { +generator :: (ctx: &$Ctx, gen: (&Ctx) -> ? $T) -> Iterator(T) { v := raw_alloc(context.temp_allocator, sizeof Ctx); core.memory.copy(v, ctx, sizeof Ctx); @@ -1010,7 +1042,7 @@ generator :: (ctx: &$Ctx, gen: (&Ctx) -> ($T, bool)) -> Iterator(T) { } #overload -generator :: (ctx: &$Ctx, gen: (&Ctx) -> ($T, bool), close: (&Ctx) -> void) -> Iterator(T) { +generator :: (ctx: &$Ctx, gen: (&Ctx) -> ? 
$T, close: (&Ctx) -> void) -> Iterator(T) { v := raw_alloc(context.temp_allocator, sizeof Ctx); core.memory.copy(v, ctx, sizeof Ctx); @@ -1024,14 +1056,55 @@ generator :: (ctx: &$Ctx, gen: (&Ctx) -> ($T, bool), close: (&Ctx) -> void) -> I generator_no_copy :: #match #local {} #overload -generator_no_copy :: (ctx: &$Ctx, gen: (&Ctx) -> ($T, bool)) => +generator_no_copy :: (ctx: &$Ctx, gen: (&Ctx) -> ? $T) => Iterator(T).{ ctx, gen } #overload -generator_no_copy :: (ctx: &$Ctx, gen: (&Ctx) -> ($T, bool), close: (&Ctx) -> void) => +generator_no_copy :: (ctx: &$Ctx, gen: (&Ctx) -> ? $T, close: (&Ctx) -> void) => Iterator(T).{ ctx, gen, close } +/// Groups like elements together using the provided comparison function. +/// `cmp` should return `true` if the two elements are equal. +/// The items should be sorted in such a way that the equal items appear next to each other. +group_by :: (it: Iterator($T), cmp: (T, T) -> bool) -> Iterator(Pair(T, Iterator(T))) { + return generator( + &.{ outer_iter = it, cmp = cmp, key_item = next(it), yielded_key = false } + + (ctx: &$Ctx) -> ? Pair(T, Iterator(T)) { + if !ctx.key_item do return .None + + ctx.yielded_key = false + + return Pair.make( + ctx.key_item->unwrap(), + generator_no_copy(ctx, ctx => { + if !ctx.yielded_key { + ctx.yielded_key = true + return ctx.key_item + } + + switch next(ctx.outer_iter) { + case .None { + ctx.key_item = .None + return .None + } + case .Some as next_item { + if ctx.cmp(ctx.key_item->unwrap(), next_item) { + return next_item + } + + ctx.key_item = next_item + return .None + } + } + }) + ) + } + ) +} + + #if runtime.Multi_Threading_Enabled { #local sync :: core.sync @@ -1053,16 +1126,15 @@ generator_no_copy :: (ctx: &$Ctx, gen: (&Ctx) -> ($T, bool), close: (&Ctx) -> vo ended := false; } - next :: (use c: &Context($T)) -> (T, bool) { - if ended do return .{}, false; + next :: (use c: &Context($T)) -> ? 
T { + if ended do return .None; sync.scoped_mutex(&mutex); - if v, success := take_one(iterator); !success { + v := iterator.next(iterator.data); + if !v { ended = true; - return v, false; - } else { - return v, true; } + return v; } close :: (use c: &Context($T)) { @@ -1082,14 +1154,12 @@ generator_no_copy :: (ctx: &$Ctx, gen: (&Ctx) -> ($T, bool), close: (&Ctx) -> vo return .{c, #solidify next {T=it.Iter_Type}, #solidify close {T=it.Iter_Type}}; } - #doc """ - Allows you to easily write a parallelized for-loop over an iterator. - For example, - - iter.parallel_for(1 .. 100, 4, &.{}) { - printf("Thread {} has {}!\n", context.thread_id, it); - } - """ + /// Allows you to easily write a parallelized for-loop over an iterator. + /// For example, + /// + /// iter.parallel_for(1 .. 100, 4, &.{}) { + /// printf("Thread {} has {}!\n", context.thread_id, it); + /// } parallel_for :: #match #local {} #overload diff --git a/core/container/list.onyx b/core/container/list.onyx index 0e8d9bb7d..ec091de0b 100644 --- a/core/container/list.onyx +++ b/core/container/list.onyx @@ -16,19 +16,17 @@ List :: struct (Elem_Type: type_expr) { last: &ListElem(Elem_Type) = null; } -#inject List { - free :: free - push_end :: push_end - push_begin :: push_begin - pop_end :: pop_end - pop_begin :: pop_begin - count :: count - at :: at - contains :: contains - fold :: fold - map :: map - as_iter :: as_iter -} +List.free :: free +List.push_end :: push_end +List.push_begin :: push_begin +List.pop_end :: pop_end +List.pop_begin :: pop_begin +List.count :: count +List.at :: at +List.contains :: contains +List.fold :: fold +List.map :: map +List.as_iter :: as_iter make :: ($T: type_expr, allocator := context.allocator) -> List(T) { return .{ allocator = allocator }; @@ -201,10 +199,10 @@ as_iter :: (list: &List) => core.iter.generator(&.{current = list.first}, (ctx) => { if ctx.current != null { defer ctx.current = ctx.current.next; - return ctx.current.data, true; + return 
Optional.make(ctx.current.data); } - return .{}, false; + return .None; }); #overload diff --git a/core/container/map.onyx b/core/container/map.onyx index ca456859f..d5a05d1a1 100644 --- a/core/container/map.onyx +++ b/core/container/map.onyx @@ -1,7 +1,6 @@ package core.map use core -use core.array use core.hash use core.memory use core.math @@ -10,11 +9,9 @@ use core.conv use core {Optional} use core.intrinsics.onyx { __initialize } -#doc """ - Map is a generic hash-map implementation that uses chaining. - Values can be of any type. Keys must of a type that supports - the core.hash.hash, and the '==' operator. -""" +/// Map is a generic hash-map implementation that uses chaining. +/// Values can be of any type. Keys must of a type that supports +/// the core.hash.hash, and the '==' operator. @conv.Custom_Format.{ #solidify format_map {K=Key_Type, V=Value_Type} } Map :: struct (Key_Type: type_expr, Value_Type: type_expr) where ValidKey(Key_Type) { allocator : Allocator; @@ -41,85 +38,72 @@ Map :: struct (Key_Type: type_expr, Value_Type: type_expr) where ValidKey(Key_Ty { t == t } -> bool; } -#inject Map { - init :: init - has :: has - get :: get - get_ptr :: get_ptr - get_opt :: get_opt - get_ptr_or_create :: get_ptr_or_create - put :: put - delete :: delete - update :: update - clear :: clear - empty :: empty - literal :: literal - as_iter :: as_iter -} -#inject builtin { - Map :: Map; -} +builtin.Map :: Map -#doc """ - Allows for creation of a Map using make(). - - m := make(Map(str, i32)); -""" +/// Allows for creation of a Map using make(). +/// +/// m := make(Map(str, i32)); #overload __make_overload :: macro (x: &Map($K, $V), allocator := context.allocator) => - #this_package.make(K, V, allocator); + #this_package.Map.make(K, V, allocator); -#doc """ - Creates and initializes a new map using the types provided. 
-""" -make :: macro ($Key: type_expr, $Value: type_expr, allocator := context.allocator) -> Map(Key, Value) { +/// Creates and initializes a new map using the types provided. +Map.make :: macro ($Key: type_expr, $Value: type_expr, allocator := context.allocator) -> Map(Key, Value) { map : Map(Key, Value); - #this_package.init(&map, allocator); + #this_package.Map.init(&map, allocator); return map; } -#doc "Initializes a map." -init :: (map: &Map($K, $V), allocator := context.allocator) { +/// Initializes a map. +Map.init :: (map: &Map($K, $V), allocator := context.allocator) { __initialize(map); map.allocator = allocator; map.hashes = builtin.make([] u32, 8, allocator=allocator); - array.fill(map.hashes, -1); + Array.fill(map.hashes, -1); - array.init(&map.entries, allocator=allocator); + Array.init(&map.entries, allocator=allocator); } -#doc "Allows for deletion of a Map using `delete(&map)`." -#match builtin.delete core.map.free +// Allows for deletion of a Map using `delete(&map)`. +#overload +builtin.delete :: Map.free -#doc """ - Destroys a map and frees all memory. -""" -free :: (use map: &Map) { - if hashes.data != null do memory.free_slice(&hashes, allocator=allocator); - if entries.data != null do array.free(&entries); +/// Destroys a map and frees all memory. +Map.free :: (use map: &Map) { + if hashes.data != null do Slice.free(&hashes, allocator=allocator); + if entries.data != null do Array.free(&entries); } -#doc """ - Shallow copies a map using the allocator provided if one is provided, or the allocator on the old map otherwise. -""" -copy :: (oldMap: &Map, allocator: ? Allocator = .None) -> Map(oldMap.Key_Type, oldMap.Value_Type) { +/// Shallow copies a map using the allocator provided if one is provided, or the allocator on the old map otherwise. +Map.copy :: #match #local {} + +#overload +Map.copy :: (oldMap: &Map, allocator: ? Allocator = .None) -> Map(oldMap.Key_Type, oldMap.Value_Type) { newMap: typeof *oldMap; newMap.allocator = allocator ?? 
oldMap.allocator; - newMap.hashes = array.copy(oldMap.hashes, newMap.allocator); - newMap.entries = array.copy(&oldMap.entries, newMap.allocator); + newMap.hashes = Array.copy(oldMap.hashes, newMap.allocator); + newMap.entries = Array.copy(&oldMap.entries, newMap.allocator); return newMap; } -#doc """ - Sets the value at the specified key, or creates a new entry - if the key was not already present. -""" -put :: (use map: &Map, key: map.Key_Type, value: map.Value_Type) { +#overload +Map.copy :: (oldMap: Map, allocator: ? Allocator = .None) -> Map(oldMap.Key_Type, oldMap.Value_Type) { + newMap: typeof oldMap + newMap.allocator = allocator ?? oldMap.allocator + newMap.hashes = Array.copy(oldMap.hashes, newMap.allocator) + newMap.entries = Array.copy(&oldMap.entries, newMap.allocator) + + return newMap +} + +/// Sets the value at the specified key, or creates a new entry +/// if the key was not already present. +Map.put :: (use map: &Map, key: map.Key_Type, value: map.Value_Type) { lr := lookup(map, key); if lr.entry_index >= 0 { @@ -133,45 +117,34 @@ put :: (use map: &Map, key: map.Key_Type, value: map.Value_Type) { if full(map) do grow(map); } -#doc """ - Returns true if the map contains the key. -""" -has :: (use map: &Map, key: map.Key_Type) -> bool { +/// Returns true if the map contains the key. +Map.has :: (use map: &Map, key: map.Key_Type) -> bool { lr := lookup(map, key); return lr.entry_index >= 0; } -#doc """ - Returns the value at the specified key, or map.default_value if - the key is not present. - - This is subject to change with the addition of Optional to the - standard library. -""" -get :: (use map: &Map, key: map.Key_Type) -> ? map.Value_Type { +/// Returns the value at the specified key, or `.None` if the value +/// is not present +Map.get :: (use map: &Map, key: map.Key_Type) -> ? 
map.Value_Type { lr := lookup(map, key); if lr.entry_index >= 0 do return entries[lr.entry_index].value; return .{}; } -#doc """ - Returns a pointer to the value at the specified key, or null if - the key is not present. -""" -get_ptr :: (use map: &Map, key: map.Key_Type) -> &map.Value_Type { +/// Returns a pointer to the value at the specified key, or null if +/// the key is not present. +Map.get_ptr :: (use map: &Map, key: map.Key_Type) -> &map.Value_Type { lr := lookup(map, key); if lr.entry_index >= 0 do return &entries[lr.entry_index].value; return null; } -#doc """ - Returns a pointer to the value at the specified key. If the key - is not in the map, a new value is created and inserted, then the - pointer to that value is returned. -""" -get_ptr_or_create :: (use map: &Map, key: map.Key_Type) -> &map.Value_Type { +/// Returns a pointer to the value at the specified key. If the key +/// is not in the map, a new value is created and inserted, then the +/// pointer to that value is returned. +Map.get_ptr_or_create :: (use map: &Map, key: map.Key_Type) -> &map.Value_Type { lr := lookup(map, key); if lr.entry_index < 0 { put(map, key, .{}); @@ -181,22 +154,20 @@ get_ptr_or_create :: (use map: &Map, key: map.Key_Type) -> &map.Value_Type { return &entries[lr.entry_index].value; } -#doc """ - **DEPRECATED** - Use `map.get` instead. - - Returns an Optional of the value at the specified key. The Optional - has a value if the key is present, otherwise the optional does not - have a value. -""" -get_opt :: (use map: &Map, key: map.Key_Type) -> ?map.Value_Type { +/// **DEPRECATED** - Use `map.get` instead. +/// +/// Returns an Optional of the value at the specified key. The Optional +/// has a value if the key is present, otherwise the optional does not +/// have a value. 
+Map.get_opt :: (use map: &Map, key: map.Key_Type) -> ?map.Value_Type { lr := lookup(map, key); if lr.entry_index >= 0 do return Optional.make(entries[lr.entry_index].value); return .{}; } -#doc "Removes an entry from the map." -delete :: (use map: &Map, key: map.Key_Type) { +/// Removes an entry from the map. +Map.delete :: (use map: &Map, key: map.Key_Type) { lr := lookup(map, key); if lr.entry_index < 0 do return; @@ -204,30 +175,28 @@ delete :: (use map: &Map, key: map.Key_Type) { else do entries[lr.entry_prev].next = entries[lr.entry_index].next; if lr.entry_index == entries.count - 1 { - array.pop(&entries); + Array.pop(&entries); return; } - array.fast_delete(&entries, lr.entry_index); + Array.fast_delete(&entries, lr.entry_index); last := lookup(map, entries[lr.entry_index].key); if last.entry_prev >= 0 do entries[last.entry_prev].next = lr.entry_index; else do hashes[last.hash_index] = lr.entry_index; } -#doc """ -Helper macro that finds a value by the key, and if it exists, -runs the code, providing an `it` variable that is a pointer -to the value. - - m: Map(str, i32); - m->update("test") { - *it += 10; - } -or: - m->update("test", [v](*v += 10)); -""" -update :: macro (map: ^Map, key: map.Key_Type, body: Code) { +/// Helper macro that finds a value by the key, and if it exists, +/// runs the code, providing an `it` variable that is a pointer +/// to the value. +/// +/// m: Map(str, i32); +/// m->update("test") { +/// *it += 10; +/// } +/// or: +/// m->update("test", [v](*v += 10)); +Map.update :: macro (map: ^Map, key: map.Key_Type, body: Code) { lookup_ :: lookup lr := lookup_(map, key); @@ -237,25 +206,21 @@ update :: macro (map: ^Map, key: map.Key_Type, body: Code) { } } -#doc """ - Removes all entries from the hash map. Does NOT - modify memory, so be wary of dangling pointers! -""" -clear :: (use map: &Map) { +/// Removes all entries from the hash map. Does NOT +/// modify memory, so be wary of dangling pointers! 
+Map.clear :: (use map: &Map) { for i in 0 .. hashes.count do hashes.data[i] = -1; entries.count = 0; } -#doc "Returns if the map does not contain any elements." -empty :: (use map: &Map) -> bool { +/// Returns if the map does not contain any elements. +Map.empty :: (use map: &Map) -> bool { return entries.count == 0; } -#doc """ - Helper procedure to nicely format a Map when printing. - Rarely ever called directly, instead used by conv.format_any. -""" -format_map :: (output: &conv.Format_Output, format: &conv.Format, x: &Map($K, $V)) { +/// Helper procedure to nicely format a Map when printing. +/// Rarely ever called directly, instead used by conv.format_any. +Map.format_map :: (output: &conv.Format_Output, format: &conv.Format, x: &Map($K, $V)) { if format.pretty_printing { output->write("{\n"); for& x.entries { @@ -273,15 +238,13 @@ format_map :: (output: &conv.Format_Output, format: &conv.Format, x: &Map($K, $V } } -#doc """ - Quickly create a Map with some entries. - - Map.literal(str, i32, .[ - .{ "test", 123 }, - .{ "foo", 456 }, - ]); -""" -literal :: ($Key: type_expr, $Value: type_expr, values: [] MapLiteralValue(Key, Value)) => { +/// Quickly create a Map with some entries. +/// +/// Map.literal(str, i32, .[ +/// .{ "test", 123 }, +/// .{ "foo", 456 }, +/// ]); +Map.literal :: ($Key: type_expr, $Value: type_expr, values: [] MapLiteralValue(Key, Value)) => { m := core.map.make(Key, Value); for & values { m->put(it.key, it.value); @@ -296,30 +259,28 @@ MapLiteralValue :: struct (K: type_expr, V: type_expr) { value: V; } -#doc """ - Produces an iterator that yields all values of the map, - in an unspecified order, as Map is unordered. -""" -as_iter :: (m: &Map) => +/// Produces an iterator that yields all values of the map, +/// in an unspecified order, as Map is unordered. 
+Map.as_iter :: (m: &Map) => core.iter.generator( &.{ m = m, i = 0 }, ctx => { if ctx.i < ctx.m.entries.count { defer ctx.i += 1; - return &ctx.m.entries.data[ctx.i], true; + return Optional.make(&ctx.m.entries.data[ctx.i]); } - return .{}, false; + return .None; }); // // Helper operator overloads for accessing values, accessing // values by pointer, and setting values. -#operator [] macro (map: Map($K, $V), key: K) -> ?V { return #this_package.get(&map, key); } -#operator &[] macro (map: Map($K, $V), key: K) -> &V { return #this_package.get_ptr(&map, key); } -#operator []= macro (map: Map($K, $V), key: K, value: V) { #this_package.put(&map, key, value); } +#operator [] macro (map: Map($K, $V), key: K) -> ?V { return #this_package.Map.get(&map, key); } +#operator &[] macro (map: Map($K, $V), key: K) -> &V { return #this_package.Map.get_ptr(&map, key); } +#operator []= macro (map: Map($K, $V), key: K, value: V) { #this_package.Map.put(&map, key, value); } // // Private symbols @@ -369,17 +330,34 @@ as_iter :: (m: &Map) => rehash :: (use map: &Map, new_size: i32) { memory.free_slice(&hashes, allocator); hashes = builtin.make([] u32, new_size, allocator=allocator); - array.fill(hashes, -1); - - for &entry in entries do entry.next = -1; - - index := 0; - for &entry in entries { - defer index += 1; + Array.fill(hashes, -1); + for &entry, index in entries { hash_index := entry.hash % hashes.count; entries[index].next = hashes[hash_index]; hashes[hash_index] = index; } } } + +// +// Everything below here only exists for backwards compatibility. 
+// + +make :: Map.make +init :: Map.init +free :: Map.free +copy :: Map.copy +has :: Map.has +get :: Map.get +get_ptr :: Map.get_ptr +get_opt :: Map.get_opt +get_ptr_or_create :: Map.get_ptr_or_create +put :: Map.put +delete :: Map.delete +update :: Map.update +clear :: Map.clear +empty :: Map.empty +literal :: Map.literal +as_iter :: Map.as_iter + diff --git a/core/container/optional.onyx b/core/container/optional.onyx index b6133e736..eac0e7eb8 100644 --- a/core/container/optional.onyx +++ b/core/container/optional.onyx @@ -13,247 +13,194 @@ use core // used by Map and Set in their `get_opt` function. In theory, it should // be used in many more places, instead of returning `.{}`. -#inject Optional { - #doc """ - Helper procedure for creating an Optional with a value. - Pass a type as the first argument to force the type, otherwise - the type will be inferred from the parameter type. - """ - make :: #match #locked { - ((x: $T) => (?T).{ Some = x }), - ($T: type_expr, x: T) => ((?T).{ Some = x }) - } +/// Helper procedure for creating an Optional with a value. +/// Pass a type as the first argument to force the type, otherwise +/// the type will be inferred from the parameter type. +Optional.make :: #match #locked { + ((x: $T) => (?T).{ Some = x }), + ($T: type_expr, x: T) => ((?T).{ Some = x }) +} - #doc """ - Create an empty Optional of a certain type. This procedure - is mostly useless, because you can use `.{}` in type inferred - places to avoid having to specify the type. - """ - empty :: macro (T: type_expr) => (?T).{ None = .{} }; - - #doc """ - Converts a pointer to an optional by defining `null` to be `None`, - and a non-null pointer to be `Some`. This dereferences the valid - pointer to return the data stored at the pointer's address. - """ - from_ptr :: macro (p: &$T) -> ?T { - p_ := p; - if p_ do return *p_; - return .None; - } +/// Create an empty Optional of a certain type. 
This procedure +/// is mostly useless, because you can use `.{}` in type inferred +/// places to avoid having to specify the type. +Optional.empty :: macro (T: type_expr) => (?T).{ None = .{} }; + +/// Converts a pointer to an optional by defining `null` to be `None`, +/// and a non-null pointer to be `Some`. This dereferences the valid +/// pointer to return the data stored at the pointer's address. +Optional.from_ptr :: macro (p: &$T) -> ?T { + p_ := p; + if p_ do return *p_; + return .None; +} - #doc """ - Extracts the value from the Optional, or uses a default if - no value is present. - """ - value_or :: (o: ?$T, default: T) => switch o { - case .Some as v => v; - case #default => default; - } +/// Wraps a pointer in an optional. If the pointer is null, then the optional +/// is None. If the pointer is non-null, then the optional is Some. +Optional.wrap_ptr :: macro (p: &$T) -> ?&T { + p_ := p + if p_ do return p_ + return .None +} - #doc "Clears the value in the Optional, zeroing the memory of the value." - reset :: (o: &?$T) { - *o = .None; - } +/// Extracts the value from the Optional, or uses a default if +/// no value is present. +Optional.value_or :: macro (o: ?$T, default: T) => switch o { + case .Some as v => v; + case _ => default; +} - #doc "Sets the value in the Optional." - set :: (o: &?$T, value: T) { - *o = .{ Some = value }; - } +/// Clears the value in the Optional, zeroing the memory of the value. +Optional.reset :: (o: &?$T) { + *o = .None; +} - #doc "Flattens nested optionals." - // @Bug should be able to say ? ? $T here. - flatten :: (o1: ? Optional($T)) -> ? T { - switch o1 { - case .Some as o2 { - return o2; - } +/// Sets the value in the Optional. +Optional.set :: (o: &?$T, value: T) { + *o = .{ Some = value }; +} - case .None --- +/// Flattens nested optionals. +// @Bug should be able to say ? ? $T here. +Optional.flatten :: (o1: ? Optional($T)) -> ? 
T { + switch o1 { + case .Some as o2 { + return o2; } - return .None; + case .None --- } - #doc "Monadic chaining operation." - and_then :: (o: ?$T, transform: (T) -> ?$R) -> ?R { - return switch o { - case .Some as v => transform(v); - case #default => .None; - }; - } + return .None; +} - #doc "Changes the value inside the optional, if present." - transform :: (o: ?$T, transform: (T) -> $R) -> ?R { - switch o { - case .Some as v do return .{ Some = transform(v) }; - case #default do return .None; - } - } +/// Monadic chaining operation. +Optional.and_then :: (o: ?$T, transform: (T) -> ?$R) -> ?R { + return switch o { + case .Some as v => transform(v); + case _ => .None; + }; +} - #doc """ - Like `value_or`, but instead of providing a value, you - provide a function to generate a value. - """ - or_else :: (o: ?$T, generate: () -> ?T) -> ?T { - return switch o { - case .Some => o; - case #default => generate(); - }; +/// Changes the value inside the optional, if present. +Optional.transform :: (o: ?$T, transform: (T) -> $R) -> ?R { + switch o { + case .Some as v do return .{ Some = transform(v) }; + case _ do return .None; } +} - #doc """ - Returns the value inside the optional, if there is one. - If not, an assertion is thrown and the context's assert - handler must take care of it. - """ - unwrap :: (o: ?$T) -> T { - switch o { - case .Some as v do return v; - case #default { - assert(false, "Unwrapping empty Optional."); - } - } - } +/// Like `value_or`, but instead of providing a value, you +/// provide a function to generate a value. +Optional.or_else :: (o: ?$T, generate: () -> ?T) -> ?T { + return switch o { + case .Some => o; + case _ => generate(); + }; +} - #doc """ - Returns a pointer to the value inside the optional, if there is one. - If not, an assertion is thrown and the context's assert handler must - take care of it. 
- """ - unwrap_ptr :: (o: & ?$T) -> &T { - switch o { - case .Some as &v do return v; - case #default { - assert(false, "Unwrapping empty Optional."); - } +/// Returns the value inside the optional, if there is one. +/// If not, an assertion is thrown and the context's assert +/// handler must take care of it. +Optional.unwrap :: (o: ?$T) -> T { + switch o { + case .Some as v do return v; + case _ { + panic("Unwrapping empty Optional."); + return .{}; } } +} - #doc """ - Returns the value inside the optional, if there is one. - If not, an assertion is thrown and the context's assert - handler must take care of it. - """ - expect :: (o: ?$T, message: str) -> T { - switch o { - case .Some as v do return v; - case #default { - assert(false, message); - } +/// Returns a pointer to the value inside the optional, if there is one. +/// If not, an assertion is thrown and the context's assert handler must +/// take care of it. +Optional.unwrap_ptr :: (o: & ?$T) -> &T { + switch o { + case .Some as &v do return v; + case _ { + panic("Unwrapping empty Optional."); + return .{}; } } +} - #doc """ - Returns a pointer to the value inside the optional, if there is one. - If not, an assertion is thrown and the context's assert handler must - take care of it. - """ - expect_ptr :: (o: & ?$T, message: str) -> &T { - switch o { - case .Some as &v do return v; - case #default { - assert(false, message); - } +/// Returns the value inside the optional, if there is one. +/// If not, an assertion is thrown and the context's assert +/// handler must take care of it. 
+Optional.expect :: (o: ?$T, message: str) -> T { + switch o { + case .Some as v do return v; + case _ { + panic(message); + return .{}; } } +} - or_return :: #match { - macro (o: ?$T) -> T { - switch value := o; value { - case .Some as v do return v; - case #default { - return return .{}; - } - } - }, - macro (o: ?$T, return_value: $R) -> T { - switch value := o; value { - case .Some as v do return v; - case #default { - return return return_value; - } - } - }, +/// Returns a pointer to the value inside the optional, if there is one. +/// If not, an assertion is thrown and the context's assert handler must +/// take care of it. +Optional.expect_ptr :: (o: & ?$T, message: str) -> &T { + switch o { + case .Some as &v do return v; + case _ { + panic(message); + return .{}; + } } +} - catch :: macro (o: ?$T, body: Code) -> T { +Optional.or_return :: #match { + macro (o: ?$T) -> T { switch value := o; value { case .Some as v do return v; - case .None { - #unquote body; + case _ { + return return .{}; } } - } - - with :: macro (o: ?$T, body: Code) { - switch o { - case .None ---; - case .Some as it { - #unquote body(it); + }, + macro (o: ?$T, return_value: $R) -> T { + switch value := o; value { + case .Some as v do return v; + case _ { + return return return_value; } } - } - - #doc """ - Creates a scope that the `?` operator on an Optional type can - return to, instead of returning from the enclosing function. + }, +} - Useful when chaining a bunch of operations that *could* fail, - while having a clean and easy escape hatch. +Optional.into_result :: macro (o: ?$T, err: $R) -> Result(T, R) { + switch value := o; value { + case .Some as v do return .{ Ok = v } + case _ do return .{ Err = err } + } +} - Optional.try() { - x := operation_1()?; - y := operation_2(x)?; - z := operation_3(x, y)?; - opreation_4(z); - } - println("Done"); - - In this example, if any of the operations fail, the execution - will cleanly go to `println` statement. 
- - To know when something returned `None`, you can either use the second - parameter called `catch`, which is simply a block of code to be run. - Or you can use the return result from the function as so: - - // Sadly, cannot use the nicer syntax, `try() { ... }` - completed := Optional.try(#quote { - // ... - }); - """ - try :: macro (body: Code, catch: Code = []{}) -> bool { - // - // Using a 'do'-expression block to introduce a new - // 'return' location. This way, when the code in the `body` - // does a `return return`, it will target this do-block. - _ := do -> u32 { - // - // Insert the body of interest. +Optional.catch :: macro (o: ?$T, body: Code) -> T { + switch value := o; value { + case .Some as v do return v; + case .None { #unquote body; - - // - // If execution makes it here, everything was successfuly - // and no `return return`s were encountered. Return true. - return return true; - }; - - #unquote catch; - - // - // If execution makes it here, there was an unexpected early - // return. Return false to signal the caller of this. - return false; + } } +} - - hash :: (o: ?$T/core.hash.Hashable) => switch o { - case .Some as v => core.hash.hash(v); - case #default => 0; +Optional.with :: macro (o: ?$T, body: Code) { + switch o { + case .None ---; + case .Some as it { + #unquote body(it); + } } } -#operator == (o1, o2: ?$T) -> bool { +Optional.hash :: (o: ?$T/core.hash.Hashable) => switch o { + case .Some as v => core.hash.hash(v); + case _ => 0; +} + +#operator== :: (o1, o2: ?$T) -> bool { if cast(Optional(T).tag_enum, o1) != cast(Optional(T).tag_enum, o2) do return false; if o1.tag == .None do return true; @@ -262,29 +209,31 @@ use core return v1 == v2; } -#operator ?? macro (opt: ?$T, default: T) -> T { +#operator?? :: macro (opt: ?$T, default: T) -> T { return switch value := opt; value { case .Some as v => v; - case #default => default; + case _ => default; }; } -#operator ?? macro (opt: ?$T, catch: Code) -> T { +#operator?? 
:: macro (opt: ?$T, catch: Code) -> T { switch value := opt; value { case .Some as v do return v; - case #default --- + case _ --- } #unquote catch; } -#operator ? macro (opt: ?$T) -> T { +#operator? :: macro (opt: ?$T) -> T { switch value := opt; value { case .Some as v do return v; - case #default do return return .{}; + case _ do return #from_proc .{}; } } +#operator! :: macro (o: ? $T) => o->unwrap() + #overload __implicit_bool_cast :: macro (o: ?$T) => cast(Optional(T).tag_enum, o) == .Some; diff --git a/core/container/pair.onyx b/core/container/pair.onyx index e79402d88..76f9e860d 100644 --- a/core/container/pair.onyx +++ b/core/container/pair.onyx @@ -1,26 +1,22 @@ package core -#doc """ - A `Pair` represents a pair of values of heterogenous types. - This structure does not do much on its own; however, it - is useful because provides overloads for formatting, hashing - and equality. This means you can use a `Pair(T, R)` as a key - for a Map or Set out of the box, provided T and R are hashable - and equatable. -""" +/// A `Pair` represents a pair of values of heterogenous types. +/// This structure does not do much on its own; however, it +/// is useful because provides overloads for formatting, hashing +/// and equality. This means you can use a `Pair(T, R)` as a key +/// for a Map or Set out of the box, provided T and R are hashable +/// and equatable. 
@conv.Custom_Format.{#solidify _format {First_Type=First_Type, Second_Type=Second_Type}} Pair :: struct (First_Type: type_expr, Second_Type: type_expr) { first: First_Type; second: Second_Type; } -#inject Pair { - make :: macro (x: $X, y: $Y) => #this_package.Pair(X, Y).{x, y}; +Pair.make :: macro (x: $X, y: $Y) => #this_package.Pair(X, Y).{x, y}; - _format :: (output: &conv.Format_Output, format: &conv.Format, p: &Pair($First_Type, $Second_Type)) { - conv.format(output, "({}, {})", p.first, p.second); - } +Pair._format :: (output: &conv.Format_Output, format: &conv.Format, p: &Pair($First_Type, $Second_Type)) { + conv.format(output, "({}, {})", p.first, p.second); } #overload diff --git a/core/container/result.onyx b/core/container/result.onyx index ac3426843..5d8bcefa0 100644 --- a/core/container/result.onyx +++ b/core/container/result.onyx @@ -13,165 +13,156 @@ use core.conv use core {Optional} -#doc """ - Result(T, E) is a structure that represents either an Ok value - of type T, or an Err value of type E. `status` contains either - .Ok, or .Err depending on which is currently held. -""" +/// Result(T, E) is a structure that represents either an Ok value +/// of type T, or an Err value of type E. `status` contains either +/// .Ok, or .Err depending on which is currently held. Result :: union (Ok_Type: type_expr, Err_Type: type_expr) { Err: Err_Type; Ok: Ok_Type; } -#inject Result { - #doc "Returns true if the result contains an Ok value." - is_ok :: (r: #Self) -> bool { - return switch r { - case .Ok => true; - case #default => false; - }; - } +/// Returns true if the result contains an Ok value. +Result.is_ok :: (r: #Self) -> bool { + return switch r { + case .Ok => true; + case _ => false; + }; +} - #doc "Returns true if the result contains an Err value." - is_err :: (r: #Self) -> bool { - return switch r { - case .Err => true; - case #default => false; - }; - } +/// Returns true if the result contains an Err value. 
+Result.is_err :: (r: #Self) -> bool { + return switch r { + case .Err => true; + case _ => false; + }; +} - #doc "Returns an Optional of the Ok type." - ok :: (r: #Self) -> Optional(r.Ok_Type) { - return switch r { - case .Ok as v => Optional.make(v); - case #default => .{}; - }; - } +/// Returns an Optional of the Ok type. +Result.ok :: (r: #Self) -> Optional(r.Ok_Type) { + return switch r { + case .Ok as v => Optional.make(v); + case _ => .{}; + }; +} - #doc "Returns an Optional of the Err type." - err :: (r: #Self) -> Optional(r.Err_Type) { - return switch r { - case .Err as v => Optional.make(v); - case #default => .{}; - }; - } +/// Returns an Optional of the Err type. +Result.err :: (r: #Self) -> Optional(r.Err_Type) { + return switch r { + case .Err as v => Optional.make(v); + case _ => .{}; + }; +} - #doc """ - Forcefully extracts the Ok value out of the Result. If the - result contains an Err, an assertion is thrown. - """ - unwrap :: (r: #Self) -> r.Ok_Type { - switch r { - case .Ok as v do return v; - case .Err as err { - msg := tprintf("Unwrapping Result with error '{}'.", err); - assert(false, msg); - return .{}; - } +/// Forcefully extracts the Ok value out of the Result. If the +/// result contains an Err, an assertion is thrown. +Result.unwrap :: (r: #Self) -> r.Ok_Type { + switch r { + case .Ok as v do return v; + case .Err as err { + msg := tprintf("Unwrapping Result with error '{}'.", err); + panic(msg); + return .{}; } } +} - #doc """ - Tries to extract the Ok value out of the Result. If the - result contains an Err, the empty .{} value is returned. - """ - unwrap_or_default :: (r: #Self) -> r.Ok_Type { - return switch r { - case .Ok as v => v; - case #default => .{}; - }; - } +/// Tries to extract the Ok value out of the Result. If the +/// result contains an Err, the empty .{} value is returned. 
+Result.unwrap_or_default :: (r: #Self) -> r.Ok_Type { + return switch r { + case .Ok as v => v; + case _ => .{}; + }; +} - #doc """ - Tries to extract the Ok value out of the Result. If the - result contains an Err, a custom assertion message is thrown. - """ - expect :: (r: #Self, msg: str) -> r.Ok_Type { - switch r { - case .Ok as v do return v; - case #default { - assert(false, msg); - } +/// Tries to extract the Ok value out of the Result. If the +/// result contains an Err, a custom assertion message is thrown. +Result.expect :: (r: #Self, msg: str) -> r.Ok_Type { + switch r { + case .Ok as v do return v; + case _ { + panic(msg); + return .{}; } } +} - #doc """ - Returns a new result defined by: - Ok(n) => Ok(f(n)) - Err(e) => Err(e) - """ - transform :: (r: Result($T, $E), f: (T) -> $R) -> Result(R, E) { - return switch r { - case .Ok as v => .{ Ok = f(v) }; - case .Err as v => .{ Err = v }; - }; - } +/// Returns a new result defined by: +/// Ok(n) => Ok(f(n)) +/// Err(e) => Err(e) +Result.transform :: (r: Result($T, $E), f: (T) -> $R) -> Result(R, E) { + return switch r { + case .Ok as v => Result(R, E).{ Ok = f(v) }; + case .Err as e => Result(R, E).{ Err = e }; + }; +} - #doc "Monadic chaining operation." - and_then :: (r: #Self, f: (r.Ok_Type) -> Result($R, r.Err_Type)) -> Result(R, r.Err_Type) { - return switch r { - case .Ok as v => f(v); - case .Err as v => .{ Err = v }; - }; - } +/// Monadic chaining operation. 
+Result.and_then :: (r: #Self, f: (r.Ok_Type) -> Result($R, r.Err_Type)) -> Result(R, r.Err_Type) { + return switch r { + case .Ok as v => f(v); + case .Err as v => .{ Err = v }; + }; +} + +/// If the Result contains Err, generate is called to make a value +Result.or_else :: (r: #Self, generate: () -> typeof r) => { + return switch r { + case .Ok as v => v; + case _ => generate(); + }; +} - #doc "If the Result contains Err, generate is called to make a value" - or_else :: (r: #Self, generate: () -> typeof r) => { - return switch r { - case .Ok as v => v; - case #default => generate(); - }; +/// If result contains Err, the error is returned from the enclosing +/// procedure. Otherwise, the Ok value is returned. +/// +/// f :: () -> Result(i32, str) { +/// return .{ Err = "Oh no..." }; +/// } +/// +/// g :: () -> Result(str, str) { +/// // This returns from g with the error returned from f. +/// v := f()->forward_err(); +/// println(v); +/// +/// return .{ Ok = "Success!" }; +/// } +Result.forward_err :: macro (r: Result($T, $E)) -> T { + switch res := r; res { + case .Ok as v do return v; + case .Err as v do return return .{ Err = v }; } +} - #doc """ - If result contains Err, the error is returned from the enclosing - procedure. Otherwise, the Ok value is returned. - - f :: () -> Result(i32, str) { - return .{ Err = "Oh no..." }; - } - - g :: () -> Result(str, str) { - // This returns from g with the error returned from f. - v := f()->forward_err(); - println(v); - - return .{ Ok = "Success!" }; - } - """ - forward_err :: macro (r: Result($T, $E)) -> T { - switch res := r; res { - case .Ok as v do return v; - case .Err as v do return return .{ Err = v }; - } +/// If result contains Err, the error is mapped to a new error type. 
+Result.transform_err :: macro (r: Result($T, $E), f: (E) -> $N) -> Result(T, N) { + switch res := r; res { + case .Ok as v do return .{ Ok = v }; + case .Err as v do return .{ Err = f(v) }; } +} - #doc """ - If result contains Err, the given value is returned from the - enclosing procedure. Otherwise, the Ok value is returned. - """ - or_return :: macro (r: Result($T, $E), v: $V) -> T { - switch res := r; res { - case .Ok as v do return v; - case .Err do return return v; - } +/// If result contains Err, the given value is returned from the +/// enclosing procedure. Otherwise, the Ok value is returned. +Result.or_return :: macro (r: Result($T, $E), v: $V) -> T { + switch res := r; res { + case .Ok as v do return v; + case .Err do return return v; } +} - #doc """ - If result contains Err, the given code is run. This code is - expected to either: - - Return a good value with `return` - - Return an error value with `return return` - - This procedure is subject to change. - """ - catch :: macro (r: Result($T, $E), on_err: Code) -> T { - switch res := r; res { - case .Ok as v do return v; - case .Err as err { - #unquote on_err(err); - } +/// If result contains Err, the given code is run. This code is +/// expected to either: +/// - Return a good value with `return` +/// - Return an error value with `return return` +/// +/// This procedure is subject to change. +Result.catch :: macro (r: Result($T, $E), on_err: Code) -> T { + switch res := r; res { + case .Ok as v do return v; + case .Err as err { + #unquote on_err(err); } } } @@ -179,19 +170,28 @@ Result :: union (Ok_Type: type_expr, Err_Type: type_expr) { #overload __implicit_bool_cast :: macro (r: Result($O, $E)) => cast(Result(O, E).tag_enum, r) == .Ok; +#operator! :: macro (r: Result($T, $E)) => r->unwrap() -#operator ? macro (r: Result($T, $E)) -> T { +#operator? 
:: macro (r: Result($T, $E)) -> T { switch res := r; res { case .Ok as v do return v; - case .Err as v do return return .{ Err = v }; + case .Err as v do return #from_proc .{ Err = v }; } } -#operator ?? macro (r: Result($T, $E), v: T) -> T { +#operator?? :: macro (r: Result($T, $E), v: T) -> T { return switch res := r; res { case .Ok as val => val; case .Err => v; }; } +#operator?? :: macro (r: Result($T, $E), handler: Code) -> T { + return switch res := r; res { + case .Ok as val => val; + case .Err as e { + #unquote handler(e) + } + }; +} diff --git a/core/container/set.onyx b/core/container/set.onyx index 47cbf9287..5bc42f82f 100644 --- a/core/container/set.onyx +++ b/core/container/set.onyx @@ -1,7 +1,6 @@ package core.set use core -use core.array use core.hash use core.memory use core.math @@ -28,52 +27,49 @@ Set :: struct (Elem_Type: type_expr) where SetValue(Elem_Type) { } } -#inject Set { - init :: init - free :: free - has :: has - get :: get - get_ptr :: get_ptr - get_opt :: get_opt - insert :: insert - remove :: remove - clear :: clear - empty :: empty - as_iter :: as_iter -} +builtin.Set :: Set -#inject builtin { - Set :: Set; -} -make :: ($T: type_expr, allocator := context.allocator) -> Set(T) { +Set.make :: ($T: type_expr, allocator := context.allocator) -> Set(T) { set : Set(T); - init(&set, allocator=allocator); + Set.init(&set, allocator=allocator); return set; } +Set.from :: (arr: [] $T, allocator := context.allocator) -> Set(T) { + set : Set(T) + Set.init(&set, allocator=allocator) + + for a in arr { + Set.insert(&set, a) + } + + return set +} + #overload -builtin.__make_overload :: macro (x: &Set, allocator: Allocator) => #this_package.make(x.Elem_Type, allocator = allocator); +builtin.__make_overload :: macro (x: &Set, allocator: Allocator) => + #this_package.Set.make(x.Elem_Type, allocator = allocator); -init :: (set: &Set($T), allocator := context.allocator) { +Set.init :: (set: &Set($T), allocator := context.allocator) { set.allocator = 
allocator; memory.alloc_slice(&set.hashes, 8, allocator=allocator); - array.fill(set.hashes, -1); + Array.fill(set.hashes, -1); - array.init(&set.entries, 4, allocator=allocator); + Array.init(&set.entries, 4, allocator=allocator); } -free :: (use set: &Set) { +Set.free :: (use set: &Set) { memory.free_slice(&hashes, allocator=allocator); - array.free(&entries); + Array.free(&entries); } #overload -builtin.delete :: #this_package.free +builtin.delete :: #this_package.Set.free -insert :: (use set: &Set, value: set.Elem_Type) { - if hashes.data == null do init(set); +Set.insert :: (use set: &Set, value: set.Elem_Type) { + if hashes.data == null do Set.init(set); lr := lookup(set, value); if lr.entry_index >= 0 do return; @@ -85,32 +81,32 @@ insert :: (use set: &Set, value: set.Elem_Type) { } #operator << macro (set: Set($T), value: T) { - #this_package.insert(&set, value); + #this_package.Set.insert(&set, value); } -has :: (use set: &Set, value: set.Elem_Type) -> bool { +Set.has :: (use set: &Set, value: set.Elem_Type) -> bool { lr := lookup(set, value); return lr.entry_index >= 0; } -get :: (use set: &Set, value: set.Elem_Type) -> set.Elem_Type { +Set.get :: (use set: &Set, value: set.Elem_Type) -> set.Elem_Type { lr := lookup(set, value); return entries[lr.entry_index].value if lr.entry_index >= 0 else set.Elem_Type.{}; } -get_ptr :: (use set: &Set, value: set.Elem_Type) -> &set.Elem_Type { +Set.get_ptr :: (use set: &Set, value: set.Elem_Type) -> &set.Elem_Type { lr := lookup(set, value); return (&entries[lr.entry_index].value) if lr.entry_index >= 0 else null; } -get_opt :: (use set: &Set, value: set.Elem_Type) -> ? set.Elem_Type { +Set.get_opt :: (use set: &Set, value: set.Elem_Type) -> ? 
set.Elem_Type { lr := lookup(set, value); if lr.entry_index >= 0 do entries[lr.entry_index].value; return .{}; } -remove :: (use set: &Set, value: set.Elem_Type) { +Set.remove :: (use set: &Set, value: set.Elem_Type) { lr := lookup(set, value); if lr.entry_index < 0 do return; @@ -118,37 +114,36 @@ remove :: (use set: &Set, value: set.Elem_Type) { else do entries[lr.entry_prev].next = entries[lr.entry_index].next; if lr.entry_index == entries.count - 1 { - array.pop(&entries); + Array.pop(&entries); return; } - array.fast_delete(&entries, lr.entry_index); + Array.fast_delete(&entries, lr.entry_index); last := lookup(set, entries[lr.entry_index].value); if last.entry_prev >= 0 do entries[last.entry_prev].next = lr.entry_index; else do hashes[last.hash_index] = lr.entry_index; } -clear :: (use set: &Set) { - array.fill(hashes, -1); - array.clear(&entries); +Set.clear :: (use set: &Set) { + Array.fill(hashes, -1); + Array.clear(&entries); } -empty :: (use set: &Set) -> bool { +Set.empty :: (use set: &Set) -> bool { return entries.count == 0; } #overload core.iter.as_iter as_iter -as_iter :: (s: &Set) => +Set.as_iter :: (s: &Set) => core.iter.generator( &.{ s = s, i = 0 }, (ctx) => { - if ctx.i >= ctx.s.entries.count { - return (typeof &ctx.s.entries.data[0].value).{}, false; + if ctx.i < ctx.s.entries.count { + defer ctx.i += 1; + return Optional.make(&ctx.s.entries.data[ctx.i].value); } - - defer ctx.i += 1; - return &ctx.s.entries.data[ctx.i].value, true; + return .None; }); // @@ -194,7 +189,7 @@ as_iter :: (s: &Set) => rehash :: (use set: &Set, new_size: i32) { memory.free_slice(&hashes, allocator); hashes = builtin.make([] u32, new_size, allocator=allocator); - array.fill(hashes, -1); + Array.fill(hashes, -1); for &entry in entries do entry.next = -1; @@ -208,3 +203,21 @@ as_iter :: (s: &Set) => } } } + + +// +// Everything below here only exists for backwards compatibility. 
+// + +make :: Set.make +init :: Set.init +free :: Set.free +has :: Set.has +get :: Set.get +get_ptr :: Set.get_ptr +get_opt :: Set.get_opt +insert :: Set.insert +remove :: Set.remove +clear :: Set.clear +empty :: Set.empty +as_iter :: Set.as_iter diff --git a/core/container/slice.onyx b/core/container/slice.onyx index f0a5d0941..d4996d6a5 100644 --- a/core/container/slice.onyx +++ b/core/container/slice.onyx @@ -12,43 +12,37 @@ use core.memory // } // -#doc """ - Creates a zeroed slice of type `T` and length `length`, using the allocator provided. - - use core {slice, println} - - sl := slice.make(i32, 10); - println(sl); - // 0 0 0 0 0 0 0 0 0 0 -""" -make :: ($T: type_expr, length: u32, allocator := context.allocator) -> [] T { +/// Creates a zeroed slice of type `T` and length `length`, using the allocator provided. +/// +/// use core {slice, println} +/// +/// sl := slice.make(i32, 10); +/// println(sl); +/// // 0 0 0 0 0 0 0 0 0 0 +Slice.make :: ($T: type_expr, length: u32, allocator := context.allocator) -> [] T { data := raw_alloc(allocator, sizeof T * length); memory.set(data, 0, sizeof T * length); return .{ data, length }; } -#doc """ - Initializes a slice of type `T` to have a length of `length`, using the allocator provided. - - use core {slice, println} - - sl: [] i32; - slice.init(&sl, 10); - println(sl); - // 0 0 0 0 0 0 0 0 0 0 -""" -init :: (sl: &[] $T, length: u32, allocator := context.allocator) -> [] T { +/// Initializes a slice of type `T` to have a length of `length`, using the allocator provided. +/// +/// use core {slice, println} +/// +/// sl: [] i32; +/// slice.init(&sl, 10); +/// println(sl); +/// // 0 0 0 0 0 0 0 0 0 0 +Slice.init :: (sl: &[] $T, length: u32, allocator := context.allocator) { sl.count = length; sl.data = raw_alloc(allocator, sizeof T * length); memory.set(sl.data, 0, sizeof T * length); } -#doc """ - Frees the data inside the slice. 
- The slice is taken by pointer because this procedure also sets the data pointer to `null`, and the length to `0`, - to prevent accidental future use of the slice. -""" -free :: (sl: &[] $T, allocator := context.allocator) { +/// Frees the data inside the slice. +/// The slice is taken by pointer because this procedure also sets the data pointer to `null`, and the length to `0`, +/// to prevent accidental future use of the slice. +Slice.free :: (sl: &[] $T, allocator := context.allocator) { if sl.data == null do return; raw_free(allocator, sl.data); @@ -62,7 +56,7 @@ free :: (sl: &[] $T, allocator := context.allocator) { __make_overload :: macro (_: &[] $T, count: u32, allocator := context.allocator) -> [] T { use core.memory - ret := #this_package.make(T, count, allocator); + ret := Slice.make(T, count, allocator); memory.set(ret.data, 0, sizeof T * count); return ret; } @@ -71,14 +65,12 @@ __make_overload :: macro (_: &[] $T, count: u32, allocator := context.allocator) // Allows for delete(&sl); #overload builtin.delete :: macro (x: &[] $T, allocator := context.allocator) { - #this_package.free(x, allocator); + Slice.free(x, allocator); } -#doc """ - Copies a slice to a new slice, allocated from the provided allocator. -""" -copy :: (sl: [] $T, allocator := context.allocator) -> [] T { +/// Copies a slice to a new slice, allocated from the provided allocator. +Slice.copy :: (sl: [] $T, allocator := context.allocator) -> [] T { data := raw_alloc(allocator, sl.count * sizeof T); memory.copy(data, sl.data, sl.count * sizeof T); @@ -86,20 +78,54 @@ copy :: (sl: [] $T, allocator := context.allocator) -> [] T { } -#doc """ - Moves an element to a new index, ensuring that order of other elements is retained. +/// Creates a new slice and populates by performing the transform function on each element of the existing slice. 
+Slice.map :: #match #local {} + +#overload +Slice.map :: (sl: [] $T, transform: (T) -> $R, allocator := context.allocator) -> [] R { + new_slice := Slice.make(R, sl.count, allocator) + for v, i in sl do new_slice[i] = transform(v) + return new_slice +} + +#overload +Slice.map :: macro (sl: [] $T, transform: Code, allocator := context.allocator) => { + _s := sl + new_slice := Slice.make(typeof #unquote transform(sl[0]), _s.count, allocator) + for v, i in _s do new_slice[i] = #unquote transform(v) + return new_slice +} - use core {slice, println} - arr := i32.[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; +/// Modifies a slice in-place without allocating any memory and returns the original slice. +Slice.map_inplace :: #match #local {} - // Move element at index 4 to index 8 - slice.transplant(arr, 4, 8); +#overload +Slice.map_inplace :: (sl: [] $T, transform: (T) -> T, allocator := context.allocator) => { + for &v in sl do *v = transform(*v) + return sl +} - println(arr); - // 1 2 3 4 6 7 8 9 5 10 -""" -transplant :: (arr: [] $T, old_index: i32, new_index: i32) -> bool { +#overload +Slice.map_inplace :: macro (sl: [] $T, transform: Code, allocator := context.allocator) => { + _s := sl + for &v in _s do *v = #unquote transform(*v) + return _s +} + + +/// Moves an element to a new index, ensuring that order of other elements is retained. +/// +/// use core {slice, println} +/// +/// arr := i32.[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; +/// +/// // Move element at index 4 to index 8 +/// slice.transplant(arr, 4, 8); +/// +/// println(arr); +/// // 1 2 3 4 6 7 8 9 5 10 +Slice.transplant :: (arr: [] $T, old_index: i32, new_index: i32) -> bool { if old_index < 0 || old_index >= arr.count do return false; if new_index < 0 || new_index >= arr.count do return false; if old_index == new_index do return true; @@ -123,10 +149,8 @@ transplant :: (arr: [] $T, old_index: i32, new_index: i32) -> bool { return true; } -#doc """ - Get an element from a slice, with negative and wrap-around indexing. 
-""" -get :: (arr: [] $T, idx: i32) -> T { +/// Get an element from a slice, with negative and wrap-around indexing. +Slice.get :: (arr: [] $T, idx: i32) -> T { if arr.count == 0 do return .{}; while idx < 0 do idx += arr.count; @@ -135,10 +159,18 @@ get :: (arr: [] $T, idx: i32) -> T { return arr.data[idx]; } -#doc """ - Get a pointer to an element from a slice, with negative and wrap-around indexing. -""" -get_ptr :: (arr: [] $T, idx: i32) -> &T { +/// Get an element from a slice, with negative and wrap-around indexing. +Slice.get_opt :: (arr: [] $T, idx: i32) -> ? T { + if arr.count == 0 do return .None + + while idx < 0 do idx += arr.count + while idx >= arr.count do idx -= arr.count + + return arr.data[idx] +} + +/// Get a pointer to an element from a slice, with negative and wrap-around indexing. +Slice.get_ptr :: (arr: [] $T, idx: i32) -> &T { if arr.count == 0 do return null; while idx < 0 do idx += arr.count; @@ -147,10 +179,8 @@ get_ptr :: (arr: [] $T, idx: i32) -> &T { return &arr.data[idx]; } -#doc """ - Set a value in a slice, with negative and wrap-around indexing. -""" -set :: (arr: [] $T, idx: i32, value: T) { +/// Set a value in a slice, with negative and wrap-around indexing. +Slice.set :: (arr: [] $T, idx: i32, value: T) { if arr.count == 0 do return; while idx < 0 do idx += arr.count; @@ -159,7 +189,7 @@ set :: (arr: [] $T, idx: i32, value: T) { arr.data[idx] = value; } -contains :: #match #locked { +Slice.contains :: #match #locked { (arr: [] $T, x: T) -> bool { for it in arr do if it == x do return true; return false; @@ -171,50 +201,44 @@ contains :: #match #locked { } } -#doc """ - Tests if the slice is empty. - - Normally this is unneeded, as arrays have a 'truthiness' - that depends on their count. For example, instead of saying: - - if slice.empty(arr) { ... } - - You can simply say: - - if !arr { ... } -""" -empty :: (arr: [] $T) => arr.count == 0; - -#doc "Uses `+` to sum all elements in the slice." 
-sum :: (arr: [] $T, start: T = 0) -> T { +/// Tests if the slice is empty. +/// +/// Normally this is unneeded, as arrays have a 'truthiness' +/// that depends on their count. For example, instead of saying: +/// +/// if slice.empty(arr) { ... } +/// +/// You can simply say: +/// +/// if !arr { ... } +Slice.empty :: (arr: [] $T) => arr.count == 0; + +/// Uses `+` to sum all elements in the slice. +Slice.sum :: (arr: [] $T, start: T = 0) -> T { sum := start; for it in arr do sum += it; return sum; } -#doc "Uses `*` to multiply all elements in the slice." -product :: (arr: [] $T, start: T = 1) -> T { +/// Uses `*` to multiply all elements in the slice. +Slice.product :: (arr: [] $T, start: T = 1) -> T { prod := start; for it in arr do prod *= it; return prod; } -#doc """ - Uses `+` to add the elements together. - Then use `/ i32` to divide by the number of elements. - Both of these are assumed to work. -""" -average :: (arr: [] $T) -> T { +/// Uses `+` to add the elements together. +/// Then use `/ i32` to divide by the number of elements. +/// Both of these are assumed to work. +Slice.average :: (arr: [] $T) -> T { sum := cast(T) 0; for it in *arr do sum += it; return sum / cast(T) arr.count; } -#doc """ - Reverses a slice in-place. -""" -reverse :: (arr: [] $T) { +/// Reverses a slice in-place. +Slice.reverse :: (arr: [] $T) { for i in arr.count / 2 { tmp := arr[i]; arr[i] = arr[arr.count - 1 - i]; @@ -222,19 +246,17 @@ reverse :: (arr: [] $T) { } } -#doc """ - Simple insertion sort. - - `cmp` should return greater-than 0 if `left > right`. - - Returns the array to be used in '|>' chaining. - - **Not a copy of the slice.** -""" -sort :: #match #local {} +/// Simple insertion sort. +/// +/// `cmp` should return greater-than 0 if `left > right`. +/// +/// Returns the array to be used in '|>' chaining. 
+/// +/// **Not a copy of the slice.** +Slice.sort :: #match #local {} #overload -sort :: (arr: [] $T, cmp: (T, T) -> i32) -> [] T { +Slice.sort :: (arr: [] $T, cmp: (T, T) -> i32) -> [] T { for i in 1 .. arr.count { x := arr.data[i]; j := i - 1; @@ -257,7 +279,7 @@ sort :: (arr: [] $T, cmp: (T, T) -> i32) -> [] T { } #overload -sort :: (arr: [] $T, cmp: (&T, &T) -> i32) -> [] T { +Slice.sort :: (arr: [] $T, cmp: (&T, &T) -> i32) -> [] T { for i in 1 .. arr.count { j := i; @@ -277,12 +299,10 @@ sort :: (arr: [] $T, cmp: (&T, &T) -> i32) -> [] T { return arr; } -#doc """ - Quicksort a slice. - - `cmp` should return greater-than 0 if `left > right`. -""" -quicksort :: #match #locked { +/// Quicksort a slice. +/// +/// `cmp` should return greater-than 0 if `left > right`. +Slice.quicksort :: #match #locked { (arr: [] $T, cmp: ( T, T) -> i32) => { quicksort_impl(arr, cmp, 0, arr.count - 1); return arr; }, (arr: [] $T, cmp: (&T, &T) -> i32) => { quicksort_impl(arr, cmp, 0, arr.count - 1); return arr; }, } @@ -334,14 +354,12 @@ quicksort :: #match #locked { } } -#doc """ - Shrinks a slice, removing all duplicates of elements, using `==` - to compare elements. - - This assumes that the elements are sorted in some fashion, - such that equal elements would be next to each other. -""" -unique :: (arr: &[] $T) { +/// Shrinks a slice, removing all duplicates of elements, using `==` +/// to compare elements. +/// +/// This assumes that the elements are sorted in some fashion, +/// such that equal elements would be next to each other. +Slice.unique :: (arr: &[] $T) { idx := 0; while i := 0; i < arr.count - 1 { defer i += 1; @@ -360,61 +378,128 @@ unique :: (arr: &[] $T) { } -#doc """ - Reduces a slice down to a single value, using successive calls to `f`, or invokations of the `body`. 
- - use core {slice, println} - - arr := i32.[1, 2, 3, 4, 5]; - - slice.fold(arr, 0, (it, acc) => it + acc) |> println(); - - // OR - - slice.fold(arr, 0, [it, acc](it + acc)) |> println(); -""" -fold :: #match #local {} +/// Reduces a slice down to a single value, using successive calls to `f`, or invokations of the `body`. +/// +/// use core {slice, println} +/// +/// arr := i32.[1, 2, 3, 4, 5]; +/// +/// slice.fold(arr, 0, (it, acc) => it + acc) |> println(); +/// +/// // OR +/// +/// slice.fold(arr, 0, [it, acc](it + acc)) |> println(); +Slice.fold :: #match #local {} #overload -fold :: (arr: [] $T, init: $R, f: (T, R) -> R) -> R { +Slice.fold :: (arr: [] $T, init: $R, f: (T, R) -> R) -> R { val := init; for it in arr do val = f(it, val); return val; } #overload -fold :: macro (arr: [] $T, init: $R, body: Code) -> R { +Slice.fold :: macro (arr: [] $T, init: $R, body: Code) -> R { acc := init; for it in arr do acc = #unquote body(it, acc); return acc; } -#doc """ - Returns `true` if *every* element in the slice meets the predicate test. -""" -every :: #match #local {} + +Slice.fold1 :: #match #local {} + +#overload +Slice.fold1 :: (arr: [] $T, f: (T, T) -> T) -> ? T { + if arr.count == 0 do return .None + + val := arr[0]; + for it in arr[1 .. arr.count] do val = f(it, val); + return val; +} + +#overload +Slice.fold1 :: macro (arr: [] $T, body: Code) -> ? T { + if arr.count == 0 do return .None + + acc := arr[0]; + for it in arr[1 .. 
arr.count] do acc = #unquote body(it, acc); + return acc; +} + + +Slice.scan :: #match #local {} + +#overload +Slice.scan :: (arr: [] $T, init: $R, f: (T, R) -> R) -> [] R { + results := builtin.make([] R, arr.length) + acc := init + for it, index in arr { + acc = f(it, acc) + results[index] = acc + } + return results +} + +#overload +Slice.scan :: macro (arr: [] $T, init: $R, body: Code) -> [] R { + results := builtin.make([] R, arr.length) + acc := init + for it, index in arr { + acc = #unquote body(it, acc) + results[index] = acc + } + return results +} + + +Slice.scan1 :: #match #local {} #overload -every :: macro (arr: [] $T, predicate: (T) -> bool) => #this_package.every(arr, [it](predicate(it))); +Slice.scan1 :: (arr: [] $T, f: (T, T) -> T) -> [] T { + if !arr do return .{} + + results := builtin.make([] T, arr.length) + results[0] = arr[0] + for it, index in arr[1 .. arr.length] { + results[index+1] = f(it, results[index]) + } + return results +} #overload -every :: macro (arr: [] $T, predicate_body: Code) -> bool { +Slice.scan1 :: macro (arr: [] $T, body: Code) -> [] T { + if !arr do return .{} + + results := builtin.make([] T, arr.length) + results[0] = arr[0] + for it, index in arr[1 .. arr.length] { + results[index+1] = #unquote body(it, results[index]) + } + return results +} + +/// Returns `true` if *every* element in the slice meets the predicate test. +Slice.every :: #match #local {} + +#overload +Slice.every :: macro (arr: [] $T, predicate: (T) -> bool) => Slice.every(arr, [it](predicate(it))); + +#overload +Slice.every :: macro (arr: [] $T, predicate_body: Code) -> bool { for arr { if !(#unquote predicate_body(it)) do return false; } return true; } -#doc """ - Returns `true` if *at least one* element in the slice meets the predicate test. -""" -some :: #match #local {} +/// Returns `true` if *at least one* element in the slice meets the predicate test. 
+Slice.some :: #match #local {} #overload -some :: macro (arr: [] $T, predicate: (T) -> bool) => #this_package.some(arr, [it](predicate(it))); +Slice.some :: macro (arr: [] $T, predicate: (T) -> bool) => Slice.some(arr, [it](predicate(it))); #overload -some :: macro (arr: [] $T/type_is_struct, predicate_body: Code) -> bool { +Slice.some :: macro (arr: [] $T/type_is_struct, predicate_body: Code) -> bool { for & arr { if #unquote predicate_body(it) do return true; } @@ -422,36 +507,30 @@ some :: macro (arr: [] $T/type_is_struct, predicate_body: Code) -> bool { } #overload -some :: macro (arr: [] $T, predicate_body: Code) -> bool { +Slice.some :: macro (arr: [] $T, predicate_body: Code) -> bool { for arr { if #unquote predicate_body(it) do return true; } return false; } -#doc """ - Sets all elements in a slice to be `value`. -""" -fill :: (arr: [] $T, value: T) { +/// Sets all elements in a slice to be `value`. +Slice.fill :: (arr: [] $T, value: T) { for i in arr.count { arr[i] = value; } } -#doc """ - Sets all elements in the range to be `value`. -""" -fill_range :: (arr: [] $T, r: range, value: T) { +/// Sets all elements in the range to be `value`. +Slice.fill_range :: (arr: [] $T, r: range, value: T) { for i in r { if i >= arr.count || i < 0 do continue; arr[i] = value; } } -#doc """ - Converts a slice to a linked list. -""" -to_list :: (arr: [] $T, allocator := context.allocator) -> List(T) { +/// Converts a slice to a linked list. +Slice.to_list :: (arr: [] $T, allocator := context.allocator) -> List(T) { new_list := list.make(T, allocator); for &it in arr { @@ -461,15 +540,13 @@ to_list :: (arr: [] $T, allocator := context.allocator) -> List(T) { return new_list; } -#doc """ - Returns the index of the first element that matches the predicate. - - Returns `-1` if no matching element is found. -""" -find :: #match #local {} +/// Returns the index of the first element that matches the predicate. +/// +/// Returns `-1` if no matching element is found. 
+Slice.find :: #match #local {} #overload -find :: (arr: [] $T, value: T) -> i32 { +Slice.find :: (arr: [] $T, value: T) -> i32 { for i in arr.count { if value == arr.data[i] do return i; } @@ -478,7 +555,7 @@ find :: (arr: [] $T, value: T) -> i32 { } #overload -find :: macro (arr: [] $T/type_is_struct, pred: Code) -> i32 { +Slice.find :: macro (arr: [] $T/type_is_struct, pred: Code) -> i32 { for i in arr.count { it := &arr[i]; if #unquote pred(it) do return i; @@ -488,7 +565,7 @@ find :: macro (arr: [] $T/type_is_struct, pred: Code) -> i32 { } #overload -find :: macro (arr: [] $T, pred: Code) -> i32 { +Slice.find :: macro (arr: [] $T, pred: Code) -> i32 { for i in arr.count { it := arr[i]; if #unquote pred(it) do return i; @@ -497,12 +574,10 @@ find :: macro (arr: [] $T, pred: Code) -> i32 { return -1; } -#doc """ - Returns a pointer to the first element that equals `value`, compared using `==`. - - Returns `null` if no matching element is found. -""" -find_ptr :: (arr: [] $T, value: T) -> &T { +/// Returns a pointer to the first element that equals `value`, compared using `==`. +/// +/// Returns `null` if no matching element is found. +Slice.find_ptr :: (arr: [] $T, value: T) -> &T { for &it in arr { if value == *it do return it; } @@ -510,10 +585,8 @@ find_ptr :: (arr: [] $T, value: T) -> &T { return null; } -#doc """ - -""" -find_opt :: #match { +/// Returns an optional of the first element that the code block evaluates to a truthy value. +Slice.find_opt :: #match { macro (arr: [] $T/type_is_struct, cond: Code) -> ? 
T { for& it in arr { if #unquote cond(it) { @@ -533,10 +606,9 @@ find_opt :: #match { }, } -first :: #match #locked { +Slice.first :: #match #locked { macro (arr: [] $T, predicate: (T) -> bool) -> &T { - first :: first - return first(arr, [it](predicate(it))); + return Slice.first(arr, [it](predicate(it))); }, macro (arr: [] $T/type_is_struct, predicate_body: Code) -> &T { @@ -560,16 +632,14 @@ first :: #match #locked { } } -#doc """ - Returns the number of elements for which the predicate is true. -""" -count_where :: #match #local {} +/// Returns the number of elements for which the predicate is true. +Slice.count_where :: #match #local {} #overload -count_where :: macro (arr: [] $T, predicate: (T) -> bool) => #this_package.count_where(arr, [it](predicate(it))); +Slice.count_where :: macro (arr: [] $T, predicate: (T) -> bool) => Slice.count_where(arr, [it](predicate(it))); #overload -count_where :: macro (arr: [] $T, predicate_body: Code) -> u32 { +Slice.count_where :: macro (arr: [] $T, predicate_body: Code) -> u32 { count: u32 = 0; for arr { if #unquote predicate_body(it) do count += 1; @@ -578,65 +648,57 @@ count_where :: macro (arr: [] $T, predicate_body: Code) -> u32 { } -#doc """ - Creates an iterator of a sliding window over the elements of the slice, with width `width`. -""" -windows :: (arr: [] $T, width: i32) -> Iterator([] T) { - use core {iter} - - return iter.generator( +/// Creates an iterator of a sliding window over the elements of the slice, with width `width`. +Slice.windows :: (arr: [] $T, width: i32) -> Iterator([] T) { + return Iterator.generator( &.{ arr=arr, width=width, pos=0 }, ctx => { - if ctx.pos + ctx.width < ctx.arr.count { + if ctx.pos + ctx.width <= ctx.arr.count { defer ctx.pos += 1; - return ctx.arr.data[ctx.pos .. ctx.pos+ctx.width], true; + return Optional.make(ctx.arr.data[ctx.pos .. ctx.pos+ctx.width]) } - return .{}, false; + return .None } ); } -#doc """ - Creates an iterator of chunks over the elements of the slice. 
- Each chunk has size `width`, with the last chunk having size `arr.count % width`. -""" -chunks :: (arr: [] $T, width: i32) -> Iterator([] T) { - use core {iter} - - return iter.generator( +/// Creates an iterator of chunks over the elements of the slice. +/// Each chunk has size `width`, with the last chunk having size `arr.count % width`. +Slice.chunks :: (arr: [] $T, width: i32) -> Iterator([] T) { + return Iterator.generator( &.{ arr=arr, width=width, pos=0 }, ctx => { + use core {math} + if ctx.pos < ctx.arr.count { defer ctx.pos += ctx.width; - end := core.math.min(ctx.pos+ctx.width, ctx.arr.count); - return ctx.arr.data[ctx.pos .. end], true; + end := math.min(ctx.pos+ctx.width, ctx.arr.count); + return Optional.make(ctx.arr.data[ctx.pos .. end]) } - return .{}, false; + return .None } ); } -#doc """ - Groups a slice into sub-slices using the comparison function. - - `comp` should evaluate to a boolean value. - - Expects slice to sorted so all equal values are next to each other. -""" -group_by :: #match #local {} +/// Groups a slice into sub-slices using the comparison function. +/// +/// `comp` should evaluate to a boolean value. +/// +/// Expects slice to sorted so all equal values are next to each other. +Slice.group_by :: #match #local {} #overload -group_by :: macro (arr: [] $T, comp: (T, T) -> bool, allocator := context.allocator) -> [..] [] T { - return #this_package.group_by(arr, [a, b](comp(a, b)), allocator); +Slice.group_by :: macro (arr: [] $T, comp: (T, T) -> bool, allocator := context.allocator) -> [..] [] T { + return Slice.group_by(arr, [a, b](comp(a, b)), allocator); } #overload -group_by :: macro (arr_: [] $T, comp: Code, allocator := context.allocator) -> [..] [] T { - out := make([..] [] T, allocator); +Slice.group_by :: macro (arr_: [] $T, comp: Code, allocator := context.allocator) -> [..] [] T { + out := builtin.make([..] 
[] T, allocator); if arr_.count == 0 do return out; start := 0; @@ -659,7 +721,7 @@ group_by :: macro (arr_: [] $T, comp: Code, allocator := context.allocator) -> [ { t == t } -> bool; } -equal :: (arr1: [] $T/HasEquals, arr2: [] T) -> bool { +Slice.equal :: (arr1: [] $T/HasEquals, arr2: [] T) -> bool { if arr1.count != arr2.count do return false; for i in arr1.count { @@ -686,18 +748,54 @@ equal :: (arr1: [] $T/HasEquals, arr2: [] T) -> bool { return idx, elem; } -#doc """ - Returns the largest element in the array, using `>` to compare. -""" -greatest :: macro (arr: [] $T) -> (i32, T) { +/// Returns the largest element in the array, using `>` to compare. +Slice.greatest :: macro (arr: [] $T) -> (i32, T) { fold_idx_elem :: fold_idx_elem return fold_idx_elem(arr, [](*A > *B)); } -#doc """ - Returns the smallest element in the array, using `<` to compare. -""" -least :: macro (arr: [] $T) -> (i32, T) { +/// Returns the smallest element in the array, using `<` to compare. +Slice.least :: macro (arr: [] $T) -> (i32, T) { fold_idx_elem :: fold_idx_elem return fold_idx_elem(arr, [](*A < *B)); } + + +// +// Everything below here only exists for backwards compatibility. 
+// + +transplant :: Slice.transplant +get :: Slice.get +get_ptr :: Slice.get_ptr +set :: Slice.set +contains :: Slice.contains +empty :: Slice.empty +sum :: Slice.sum +product :: Slice.product +average :: Slice.average +reverse :: Slice.reverse +sort :: Slice.sort +quicksort :: Slice.quicksort +unique :: Slice.unique +fold :: Slice.fold +every :: Slice.every +some :: Slice.some +group_by :: Slice.group_by +fill :: Slice.fill +fill_range :: Slice.fill_range +to_list :: Slice.to_list +find :: Slice.find +find_ptr :: Slice.find_ptr +find_opt :: Slice.find_opt +first :: Slice.first +count_where :: Slice.count_where +windows :: Slice.windows +chunks :: Slice.chunks +greatest :: Slice.greatest +least :: Slice.least +free :: Slice.free +copy :: Slice.copy +make :: Slice.make +init :: Slice.init + diff --git a/core/conv/conv.onyx b/core/conv/conv.onyx index 2878a0ab6..297de9f13 100644 --- a/core/conv/conv.onyx +++ b/core/conv/conv.onyx @@ -7,11 +7,9 @@ package core.conv use core.string use core.math -#doc """ - Converts a string into an integer. Works with positive and - negative integers. If given a pointer to a string, will - modify the string to extract the integer part. -""" +/// Converts a string into an integer. Works with positive and +/// negative integers. If given a pointer to a string, will +/// modify the string to extract the integer part. str_to_i64 :: #match #local {} #overload @@ -28,37 +26,37 @@ str_to_i64 :: (s: &str, base: u32 = 10) -> i64 { value: i64 = 0; mul := 1; - if s.data[0] == #char "-" { + if s.data[0] == '-' { mul = -1; string.advance(s, 1); } - if s.data[0] == #char "+" { + if s.data[0] == '+' { string.advance(s, 1); } while !string.empty(*s) { switch c := s.data[0]; c { - case #char "0" .. #char "9" { + case '0' ..= '9' { value *= ~~base; - value += ~~(c - #char "0"); + value += ~~(c - '0'); } - case #char "A" .. 
#char "Z" { + case 'A' ..= 'Z' { if base <= 10 do fallthrough; value *= ~~base; - value += ~~((c - #char "A") + 10); + value += ~~((c - 'A') + 10); } - case #char "a" .. #char "z" { + case 'a' ..= 'z' { if base <= 10 do fallthrough; value *= ~~base; - value += ~~((c - #char "a") + 10); + value += ~~((c - 'a') + 10); } - case #default do break break; + case _ do break break; } string.advance(s); @@ -68,7 +66,7 @@ str_to_i64 :: (s: &str, base: u32 = 10) -> i64 { } -#doc "Converts a string to a floating point number." +/// Converts a string to a floating point number. str_to_f64 :: #match #local {} #overload @@ -85,7 +83,7 @@ str_to_f64 :: (s: &str) -> f64 { sign := parse_sign(s); value, _ := parse_digits(s); - if s.data[0] == #char "." { + if s.data[0] == '.' { string.advance(s, 1); fraction, fraction_digits := parse_digits(s); while fraction_digits > 0 { @@ -97,7 +95,7 @@ str_to_f64 :: (s: &str) -> f64 { value *= sign; - if s.data[0] != #char "e" && s.data[0] != #char "E" do return value; + if s.data[0] != 'e' && s.data[0] != 'E' do return value; string.advance(s, 1); exponent_sign := parse_sign(s); @@ -119,35 +117,33 @@ str_to_f64 :: (s: &str) -> f64 { parse_sign :: (s: &str) -> f64 { switch s.data[0] { - case #char "-" { string.advance(s, 1); return -1; } - case #char "+" { string.advance(s, 1); return 1; } - case #default { return 1; } + case '-' { string.advance(s, 1); return -1; } + case '+' { string.advance(s, 1); return 1; } + case _ { return 1; } } } - parse_digits :: (s: &str) -> (f64, digit_count: i32) { + parse_digits :: (s: &str) -> (f64, i32) { value: f64 = 0; count := 0; while s.count > 0 do switch s.data[0] { - case #char "0" .. 
#char "9" { - value = value * 10 + ~~cast(i32)(s.data[0] - #char "0"); + case '0' ..= '9' { + value = value * 10 + ~~cast(i32)(s.data[0] - '0'); string.advance(s, 1); count += 1; } - case #default do break break; + case _ do break break; } return value, count; } } -#doc """ - Converts an integer into a string using the buffer provided. - Supports upto base 64. If prefix is true, binary numbers are - prefixed with '0b' and hexadecimal numbers are prefixed with - '0x'. -""" +/// Converts an integer into a string using the buffer provided. +/// Supports upto base 64. If prefix is true, binary numbers are +/// prefixed with '0b' and hexadecimal numbers are prefixed with +/// '0x'. i64_to_str :: (n: i64, base: u64, buf: [] u8, min_length := 0, prefix := false) -> str { is_neg := false; if n < 0 { @@ -170,14 +166,14 @@ i64_to_str :: (n: i64, base: u64, buf: [] u8, min_length := 0, prefix := false) n /= base; } else { - c[0] = #char "0"; + c[0] = '0'; len += 1; c -= 1; } if min_length > 0 && len < min_length { for i in min_length - len { - c[0] = #char "0"; + c[0] = '0'; len += 1; c -= 1; } @@ -185,26 +181,26 @@ i64_to_str :: (n: i64, base: u64, buf: [] u8, min_length := 0, prefix := false) if prefix { if base == 16 { - c[0] = #char "x"; + c[0] = 'x'; len += 1; c -= 1; - c[0] = #char "0"; + c[0] = '0'; len += 1; c -= 1; } if base == 2 { - c[0] = #char "b"; + c[0] = 'b'; len += 1; c -= 1; - c[0] = #char "0"; + c[0] = '0'; len += 1; c -= 1; } } if is_neg { - c[0] = #char "-"; + c[0] = '-'; len += 1; c -= 1; } @@ -213,10 +209,8 @@ i64_to_str :: (n: i64, base: u64, buf: [] u8, min_length := 0, prefix := false) } -#doc """ - Converts an unsigned number into a string using the buffer provided. - Behaves like i64_to_str. -""" +/// Converts an unsigned number into a string using the buffer provided. +/// Behaves like i64_to_str. 
u64_to_str :: (n: u64, base: u64, buf: [] u8, min_length := 0, prefix := false) -> str { c: [&] u8 = &buf[buf.count - 1]; len := 0; @@ -233,14 +227,14 @@ u64_to_str :: (n: u64, base: u64, buf: [] u8, min_length := 0, prefix := false) n /= base; } else { - c[0] = #char "0"; + c[0] = '0'; len += 1; c -= 1; } if min_length > 0 && len < min_length { for i in min_length - len { - c[0] = #char "0"; + c[0] = '0'; len += 1; c -= 1; } @@ -248,19 +242,19 @@ u64_to_str :: (n: u64, base: u64, buf: [] u8, min_length := 0, prefix := false) if prefix { if base == 16 { - c[0] = #char "x"; + c[0] = 'x'; len += 1; c -= 1; - c[0] = #char "0"; + c[0] = '0'; len += 1; c -= 1; } if base == 2 { - c[0] = #char "b"; + c[0] = 'b'; len += 1; c -= 1; - c[0] = #char "0"; + c[0] = '0'; len += 1; c -= 1; } @@ -269,13 +263,11 @@ u64_to_str :: (n: u64, base: u64, buf: [] u8, min_length := 0, prefix := false) return str.{ data = c + 1, count = len }; } -#doc """ - Converts a floating point number into a string, using - the buffer provided. - - This is better than what used to be, but still relies on converting the integer - part of the float to an integer, which could overflow. -""" +/// Converts a floating point number into a string, using +/// the buffer provided. +/// +/// This is better than what used to be, but still relies on converting the integer +/// part of the float to an integer, which could overflow. 
f64_to_str :: (f: f64, buf: [] u8, digits_after_decimal := 4) -> str { if math.is_nan(f) { return format(buf, "NaN"); @@ -290,7 +282,7 @@ f64_to_str :: (f: f64, buf: [] u8, digits_after_decimal := 4) -> str { if f < 0 { f = -f; - buf[0] = #char "-"; + buf[0] = '-'; len += 1; } @@ -303,7 +295,7 @@ f64_to_str :: (f: f64, buf: [] u8, digits_after_decimal := 4) -> str { len += s1.count; if digits_after_decimal > 0 { - buf.data[len] = #char "."; + buf.data[len] = '.'; len += 1; digits := "0123456789"; @@ -332,4 +324,4 @@ parse_float :: str_to_f64 format_int :: i64_to_str format_uint :: u64_to_str -format_float :: f64_to_str \ No newline at end of file +format_float :: f64_to_str diff --git a/core/conv/format.onyx b/core/conv/format.onyx index 741e87668..9ad11d2d5 100644 --- a/core/conv/format.onyx +++ b/core/conv/format.onyx @@ -12,11 +12,9 @@ use runtime custom_parsers : Map(type_expr, #type (rawptr, str, Allocator) -> bool); } -#doc """ - This procedure is run before main() as it is an #init procedure. - It looks for all custom formatting and parsing definitions and - registers them to be used in format_any and parse_any. -""" +/// This procedure is run before main() as it is an #init procedure. +/// It looks for all custom formatting and parsing definitions and +/// registers them to be used in format_any and parse_any. 
custom_formatters_initialized :: #init () { map.init(&custom_formatters); map.init(&custom_parsers); @@ -25,19 +23,19 @@ custom_formatters_initialized :: #init () { use runtime.info {*}; for type_idx in type_table.count { - type := type_table[type_idx]; + type := type_table[type_idx].info; if type.kind != .Struct do continue; s_info := cast(&Type_Info_Struct) type; for s_info.tags { if it.type == Custom_Format { custom_format := cast(&Custom_Format) it.data; - custom_formatters[cast(type_expr) type_idx] = custom_format.format; + custom_formatters[type_table[type_idx].type] = custom_format.format; } if it.type == Custom_Parse { custom_parse := cast(&Custom_Parse) it.data; - custom_parsers[cast(type_expr) type_idx] = custom_parse.parse; + custom_parsers[type_table[type_idx].type] = custom_parse.parse; } } } @@ -61,76 +59,61 @@ custom_formatters_initialized :: #init () { } } -#doc """ - Registers a formatting function for a particular type. This type is - inferred from the type of the third argument in the given function. -""" +/// Registers a formatting function for a particular type. This type is +/// inferred from the type of the third argument in the given function. register_custom_formatter :: (formatter: (&Format_Output, &Format, &$T) -> void) { custom_formatters[T] = formatter; } -#doc """ - Registers a parsing function for a particular type. This type is - inferred from the type of the first argument in the given function. -""" +/// Registers a parsing function for a particular type. This type is +/// inferred from the type of the first argument in the given function. register_custom_parser :: (parser: (&$T, str, Allocator) -> bool) { custom_parsers[T] = parser; } -#doc """ - Tag-type used to specify how to format a structure. - - @conv.Custom_Format.{ format_structure } - TheStructure :: struct { ... } -""" +/// Tag-type used to specify how to format a structure. +/// +/// @conv.Custom_Format.{ format_structure } +/// TheStructure :: struct { ... 
} Custom_Format :: struct { format: (&Format_Output, &Format, rawptr) -> void; } -#doc """ - Tag-type used to specify that a certain procedure should be used - to format a type. - - @conv.Custom_Format_Proc.{ TheStructure } - format_structure :: (output: &conv.Format_Output, format: &conv.Format, data: &TheStructure) { ... } -""" +/// Tag-type used to specify that a certain procedure should be used +/// to format a type. +/// +/// @conv.Custom_Format_Proc.{ TheStructure } +/// format_structure :: (output: &conv.Format_Output, format: &conv.Format, data: &TheStructure) { ... } Custom_Format_Proc :: struct { type: type_expr; } -#doc """ - Tag-type used to specify how to parse a structure. - - @conv.Custom_Parse.{ parse_structure } - TheStructure :: struct { ... } -""" +/// Tag-type used to specify how to parse a structure. +/// +/// @conv.Custom_Parse.{ parse_structure } +/// TheStructure :: struct { ... } Custom_Parse :: struct { parse: (rawptr, str, Allocator) -> bool; } -#doc """ - Tag-type used to specify that a certain procedure should be used - to parse a type. - - @conv.Custom_Parse_Proc.{ TheStructure } - parse_structure :: (data: &TheStructure, input: str, allocator: Allocator) -> bool { ... } - -""" +/// Tag-type used to specify that a certain procedure should be used +/// to parse a type. +/// +/// @conv.Custom_Parse_Proc.{ TheStructure } +/// parse_structure :: (data: &TheStructure, input: str, allocator: Allocator) -> bool { ... } Custom_Parse_Proc :: struct { type: type_expr; } -#doc """ - Passed to any custom formatter. Wraps outputting data to any source, - using a `flush` callback function. Use `write` to output a string. - When the internal buffer is filled, `flush` is called to empty the - buffer to the final destination. -""" +/// Passed to any custom formatter. Wraps outputting data to any source, +/// using a `flush` callback function. Use `write` to output a string. 
+/// When the internal buffer is filled, `flush` is called to empty the +/// buffer to the final destination. Format_Output :: struct { data: [&] u8; count: u32; @@ -174,7 +157,7 @@ Format_Flush_Callback :: struct { } -#doc "Formatting options passed to a custom formatter." +/// Formatting options passed to a custom formatter. Format :: struct { pretty_printing := false; // p quote_strings := false; // " @@ -182,6 +165,7 @@ Format :: struct { dereference := false; // * custom_format := true; // ! to disable interpret_numbers := true; // d to disable + unpack_any := false; // a digits_after_decimal := cast(u32) 4; // .2 indentation := cast(u32) 0; @@ -204,7 +188,6 @@ str_format_va :: format_va -#inject builtin.logf :: (level: builtin.Log_Level, format: str, va: ..any) { use core {conv} @@ -213,10 +196,8 @@ builtin.logf :: (level: builtin.Log_Level, format: str, va: ..any) { } -#doc """ - Formats a string using the provided arguments and format specified string. - This has many overloads to make it easy to work with. -""" +/// Formats a string using the provided arguments and format specified string. +/// This has many overloads to make it easy to work with. format :: #match {} #overload @@ -248,9 +229,7 @@ format :: (buffer: &dyn_str, format: str, va: ..any) -> str { return *buffer; } -#doc """ - Like `format`, but takes the arguments as an array of `any`s, not a variadic argument array. -""" +/// Like `format`, but takes the arguments as an array of `any`s, not a variadic argument array. 
format_va :: #match {} #overload @@ -288,9 +267,9 @@ format_va :: (output: &Format_Output, format: str, va: [] any) -> str { ch := format[i]; formatting := Format.{}; - if ch == #char "{" { - if format[i + 1] == #char "{" { - output->write(#char "{"); + if ch == '{' { + if format[i + 1] == '{' { + output->write('{'); i += 1; continue; } @@ -308,18 +287,18 @@ format_va :: (output: &Format_Output, format: str, va: [] any) -> str { ch = format[i]; switch ch { - case #char "*" { + case '*' { i += 1; formatting.dereference = true; } - case #char "." { + case '.' { i += 1; digits := 0; - while format[i] >= #char "0" && format[i] <= #char "9" { + while format[i] >= '0' && format[i] <= '9' { digits *= 10; - digits += ~~(format[i] - #char "0"); + digits += ~~(format[i] - '0'); i += 1; } @@ -327,63 +306,68 @@ format_va :: (output: &Format_Output, format: str, va: [] any) -> str { formatting.digits_after_decimal = digits; } - case #char "p" { + case 'p' { i += 1; formatting.pretty_printing = true; } - case #char "x" { + case 'x' { i += 1; formatting.base = 16; } - case #char "b" { + case 'b' { i += 1; digits := 0; - while format[i] >= #char "0" && format[i] <= #char "9" { + while format[i] >= '0' && format[i] <= '9' { digits *= 10; - digits += ~~(format[i] - #char "0"); + digits += ~~(format[i] - '0'); i += 1; } formatting.base = ~~digits; } - case #char "w" { + case 'w' { i += 1; digits := 0; - while format[i] >= #char "0" && format[i] <= #char "9" { + while format[i] >= '0' && format[i] <= '9' { digits *= 10; - digits += ~~(format[i] - #char "0"); + digits += ~~(format[i] - '0'); i += 1; } formatting.minimum_width = digits; } - case #char "!" { + case '!' 
{ i += 1; formatting.custom_format = false; } - case #char "\"" { + case '"' { i += 1; formatting.quote_strings = true; } - case #char "'" { + case '\'' { i += 1; formatting.single_quote_strings = true; } - case #char "d" { + case 'd' { i += 1; formatting.interpret_numbers = false; } - case #char "}" { + case 'a' { + i += 1; + formatting.unpack_any = true; + } + + case '}' { arg := va[vararg_index]; vararg_index += 1; format_any(output, &formatting, arg); @@ -392,7 +376,7 @@ format_va :: (output: &Format_Output, format: str, va: [] any) -> str { continue continue; } - case #default do break break; + case _ do break break; } } @@ -402,9 +386,9 @@ format_va :: (output: &Format_Output, format: str, va: [] any) -> str { } } - if ch == #char "}" { - if format[i + 1] == #char "}" { - output->write(#char "}"); + if ch == '}' { + if format[i + 1] == '}' { + output->write('}'); i += 1; continue; } @@ -417,11 +401,9 @@ format_va :: (output: &Format_Output, format: str, va: [] any) -> str { } -#doc """ - This procedure converts any value into a string, using the type information system. - If a custom formatter is specified for the type, that is used instead. - This procedure is generally not used directly; instead, through format or format_va. -""" +/// This procedure converts any value into a string, using the type information system. +/// If a custom formatter is specified for the type, that is used instead. +/// This procedure is generally not used directly; instead, through format or format_va. format_any :: (output: &Format_Output, formatting: &Format, v: any) { use runtime.info {*}; @@ -523,7 +505,7 @@ format_any :: (output: &Format_Output, formatting: &Format, v: any) { // @Todo // escape '"' when quote_strings is enabled. 
output->write(to_output); if to_output.count < width && !(formatting.quote_strings || formatting.single_quote_strings) { - for width - to_output.count do output->write(#char " "); + for width - to_output.count do output->write(' '); } if formatting.quote_strings do output->write("\""); @@ -558,7 +540,16 @@ format_any :: (output: &Format_Output, formatting: &Format, v: any) { output->write(io.buffer_stream_to_str(&stream)); } - case #default { + case any { + if !formatting.unpack_any do fallthrough + + formatting.unpack_any = false + + value := *cast(& any) v.data + format_any(output, formatting, value) + } + + case _ { info := get_type_info(v.type); if info.kind == .Struct { @@ -582,8 +573,8 @@ format_any :: (output: &Format_Output, formatting: &Format, v: any) { if member != s.members.data do output->write(", "); if formatting.pretty_printing { - output->write(#char "\n"); - for i in format.indentation do output->write(#char " "); + output->write('\n'); + for i in format.indentation do output->write(' '); } output->write(member.name); @@ -594,8 +585,8 @@ format_any :: (output: &Format_Output, formatting: &Format, v: any) { } if formatting.pretty_printing { - output->write(#char "\n"); - for i in formatting.indentation do output->write(#char " "); + output->write('\n'); + for i in formatting.indentation do output->write(' '); output->write("}"); } else { @@ -645,7 +636,7 @@ format_any :: (output: &Format_Output, formatting: &Format, v: any) { if formatting.pretty_printing { output->write("\n"); - for _ in format.indentation do output->write(#char " "); + for _ in format.indentation do output->write(' '); } format_any(output, &format, .{ cast([&] u8) data + get_type_info(a.of).size * i, a.of }); @@ -655,8 +646,8 @@ format_any :: (output: &Format_Output, formatting: &Format, v: any) { if formatting.pretty_printing { format.indentation -= 4; output->write("\n"); - for _ in format.indentation do output->write(#char " "); - output->write(#char "]"); + for _ in 
format.indentation do output->write(' '); + output->write(']'); } else { output->write(" ]"); @@ -687,7 +678,7 @@ format_any :: (output: &Format_Output, formatting: &Format, v: any) { case i16, u16 do value = cast(u64) *(cast(&u16) v.data); case i32, u32 do value = cast(u64) *(cast(&u32) v.data); case i64, u64 do value = cast(u64) *(cast(&u64) v.data); - case #default do assert(false, "Bad enum backing type"); + case _ do panic("Bad enum backing type"); } if !formatting.interpret_numbers { @@ -745,7 +736,7 @@ format_any :: (output: &Format_Output, formatting: &Format, v: any) { case i16, u16 do tag_value = cast(u64) *(cast(&u16) v.data); case i32, u32 do tag_value = cast(u64) *(cast(&u32) v.data); case i64, u64 do tag_value = cast(u64) *(cast(&u64) v.data); - case #default do assert(false, "Bad union backing type"); + case _ do panic("Bad union backing type"); } variant := array.first(u.variants, [x](x.tag_value == ~~tag_value)); diff --git a/core/conv/parse.onyx b/core/conv/parse.onyx index b4f65e632..b6d538357 100644 --- a/core/conv/parse.onyx +++ b/core/conv/parse.onyx @@ -7,10 +7,8 @@ use core.math use core.memory use runtime -#doc """ - Parses many different types from a string into a value. - Uses a custom parser if one has been specified for the type given. -""" +/// Parses many different types from a string into a value. +/// Uses a custom parser if one has been specified for the type given. parse_any :: #match {} #overload @@ -31,7 +29,7 @@ parse_any :: (target: rawptr, data_type: type_expr, to_parse: str, string_alloca case bool { dest := cast(&bool) target; *dest = false; - if to_parse[0] == #char "t" || to_parse[0] == #char "T" { + if to_parse[0] == 't' || to_parse[0] == 'T' { *dest = true; } return true; @@ -73,8 +71,8 @@ parse_any :: (target: rawptr, data_type: type_expr, to_parse: str, string_alloca // If the string does *look* like a quoted string, // simply return a copy of the whole string. 
- if to_parse[0] != #char "\"" { - *dest = string.alloc_copy(to_parse, string_allocator); + if to_parse[0] != '"' { + *dest = string.copy(to_parse, string_allocator); return true; } @@ -82,11 +80,11 @@ parse_any :: (target: rawptr, data_type: type_expr, to_parse: str, string_alloca line := to_parse; string.advance(&line); - *dest = string.read_until(&line, #char "\"") |> string.alloc_copy(string_allocator); // @BUG // This does not handle escaped strings! + *dest = string.read_until(&line, '"') |> string.copy(string_allocator); // @BUG // This does not handle escaped strings! return true; } - case #default { + case _ { if info.kind == .Enum { val := str_to_i64(to_parse); @@ -123,9 +121,7 @@ parse_any :: (target: rawptr, data_type: type_expr, to_parse: str, string_alloca } -#doc """ - Shortcut to parse a type `T` using `parse_any`. -""" +/// Shortcut to parse a type `T` using `parse_any`. parse :: ($T: type_expr, to_parse: str) -> ? T { v: T; if #this_package.parse_any(&v, to_parse) { @@ -135,9 +131,7 @@ parse :: ($T: type_expr, to_parse: str) -> ? T { } } -#doc """ - Shortcut to parse a type `T` using `parse_any`, and specify an allocator. -""" +/// Shortcut to parse a type `T` using `parse_any`, and specify an allocator. parse_with_allocator :: ($T: type_expr, to_parse: str, allocator: Allocator) -> ? 
T { v: T; if #this_package.parse_any(&v, to_parse, allocator) { diff --git a/core/crypto/hmac.onyx b/core/crypto/hmac.onyx new file mode 100644 index 000000000..8abcd04e9 --- /dev/null +++ b/core/crypto/hmac.onyx @@ -0,0 +1,86 @@ +package core.crypto +#allow_stale_code + +use core.hash +use core.alloc +use core.memory + +HashingAlgorithm :: enum { + MD5 + SHA1 + SHA256 +} + +#local +_get_block_size :: (ha: HashingAlgorithm) => switch ha { + case .MD5 => 64 + case .SHA1 => 64 + case .SHA256 => 64 + case _ => 0 +} + +#local +_get_output_size :: (ha: HashingAlgorithm) => switch ha { + case .MD5 => 16 + case .SHA1 => 20 + case .SHA256 => 32 +} + +#local +_hash :: (ha: HashingAlgorithm, bytes: [] u8, a: Allocator) -> [] u8 { + return switch ha { + case .MD5 => hash.md5.hash(bytes) |> str.copy(a) + case .SHA1 => hash.sha1.hash(bytes) |> str.copy(a) + case .SHA256 => hash.sha256.hash(bytes) |> str.copy(a) + } +} + +/// Computes the Hashed Message Authentication Code of the provided data, with the provided key, using the specified algorithm. 
+hmac :: (data: [] u8, key: [] u8, alg: HashingAlgorithm) -> [] u8 { + arena := alloc.arena.make(context.allocator, 16*1024) + defer alloc.arena.free(&arena) + + a := alloc.as_allocator(&arena) + + block_size := _get_block_size(alg) + + k_prime := do { + res := key + if key.length > block_size { + res = _hash(alg, key, a) + } + + if res.length < block_size { + new_res := make([] u8, block_size, a) + memory.set(new_res.data, 0, block_size) + memory.copy(new_res.data, res.data, res.length) + return new_res + } + + return res + } + + out_key := do { + res := str.copy(k_prime, a) + for &c in res do *c ^= 0x5c + return res + } + + in_key := do { + res := str.copy(k_prime, a) + for &c in res do *c ^= 0x36 + return res + } + + inner := make(dyn_str, in_key.length + data.length, a) + inner->concat(in_key) + inner->concat(data) + hashed_inner := _hash(alg, inner, a) + + outer := make(dyn_str, out_key.length + hashed_inner.length, a) + outer->concat(out_key) + outer->concat(hashed_inner) + hashed_outer := _hash(alg, outer, context.allocator) + + return hashed_outer +} diff --git a/core/crypto/keys/jwt.onyx b/core/crypto/keys/jwt.onyx new file mode 100644 index 000000000..64dd76587 --- /dev/null +++ b/core/crypto/keys/jwt.onyx @@ -0,0 +1,193 @@ +/// +/// **EXPERIMENTAL**: JWT support is under active development. +/// The public interface for this package is subject to change. +package core.crypto.keys +#allow_stale_code + +use core { Result, tprintf, aprintf } +use core.misc +use core.crypto +use core.encoding {json, base64} + +/// The algorithm used to secure the JWT. +/// Currently, only `NONE` and `HS256` are supported, as they are the only +/// required algorithms per RFC-7518. When the standard library of Onyx +/// has support for RSA and ECDSA, more algorithms could be implemented. 
+JWT_Signing_Method :: enum { + UNKNOWN + + NONE + HS256 +} + +JWT_Signing_Method.as_alg :: (m: #Self) => switch m { + case .NONE => "none" + case .HS256 => "HS256" +} + +JWT_Signing_Method.from_alg :: (s: str) -> #Self { + if s->equal_insensitive("hs256") { + return .HS256 + } + + return .UNKNOWN +} + + +JWT :: struct { + method: JWT_Signing_Method + headers: Map(str, str) + claims: Map(str, json.Value) + + _body: dyn_str +} + +JWT.make :: (method: JWT_Signing_Method) -> JWT { + return .{ + method + Map.literal(str, str, .[ + .{ "alg", method->as_alg() |> str.copy() } + .{ "typ", "JWT" |> str.copy() } + ]) + make(Map(str, json.Value)) + } +} + +JWT.make_with_claims :: ( + method: JWT_Signing_Method + claims: Map(str, json.Value) +) -> JWT { + return .{ + method + Map.literal(str, str, .[ + .{ "alg", method->as_alg() |> str.copy() } + .{ "typ", "JWT" |> str.copy() } + ]) + Map.copy(claims) + } +} + +JWT.destroy :: (self: &#Self) { + for& self.headers.entries { + delete(&it.key, self.headers.allocator) + delete(&it.value, self.headers.allocator) + } + delete(&self.headers) + + for& self.claims.entries { + delete(&it.key, self.claims.allocator) + delete(it.value, self.claims.allocator) + } + delete(&self.claims) + + delete(&self._body) +} + +JWT.signed_string :: (self: #Self, key: str) -> Result(str, i32) { + a := context.temp_allocator + + payload := tprintf("{}.{}" + base64.encode_url(json.encode_string_opt(self.headers, a)!, a) + base64.encode_url(json.encode_string_opt(self.claims, a)!, a) + ) + + use signature := sign_string(payload, self.method, key) + |> Optional.or_return(Result(str, i32).{ Err = 0 }) + + signature_enc := base64.encode_url(signature, a) + + return .{ Ok = aprintf("{}.{}", payload, signature_enc) } +} + + + +JWT_Parse_Error :: enum { + Bad_Format + Invalid_Header + Invalid_Body + Invalid_Signature +} + +JWT.parse :: (token: str, signing_key: str, allocator := context.allocator) -> Result(JWT, JWT_Parse_Error) { + header_enc, tmp := 
str.bisect(token, '.') + body_enc, signature_enc := str.bisect(tmp, '.') + + if !signature_enc do return .{ Err = .Bad_Format } + + use header := base64.decode_url(header_enc, context.temp_allocator) + |> json.decode_with_result(context.temp_allocator) + |> Result.transform(x => x.root) + |> Result.transform_err(x => JWT_Parse_Error.Invalid_Header)? + |> json.Value.as_map(allocator) + + body_raw := base64.decode_url(body_enc, allocator) + body := body_raw + |> json.decode_with_result(allocator) + |> Result.transform(x => x.root) + |> Result.transform_err(x => JWT_Parse_Error.Invalid_Body)? + |> free_after_use( + (x) use (allocator) => json.Value.as_map(x, allocator) + (x) use (allocator) => json.free_value(x, allocator) + ) + + token := JWT.{} + token.claims = body + token.headers = make(Map(str, str), allocator) + for header.entries { + token.headers->put(it.key, it.value->as_str() |> str.copy(allocator)) + delete(it.value, allocator) + } + token.method = JWT_Signing_Method.from_alg(token.headers->get("alg") ?? "") + token._body = Array.raw_from_slice(body_raw, allocator) + + use signature := base64.decode_url(signature_enc) + valid := verify_string( + tprintf("{}.{}", header_enc, body_enc) + token.method + signing_key + signature + ) + + if !valid { + token->destroy() + return .{ Err = .Invalid_Signature } + } + + return .{ Ok = token } +} + + +#local +sign_string :: (s: str, method: JWT_Signing_Method, data: any) -> ? 
str { + switch method { + case .NONE do return s + case .HS256 { + key := misc.any_as(data, str) + if !key do return .None + + return crypto.hmac(s, *key, .SHA256) + } + } + return .None +} + +#local +verify_string :: (s: str, method: JWT_Signing_Method, data: any, signature: str) -> bool { + switch method { + case .NONE do return true + case .HS256 { + key := misc.any_as(data, str) + if !key do return false + + use hashed := crypto.hmac(s, *key, .SHA256) + return hashed == signature + } + } + return false +} + +#local +free_after_use :: (x: $T, f: (T) -> $R, free: (T) -> void) -> R { + defer { free(x) } + return f(x) +} diff --git a/core/encoding/base64.onyx b/core/encoding/base64.onyx index c4163505f..89333aa69 100644 --- a/core/encoding/base64.onyx +++ b/core/encoding/base64.onyx @@ -1,20 +1,19 @@ +/// +/// A simple Base64 encoding and decoding library. Currently +/// only supports base64 with + and / characters. A simple +/// find and replace could be used to change to other base64 +/// standards. +/// + package core.encoding.base64 #allow_stale_code use core.array -// -// A simple Base64 encoding and decoding library. Currently -// only supports base64 with + and / characters. A simple -// find and replace could be used to change to other base64 -// standards. -// - -#doc """ - Encodes the given data in base64 into a new buffer, allocated - from the allocator provided. It is the callers responsibilty - to free this memory. -""" + +/// Encodes the given data in base64 into a new buffer, allocated +/// from the allocator provided. It is the callers responsibilty +/// to free this memory. 
encode :: (data: [] u8, allocator := context.allocator) -> [] u8 { out := array.make(u8, allocator=allocator); @@ -33,8 +32,8 @@ encode :: (data: [] u8, allocator := context.allocator) -> [] u8 { c := data[data.count - 1]; out << encode_map[c >> 2]; out << encode_map[(c & 0x3) << 4]; - out << #char "="; - out << #char "="; + out << '='; + out << '='; } elseif data.count % 3 == 2 { c1 := data[data.count - 2]; @@ -42,16 +41,14 @@ encode :: (data: [] u8, allocator := context.allocator) -> [] u8 { out << encode_map[c1 >> 2]; out << encode_map[((c1 & 0x3) << 4) | ((c2 & 0xf0) >> 4)]; out << encode_map[(c2 & 0xf) << 2]; - out << #char "="; + out << '='; } return out; } -#doc """ - Decodes the given base64 data into a new buffer, allocated - from the allocator provided. -""" +/// Decodes the given base64 data into a new buffer, allocated +/// from the allocator provided. decode :: (data: [] u8, allocator := context.allocator) -> [] u8 { if data.count % 4 != 0 do return null_str; @@ -63,29 +60,121 @@ decode :: (data: [] u8, allocator := context.allocator) -> [] u8 { c3 := data[i + 2]; c4 := data[i + 3]; - v1 := decode_map[c1 - #char "+"]; - v2 := decode_map[c2 - #char "+"]; - v3 := decode_map[c3 - #char "+"]; - v4 := decode_map[c4 - #char "+"]; + v1 := decode_map[c1 - '+']; + v2 := decode_map[c2 - '+']; + v3 := decode_map[c3 - '+']; + v4 := decode_map[c4 - '+']; o1 := (v1 << 2) | ((v2 & 0x30) >> 4); o2 := ((v2 & 0xf) << 4) | ((v3 & 0x3c) >> 2); o3 := ((v3 & 0x3) << 6) | (v4 & 0x3f); out << o1; - if c3 != #char "=" do out << o2; - if c4 != #char "=" do out << o3; + if c3 != '=' do out << o2; + if c4 != '=' do out << o3; } return out; } +encode_url :: (data: [] u8, allocator := context.allocator) -> [] u8 { + out := Array.make(u8, allocator=allocator) + + for i in range.{0, data.count - 2, 3} { + c1 := data[i + 0] + c2 := data[i + 1] + c3 := data[i + 2] + + out << encode_url_map[c1 >> 2] + out << encode_url_map[((c1 & 0x3) << 4) | ((c2 & 0xf0) >> 4)] + out << 
encode_url_map[((c2 & 0xf) << 2) | ((c3 & 0xc0) >> 6)] + out << encode_url_map[c3 & 0x3f] + } + + if data.count % 3 == 1 { + c := data[data.count - 1] + out << encode_url_map[c >> 2] + out << encode_url_map[(c & 0x3) << 4] + + } elseif data.count % 3 == 2 { + c1 := data[data.count - 2] + c2 := data[data.count - 1] + out << encode_url_map[c1 >> 2] + out << encode_url_map[((c1 & 0x3) << 4) | ((c2 & 0xf0) >> 4)] + out << encode_url_map[(c2 & 0xf) << 2] + } + + return out +} + +/// Decodes the given base64 data into a new buffer, allocated +/// from the allocator provided. +decode_url :: (data: [] u8, allocator := context.allocator) -> [] u8 { + out := Array.make(u8, allocator=allocator) + + for i in range.{0, data.count - 3, 4} { + c1 := data[i + 0] + c2 := data[i + 1] + c3 := data[i + 2] + c4 := data[i + 3] + + v1 := decode_url_map[c1 - '-'] + v2 := decode_url_map[c2 - '-'] + v3 := decode_url_map[c3 - '-'] + v4 := decode_url_map[c4 - '-'] + + o1 := (v1 << 2) | ((v2 & 0x30) >> 4) + o2 := ((v2 & 0xf) << 4) | ((v3 & 0x3c) >> 2) + o3 := ((v3 & 0x3) << 6) | (v4 & 0x3f) + + out << o1 + out << o2 + out << o3 + } + + if data.length % 4 == 3 { + l := data.length + c1 := data[l - 3] + c2 := data[l - 2] + c3 := data[l - 1] + + v1 := decode_url_map[c1 - '-'] + v2 := decode_url_map[c2 - '-'] + v3 := decode_url_map[c3 - '-'] + + o1 := (v1 << 2) | ((v2 & 0x30) >> 4) + o2 := ((v2 & 0xf) << 4) | ((v3 & 0x3c) >> 2) + + out << o1 + out << o2 + } + + if data.length % 4 == 2 { + l := data.length + c1 := data[l - 2] + c2 := data[l - 1] + + v1 := decode_url_map[c1 - '-'] + v2 := decode_url_map[c2 - '-'] + + o1 := (v1 << 2) | ((v2 & 0x30) >> 4) + + out << o1 + } + + return out +} + + #local encode_map := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +#local +encode_url_map := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" + // Use as: -// decode_map[ch - #char "+"]; +// decode_map[ch - '+']; #local decode_map := u8.[ 62, // + @@ -106,3 +195,23 @@ 
decode_map := u8.[ 46, 47, 48, 49, 50, 51, ]; +// Use as: +// decode_url_map[ch - '-']; +#local +decode_url_map := u8.[ + 62, 0, // , - . Invalid + 0, // / + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // 0-9 + 0, 0, 0, 0, 0, 0, 0, // : ; < = > ? @ Invalid + + // A-Z + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, + + 0, 0, 0, 0, 63, 0, // [ \ ] ^ _ ` + + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, +]; diff --git a/core/encoding/csv.onyx b/core/encoding/csv.onyx index 5adfdd221..713589161 100644 --- a/core/encoding/csv.onyx +++ b/core/encoding/csv.onyx @@ -25,144 +25,135 @@ use runtime.info { Type_Info_Struct } -#doc "Represents data from a CSV file of a particular type." +/// Represents data from a CSV file of a particular type. CSV :: struct (Output_Type: type_expr) { entries: [..] Output_Type; } -#doc """ - Tag-type used to tell the ingress and egress methods what - the column name of a particular data element should be. - - Data :: struct { - @CSV_Column.{"Actual Column Name"} - variable_name: str; - } -""" +/// Tag-type used to tell the ingress and egress methods what +/// the column name of a particular data element should be. +/// +/// Data :: struct { +/// @CSV_Column.{"Actual Column Name"} +/// variable_name: str; +/// } CSV_Column :: struct { name: str; } -#doc "Define methods used with the CSV structure." -#inject CSV { - #doc "Create and initialize a CSV with no elements." - make :: ($T: type_expr) => { - r := CSV(T).{}; - r.entries = make(typeof r.entries); +/// Create and initialize a CSV with no elements. +CSV.make :: ($T: type_expr) => { + r := CSV(T).{}; + r.entries = make(typeof r.entries); - return r; - } + return r; +} - #doc "Frees all data in a CSV." - delete :: (csv: &CSV) { - delete(&csv.entries); - } +/// Frees all data in a CSV. 
+CSV.delete :: (csv: &CSV) { + delete(&csv.entries); +} - #doc """ - Ingests data from a string representing CSV data. - Uses the type of the CSV to know what columns should be expected. - If `headers_presents` is true, the first line will be treated as - headers, and cross checked with the CSV_Column tag information. - Use this when the columns from your CSV have a different order - from the order of fields in the structure. - """ - ingress_string :: (csv: &CSV, contents: str, headers_present := true) -> bool { - reader, stream := io.reader_from_string(contents); - defer cfree(stream); - - return csv->ingress(&reader, headers_present); +/// Ingests data from a string representing CSV data. +/// Uses the type of the CSV to know what columns should be expected. +/// If `headers_presents` is true, the first line will be treated as +/// headers, and cross checked with the CSV_Column tag information. +/// Use this when the columns from your CSV have a different order +/// from the order of fields in the structure. +CSV.ingress_string :: (csv: &CSV, contents: str, headers_present := true) -> bool { + reader, stream := io.reader_from_string(contents); + defer cfree(stream); + + return csv->ingress(&reader, headers_present); +} + +/// Ingests data from a Reader containing CSV data. +/// Uses the type of the CSV to know what columns should be expectd. +CSV.ingress :: (csv: &CSV, reader: &io.Reader, headers_present := true) -> bool { + Header :: struct { + type: type_expr; + offset: i32; } - - #doc """ - Ingests data from a Reader containing CSV data. - Uses the type of the CSV to know what columns should be expectd. - """ - ingress :: (csv: &CSV, reader: &io.Reader, headers_present := true) -> bool { - Header :: struct { - type: type_expr; - offset: i32; - } - any_headers := make([..] Header); - defer delete(&any_headers); + any_headers := make([..] 
Header); + defer delete(&any_headers); - output_type_info: &Type_Info_Struct = ~~ get_type_info(csv.Output_Type); + output_type_info: &Type_Info_Struct = ~~ get_type_info(csv.Output_Type); - if headers_present { - header_line := reader->read_line(allocator=context.temp_allocator) - |> string.strip_trailing_whitespace(); + if headers_present { + header_line := reader->read_line(allocator=context.temp_allocator) + |> string.strip_trailing_whitespace(); - for header in string.split_iter(header_line, #char ",") { - member := array.first(output_type_info.members, [](do { - if tag := array.first(it.tags, [t](t.type == CSV_Column)); tag { - return any_as(*tag, CSV_Column).name == header; - } + for header in string.split_iter(header_line, ',') { + member := array.first(output_type_info.members, [](do { + if tag := array.first(it.tags, [t](t.type == CSV_Column)); tag { + return any_as(*tag, CSV_Column).name == header; + } - return false; - })); + return false; + })); - any_headers << ((.{member.type, member.offset}) if member else .{void, -1}); - } + any_headers << ((.{member.type, member.offset}) if member else .{void, -1}); + } - } else { - for &member in output_type_info.members { - any_headers << .{ member.type, member.offset }; - } + } else { + for &member in output_type_info.members { + any_headers << .{ member.type, member.offset }; } + } - for line in reader->lines(allocator = context.temp_allocator) { - out: csv.Output_Type; + for line in reader->lines(allocator = context.temp_allocator) { + out: csv.Output_Type; - for entry in string.split_iter(string.strip_trailing_whitespace(line), #char ",") - |> iter.enumerate() - { - header := &any_headers[entry.index]; - if header.offset == -1 do continue; + for entry in string.split_iter(string.strip_trailing_whitespace(line), ',') + |> iter.enumerate() + { + header := &any_headers[entry.index]; + if header.offset == -1 do continue; - target := cast([&] u8) &out + header.offset; + target := cast([&] u8) &out + 
header.offset; - if header.type == str { - *cast(&str) target = string.alloc_copy(entry.value); - } else { - conv.parse_any(target, header.type, entry.value); - } + if header.type == str { + *cast(&str) target = string.copy(entry.value); + } else { + conv.parse_any(target, header.type, entry.value); } - - csv.entries << out; } + + csv.entries << out; } - #doc """ - Outputs data from a CSV into a Writer. - When `include_headers` is true, the first line outputted will be - the headers of the CSV, according to the CSV_Column tag information. - """ - egress :: (csv: &CSV, writer: &io.Writer, include_headers := true) { - output_type_info: &Type_Info_Struct = ~~ get_type_info(csv.Output_Type); - - if include_headers { - for &member in output_type_info.members { - if !#first do io.write(writer, ","); - - if tag := array.first(member.tags, [t](t.type == CSV_Column)); tag { - io.write(writer, any_as(*tag, CSV_Column).name); - } else { - io.write(writer, member.name); - } - } + return true; +} - io.write(writer, "\n"); - } +/// Outputs data from a CSV into a Writer. +/// When `include_headers` is true, the first line outputted will be +/// the headers of the CSV, according to the CSV_Column tag information. 
+CSV.egress :: (csv: &CSV, writer: &io.Writer, include_headers := true) { + output_type_info: &Type_Info_Struct = ~~ get_type_info(csv.Output_Type); - for &it in csv.entries { - for &member in output_type_info.members { - if !#first do io.write(writer, ","); + if include_headers { + for &member in output_type_info.members { + if !#first do io.write(writer, ","); - io.write_format_va(writer, "{}", .[ .{cast([&] u8) it + member.offset, member.type} ]); + if tag := array.first(member.tags, [t](t.type == CSV_Column)); tag { + io.write(writer, any_as(*tag, CSV_Column).name); + } else { + io.write(writer, member.name); } + } + + io.write(writer, "\n"); + } + + for &it in csv.entries { + for &member in output_type_info.members { + if !#first do io.write(writer, ","); - io.write(writer, "\n"); + io.write_format_va(writer, "{}", .[ .{cast([&] u8) it + member.offset, member.type} ]); } + + io.write(writer, "\n"); } } diff --git a/core/encoding/hex.onyx b/core/encoding/hex.onyx index 0804e1047..26dc46b53 100644 --- a/core/encoding/hex.onyx +++ b/core/encoding/hex.onyx @@ -24,10 +24,10 @@ decode :: (s: str, allocator := context.allocator) -> str { digit_to_value :: (it: u8) -> u32 { return ~~ switch it { - case #char "0" .. #char "9" => it - #char "0"; - case #char "a" .. #char "f" => it - #char "a" + 10; - case #char "A" .. 
#char "F" => it - #char "A" + 10; - case #default => 0; + case '0' ..= '9' => it - '0'; + case 'a' ..= 'f' => it - 'a' + 10; + case 'A' ..= 'F' => it - 'A' + 10; + case _ => 0; }; } -} \ No newline at end of file +} diff --git a/core/encoding/ini.onyx b/core/encoding/ini.onyx index 937eb57c8..91ee11262 100644 --- a/core/encoding/ini.onyx +++ b/core/encoding/ini.onyx @@ -100,10 +100,10 @@ parse_ini_file_inner :: (r: &io.Reader, output_ptr: any) -> (IniParseResult, Ini while !(r->is_empty()) { defer r->skip_whitespace(); - if b, e := r->peek_byte(); b == #char "[" { - assert(r->read_byte() == #char "[", "expected ["); - section_name := r->read_until(#char "]", allocator=context.temp_allocator); - assert(r->read_byte() == #char "]", "expected ]"); + if b, e := r->peek_byte(); b == '[' { + assert(r->read_byte() == '[', "expected ["); + section_name := r->read_until(']', allocator=context.temp_allocator); + assert(r->read_byte() == ']', "expected ]"); stripped_section_name := string.strip_whitespace(section_name); member := info.get_struct_member(output.type, stripped_section_name); @@ -123,7 +123,7 @@ parse_ini_file_inner :: (r: &io.Reader, output_ptr: any) -> (IniParseResult, Ini error(msg = aprintf("Failed parsing.")); } - r->read_until(#char "[", inplace=true); + r->read_until('[', inplace=true); } continue; @@ -135,12 +135,12 @@ parse_ini_file_inner :: (r: &io.Reader, output_ptr: any) -> (IniParseResult, Ini defer next_line(); - field_name := r->read_until(#char "=", allocator=context.temp_allocator); - assert(r->read_byte() == #char "=", "expected ="); + field_name := r->read_until('=', allocator=context.temp_allocator); + assert(r->read_byte() == '=', "expected ="); field := info.get_struct_member(active_item_type, string.strip_whitespace(field_name)); target := cast([&] u8) active_item_ptr + field.offset; - value_string := r->read_until(#char "\n", allocator=context.temp_allocator); + value_string := r->read_until('\n', allocator=context.temp_allocator); 
parsed_successfully := conv.parse_any(target, field.type, value_string); if !parsed_successfully { @@ -150,7 +150,7 @@ parse_ini_file_inner :: (r: &io.Reader, output_ptr: any) -> (IniParseResult, Ini if field.type == str { *cast(&str) target = value_string |> string.strip_whitespace() - |> string.alloc_copy(); + |> string.copy(); } else { error(aprintf("Failed to parse value of type '{}' from string '{}'.", field.type, value_string)); diff --git a/core/encoding/json/decoder.onyx b/core/encoding/json/decoder.onyx index fd2f7b279..4dca2a667 100644 --- a/core/encoding/json/decoder.onyx +++ b/core/encoding/json/decoder.onyx @@ -3,12 +3,10 @@ package core.encoding.json use core {*} -#doc """ - Unsafely decodes a strings into a json object, returning an invalid - Json value if it failed to parse. - - This procedure is not very useful and should be considered deprecated. -""" +/// Unsafely decodes a strings into a json object, returning an invalid +/// Json value if it failed to parse. +/// +/// This procedure is not very useful and should be considered deprecated. decode :: (data: str, allocator := context.allocator, print_errors := true) -> Json { json: Json; json.allocator = allocator; @@ -38,18 +36,14 @@ _Decode_Error :: struct { Decode_Error :: #distinct ^_Decode_Error; -#inject Decode_Error { - has_error :: (this: Decode_Error) => cast(^_Decode_Error) this != null; - message :: (this: Decode_Error) => (cast(^_Decode_Error) this).errmsg; - position :: (this: Decode_Error) => (cast(^_Decode_Error) this).pos; -} - -#doc """ - Decodes a string into a Json object, and returns the Json object and - and a `Decode_Error` that is non-null if an error occured. 
+Decode_Error.has_error :: (this: Decode_Error) => cast(^_Decode_Error) this != null; +Decode_Error.message :: (this: Decode_Error) => (cast(^_Decode_Error) this).errmsg; +Decode_Error.position :: (this: Decode_Error) => (cast(^_Decode_Error) this).pos; - This procedure should be considered deprecated in favor of `decode_with_result`. -""" +/// Decodes a string into a Json object, and returns the Json object and +/// and a `Decode_Error` that is non-null if an error occured. +/// +/// This procedure should be considered deprecated in favor of `decode_with_result`. decode_with_error :: (data: str, allocator := context.allocator) -> (Json, Decode_Error) { json: Json; json.allocator = allocator; @@ -74,9 +68,7 @@ decode_with_error :: (data: str, allocator := context.allocator) -> (Json, Decod return json, Decode_Error.{null}; } -#doc """ - Decodes a string into a possible Json object. If parsing fails, an error is returned instead. -""" +/// Decodes a string into a possible Json object. If parsing fails, an error is returned instead. decode_with_result :: (data: str, allocator := context.allocator) -> Result(Json, Error) { root, err := parse(data, allocator); if err.kind != .None { @@ -88,11 +80,9 @@ decode_with_result :: (data: str, allocator := context.allocator) -> Result(Json }; } -#doc """ - Decodes a string into any Onyx type. - - Internally uses `decode_with_result` and `as_any`. -""" +/// Decodes a string into any Onyx type. +/// +/// Internally uses `decode_with_result` and `as_any`. 
decode_into :: (data: str, out: &$T) -> Error { obj := decode_with_result(data)->catch([err] { return return err; diff --git a/core/encoding/json/encoder.onyx b/core/encoding/json/encoder.onyx index df7630ff6..86ecba868 100644 --- a/core/encoding/json/encoder.onyx +++ b/core/encoding/json/encoder.onyx @@ -21,7 +21,7 @@ encode_string :: (v: $T, allocator := context.allocator) -> (str, Encoding_Error return "", err; } - s := string.alloc_copy(io.buffer_stream_to_str(^stream), allocator); + s := string.copy(io.buffer_stream_to_str(^stream), allocator); return s, .None; } @@ -66,48 +66,48 @@ encode :: #match { }, (w: ^io.Writer, v: [] $T) -> Encoding_Error { - io.write_byte(w, #char "["); + io.write_byte(w, '['); for i in v.count { - if i > 0 do io.write_byte(w, #char ","); + if i > 0 do io.write_byte(w, ','); err := encode(w, v[i]); if err != .None do return err; } - io.write_byte(w, #char "]"); + io.write_byte(w, ']'); return .None; }, (w: ^io.Writer, v: [..] $T) -> Encoding_Error { - io.write_byte(w, #char "["); + io.write_byte(w, '['); for i in v.count { - if i > 0 do io.write_byte(w, #char ","); + if i > 0 do io.write_byte(w, ','); err := encode(w, v[i]); if err != .None do return err; } - io.write_byte(w, #char "]"); + io.write_byte(w, ']'); return .None; }, (w: ^io.Writer, v: Map(str, $T)) -> Encoding_Error { - io.write_byte(w, #char "{"); + io.write_byte(w, '{'); for i in v.entries.count { - if i > 0 do io.write_byte(w, #char ","); + if i > 0 do io.write_byte(w, ','); entry := ^v.entries[i]; io.write_escaped_str(w, entry.key); - io.write_byte(w, #char ":"); + io.write_byte(w, ':'); err := encode(w, entry.value); if err != .None do return err; } - io.write_byte(w, #char "}"); + io.write_byte(w, '}'); return .None; }, @@ -121,23 +121,23 @@ encode :: #match { case .Array do encode(w, v->as_array()); case .Object { - io.write_byte(w, #char "{"); + io.write_byte(w, '{'); obj := cast(^_Value_Object) cast(^_Value) v; for i in obj.object_.count { - if i > 0 do 
io.write_byte(w, #char ","); + if i > 0 do io.write_byte(w, ','); io.write_escaped_str(w, obj.object_[i].key); - io.write_byte(w, #char ":"); + io.write_byte(w, ':'); err := encode(w, obj.object_[i].value); if err != .None do return err; } - io.write_byte(w, #char "}"); + io.write_byte(w, '}'); } - case #default { + case _ { return .Unsupported_Type; } } @@ -252,10 +252,12 @@ encode :: (w: ^io.Writer, data: any) -> Encoding_Error { } } - case #default { + case _ { return .Unsupported_Type; } } + + return .None; } @@ -290,6 +292,13 @@ from_any :: macro (v: ^$T, allocator := context.allocator) -> Value { return from_any(T, v, allocator); } +#overload +from_any :: macro (v: $T, allocator := context.allocator) -> Value { + from_any :: from_any + x := v + return from_any(T, &x, allocator) +} + #overload from_any :: (type: type_expr, input: rawptr, allocator := context.allocator) -> Value { use runtime.info; @@ -323,7 +332,7 @@ from_any :: (type: type_expr, input: rawptr, allocator := context.allocator) -> // Strings are handled differently if type == str { v := new(_Value_String, allocator); - v.str_ = string.alloc_copy(*cast(^str) input, allocator); + v.str_ = string.copy(*cast(^str) input, allocator); return Value.{v}; } @@ -448,7 +457,7 @@ as_any :: (value: Value, type: type_expr, out: rawptr) { case .Slice { // Strings are handled differently if type == str { - *cast(^str) out = string.alloc_copy(value->as_str()); + *cast(^str) out = string.copy(value->as_str()); return; } diff --git a/core/encoding/json/parser.onyx b/core/encoding/json/parser.onyx index b3ea5fe04..1feaef814 100644 --- a/core/encoding/json/parser.onyx +++ b/core/encoding/json/parser.onyx @@ -113,7 +113,7 @@ parse_value :: (use parser: ^Parser) -> (Value, Error) { return_value = cast(^_Value) value; } - case #default { + case _ { consume_token(parser); return Value.{return_value}, .{ .Unexpected_Token, current.position }; } @@ -172,7 +172,7 @@ parse_object :: (use parser: ^Parser) -> (Value, Error) { 
if err.kind != .None do return Value.{value}, err; // This uses the context allocators because the array resizing needs to happen in a general purpose heap allocator - array.init(^value.object_, allocator=context.allocator); + array.init(^value.object_, allocator=allocator); defer if err.kind != .None { free(Value.{value}, allocator); } @@ -241,7 +241,7 @@ unescape_string :: (token: Token, allocator: Allocator) -> str { i := 0; for c in s { - if c == #char "\\" || c == #char "\"" || c < #char " " { + if c == '\\' || c == '"' || c < ' ' { break; } @@ -249,65 +249,65 @@ unescape_string :: (token: Token, allocator: Allocator) -> str { } if i == s.count { - return string.alloc_copy(s, allocator); + return string.copy(s, allocator); } buffer := memory.make_slice(u8, s.count, allocator=allocator); - string.copy(s.data[0 .. i], buffer); + string.copy_into(s.data[0 .. i], buffer); buffer_write := i; while i < s.count { c := s[i]; switch c { - case #char "\\" { + case '\\' { i += 1; if i >= s.count do break break; switch s[i] { - case #char "\"", #char "\\", #char "/" { + case '"', '\\', '/' { buffer[buffer_write] = s[i]; i += 1; buffer_write += 1; } - case #char "n" { - buffer[buffer_write] = #char "\n"; + case 'n' { + buffer[buffer_write] = '\n'; i += 1; buffer_write += 1; } - case #char "t" { - buffer[buffer_write] = #char "\t"; + case 't' { + buffer[buffer_write] = '\t'; i += 1; buffer_write += 1; } - case #char "r" { - buffer[buffer_write] = #char "\r"; + case 'r' { + buffer[buffer_write] = '\r'; i += 1; buffer_write += 1; } - case #char "b" { - buffer[buffer_write] = #char "\b"; + case 'b' { + buffer[buffer_write] = '\b'; i += 1; buffer_write += 1; } - case #char "f" { - buffer[buffer_write] = #char "\f"; + case 'f' { + buffer[buffer_write] = '\f'; i += 1; buffer_write += 1; } - case #char "v" { - buffer[buffer_write] = #char "\v"; + case 'v' { + buffer[buffer_write] = '\v'; i += 1; buffer_write += 1; } - case #char "u" { + case 'u' { i += 1; wrote, consumed := 
parse_and_write_utf8_character(s[i..s.length], ~~&buffer[buffer_write]); buffer_write += wrote; @@ -316,7 +316,7 @@ unescape_string :: (token: Token, allocator: Allocator) -> str { } } - case #default { + case _ { buffer[buffer_write] = c; i += 1; buffer_write += 1; diff --git a/core/encoding/json/tokenizer.onyx b/core/encoding/json/tokenizer.onyx index 3e6813822..f904ffb02 100644 --- a/core/encoding/json/tokenizer.onyx +++ b/core/encoding/json/tokenizer.onyx @@ -59,14 +59,14 @@ token_get :: (use tkn: ^Tokenizer) -> (Token, Error) { if !has_next do return .{}, .{ .EOF, token.position }; switch curr_char { - case #char "{" do token.kind = .Open_Brace; - case #char "}" do token.kind = .Close_Brace; - case #char "[" do token.kind = .Open_Bracket; - case #char "]" do token.kind = .Close_Bracket; - case #char "," do token.kind = .Comma; - case #char ":" do token.kind = .Colon; - - case #char "a" .. #char "z" { + case '{' do token.kind = .Open_Brace; + case '}' do token.kind = .Close_Brace; + case '[' do token.kind = .Open_Bracket; + case ']' do token.kind = .Close_Bracket; + case ',' do token.kind = .Comma; + case ':' do token.kind = .Colon; + + case 'a' ..= 'z' { token.kind = .Invalid; skip_alpha_numeric(tkn); @@ -76,10 +76,10 @@ token_get :: (use tkn: ^Tokenizer) -> (Token, Error) { if identifier == "false" do token.kind = .False; } - case #char "-" { + case '-' { switch data[offset] { - case #char "0" .. #char "9" --- - case #default { + case '0' ..= '9' --- + case _ { err.kind = .Illegal_Character; err.pos = token.position; break break; @@ -89,42 +89,42 @@ token_get :: (use tkn: ^Tokenizer) -> (Token, Error) { fallthrough; } - case #char "0" .. #char "9" { + case '0' ..= '9' { token.kind = .Integer; skip_numeric(tkn); - if data[offset] == #char "." { + if data[offset] == '.' 
{ token.kind = .Float; next_character(tkn); skip_numeric(tkn); } - if data[offset] == #char "e" || data[offset] == #char "E" { + if data[offset] == 'e' || data[offset] == 'E' { next_character(tkn); - if data[offset] == #char "-" || data[offset] == #char "+" { + if data[offset] == '-' || data[offset] == '+' { next_character(tkn); } skip_numeric(tkn); } } - case #char "\"" { + case '"' { token.kind = .String; while offset < data.count { ch := data[offset]; - if ch == #char "\n" { + if ch == '\n' { err.kind = .String_Unterminated; err.pos = token.position; break break; } next_character(tkn); - if ch == #char "\"" { + if ch == '"' { break; } - if ch == #char "\\" { + if ch == '\\' { skip_escape(tkn); } } @@ -153,17 +153,17 @@ next_character :: (use tkn: ^Tokenizer) -> (u8, bool) { skip_whitespace :: (use tkn: ^Tokenizer) { while offset < data.count { switch data[offset] { - case #char "\t", #char " ", #char "\r", #char "\v" { + case '\t', ' ', '\r', '\v' { next_character(tkn); } - case #char "\n" { + case '\n' { line += 1; column = 1; offset += 1; } - case #default { + case _ { break break; } } @@ -174,7 +174,7 @@ skip_whitespace :: (use tkn: ^Tokenizer) { skip_alpha_numeric :: (use tkn: ^Tokenizer) { while offset < data.count { switch data[offset] { - case #char "A" .. #char "Z", #char "a" .. #char "z", #char "0" .. #char "9", #char "_" { + case 'A' ..= 'Z', 'a' ..= 'z', '0' ..= '9', '_' { next_character(tkn); continue; } @@ -188,7 +188,7 @@ skip_alpha_numeric :: (use tkn: ^Tokenizer) { skip_numeric :: (use tkn: ^Tokenizer) { while offset < data.count { switch data[offset] { - case #char "0" .. #char "9" { + case '0' ..= '9' { next_character(tkn); continue; } @@ -201,20 +201,20 @@ skip_numeric :: (use tkn: ^Tokenizer) { #local skip_escape :: (use tkn: ^Tokenizer) { switch data[offset] { - case #char "u" { + case 'u' { for i in 4 { ch, _ := next_character(tkn); switch ch { - case #char "0" .. #char "9", - #char "A" .. #char "F", - #char "a" .. 
#char "f" --- + case '0' ..= '9', + 'A' ..= 'F', + 'a' ..= 'f' --- - case #default do return; + case _ do return; } } } - case #default { + case _ { next_character(tkn); } } diff --git a/core/encoding/json/types.onyx b/core/encoding/json/types.onyx index bc5d47c8c..b1c3ef96d 100644 --- a/core/encoding/json/types.onyx +++ b/core/encoding/json/types.onyx @@ -43,98 +43,103 @@ Error :: struct { Value :: #distinct ^_Value -#inject Value { - type :: (v: Value) -> Value_Type { - if cast(rawptr) v == null do return .Null; - return (cast(^_Value) v).type; - } +Value.type :: (v: Value) -> Value_Type { + if cast(rawptr) v == null do return .Null; + return (cast(^_Value) v).type; +} - as_bool :: (v: Value) -> bool { - if cast(rawptr) v == null do return false; +Value.as_bool :: (v: Value) -> bool { + if cast(rawptr) v == null do return false; - if (cast(^_Value) v).type == .Bool do return (cast(^_Value_Bool) cast(^_Value) v).bool_; - return false; - } + if (cast(^_Value) v).type == .Bool do return (cast(^_Value_Bool) cast(^_Value) v).bool_; + return false; +} - as_str :: (v: Value) -> str { - if cast(rawptr) v == null do return null_str; +Value.as_str :: (v: Value) -> str { + if cast(rawptr) v == null do return null_str; - if (cast(^_Value) v).type == .String do return (cast(^_Value_String) cast(^_Value) v).str_; - return ""; - } + if (cast(^_Value) v).type == .String do return (cast(^_Value_String) cast(^_Value) v).str_; + return ""; +} - as_int :: (v: Value) -> i64 { - if cast(rawptr) v == null do return 0; +Value.as_int :: (v: Value) -> i64 { + if cast(rawptr) v == null do return 0; - if (cast(^_Value) v).type == .Integer do return (cast(^_Value_Integer) cast(^_Value) v).int_; - if (cast(^_Value) v).type == .Float do return ~~ (cast(^_Value_Float) cast(^_Value) v).float_; - return 0; - } + if (cast(^_Value) v).type == .Integer do return (cast(^_Value_Integer) cast(^_Value) v).int_; + if (cast(^_Value) v).type == .Float do return ~~ (cast(^_Value_Float) cast(^_Value) 
v).float_; + return 0; +} - as_float :: (v: Value) -> f64 { - if cast(rawptr) v == null do return 0; +Value.as_float :: (v: Value) -> f64 { + if cast(rawptr) v == null do return 0; - if (cast(^_Value) v).type == .Float do return (cast(^_Value_Float) cast(^_Value) v).float_; - if (cast(^_Value) v).type == .Integer do return ~~ (cast(^_Value_Integer) cast(^_Value) v).int_; - return 0; - } + if (cast(^_Value) v).type == .Float do return (cast(^_Value_Float) cast(^_Value) v).float_; + if (cast(^_Value) v).type == .Integer do return ~~ (cast(^_Value_Integer) cast(^_Value) v).int_; + return 0; +} + +Value.as_array :: (v: Value) -> [..] Value { + if cast(rawptr) v == null do return .{}; + if (cast(^_Value) v).type != .Array do return .{}; - as_array :: (v: Value) -> [..] Value { - if cast(rawptr) v == null do return .{}; - if (cast(^_Value) v).type != .Array do return .{}; + return (cast(^_Value_Array) cast(^_Value) v).array_; +} + +Value.as_map :: (v: Value, allocator := context.allocator) -> Map(str, Value) { + if cast(rawptr) v == null do return .{}; + if (cast(^_Value) v).type != .Object do return .{}; - return (cast(^_Value_Array) cast(^_Value) v).array_; + m := make(Map(str, Value), allocator) + for ^(cast(^_Value_Object, cast(^_Value, v))).object_ { + m->put(it.key, it.value); } - as_map :: (v: Value) -> Map(str, Value) { - if cast(rawptr) v == null do return .{}; - if (cast(^_Value) v).type != .Object do return .{}; + return m; +} - m: Map(str, Value); - for ^(cast(^_Value_Object, cast(^_Value, v))).object_ { - m->put(it.key, it.value); - } +Value.as_entry_array :: (v: Value) -> [] _Value_Object_Entry { + if cast(rawptr) v == null do return .{}; + if (cast(^_Value) v).type != .Object do return .{}; - return m; - } + return cast(&_Value_Object, cast(&_Value, v)).object_ +} - as_array_iter :: (v: Value) -> Iterator(Value) { - if cast(rawptr) v == null do return iter.empty(Value); - if (cast(^_Value) v).type != .Array do return iter.empty(Value); - - return 
iter.generator( - &.{ arr = cast(&_Value_Array, cast(&_Value, v)).array_, index = 0 }, - ctx => { - if ctx.index < ctx.arr.count { - defer ctx.index += 1; - return ctx.arr[ctx.index], true; - } - return .{}, false; +Value.as_array_iter :: (v: Value) -> Iterator(Value) { + if cast(rawptr) v == null do return iter.empty(Value); + if (cast(^_Value) v).type != .Array do return iter.empty(Value); + + return iter.generator( + &.{ arr = cast(&_Value_Array, cast(&_Value, v)).array_, index = 0 }, + (ctx: $C) -> ? Value { + if ctx.index < ctx.arr.count { + defer ctx.index += 1; + return ctx.arr[ctx.index]; } - ); - } + return .None; + } + ); +} - as_map_iter :: (v: Value) -> Iterator(Pair(str, Value)) { - if cast(rawptr) v == null do return iter.empty(Pair(str, Value)); - if (cast(^_Value) v).type != .Object do return iter.empty(Pair(str, Value)); - - return iter.generator( - &.{ obj = cast(&_Value_Object, cast(&_Value, v)).object_, index = 0 }, - ctx => { - if ctx.index < ctx.obj.count { - defer ctx.index += 1; - v := &ctx.obj[ctx.index]; - return Pair.make(v.key, v.value), true; - } - return .{}, false; +Value.as_map_iter :: (v: Value) -> Iterator(Pair(str, Value)) { + if cast(rawptr) v == null do return iter.empty(Pair(str, Value)); + if (cast(^_Value) v).type != .Object do return iter.empty(Pair(str, Value)); + + return iter.generator( + &.{ obj = cast(&_Value_Object, cast(&_Value, v)).object_, index = 0 }, + (ctx: $C) -> ? 
Pair(str, Value) { + if ctx.index < ctx.obj.count { + defer ctx.index += 1; + v := &ctx.obj[ctx.index]; + return Pair.make(v.key, v.value); } - ); - } + return .None; + } + ); +} - is_null :: (v: Value) -> bool { - if cast(rawptr) v == null do return true; - return cast(^_Value) v == ^_null_value || (cast(^_Value) v).type == .Null; - } +Value.is_null :: (v: Value) -> bool { + if cast(rawptr) v == null do return true; + return cast(^_Value) v == ^_null_value || (cast(^_Value) v).type == .Null; } Value_Type :: enum { @@ -186,15 +191,18 @@ _Value_Array :: struct { array_: [..] Value; } +#local +_Value_Object_Entry :: struct { + key : str; + dont_free_key := false; + + value : Value; +} + #package _Value_Object :: struct { use base := _Value.{ type = .Object }; - object_: [..] struct { - key : str; - dont_free_key := false; - - value : Value; - }; + object_: [..] _Value_Object_Entry; } #operator [] get @@ -236,7 +244,7 @@ set :: #match { v_ := cast(^_Value) v; if v_.type == .Object { - k := key if dont_copy_key else string.alloc_copy(key); + k := key if dont_copy_key else string.copy(key); (cast(^_Value_Object) v_).object_ << .{ k, dont_copy_key, value }; } @@ -254,8 +262,8 @@ set :: #match { v_ := cast(^_Value) v; if v_.type != .Object do return 0; - k := key if dont_copy_key else string.alloc_copy(key); - val := value if dont_copy_value else string.alloc_copy(value); + k := key if dont_copy_key else string.copy(key); + val := value if dont_copy_value else string.copy(value); json_value := _Value_String.{ str_ = val, @@ -309,6 +317,32 @@ free :: (use j: Json) { free(root, allocator); } +/// Frees a single Value, non-recursively. 
+free_value :: (v: Value, allocator: Allocator) { + switch v_ := cast(&_Value) v; v_.type { + case .String { + v_str := cast(&_Value_String) v_ + if !v_str.str_.data do return + + if !v_str.dont_free { + raw_free(allocator, v_str.str_.data) + } + } + + case .Array { + v_arr := cast(&_Value_Array) v_ + Array.free(&v_arr.array_) + } + + case .Object { + v_obj := cast(&_Value_Object) v_ + Array.free(&v_obj.object_) + } + } + + raw_free(allocator, cast(&_Value) v) +} + static_string :: (s: str) -> _Value_String { return .{ str_ = s, dont_free = true }; } diff --git a/core/encoding/kdl/encoder.onyx b/core/encoding/kdl/encoder.onyx index b52de03ed..14f6378b9 100644 --- a/core/encoding/kdl/encoder.onyx +++ b/core/encoding/kdl/encoder.onyx @@ -20,7 +20,7 @@ write_node :: (n: &Node, w: &io.Writer, indentation := 0) { io.write_format(w, "({}) ", ta); }); - if contains(n.node, "//") do io.write_format(w, "{\"}", n.node); + if contains(n.node, "/") do io.write_format(w, "{\"}", n.node); else do io.write(w, n.node); io.write(w, " "); diff --git a/core/encoding/kdl/kdl.onyx b/core/encoding/kdl/kdl.onyx index 99d2ed9ef..c6e9b0187 100644 --- a/core/encoding/kdl/kdl.onyx +++ b/core/encoding/kdl/kdl.onyx @@ -45,9 +45,7 @@ KDL_Number :: union { String: str; } -#doc """ - Creates a new KDL document, using the allocator provided. -""" +/// Creates a new KDL document, using the allocator provided. new_doc :: (allocator := context.allocator) -> Document { doc: Document; doc.allocator = allocator; @@ -56,11 +54,9 @@ new_doc :: (allocator := context.allocator) -> Document { } -#doc """ - Parses a string or `io.Reader` into a KDL document, using the allocator provided for internal allocations. - - Call `core.encoding.kdl.free` to free the returned document. -""" +/// Parses a string or `io.Reader` into a KDL document, using the allocator provided for internal allocations. +/// +/// Call `core.encoding.kdl.free` to free the returned document. 
parse :: #match #local -> Result(Document, Parse_Error) {} #overload @@ -96,9 +92,7 @@ parse :: (r: &io.Reader, allocator := context.allocator) -> Result(Document, Par #overload builtin.delete :: free -#doc """ - Releases all resources allocated for the document. -""" +/// Releases all resources allocated for the document. free :: (d: Document) { for d.nodes do free_node(d.allocator, it); delete(&d.nodes); @@ -135,10 +129,10 @@ free_value :: (al: Allocator, v: &Value) { string.free(s, al); } - case #default --- + case _ --- } - case #default --- + case _ --- } } diff --git a/core/encoding/kdl/kql.onyx b/core/encoding/kdl/kql.onyx index 4b07d9bb5..e09d02bf0 100644 --- a/core/encoding/kdl/kql.onyx +++ b/core/encoding/kdl/kql.onyx @@ -3,15 +3,11 @@ package core.encoding.kdl use core {iter, alloc, array, string} -#inject Document { - query :: query_doc - query_all :: query_doc_all -} +Document.query :: query_doc +Document.query_all :: query_doc_all -#inject Node { - query :: query_node - query_all :: query_node_all -} +Node.query :: query_node +Node.query_all :: query_node_all query :: #match #local { query_doc, query_node @@ -23,11 +19,10 @@ query_all :: #match #local { query_doc :: (d: &Document, query: str) -> ? &Node { query_iter := query_doc_all(d, query); - node, cont := iter.next(query_iter); + node := iter.next(query_iter); iter.close(query_iter); - if cont do return node; - return .{}; + return node; } query_doc_all :: (d: &Document, query: str) -> Iterator(&Node) { @@ -45,11 +40,10 @@ query_doc_all :: (d: &Document, query: str) -> Iterator(&Node) { query_node :: (n: &Node, query: str) -> ? 
&Node { query_iter := query_node_all(n, query); - node, cont := iter.next(query_iter); + node := iter.next(query_iter); iter.close(query_iter); - if cont do return node; - return .{}; + return node; } query_node_all :: (n: &Node, query: str) -> Iterator(&Node) { @@ -79,7 +73,6 @@ QueryIterator :: struct { current_selector := 0; } -#inject QueryIterator.make :: ( d: &Document, q: Query, @@ -98,7 +91,7 @@ QueryStack :: struct { } #local -query_next :: (ctx: &QueryIterator) -> (&Node, bool) { +query_next :: (ctx: &QueryIterator) -> ? &Node { while true { if !ctx.stack { if ctx.d { @@ -124,12 +117,12 @@ query_next :: (ctx: &QueryIterator) -> (&Node, bool) { defer array.pop(&ctx.stack); for ctx.q.matches_any { if query_selector_matches(it, ctx.stack) { - return last_query.node, true; + return last_query.node; } } } - return .{}, false; + return .None; } #local @@ -200,7 +193,7 @@ query_selector_matches :: (s: &Selector, trail: [] QueryStack) -> bool { #local query_matcher_matches :: (s: &Matcher, node: &Node) => iter.as_iter(s.details) - |> iter.every((d, [node]) => { + |> iter.every((d) use (node) => { if d.accessor.Scope do return false; if d.accessor.Node { @@ -252,7 +245,7 @@ operate_on_values :: (v1, v2: Value, op: AttributeOp) -> bool { case .EndsWith => string.ends_with(left, right); case .Contains => string.contains(left, right); - case #default => false; + case _ => false; }; } @@ -376,7 +369,7 @@ parse_selector :: (p: &QueryParser) -> &Selector { case '>' => SelectorOp.Child; case '+' => .Neighbor; case '~' => .Sibling; - case #default => .Descendant; + case _ => .Descendant; }; if segment.op->unwrap() != .Descendant { @@ -533,7 +526,6 @@ skip_whitespace :: (p: &QueryParser) { #local reached_end :: macro (p: &QueryParser) => p.cursor >= p.query.length; -#inject QueryParser.rem :: (p: &QueryParser) => p.query[p.cursor .. 
p.query.length]; #local diff --git a/core/encoding/kdl/parser.onyx b/core/encoding/kdl/parser.onyx index f131ed8b3..f1c0124d9 100644 --- a/core/encoding/kdl/parser.onyx +++ b/core/encoding/kdl/parser.onyx @@ -46,184 +46,182 @@ Token :: union { EOF: void; } -#inject Tokenizer { - make :: #match { - ((r: &io.Reader) => #Self.{ doc = r->read_all(), doc_is_owned = true }), - ((s: str) => #Self.{ doc = s }), - } +Tokenizer.make :: #match { + ((r: &io.Reader) => #Self.{ doc = r->read_all(), doc_is_owned = true }), + ((s: str) => #Self.{ doc = s }), +} - destroy :: (self: &#Self) { - if self.doc_is_owned { - delete(&self.doc); - } +Tokenizer.destroy :: (self: &#Self) { + if self.doc_is_owned { + delete(&self.doc); } +} - peek_char :: (self: &#Self) -> ? u32 { - if self.cursor >= self.doc.length do return .None; +Tokenizer.peek_char :: (self: &#Self) -> ? u32 { + if self.cursor >= self.doc.length do return .None; - codepoint_length := utf8.rune_length_from_first_byte(self.doc[self.cursor]); - if self.cursor + codepoint_length > self.doc.length do return .None; - - value := utf8.decode_rune(string.advance(self.doc, self.cursor)); - return value; - } + codepoint_length := utf8.rune_length_from_first_byte(self.doc[self.cursor]); + if self.cursor + codepoint_length > self.doc.length do return .None; + + value := utf8.decode_rune(string.advance(self.doc, self.cursor)); + return value; +} - eat_char :: (self: &#Self) -> ? u32 { - if self.cursor >= self.doc.length do return .None; +Tokenizer.eat_char :: (self: &#Self) -> ? 
u32 { + if self.cursor >= self.doc.length do return .None; - codepoint_length := utf8.rune_length_from_first_byte(self.doc[self.cursor]); - if self.cursor + codepoint_length > self.doc.length do return .None; - - value := utf8.decode_rune(string.advance(self.doc, self.cursor)); - self.cursor += codepoint_length; + codepoint_length := utf8.rune_length_from_first_byte(self.doc[self.cursor]); + if self.cursor + codepoint_length > self.doc.length do return .None; + + value := utf8.decode_rune(string.advance(self.doc, self.cursor)); + self.cursor += codepoint_length; - return value; - } + return value; +} - peek_token :: (use self: &#Self) -> Token { - if peeked_token do return peeked_token->unwrap(); - - // :CompilerBug - // There is a weird bug related to an optimization happening here. - // I would like the following code to just be: - // - // peeked_token = self->next_token(); - // - // But sadly, this does not work. This is because the next_token return value - // is being upcasted to an optional token. The tag for the optional saying - // that it is a "some" not a "none" is emitted first, then the actual call. - // The problem is that when assigning a structure literal (which is what this - // is internally implemented as), the assignment is done in parts (1st member - // emit and store, 2nd member emit and store, etc.). Well, the first member - // says that the result is a Some, so at this point peeked_token is a Some - // of invalid data. Then in next_token, this is consumed and returned as - // a valid token, even though it is not. - // - new_token := self->next_token(); - peeked_token = new_token; - - return peeked_token?; - } +Tokenizer.peek_token :: (use self: &#Self) -> Token { + if peeked_token do return peeked_token->unwrap(); + + // :CompilerBug + // There is a weird bug related to an optimization happening here. + // I would like the following code to just be: + // + // peeked_token = self->next_token(); + // + // But sadly, this does not work. 
This is because the next_token return value + // is being upcasted to an optional token. The tag for the optional saying + // that it is a "some" not a "none" is emitted first, then the actual call. + // The problem is that when assigning a structure literal (which is what this + // is internally implemented as), the assignment is done in parts (1st member + // emit and store, 2nd member emit and store, etc.). Well, the first member + // says that the result is a Some, so at this point peeked_token is a Some + // of invalid data. Then in next_token, this is consumed and returned as + // a valid token, even though it is not. + // + new_token := self->next_token(); + peeked_token = new_token; + + return peeked_token?; +} - next_token :: (self: &#Self) -> Token { - if self.peeked_token { - tkn := self.peeked_token->unwrap(); - self.peeked_token->reset(); - return tkn; - } +Tokenizer.next_token :: (self: &#Self) -> Token { + if self.peeked_token { + tkn := self.peeked_token->unwrap(); + self.peeked_token->reset(); + return tkn; + } - c := self->peek_char()->or_return(Token.{ EOF = .{} }); + c := self->peek_char()->or_return(Token.{ EOF = .{} }); - if is_whitespace(c) { - self->consume_while([c](is_whitespace(c))); - return self->next_token(); - } + if is_whitespace(c) { + self->consume_while([c](is_whitespace(c))); + return self->next_token(); + } - if c == '/' { - comment := self->handle_comment(); - if comment.Slashdash do return comment; - return self->next_token(); - } + if c == '/' { + comment := self->handle_comment(); + if comment.Slashdash do return comment; + return self->next_token(); + } - if is_newline(c) { + if is_newline(c) { + self->eat_char(); + if c == '\r' { + // Consume one more character for CRLF. self->eat_char(); - if c == '\r' { - // Consume one more character for CRLF. 
- self->eat_char(); - } - - return .{ Newline = .{} }; } - if c == ';' { self->eat_char(); return .{ Semicolon = .{} }; } - if c == '\\' { self->eat_char(); return .{ Line_Continuation = .{} }; } - if c == '(' { self->eat_char(); return .{ Start_Type = .{} }; } - if c == ')' { self->eat_char(); return .{ End_Type = .{} }; } - if c == '{' { self->eat_char(); return .{ Start_Children = .{} }; } - if c == '}' { self->eat_char(); return .{ End_Children = .{} }; } - if c == '=' { self->eat_char(); return .{ Equals = .{} }; } - if c == '"' { - return self->handle_string(); - } - if is_id(c) { - return self->handle_word(); - } + return .{ Newline = .{} }; + } - return .{ Error = .{} }; + if c == ';' { self->eat_char(); return .{ Semicolon = .{} }; } + if c == '\\' { self->eat_char(); return .{ Line_Continuation = .{} }; } + if c == '(' { self->eat_char(); return .{ Start_Type = .{} }; } + if c == ')' { self->eat_char(); return .{ End_Type = .{} }; } + if c == '{' { self->eat_char(); return .{ Start_Children = .{} }; } + if c == '}' { self->eat_char(); return .{ End_Children = .{} }; } + if c == '=' { self->eat_char(); return .{ Equals = .{} }; } + if c == '"' { + return self->handle_string(); + } + if is_id(c) { + return self->handle_word(); } - consume_while :: macro (self: &#Self, cond: Code) -> str { - res := self.doc[self.cursor .. self.cursor]; - while true { - codepoint := self->peek_char()->or_return(res); - if !(#unquote cond(codepoint)) { - return res; - } else { - self->eat_char(); - res.length += 1; - } + return .{ Error = .{} }; +} + +Tokenizer.consume_while :: macro (self: &#Self, cond: Code) -> str { + res := self.doc[self.cursor .. 
self.cursor]; + while true { + codepoint := self->peek_char()->or_return(res); + if !(#unquote cond(codepoint)) { + return res; + } else { + self->eat_char(); + res.length += 1; } } +} - handle_comment :: (self: &#Self) -> Token { - self->eat_char(); - c := self->eat_char()->or_return(Token.{EOF=.{}}); - switch c { - case '-' { - return .{ Slashdash = .{} }; - } - case '/' { - body := self->consume_while([c](!is_newline(c))); - return .{ Single_Line_Comment = body }; - } - case '*' { - cursor_start := self.cursor; - - depth := 1; - prev_char := 0; - while depth >= 1 { - c := self->eat_char()->or_return(Token.{ Error=.{} }); - if c == '*' && prev_char == '/' { - depth += 1; - c = 0; - } - if c == '/' && prev_char == '*' { - depth -= 1; - c = 0; - } - - prev_char = c; +Tokenizer.handle_comment :: (self: &#Self) -> Token { + self->eat_char(); + c := self->eat_char()->or_return(Token.{EOF=.{}}); + switch c { + case '-' { + return .{ Slashdash = .{} }; + } + case '/' { + body := self->consume_while([c](!is_newline(c))); + return .{ Single_Line_Comment = body }; + } + case '*' { + cursor_start := self.cursor; + + depth := 1; + prev_char := 0; + while depth >= 1 { + c := self->eat_char()->or_return(Token.{ Error=.{} }); + if c == '*' && prev_char == '/' { + depth += 1; + c = 0; + } + if c == '/' && prev_char == '*' { + depth -= 1; + c = 0; } - return .{ Multi_Line_Comment = self.doc[cursor_start .. self.cursor-2] }; + prev_char = c; } + + return .{ Multi_Line_Comment = self.doc[cursor_start .. 
self.cursor-2] }; } } +} - handle_string :: (self: &#Self) -> Token { - c := self->eat_char()->or_return(Token.{EOF=.{}}); - if c != '"' do return Token.{Error=.{}}; +Tokenizer.handle_string :: (self: &#Self) -> Token { + c := self->eat_char()->or_return(Token.{EOF=.{}}); + if c != '"' do return Token.{Error=.{}}; - cursor_start := self.cursor; - prev_char := 0; - while true { - c := self->eat_char()->or_return(Token.{Error=.{}}); - if c == '\\' && prev_char == '\\' { - c = 0; - } - if c == '"' && prev_char != '\\' { - break; - } - prev_char = c; + cursor_start := self.cursor; + prev_char := 0; + while true { + c := self->eat_char()->or_return(Token.{Error=.{}}); + if c == '\\' && prev_char == '\\' { + c = 0; } - - return .{ String = self.doc[cursor_start .. self.cursor-1] }; + if c == '"' && prev_char != '\\' { + break; + } + prev_char = c; } - handle_word :: (self: &#Self) -> Token { - word := self->consume_while([c](!is_end_of_word(c) && is_id(c))); - return .{ Word = word }; - } + return .{ String = self.doc[cursor_start .. 
self.cursor-1] }; +} + +Tokenizer.handle_word :: (self: &#Self) -> Token { + word := self->consume_while([c](!is_end_of_word(c) && is_id(c))); + return .{ Word = word }; } @@ -258,279 +256,277 @@ Parse_Error :: union { Parser_Error: str; } -#inject Parser { - make :: #match { - ((r: &io.Reader) => Parser.{ Tokenizer.make(r) }), - ((s: str) => Parser.{ Tokenizer.make(s) }), - } +Parser.make :: #match { + ((r: &io.Reader) => Parser.{ Tokenizer.make(r) }), + ((s: str) => Parser.{ Tokenizer.make(s) }), +} - parse :: (self: &#Self, doc: &Document) -> Parse_Error { - self.result_allocator = doc.allocator; +Parser.parse :: (self: &#Self, doc: &Document) -> Parse_Error { + self.result_allocator = doc.allocator; - while true { - token := self.tokenizer->peek_token(); - switch token { - case .EOF { - break break; - } + while true { + token := self.tokenizer->peek_token(); + switch token { + case .EOF { + break break; + } - case .Error { - self.tokenizer->next_token(); - return .{ Parser_Error = tprintf("bad token: {}", token) }; - } + case .Error { + self.tokenizer->next_token(); + return .{ Parser_Error = tprintf("bad token: {}", token) }; + } - case .Whitespace, .Newline { - self.tokenizer->next_token(); - if self.state & .Whitespace_Banned { - return .{ Whitespace_Banned = .{} }; - } + case .Whitespace, .Newline { + self.tokenizer->next_token(); + if self.state & .Whitespace_Banned { + return .{ Whitespace_Banned = .{} }; } + } - case .Single_Line_Comment, .Multi_Line_Comment { - self.tokenizer->next_token(); - if self.state & .Whitespace_Banned { - return .{ Whitespace_Banned = .{} }; - } + case .Single_Line_Comment, .Multi_Line_Comment { + self.tokenizer->next_token(); + if self.state & .Whitespace_Banned { + return .{ Whitespace_Banned = .{} }; } + } - case #default { - node_result := self->parse_node(); - if err := node_result->err(); err { - logf(.Info, self.tokenizer.doc[self.tokenizer.cursor .. 
self.tokenizer.doc.length]); - return err?; - } + case _ { + node_result := self->parse_node(); + if err := node_result->err(); err { + logf(.Info, self.tokenizer.doc[self.tokenizer.cursor .. self.tokenizer.doc.length]); + return err?; + } - node := node_result->ok()->unwrap(); - if node { - doc.nodes << node; - } + node := node_result->ok()->unwrap(); + if node { + doc.nodes << node; } } } - - return .{}; } - parse_node :: (self: &#Self) -> Result(&Node, Parse_Error) { - self.depth += 1; - defer self.depth -= 1; + return .{}; +} - self->skip_linespace(); +Parser.parse_node :: (self: &#Self) -> Result(&Node, Parse_Error) { + self.depth += 1; + defer self.depth -= 1; - if_next_token_is(self, .End_Children, [] { return .{ Ok = null }; }); + self->skip_linespace(); - is_ignored := false; - if self.tokenizer->peek_token().Slashdash { - self.tokenizer->next_token(); - is_ignored = true; - } + if_next_token_is(self, .End_Children, [] { return .{ Ok = null }; }); - type_annotation := self->parse_type_if_present()?; - name := self->parse_identifier()?; - - if !name do return .{ Ok = null }; - - node_to_return := self.result_allocator->move(Node.{ - node = name->unwrap(), - type_annotation = type_annotation, - props = make(Map(str, Value), self.result_allocator), - values = make([..] Value, 0, self.result_allocator), - children = make([..] &Node, 0, self.result_allocator), - }); - - while true { - switch tkn := self.tokenizer->peek_token(); tkn { - case .Newline, .Semicolon { - self.tokenizer->next_token(); - _apply_slashdash(node_to_return); - return .{ Ok = node_to_return }; - } + is_ignored := false; + if self.tokenizer->peek_token().Slashdash { + self.tokenizer->next_token(); + is_ignored = true; + } - case .Word, .Raw_String, .String { - self.tokenizer->next_token(); - if_next_token_is(self, .Equals, [] { - // Is this good? Or just too hacky? 
- prop_name := self->parse_into_string(tkn)->or_return( - Result(&Node, Parse_Error).{ Err = .{ Parser_Error = "Error parsing property key" } } - ); + type_annotation := self->parse_type_if_present()?; + name := self->parse_identifier()?; + + if !name do return .{ Ok = null }; + + node_to_return := self.result_allocator->move(Node.{ + node = name->unwrap(), + type_annotation = type_annotation, + props = make(Map(str, Value), self.result_allocator), + values = make([..] Value, 0, self.result_allocator), + children = make([..] &Node, 0, self.result_allocator), + }); + + while true { + switch tkn := self.tokenizer->peek_token(); tkn { + case .Newline, .Semicolon { + self.tokenizer->next_token(); + _apply_slashdash(node_to_return); + return .{ Ok = node_to_return }; + } - type := self->parse_type_if_present()?; - value := self->parse_value(self.tokenizer->next_token()) ?? [] { - return return .{ Err = .{ Parser_Error = "Error parsing property value" } }; - }; + case .Word, .Raw_String, .String { + self.tokenizer->next_token(); + if_next_token_is(self, .Equals, [] { + // Is this good? Or just too hacky? + prop_name := self->parse_into_string(tkn)->or_return( + Result(&Node, Parse_Error).{ Err = .{ Parser_Error = "Error parsing property key" } } + ); - value.type_annotation = type; + type := self->parse_type_if_present()?; + value := self->parse_value(self.tokenizer->next_token()) ?? [] { + return return .{ Err = .{ Parser_Error = "Error parsing property value" } }; + }; - node_to_return.props[prop_name] = value; - continue; - }); + value.type_annotation = type; - value := self->parse_value(tkn) ?? [] { - return return .{ Err = .{ Parser_Error = "Error parsing argument value" } }; - }; + node_to_return.props[prop_name] = value; + continue; + }); - node_to_return.values << value; - } + value := self->parse_value(tkn) ?? 
[] { + return return .{ Err = .{ Parser_Error = "Error parsing argument value" } }; + }; - case .Start_Type { - type := self->parse_type_if_present()?; + node_to_return.values << value; + } - value := self->parse_value(self.tokenizer->next_token()) ?? [] { - return return .{ Err = .{ Parser_Error = "Error parsing argument value" } }; - }; + case .Start_Type { + type := self->parse_type_if_present()?; - value.type_annotation = type; - node_to_return.values << value; - } + value := self->parse_value(self.tokenizer->next_token()) ?? [] { + return return .{ Err = .{ Parser_Error = "Error parsing argument value" } }; + }; - case .Start_Children { - self.tokenizer->next_token(); - self->skip_linespace(); + value.type_annotation = type; + node_to_return.values << value; + } - while !self.tokenizer->peek_token().End_Children { - child := self->parse_node()?; - if child { - node_to_return.children << child; - } + case .Start_Children { + self.tokenizer->next_token(); + self->skip_linespace(); - self->skip_linespace(); + while !self.tokenizer->peek_token().End_Children { + child := self->parse_node()?; + if child { + node_to_return.children << child; } - self->expect_token(.End_Children); - break break; + self->skip_linespace(); } - case .End_Children { - break break; - } + self->expect_token(.End_Children); + break break; + } - case #default { - return .{ Err = .{ Parser_Error = tprintf("Unexpected token {}, expected node", tkn) } }; - } + case .End_Children { + break break; } - } - - _apply_slashdash(node_to_return); - return .{ Ok = node_to_return }; - _apply_slashdash :: macro (n: &Node) { - if is_ignored { - n = null; + case _ { + return .{ Err = .{ Parser_Error = tprintf("Unexpected token {}, expected node", tkn) } }; } } } + + _apply_slashdash(node_to_return); + return .{ Ok = node_to_return }; - parse_value :: (self: &#Self, token: Token) -> ? 
Value { - switch token { - case .Raw_String as s { + _apply_slashdash :: macro (n: &Node) { + if is_ignored { + n = null; + } + } +} + +Parser.parse_value :: (self: &#Self, token: Token) -> ? Value { + switch token { + case .Raw_String as s { + return Value.{ + data = .{ String = string.copy(s, self.result_allocator) } + }; + } + + case .String as s { + // TODO: Handle escaped strings here + return Value.{ + data = .{ String = string.copy(s, self.result_allocator) } + }; + } + + case .Word as w { + if w == "null" { return Value.{ - data = .{ String = string.alloc_copy(s, self.result_allocator) } + data = .{ Null = .{} } }; } - case .String as s { - // TODO: Handle escaped strings here + if w == "true" { return Value.{ - data = .{ String = string.alloc_copy(s, self.result_allocator) } + data = .{ Boolean = true } }; } - - case .Word as w { - if w == "null" { - return Value.{ - data = .{ Null = .{} } - }; - } - - if w == "true" { - return Value.{ - data = .{ Boolean = true } - }; - } - - if w == "false" { - return Value.{ - data = .{ Boolean = false } - }; - } - - // TODO: parse numbers + if w == "false" { return Value.{ - data = .{ String = string.alloc_copy(w, self.result_allocator) } + data = .{ Boolean = false } }; } - case #default do return .{}; - } - } - - parse_type_if_present :: (self: &#Self) -> Result(? str, Parse_Error) { - if_next_token_is(self, .Start_Type, [start_tkn] { - type_token := self.tokenizer->next_token(); - switch type_token { - case .Word, .String, .Raw_String { - self->expect_token(.End_Type); - return .{ Ok = self->parse_into_string(type_token) }; - } + // TODO: parse numbers - case #default { - return .{ Err = .{Parser_Error = tprintf("Expected identifier or string, got {}.", type_token)}}; - } - } - }); + return Value.{ + data = .{ String = string.copy(w, self.result_allocator) } + }; + } - return .{ Ok = .None }; + case _ do return .{}; } +} - parse_identifier :: (self: &#Self) -> Result(? 
str, Parse_Error) { - id_token := self.tokenizer->next_token(); - switch id_token { +Parser.parse_type_if_present :: (self: &#Self) -> Result(? str, Parse_Error) { + if_next_token_is(self, .Start_Type, [start_tkn] { + type_token := self.tokenizer->next_token(); + switch type_token { case .Word, .String, .Raw_String { - name := self->parse_into_string(id_token) - ->catch([] { fallthrough; }); - return .{ Ok = .{ Some = name } }; + self->expect_token(.End_Type); + return .{ Ok = self->parse_into_string(type_token) }; } - case .EOF { - return .{ Ok = .None }; + case _ { + return .{ Err = .{Parser_Error = tprintf("Expected identifier or string, got {}.", type_token)}}; } + } + }); - case #default { - return .{ Err = .{Parser_Error = tprintf("Expected identifier or string, got {}.", id_token)}}; - } + return .{ Ok = .None }; +} + +Parser.parse_identifier :: (self: &#Self) -> Result(? str, Parse_Error) { + id_token := self.tokenizer->next_token(); + switch id_token { + case .Word, .String, .Raw_String { + name := self->parse_into_string(id_token) + ->catch([] { fallthrough; }); + return .{ Ok = .{ Some = name } }; } - } - skip_linespace :: (self: &#Self) { - while true { - switch tkn := self.tokenizer->peek_token(); tkn { - case .Newline, .Single_Line_Comment { - self.tokenizer->next_token(); - } + case .EOF { + return .{ Ok = .None }; + } - case #default { - return; - } - } + case _ { + return .{ Err = .{Parser_Error = tprintf("Expected identifier or string, got {}.", id_token)}}; } } +} +Parser.skip_linespace :: (self: &#Self) { + while true { + switch tkn := self.tokenizer->peek_token(); tkn { + case .Newline, .Single_Line_Comment { + self.tokenizer->next_token(); + } - expect_token :: macro (self: &#Self, type: Token.tag_enum) -> Token { - tkn := self.tokenizer->next_token(); - if tkn.tag != type { - return return .{ Err = .{Parser_Error = tprintf("Expected {}, got {}", type, tkn) } }; - } else { - return tkn; + case _ { + return; + } } } +} + - parse_into_string :: 
(self: &#Self, tkn: Token) -> ? str { - return self->parse_value(tkn)->and_then(x => x.data.String); +Parser.expect_token :: macro (self: &#Self, type: Token.tag_enum) -> Token { + tkn := self.tokenizer->next_token(); + if tkn.tag != type { + return return .{ Err = .{Parser_Error = tprintf("Expected {}, got {}", type, tkn) } }; + } else { + return tkn; } } +Parser.parse_into_string :: (self: &#Self, tkn: Token) -> ? str { + return self->parse_value(tkn)->and_then(x => x.data.String); +} + #package { MIN_BUFFER_SIZE :: 1024 BUFFER_SIZE_INCREMENT :: 4096 @@ -583,7 +579,7 @@ Parse_Error :: union { p.tokenizer->next_token(); #unquote body(__tkn); } - case #default --- + case _ --- } } } diff --git a/core/encoding/kdl/utils.onyx b/core/encoding/kdl/utils.onyx index 8c6421a8b..5c90936d1 100644 --- a/core/encoding/kdl/utils.onyx +++ b/core/encoding/kdl/utils.onyx @@ -3,59 +3,55 @@ package core.encoding.kdl use core {string} -#inject Value { - as_str :: (v: Value) -> ? str { - return v.data.String; - } - - as_int :: (v: Value) -> ? i64 { - return v.data.Number?.Integer; - } - - as_float :: (v: Value) -> ? f64 { - return v.data.Number?.Float; - } - - as_bool :: (v: Value) -> ? bool { - return v.data.Boolean; - } -} - -#inject Document { - create_node :: (d: &Document, name: str) -> &Node { - return d.allocator->move(Node.{ - node = name, - type_annotation = .None, - values = make([..] Value, d.allocator), - props = make(Map(str, Value), d.allocator), - children = make([..] &Node, d.allocator) - }); - } -} - -#inject Node { - add_value :: (n: &Node, value: Value.Value_Data) { - n.values << Value.{data = value}; - } - - set_prop :: (n: &Node, name: str, value: Value.Value_Data) { - n.props->put( - string.alloc_copy(name, n.props.allocator), - .{ data = value } - ); - } - - value :: (n: &Node, index := 0) -> ? 
Value { - if index >= n.values.length do return .{}; - - return n.values[index]; - } - - value_or_null :: (n: &Node, index := 0) -> Value { - if index >= n.values.length do return .{ - data = .{ Null = .{} } - }; - - return n.values[index]; - } -} \ No newline at end of file +Value.as_str :: (v: Value) -> ? str { + return v.data.String; +} + +Value.as_int :: (v: Value) -> ? i64 { + return v.data.Number?.Integer; +} + +Value.as_float :: (v: Value) -> ? f64 { + return v.data.Number?.Float; +} + +Value.as_bool :: (v: Value) -> ? bool { + return v.data.Boolean; +} + + +Document.create_node :: (d: &Document, name: str) -> &Node { + return d.allocator->move(Node.{ + node = name, + type_annotation = .None, + values = make([..] Value, d.allocator), + props = make(Map(str, Value), d.allocator), + children = make([..] &Node, d.allocator) + }); +} + + +Node.add_value :: (n: &Node, value: Value.Value_Data) { + n.values << Value.{data = value}; +} + +Node.set_prop :: (n: &Node, name: str, value: Value.Value_Data) { + n.props->put( + string.copy(name, n.props.allocator), + .{ data = value } + ); +} + +Node.value :: (n: &Node, index := 0) -> ? Value { + if index >= n.values.length do return .{}; + + return n.values[index]; +} + +Node.value_or_null :: (n: &Node, index := 0) -> Value { + if index >= n.values.length do return .{ + data = .{ Null = .{} } + }; + + return n.values[index]; +} diff --git a/core/encoding/osad.onyx b/core/encoding/osad.onyx index 70c007937..7430e3fa1 100644 --- a/core/encoding/osad.onyx +++ b/core/encoding/osad.onyx @@ -33,7 +33,7 @@ serialize :: (v: any) -> ? 
[] u8 { return .{}; } - return string.as_str(stream); + return str.as_str(stream); } #overload @@ -74,6 +74,15 @@ serialize :: (v: any, w: &io.Writer) -> bool { } case .Slice, .Dynamic_Array { + if v.type == str { + untyped_slice := cast(&array.Untyped_Array, v.data); + base: [&] u8 = untyped_slice.data; + count: = untyped_slice.count; + output_u32(w, count); + io.write_str(w, str.{ base, count }); + break; + } + s_info := cast(&type_info.Type_Info_Slice, info); untyped_slice := cast(&array.Untyped_Array, v.data); @@ -118,7 +127,7 @@ serialize :: (v: any, w: &io.Writer) -> bool { case i16, u16 do tag_value = cast(u64) *(cast(&u16) v.data); case i32, u32 do tag_value = cast(u64) *(cast(&u32) v.data); case i64, u64 do tag_value = cast(u64) *(cast(&u64) v.data); - case #default do assert(false, "Bad union backing type"); + case _ do panic("Bad union backing type"); } variant := array.first(u_info.variants, [x](x.tag_value == ~~tag_value)); @@ -255,7 +264,7 @@ deserialize :: (target: rawptr, type: type_expr, r: &io.Reader, allocator := con case i16, u16 do *(cast(&u16) target) = ~~variant_value; case i32, u32 do *(cast(&u32) target) = ~~variant_value; case i64, u64 do *(cast(&u64) target) = ~~variant_value; - case #default do assert(false, "Bad union backing type"); + case _ do panic("Bad union backing type"); } try(deserialize(base + u_info.alignment, variant.type, r)); diff --git a/core/encoding/utf8.onyx b/core/encoding/utf8.onyx index a80450927..8d0cff482 100644 --- a/core/encoding/utf8.onyx +++ b/core/encoding/utf8.onyx @@ -37,7 +37,7 @@ next_rune :: (s: str) -> str { return s.data[0 .. 
rune_length_from_first_byte(s[0])]; } -decode_rune :: (s: str) -> (r: rune, size: i32) { +decode_rune :: (s: str) -> (rune, i32) { len := rune_length_from_first_byte(s[0]); if len == 1 do return (cast(rune) s[0]), 1; @@ -52,7 +52,7 @@ decode_rune :: (s: str) -> (r: rune, size: i32) { return r, len; } -decode_last_rune :: (s: str) -> (r: rune, size: i32) { +decode_last_rune :: (s: str) -> (rune, i32) { i := s.length - 1; while i >= 0 && (s[i] & 0xC0 == 0x80) { i -= 1; @@ -146,13 +146,13 @@ RuneIterValue :: struct { runes :: (s: str) -> Iterator(RuneIterValue) { return iter.generator( &.{ s = s }, - ctx => { - if string.empty(ctx.s) do return RuneIterValue.{}, false; + (ctx: $C) -> ? RuneIterValue { + if string.empty(ctx.s) do return .None; r, len := decode_rune(ctx.s); defer string.advance(&ctx.s, len); - return .{ r, ctx.s[0..len] }, true; + return RuneIterValue.{ r, ctx.s[0..len] }; } ); } diff --git a/core/encoding/xml/accessor.onyx b/core/encoding/xml/accessor.onyx new file mode 100644 index 000000000..cb0ff13f7 --- /dev/null +++ b/core/encoding/xml/accessor.onyx @@ -0,0 +1,23 @@ +package core.encoding.xml +#allow_stale_code + +Document.children :: (doc: &Document, parent: Element.Index) -> Iterator(Element.Index) { + if doc.elements.count <= parent do return Iterator.empty(Element.Index) + + return Iterator.from(doc.elements[parent].children) + |> Iterator.flatten(x => x.Element) +} + +Document.child_with_name :: (doc: &Document, parent: Element.Index, name: str) -> ? 
Element.Index { + if doc.elements.count <= parent do return .None + + for v in doc.elements[parent].children { + v.Element->with([e] { + if doc.elements[e].name == name { + return e + } + }) + } + + return .None +} diff --git a/core/encoding/xml/parser.onyx b/core/encoding/xml/parser.onyx new file mode 100644 index 000000000..c8ce19584 --- /dev/null +++ b/core/encoding/xml/parser.onyx @@ -0,0 +1,219 @@ +package core.encoding.xml +#allow_stale_code + +use core {Result, tprintf} +use core.io +use core.string + +ParseError :: union { + None: void + Error: str + Expected_Token: Token.Kind.tag_enum + Invalid_Token: str + Token_Error: Tokenizer.Error +} + +parse :: #match #local {} + +#overload +parse :: (s: str, allocator := context.allocator) -> Result(Document, ParseError) { + use stream := io.buffer_stream_make(s) + return parse(&stream, allocator) +} + +#overload +parse :: (s: &io.Stream, allocator := context.allocator) -> Result(Document, ParseError) { + return parse_from_stream(s, allocator) +} + + +#local +parse_from_stream :: (s: &io.Stream, allocator := context.allocator) -> Result(Document, ParseError) { + use t := Tokenizer.make(allocator, s) + + doc: Document + doc.strings = string.StringPool.make(allocator = allocator) + doc.elements = make([..] Element, 16, allocator) + + element, parent: Element.Index + + while true { + t->skip_whitespace() + + if t.current == '<' { + t->advance() + + switch open_tkn := t->scan(); open_tkn.kind { + case .Identifier as ident { + element = create_element(&doc) + if element == 0 { + parent = element + } else { + doc.elements[parent].children->push(.{ Element = element }) + } + + doc.elements[element].parent = parent + doc.elements[element].name = doc.strings->add(ident) + + parse_attributes(&doc, &doc.elements[element].attributes, &t)? + + // TODO: Add doctype checking + + end_tkn := t->scan() + switch end_tkn.kind { + case .Gt do parent = element + + case .Slash { + expect(&t, .Gt)? 
+ parent = doc.elements[element].parent + element = parent + } + + case _ { + return .{ Err = .{ Invalid_Token = tprintf("Expected closing tag, but got '{}'", end_tkn) } } + } + } + } + + case .Slash { + ident := expect(&t, .Identifier)? + expect(&t, .Gt)? + + got := ident.kind.Identifier! + + if doc.elements[element].name != got { + return .{ + Err = .{ + Error = tprintf("Mismatched closing tag. Expected '{}' but got '{}'.", doc.elements[element].name, got) + } + } + } + + parent = doc.elements[element].parent + element = parent + } + + case .Exclamation { + next := t->scan() + switch next.kind { + case .Identifier as ident { + if ident == "DOCTYPE" { + // TODO Handle doctypes... + + skip_element(&t)? + } + } + + case .Dash { + expect(&t, .Dash)? + t->scan_comment() + } + + case _ { + return .{ + Err = .{ + Error = tprintf("Invalid token after 'scan() + switch next.kind { + case .Identifier as ident { + // if ident == "xml" { + // parse_prologue(&doc, &t)? + // } else { + skip_element(&t)? + // } + } + + case _ { + return .{ + Err = .{ + Error = "Expected 'scan_string() + if !ok do return .{ Err = .{ Token_Error = t.error } } + + doc.elements[element].children->push(.{ + Text = doc.strings->add(body_text) + }) + } + } + + return .{ Ok = doc } +} + +#local +parse_attributes :: (doc: &Document, attrs: &[..] Attribute, t: &Tokenizer) -> Result(i32, ParseError) { + while t->peek().kind.tag == .Identifier { + key := expect(t, .Identifier)? + expect(t, .Eq)? + value := expect(t, .String)? + + attr := Attribute.{ + key = doc.strings->add(key.kind.Identifier!) + value = doc.strings->add(value.kind.String!) 
+ } + attrs->push(attr) + } + + return .{ Ok = .{} } +} + + +#local +skip_element :: (t: &Tokenizer) -> Result(i32, ParseError) { + to_close := 1 + while true { + tkn := t->scan() + switch tkn.kind { + case .EOF do return .{ Err = .{ Token_Error = .Early_EOF } } + case .Lt do to_close += 1 + case .Gt { + to_close -= 1 + if to_close == 0 do break break + } + + case _ --- + } + } + + return .{ Ok = 0 } +} + +#local +expect :: (t: &Tokenizer, type: Token.Kind.tag_enum) -> Result(Token, ParseError) { + tkn := t->scan() + if tkn.kind.tag == type { + return .{ Ok = tkn } + } + + return .{ Err = .{ Expected_Token = type } } +} + +#local +create_element :: (doc: &Document) -> Element.Index { + e := doc.elements->alloc_one() + *e = .{} + return doc.elements.count - 1 +} diff --git a/core/encoding/xml/tokenizer.onyx b/core/encoding/xml/tokenizer.onyx new file mode 100644 index 000000000..71dc88404 --- /dev/null +++ b/core/encoding/xml/tokenizer.onyx @@ -0,0 +1,344 @@ +package core.encoding.xml +#allow_stale_code + +use core.alloc {arena, as_allocator} +use core.io +use core.encoding.utf8 + +Tokenizer :: struct { + arena: arena.Arena + + s: &io.Stream + r: io.Reader + + current: utf8.rune + offset: i32 + line: i32 + line_offset: i32 + + error: Tokenizer.Error + + peeked_token: ? 
Token +} + +Tokenizer.Error :: enum { + None + Early_EOF + Unterminated_String + Unterminated_Comment + Unterminated_CDATA + Invalid_Double_Dash_In_Comment +} + +Token :: struct { + kind: Token.Kind + pos: Token.Position +} + +Token.Position :: struct { + offset: i32 + line: i32 + column: i32 +} + +Token.Kind :: union { + Invalid: void + Identifier: str + Literal: str + String: str + + Double_Quote: void + Single_Quote: void + Colon: void + + Eq: void + Lt: void + Gt: void + Exclamation: void + Question: void + Hash: void + Slash: void + Dash: void + + Open_Bracket: void + Close_Bracket: void + + EOF: void +} + + +Tokenizer.make :: (a: Allocator, s: &io.Stream) -> (tkn: Tokenizer) { + tkn.arena = arena.make(a, 16384) + tkn.s = s + tkn.r = io.Reader.make(s) + tkn.offset = 0 + tkn.line = 1 + tkn.line_offset = 0 + + tkn->advance() + if tkn.current == 0xFEFF { + tkn->advance() + } + + return +} + +Tokenizer.destroy :: (self: &#Self) { + arena.free(&self.arena) + self.r->free() +} + +Tokenizer.advance :: (self: &#Self) { + if self.current == '\n' { + self.line_offset = self.offset + self.line += 1 + } + + if self.r->empty() { + self.current = -1 + return + } + + b := self.r->peek_byte() + rlen := utf8.rune_length_from_first_byte(b) + assert(rlen > 0, "Bad UTF8 Encoding") + + rbuf: [4] u8 + self.r->read_fill_buffer(rbuf[0 .. rlen]) + self.offset += rlen + + self.current = utf8.decode_rune(rbuf[0 .. 
rlen]) + if self.current == 0 { + self.current = -1 + } +} + +Tokenizer.skip_whitespace :: (self: &#Self) { + while true { + switch self.current { + case ' ', '\t', '\r', '\n' do self->advance() + case _ do return + } + } +} + +Tokenizer.scan_ident :: (self: &#Self) -> Token.Kind { + namespaced := false + + ident := make(dyn_str, 32, as_allocator(&self.arena)) + + while is_ident_rune(self.current) { + utf8.append_rune(&ident, self.current) + + self->advance() + if self.current == ':' { + if namespaced do break + namespaced = true + } + } + + return .{ Identifier = ident } + + is_ident_rune :: macro (x: utf8.rune) -> bool { + // TODO: Add support for letters and numbers in other languages + switch x { + case '_', '-', ':' do return true + case 'A'..='Z', 'a'..='z' do return true + case '0'..='9' do return true + case _ do return false + } + } +} + +Tokenizer.skip_cdata :: (self: &#Self) -> bool { + CDATA_START :: "" + + start, err := self.r->peek_bytes(0, CDATA_START.count) + if err != .None do return true + + if start != CDATA_START do return true + + self.r->skip_bytes(CDATA_START.count) + while true { + self->advance() + if self.current < 0 { + self.error = .Unterminated_CDATA + return false + } + + end, err := self.r->peek_bytes(0, CDATA_END.count) + if end == CDATA_END { + return true + } + } + + return true +} + +Tokenizer.scan_comment :: (self: &#Self) -> bool { + // For now, comment contents are discarded + + while true { + self->advance() + if self.current < 0 { + self.error = .Unterminated_Comment + return false + } + + if self.current == '-' { + b := self.r->peek_byte() + c := self.r->peek_byte(1) + if b == '-' { + if c == '>' { + break + } + + self.error = .Invalid_Double_Dash_In_Comment + return false + } + } + } + + self->advance() + self->advance() + + return true +} + +Tokenizer.scan_string :: (self: &#Self, end: utf8.rune = '<', multiline := true) -> (str, bool) { + out := make(dyn_str, 32, as_allocator(&self.arena)) + + while true { + if 
self.current == -1 do err(.Early_EOF) + + switch self.current { + case '<' { + if b := self.r->peek_byte(); b == '!' { + if b := self.r->peek_byte(1); b == '[' { + // CDATA + if !self->skip_cdata() { + return "", false + } + } else { + if c := self.r->peek_byte(2); c == '-' && b == '-' { + // Comment + self.r->skip_bytes(3) + self.offset += 3 + if !self->scan_comment() { + return "", false + } + } + } + } + } + + case '\n' { + if !multiline do err(.Unterminated_String) + } + } + + if self.current == end { + break + } + + utf8.append_rune(&out, self.current) + self->advance() + } + + str.strip_trailing_whitespace(~~ &out) + + return out, true + + err :: macro (e: Tokenizer.Error) { + self.error = e + delete(&out) + return "", false + } +} + + +Tokenizer.scan :: (self: &#Self) -> Token { + if self.peeked_token { + out := self.peeked_token! + self.peeked_token = .None + return out + } + + self->skip_whitespace() + + pos := pos_from_offset(self, self.offset) + + if is_letter(self.current) { + return Token.{ + kind = self->scan_ident() + pos = pos + } + } + + ch := self.current + self->advance() + + kind: Token.Kind + if ch == -1 { + return Token.{ .EOF, pos } + } + + switch ch { + case '<' do kind = .Lt + case '>' do kind = .Gt + case '=' do kind = .Eq + case '!' do kind = .Exclamation + case '?' do kind = .Question + case '#' do kind = .Hash + case '/' do kind = .Slash + case '-' do kind = .Dash + case ':' do kind = .Colon + + case '"', '\'' { + kind = .Invalid + s, ok := self->scan_string(ch, false) + if ok { + self->advance() + kind = .{ String = s } + } + } + + case _ { + kind = .Invalid + } + } + + return Token.{ kind, pos } +} + +Tokenizer.peek :: (self: &#Self) -> Token { + if self.peeked_token { + return self.peeked_token! 
+ } + + tkn := self->scan() + self.peeked_token = tkn + return tkn +} + + + +#local +is_letter :: (c: utf8.rune) -> bool { + switch c { + case '_', 'A'..='Z', 'a'..='z' do return true + case _ do return false + } +} + +#local +pos_from_offset :: (t: &Tokenizer, offset: i32) -> Token.Position { + line := t.line + column := offset - t.line_offset + return .{ + offset = offset + line = line + column = column + } +} diff --git a/core/encoding/xml/xml.onyx b/core/encoding/xml/xml.onyx new file mode 100644 index 000000000..1c3e3f66d --- /dev/null +++ b/core/encoding/xml/xml.onyx @@ -0,0 +1,40 @@ +package core.encoding.xml +#allow_stale_code + +use core { string } + +Document :: struct { + elements: [..] Element + strings: string.StringPool +} + +Element :: struct { + Index :: u32 + + name: str + parent: Element.Index + attributes: [..] Attribute + children: [..] Child +} + +Attribute :: struct { + key, value: str +} + +Child :: union { + Text: str + Element: Element.Index +} + +Document.destroy :: (doc: &Document) { + delete(&doc.strings) + + for &el in doc.elements { + delete(&el.attributes) + delete(&el.children) + } + + delete(&doc.elements) +} + + diff --git a/core/hash/hash.onyx b/core/hash/hash.onyx index 4e08791ca..a83ec7fe5 100644 --- a/core/hash/hash.onyx +++ b/core/hash/hash.onyx @@ -8,20 +8,18 @@ use core.intrinsics.types {type_is_enum} // core.hash.hash instead. to_u32 :: hash -#doc """ - This overloaded procedure defines how to hash something. - It is used throughout the standard library to hash values. - There are many overloads to it in the standard library, and - more can be added using #overload. - - Alternatively, a hash method can be defined for a structure or distinct type. - - Person :: struct { - hash :: (p: Person) -> u32 { - return // ... - } - } -""" +/// This overloaded procedure defines how to hash something. +/// It is used throughout the standard library to hash values. 
+/// There are many overloads to it in the standard library, and +/// more can be added using #overload. +/// +/// Alternatively, a hash method can be defined for a structure or distinct type. +/// +/// Person :: struct { +/// hash :: (p: Person) -> u32 { +/// return // ... +/// } +/// } hash :: #match -> u32 { // Does this need to have a higher precedence value? // Because if I wanted to have a custom type as the key diff --git a/core/hash/md5.onyx b/core/hash/md5.onyx index 93b373b19..4b837677f 100644 --- a/core/hash/md5.onyx +++ b/core/hash/md5.onyx @@ -4,18 +4,33 @@ package core.hash.md5 use core {io, memory, conv} use core.intrinsics.wasm {rotl_i32} +BLOCK_SIZE :: 16 -#doc "Produces an MD5 digest of a string or stream." +/// Produces an MD5 digest of a string or stream. +hash :: #match #local {} + +#overload +hash :: (x: str) -> [BLOCK_SIZE] u8 { + return digest(x)->as_bytes(); +} + +#overload +hash :: (s: &io.Stream) -> ? [BLOCK_SIZE] u8 { + return digest(s)?->as_bytes(); +} + + +/// Produces an MD5 digest of a string or stream. digest :: #match #local {} -#doc "Produces an MD5 digest of a string. This is guaranteed to succeed." +/// Produces an MD5 digest of a string. This is guaranteed to succeed. #overload digest :: (x: str) -> MD5_Digest { string_stream := io.buffer_stream_make(x, write_enabled=false); return digest(&string_stream)?; } -#doc "Produces an MD5 digest of a stream. This is not guaranteed to succeed, as the stream may fail part way through." +/// Produces an MD5 digest of a stream. This is not guaranteed to succeed, as the stream may fail part way through. 
#overload digest :: (s: &io.Stream) -> ?MD5_Digest { dig := MD5_Digest.make(); @@ -24,18 +39,22 @@ digest :: (s: &io.Stream) -> ?MD5_Digest { bytes_to_digest: [64] u8; while true { - err, byte_count := io.stream_read(s, bytes_to_digest); - - if err != .None && err != .EOF { - return .{}; - } - - if byte_count < 64 { - remaining_bytes_to_digest = byte_count; - break; + switch io.stream_read(s, bytes_to_digest) { + case .Err as err { + if err != .EOF { + return .None; + } + break break; + } + case .Ok as byte_count { + if byte_count < 64 { + remaining_bytes_to_digest = byte_count; + break break; + } + + do_cycle(&dig, bytes_to_digest); + } } - - do_cycle(&dig, bytes_to_digest); } dig->_finish(bytes_to_digest[0..remaining_bytes_to_digest]); @@ -48,56 +67,64 @@ MD5_Digest :: struct { bytes_digested: u64; } -#inject MD5_Digest { - make :: () => MD5_Digest.{ - a = 0x67452301, - b = 0xefcdab89, - c = 0x98badcfe, - d = 0x10325476, - bytes_digested = 0, - } - - reset :: (self: &#Self) { - *self = MD5_Digest.make(); - } +MD5_Digest.make :: () => MD5_Digest.{ + a = 0x67452301, + b = 0xefcdab89, + c = 0x98badcfe, + d = 0x10325476, + bytes_digested = 0, +} - _finish :: (self: &#Self, tail: [] u8) { - assert(tail.count < 64, "Tail too long"); +MD5_Digest.reset :: (self: &#Self) { + *self = MD5_Digest.make(); +} - bytes_to_digest: [64] u8; - memory.copy(~~bytes_to_digest, tail.data, tail.count); - bytes_to_digest[tail.count] = 0x80; +MD5_Digest._finish :: (self: &#Self, tail: [] u8) { + assert(tail.count < 64, "Tail too long"); - if tail.count >= 56 { - do_cycle(self, bytes_to_digest, accumulate=false); - memory.set(~~bytes_to_digest, 0, 64); - } - - self.bytes_digested += ~~tail.count; - *cast(&u64, &bytes_to_digest[56]) = self.bytes_digested * 8; + bytes_to_digest: [64] u8; + memory.copy(~~bytes_to_digest, tail.data, tail.count); + bytes_to_digest[tail.count] = 0x80; + if tail.count >= 56 { do_cycle(self, bytes_to_digest, accumulate=false); + memory.set(~~bytes_to_digest, 0, 
64); } - #doc "Returns a temporary byte array of the hash." - as_str :: (self: #Self) -> [] u8 { - result := make_temp([] u8, 16); - for i in 0 .. 4 do result[i] = ~~((self.a & (0xff << shift(i))) >> shift(i)); - for i in 4 .. 8 do result[i] = ~~((self.b & (0xff << shift(i))) >> shift(i)); - for i in 8 .. 12 do result[i] = ~~((self.c & (0xff << shift(i))) >> shift(i)); - for i in 12 .. 16 do result[i] = ~~((self.d & (0xff << shift(i))) >> shift(i)); - return result; - } + self.bytes_digested += ~~tail.count; + *cast(&u64, &bytes_to_digest[56]) = self.bytes_digested * 8; - #doc "Returns a temporary string of the hash." - as_hex_str :: (self: #Self) -> str { - result := make_temp([..] u8, 32); - for i in 0 .. 4 do conv.format(&result, "{w2b16}", (self.a & (0xff << shift(i)) >> shift(i))); - for i in 4 .. 8 do conv.format(&result, "{w2b16}", (self.b & (0xff << shift(i)) >> shift(i))); - for i in 8 .. 12 do conv.format(&result, "{w2b16}", (self.c & (0xff << shift(i)) >> shift(i))); - for i in 12 .. 16 do conv.format(&result, "{w2b16}", (self.d & (0xff << shift(i)) >> shift(i))); - return result; - } + do_cycle(self, bytes_to_digest, accumulate=false); +} + +/// Returns a byte array of the hash. +MD5_Digest.as_bytes :: (self: #Self) -> [BLOCK_SIZE] u8 { + result: [16] u8; + for i in 0 .. 4 do result[i] = ~~((self.a & (0xff << shift(i))) >> shift(i)); + for i in 4 .. 8 do result[i] = ~~((self.b & (0xff << shift(i))) >> shift(i)); + for i in 8 .. 12 do result[i] = ~~((self.c & (0xff << shift(i))) >> shift(i)); + for i in 12 .. 16 do result[i] = ~~((self.d & (0xff << shift(i))) >> shift(i)); + return result; +} + +/// Returns a temporary allocated byte array of the hash. +MD5_Digest.as_str :: (self: #Self) -> [] u8 { + result := make_temp([] u8, 16); + for i in 0 .. 4 do result[i] = ~~((self.a & (0xff << shift(i))) >> shift(i)); + for i in 4 .. 8 do result[i] = ~~((self.b & (0xff << shift(i))) >> shift(i)); + for i in 8 .. 
12 do result[i] = ~~((self.c & (0xff << shift(i))) >> shift(i)); + for i in 12 .. 16 do result[i] = ~~((self.d & (0xff << shift(i))) >> shift(i)); + return result; +} + +/// Returns a temporary string of the hash in hexadecimal format. +MD5_Digest.as_hex_str :: (self: #Self) -> str { + result := make_temp([..] u8, 32); + for i in 0 .. 4 do conv.format(&result, "{w2b16}", (self.a & (0xff << shift(i)) >> shift(i))); + for i in 4 .. 8 do conv.format(&result, "{w2b16}", (self.b & (0xff << shift(i)) >> shift(i))); + for i in 8 .. 12 do conv.format(&result, "{w2b16}", (self.c & (0xff << shift(i)) >> shift(i))); + for i in 12 .. 16 do conv.format(&result, "{w2b16}", (self.d & (0xff << shift(i)) >> shift(i))); + return result; } #local diff --git a/core/hash/sha1.onyx b/core/hash/sha1.onyx new file mode 100644 index 000000000..67b981759 --- /dev/null +++ b/core/hash/sha1.onyx @@ -0,0 +1,152 @@ +package core.hash.sha1 +#allow_stale_code + +use core {memory} +use core.intrinsics.wasm {wasm :: package} + +BLOCK_SIZE :: 20 + +Hasher :: struct { + data: [64] u8; + data_length: u32; + bit_length: u64; + state: [5] u32; +} + +Hasher.make :: () -> #Self { + return .{ + state = .[ + 0x67452301, + 0xefcdab89, + 0x98badcfe, + 0x10325476, + 0xc3d2e1f0 + ], + data_length = 0, + bit_length = 0 + }; +} + +Hasher.update :: (self: &#Self, data: [] u8) { + for i in 0 .. 
data.count { + self.data[self.data_length] = data[i]; + self.data_length += 1; + + if self.data_length == 64 { + do_cycle(self, self.data); + self.bit_length += 512; + self.data_length = 0; + } + } +} + +Hasher.final :: (self: &#Self) -> [BLOCK_SIZE] u8 { + out: [BLOCK_SIZE] u8; + + i := self.data_length; + if self.data_length < 56 { + self.data[i] = 0x80; + i += 1; + while i < 56 { + self.data[i] = 0; + i += 1; + } + } + else { + self.data[i] = 0x80; + i += 1; + while i < 64 { + self.data[i] = 0; + i += 1; + } + do_cycle(self, self.data); + memory.set(&self.data, 0, 56); + } + + self.bit_length += ~~(self.data_length * 8); + self.data[63] = cast(u8, self.bit_length >> 0 ); + self.data[62] = cast(u8, self.bit_length >> 8 ); + self.data[61] = cast(u8, self.bit_length >> 16); + self.data[60] = cast(u8, self.bit_length >> 24); + self.data[59] = cast(u8, self.bit_length >> 32); + self.data[58] = cast(u8, self.bit_length >> 40); + self.data[57] = cast(u8, self.bit_length >> 48); + self.data[56] = cast(u8, self.bit_length >> 56); + do_cycle(self, self.data); + + for i in 0 .. 4 { + out[i + 0] = ~~((self.state[0] >> (24 - i * 8)) & 0xff); + out[i + 4] = ~~((self.state[1] >> (24 - i * 8)) & 0xff); + out[i + 8] = ~~((self.state[2] >> (24 - i * 8)) & 0xff); + out[i + 12] = ~~((self.state[3] >> (24 - i * 8)) & 0xff); + out[i + 16] = ~~((self.state[4] >> (24 - i * 8)) & 0xff); + } + + return out; +} + +hash :: #match #local {} + +#overload +hash :: (x: str) -> [BLOCK_SIZE] u8 { + h := Hasher.make(); + h->update(x); + return h->final(); +} + + +#local +do_cycle :: (self: &Hasher, data: [] u8) { + m: [80] u32; + + for i in 0 .. 16 { + j := 4 * i; + m[i] = (cast(u32, data[j]) << 24) | + (cast(u32, data[j + 1]) << 16) | + (cast(u32, data[j + 2]) << 8) | + (cast(u32, data[j + 3])); + } + + for i in 16 .. 
80 { + m[i] = wasm.rotl_i32(m[i-3] ^ m[i-8] ^ m[i-14] ^ m[i-16], 1); + } + + a := self.state[0]; + b := self.state[1]; + c := self.state[2]; + d := self.state[3]; + e := self.state[4]; + + for i in 0 .. 80 { + f, k: u32; + + if 0 <= i && i <= 19 { + f = (b & c) | ((~b) & d); + k = 0x5A827999; + } elseif 20 <= i && i <= 39 { + f = b ^ c ^ d; + k = 0x6ED9EBA1; + } elseif 40 <= i && i <= 59 { + f = (b & c) | (b & d) | (c & d); + k = 0x8F1BBCDC; + } elseif 60 <= i && i <= 79 { + f = b ^ c ^ d; + k = 0xCA62C1D6; + } + + temp := wasm.rotl_i32(a, 5) + f + e + k + m[i]; + e = d; + d = c; + c = wasm.rotl_i32(b, 30); + b = a; + a = temp; + } + + + self.state[0] += a; + self.state[1] += b; + self.state[2] += c; + self.state[3] += d; + self.state[4] += e; +} + diff --git a/core/hash/sha256.onyx b/core/hash/sha256.onyx index 9ff449f77..41e6817d3 100644 --- a/core/hash/sha256.onyx +++ b/core/hash/sha256.onyx @@ -13,81 +13,80 @@ Hasher :: struct { state: [8] u32; } -#inject Hasher { - make :: () -> #Self { - return .{ - state = .[ - 0x6a09e667, - 0xbb67ae85, - 0x3c6ef372, - 0xa54ff53a, - 0x510e527f, - 0x9b05688c, - 0x1f83d9ab, - 0x5be0cd19 - ] - }; - } +Hasher.make :: () -> #Self { + return .{ + state = .[ + 0x6a09e667, + 0xbb67ae85, + 0x3c6ef372, + 0xa54ff53a, + 0x510e527f, + 0x9b05688c, + 0x1f83d9ab, + 0x5be0cd19 + ] + }; +} - update :: (self: &#Self, data: [] u8) { - for i in 0 .. data.count { - self.data[self.data_length] = data[i]; - self.data_length += 1; +Hasher.update :: (self: &#Self, data: [] u8) { + for i in 0 .. 
data.count { + self.data[self.data_length] = data[i]; + self.data_length += 1; - if self.data_length == 64 { - do_cycle(self, self.data); - self.bit_length += 512; - self.data_length = 0; - } + if self.data_length == 64 { + do_cycle(self, self.data); + self.bit_length += 512; + self.data_length = 0; } } +} - final :: (self: &#Self) -> [BLOCK_SIZE] u8 { - out: [BLOCK_SIZE] u8; +Hasher.final :: (self: &#Self) -> [BLOCK_SIZE] u8 { + out: [BLOCK_SIZE] u8; - i := self.data_length; - if self.data_length < 56 { - self.data[i] = 0x80; + i := self.data_length; + if self.data_length < 56 { + self.data[i] = 0x80; + i += 1; + while i < 56 { + self.data[i] = 0; i += 1; - while i < 56 { - self.data[i] = 0; - i += 1; - } } - else { - self.data[i] = 0x80; - while i < 64 { - self.data[i] = 0; - i += 1; - } - do_cycle(self, self.data); - memory.set(&self.data, 0, 56); + } + else { + self.data[i] = 0x80; + i += 1; + while i < 64 { + self.data[i] = 0; + i += 1; } - - self.bit_length += ~~(self.data_length * 8); - self.data[63] = cast(u8, self.bit_length >> 0); - self.data[62] = cast(u8, self.bit_length >> 8); - self.data[61] = cast(u8, self.bit_length >> 16); - self.data[60] = cast(u8, self.bit_length >> 24); - self.data[59] = cast(u8, self.bit_length >> 32); - self.data[58] = cast(u8, self.bit_length >> 40); - self.data[57] = cast(u8, self.bit_length >> 48); - self.data[56] = cast(u8, self.bit_length >> 56); do_cycle(self, self.data); + memory.set(&self.data, 0, 56); + } - for i in 0 .. 
4 { - out[i + 0] = ~~((self.state[0] >> (24 - i * 8)) & 0xff); - out[i + 4] = ~~((self.state[1] >> (24 - i * 8)) & 0xff); - out[i + 8] = ~~((self.state[2] >> (24 - i * 8)) & 0xff); - out[i + 12] = ~~((self.state[3] >> (24 - i * 8)) & 0xff); - out[i + 16] = ~~((self.state[4] >> (24 - i * 8)) & 0xff); - out[i + 20] = ~~((self.state[5] >> (24 - i * 8)) & 0xff); - out[i + 24] = ~~((self.state[6] >> (24 - i * 8)) & 0xff); - out[i + 28] = ~~((self.state[7] >> (24 - i * 8)) & 0xff); - } - - return out; + self.bit_length += ~~(self.data_length * 8); + self.data[63] = cast(u8, self.bit_length >> 0); + self.data[62] = cast(u8, self.bit_length >> 8); + self.data[61] = cast(u8, self.bit_length >> 16); + self.data[60] = cast(u8, self.bit_length >> 24); + self.data[59] = cast(u8, self.bit_length >> 32); + self.data[58] = cast(u8, self.bit_length >> 40); + self.data[57] = cast(u8, self.bit_length >> 48); + self.data[56] = cast(u8, self.bit_length >> 56); + do_cycle(self, self.data); + + for i in 0 .. 4 { + out[i + 0] = ~~((self.state[0] >> (24 - i * 8)) & 0xff); + out[i + 4] = ~~((self.state[1] >> (24 - i * 8)) & 0xff); + out[i + 8] = ~~((self.state[2] >> (24 - i * 8)) & 0xff); + out[i + 12] = ~~((self.state[3] >> (24 - i * 8)) & 0xff); + out[i + 16] = ~~((self.state[4] >> (24 - i * 8)) & 0xff); + out[i + 20] = ~~((self.state[5] >> (24 - i * 8)) & 0xff); + out[i + 24] = ~~((self.state[6] >> (24 - i * 8)) & 0xff); + out[i + 28] = ~~((self.state[7] >> (24 - i * 8)) & 0xff); } + + return out; } hash :: #match #local {} diff --git a/core/io/binary.onyx b/core/io/binary.onyx index 507760ad5..066afca0e 100644 --- a/core/io/binary.onyx +++ b/core/io/binary.onyx @@ -22,7 +22,7 @@ binary_write :: (use bw: &BinaryWriter, $T: type_expr, v: &T) { } binary_write_slice :: (use bw: &BinaryWriter, sl: [] $T, output_size := false) { - assert(false, "binary_write_slice is not working at the moment"); + panic("binary_write_slice is not working at the moment"); if output_size do binary_write(bw, 
i32, sl.count); bytes := ([] u8).{ @@ -48,13 +48,13 @@ binary_reader_make :: (s: &Stream) -> BinaryReader { } binary_read_byte :: (use br: &BinaryReader) -> u8 { - _, byte := stream_read_byte(stream); + byte := stream_read_byte(stream).Ok ?? 0; return byte; } binary_read :: (use br: &BinaryReader, $T: type_expr) -> T { buf: [sizeof T] u8; - _, bytes_read := stream_read(stream, ~~ buf); + bytes_read := stream_read(stream, ~~ buf).Ok ?? 0; return *(cast(&T) buf); } @@ -64,14 +64,14 @@ binary_read_slice :: (use br: &BinaryReader, size := 0, read_size := false, allocator := context.allocator) -> [] T { - assert(false, "binary_write_slice is not working at the moment"); + panic("binary_write_slice is not working at the moment"); if size == 0 && read_size { size = binary_read(br, i32); } sl := memory.make_slice(u8, size * sizeof T, allocator = allocator); - _, bytes_read := stream_read(stream, sl); + bytes_read := stream_read(stream, sl).Ok ?? 0; return .{ data = cast(&T) sl.data, count = size }; } diff --git a/core/io/reader.onyx b/core/io/reader.onyx index e5f0c3a7f..584af7185 100644 --- a/core/io/reader.onyx +++ b/core/io/reader.onyx @@ -24,38 +24,56 @@ Reader :: struct { error: Error; done: bool; // If an .EOF was reached. 
+ + newline_format: NewLineFormat; } -#inject Reader { - is_empty :: reader_empty; - empty :: reader_empty; - read_all :: read_all; - read_byte :: read_byte; - unread_byte :: unread_byte; - read_bytes :: read_bytes; - read_fill_buffer :: read_fill_buffer; - read_string :: read_string; - read_i32 :: read_i32; - read_i64 :: read_i64; - read_u32 :: read_u32; - read_u64 :: read_u64; - read_line :: read_line; - read_word :: read_word; - read_until :: read_until; - peek_byte :: peek_byte; - advance_line :: advance_line; - skip_whitespace :: skip_whitespace; - skip_bytes :: skip_bytes; - - lines :: lines; +#local +NewLineFormat :: enum { + LF; + CRLF; } -reader_make :: (s: &Stream, buffer_size := 4096, allocator := context.allocator) -> Reader { +Reader.make :: reader_make; +Reader.reset :: reader_reset; +Reader.free :: reader_free; +Reader.from_string :: reader_from_string; +Reader.get_buffered :: reader_get_buffered; +Reader.ensure_buffered :: reader_ensure_buffered +Reader.is_empty :: reader_empty; +Reader.empty :: reader_empty; +Reader.read_all :: read_all; +Reader.read_byte :: read_byte; +Reader.unread_byte :: unread_byte; +Reader.read_bytes :: read_bytes; +Reader.read_fill_buffer :: read_fill_buffer; +Reader.read_string :: read_string; +Reader.read_i32 :: read_i32; +Reader.read_i64 :: read_i64; +Reader.read_u32 :: read_u32; +Reader.read_u64 :: read_u64; +Reader.read_line :: read_line; +Reader.read_word :: read_word; +Reader.read_until :: read_until; +Reader.peek_byte :: peek_byte; +Reader.peek_bytes :: peek_bytes +Reader.advance_line :: advance_line; +Reader.skip_whitespace :: skip_whitespace; +Reader.skip_bytes :: skip_bytes; +Reader.lines :: lines; + +reader_make :: ( + s: &Stream, + buffer_size := 4096, + allocator := context.allocator, + newline_format := NewLineFormat.LF +) -> Reader { assert(s.vtable != null, "Stream vtable was not setup correctly."); reader: Reader; reader.stream = s; reader.error = .None; + reader.newline_format = newline_format; 
memory.alloc_slice(&reader.buffer, buffer_size, allocator); reader.buffer_allocator = allocator; @@ -67,7 +85,7 @@ reader_make :: (s: &Stream, buffer_size := 4096, allocator := context.allocator) reader_reset :: (use reader: &Reader) { start, end = 0, 0; - last_byte = #char "\0"; + last_byte = '\0'; done = false; } @@ -94,6 +112,24 @@ reader_empty :: (use reader: &Reader) -> bool { return done && reader_get_buffered(reader) == 0; } +reader_ensure_buffered :: (use reader: &Reader, bytes: u32) -> Error { + if bytes > buffer.length do return .OutOfBounds + + if start == end && end == 0 { + return .EOF + } + + while start + bytes > end { + // This handles the weird case where a stream could be + // waiting for data to arrive and continually returning None + if err := reader_read_next_chunk(reader); err != .None { + return err + } + } + + return .None +} + read_all :: (use reader: &Reader, allocator := context.allocator) -> [] u8 { output := array.make(u8, 128, allocator=allocator); @@ -233,15 +269,15 @@ read_i32 :: (use reader: &Reader) -> i32 { is_negative := false; curr, err := peek_byte(reader); - if curr == #char "-" { + if curr == '-' { is_negative = true; start += 1; } curr, err = peek_byte(reader); - while curr >= #char "0" && curr <= #char "9" { + while curr >= '0' && curr <= '9' { n *= 10; - n += cast(u32) (curr - #char "0"); + n += cast(u32) (curr - '0'); start += 1; curr, err = peek_byte(reader); @@ -259,15 +295,15 @@ read_i64 :: (use reader: &Reader) -> i64 { is_negative := false; curr, err := peek_byte(reader); - if curr == #char "-" { + if curr == '-' { is_negative = true; start += 1; } curr, err = peek_byte(reader); - while curr >= #char "0" && curr <= #char "9" { + while curr >= '0' && curr <= '9' { n *= 10; - n += cast(u64) (curr - #char "0"); + n += cast(u64) (curr - '0'); start += 1; curr, err = peek_byte(reader); @@ -284,9 +320,9 @@ read_u32 :: (use reader: &Reader) -> u32 { skip_whitespace(reader); curr, err := peek_byte(reader); - while curr >= 
#char "0" && curr <= #char "9" { + while curr >= '0' && curr <= '9' { n *= 10; - n += cast(u32) (curr - #char "0"); + n += cast(u32) (curr - '0'); start += 1; curr, err = peek_byte(reader); @@ -302,9 +338,9 @@ read_u64 :: (use reader: &Reader) -> u64 { skip_whitespace(reader); curr, err := peek_byte(reader); - while curr >= #char "0" && curr <= #char "9" { + while curr >= '0' && curr <= '9' { n *= 10; - n += cast(u64) (curr - #char "0"); + n += cast(u64) (curr - '0'); start += 1; curr, err = peek_byte(reader); @@ -327,10 +363,16 @@ read_line :: (use reader: &Reader, consume_newline := true, allocator := context count := start; defer start = count; - while count < end && buffer[count] != #char "\n" { + while count < end && buffer[count] != '\n' { count += 1; } + if newline_format == .CRLF && count >= 1 { + if buffer[count - 1] == '\r' && !consume_newline { + count -= 1; + } + } + if consume_newline && count < end { count += 1; } @@ -346,12 +388,19 @@ read_line :: (use reader: &Reader, consume_newline := true, allocator := context } count := start; - while count < end && buffer[count] != #char "\n" { + while count < end && buffer[count] != '\n' { count += 1; } - if buffer[count] == #char "\n" { + if buffer[count] == '\n' { if consume_newline && count < end do count += 1; + + if newline_format == .CRLF && count >= 1 { + if buffer[count - 1] == '\r' && !consume_newline { + count -= 1; + } + } + done = true; } @@ -393,11 +442,11 @@ read_word :: (use reader: &Reader, numeric_allowed := false, allocator := contex while count < end { curr := buffer[count]; - if (curr >= #char "a" && curr <= #char "z") || - (curr >= #char "A" && curr <= #char "Z") || - curr == #char "_" { + if (curr >= 'a' && curr <= 'z') || + (curr >= 'A' && curr <= 'Z') || + curr == '_' { count += 1; - } elseif numeric_allowed && (curr >= #char "0" && curr <= #char "9") { + } elseif numeric_allowed && (curr >= '0' && curr <= '9') { count += 1; } else { break; @@ -413,11 +462,11 @@ read_word :: (use 
reader: &Reader, numeric_allowed := false, allocator := contex count := start; while count < end { curr := buffer[count]; - if (curr >= #char "a" && curr <= #char "z") || - (curr >= #char "A" && curr <= #char "Z") || - curr == #char "_" { + if (curr >= 'a' && curr <= 'z') || + (curr >= 'A' && curr <= 'Z') || + curr == '_' { count += 1; - } elseif numeric_allowed && (curr >= #char "0" && curr <= #char "9") { + } elseif numeric_allowed && (curr >= '0' && curr <= '9') { count += 1; } else { done = true; @@ -526,6 +575,22 @@ peek_byte :: (use reader: &Reader, advance := 0) -> (u8, Error) { return buffer[start + advance], .None; } +peek_bytes :: (use reader: &Reader, advance: u32, length: u32) -> (str, Error) { + if reader_empty(reader) do return "", .EOF + + if start == end { + if fill_err := reader_read_next_chunk(reader); fill_err != .None { + return "", fill_err + } + } + + if start + advance + length >= end { + return "", .EOF + } + + return buffer[start + advance .. start + advance + length], .None +} + advance_line :: (use reader: &Reader) { if reader_empty(reader) do return; @@ -537,7 +602,7 @@ advance_line :: (use reader: &Reader) { } defer start += 1; - if buffer[start] == #char "\n" { + if buffer[start] == '\n' { return; } } @@ -546,7 +611,7 @@ advance_line :: (use reader: &Reader) { skip_whitespace :: (use reader: &Reader) { if reader_empty(reader) do return; - while start < end { + while start <= end { if start == end { if fill_err := reader_read_next_chunk(reader); fill_err != .None { return; @@ -554,16 +619,16 @@ skip_whitespace :: (use reader: &Reader) { } switch buffer[start] { - case #char " ", #char "\t", #char "\n", #char "\r" { + case ' ', '\t', '\n', '\r' { start += 1; } - case #default do return; + case _ do return; } } } -skip_bytes :: (use reader: &Reader, bytes: u32) -> (skipped: i32, err: Error) { +skip_bytes :: (use reader: &Reader, bytes: u32) -> (i32, Error) { if bytes == 0 do return 0, .None; if reader_empty(reader) do return 0, .EOF; @@ 
-597,11 +662,11 @@ lines :: (r: &Reader, inplace := false, allocator := context.allocator) => r = r, inplace = inplace, allocator = allocator, - }, (ctx: $C) -> (str, bool) { + }, (ctx: $C) -> ? str { line := ctx.r->read_line(consume_newline=true, inplace=ctx.inplace, allocator=ctx.allocator); - if line.count > 0 do return line, true; - else do return "", false; + if line.count > 0 do return line; + else do return .None; }); @@ -642,11 +707,13 @@ lines :: (r: &Reader, inplace := false, allocator := context.allocator) => // 'stream_poll' that tests if the next read would block, but that // can happen later. if end > 0 && stream.flags & .Block_On_Read { - _, data_available := stream_poll(stream, .Read, 0); + data_available := stream_poll(stream, .Read, 0).Ok ?? false; if !data_available do return .None; } - err, n := stream_read(stream, buffer[end .. buffer.count]); + read_result := stream_read(stream, buffer[end .. buffer.count]); + err := read_result.Err ?? Error.None; + n := read_result.Ok ?? 0; // // ReadPending is not an error, just a signal that data is not ready diff --git a/core/io/stdio.onyx b/core/io/stdio.onyx index 1f6904218..91d64d267 100644 --- a/core/io/stdio.onyx +++ b/core/io/stdio.onyx @@ -69,7 +69,7 @@ print :: #match #locked { if !x do return; io.write(&stdio.print_writer, x); - if x[x.count - 1] == #char "\n" && stdio.auto_flush do __flush_stdio(); + if x[x.count - 1] == '\n' && stdio.auto_flush do __flush_stdio(); }, (x) => { io.write(&stdio.print_writer, x); }, @@ -100,7 +100,7 @@ printf :: (format: str, va: ..any) { #if #defined(runtime.platform.__output_error) { // // Prints to standard error, if available. 
- eprintf :: (format: str, va: ..any) -> str { + eprintf :: (format: str, va: ..any) { buffer: [1024] u8; runtime.platform.__output_error( conv.format_va(buffer, format, va, .{ @@ -120,7 +120,7 @@ printf :: (format: str, va: ..any) { aprintf :: (format: str, va: ..any) -> str { buffer: [8196] u8; out := conv.format_va(buffer, format, va); - return string.alloc_copy(out); + return string.copy(out); } // @@ -129,7 +129,7 @@ aprintf :: (format: str, va: ..any) -> str { tprintf :: (format: str, va: ..any) -> str { buffer: [8196] u8; out := conv.format_va(buffer, format, va); - return string.alloc_copy(out, allocator=context.temp_allocator); + return string.copy(out, allocator=context.temp_allocator); } @@ -147,7 +147,7 @@ __byte_dump :: (ptr: rawptr, byte_count: u32, bytes_per_line := 8) { temp[0] = map_to_ascii(val >> 4); temp[1] = map_to_ascii(val & 15); - temp[2] = #char " "; + temp[2] = ' '; runtime.platform.__output_string(~~temp); @@ -157,23 +157,23 @@ __byte_dump :: (ptr: rawptr, byte_count: u32, bytes_per_line := 8) { map_to_ascii :: (x: u8) -> u8 { return switch x { - case 0 => #char "0"; - case 1 => #char "1"; - case 2 => #char "2"; - case 3 => #char "3"; - case 4 => #char "4"; - case 5 => #char "5"; - case 6 => #char "6"; - case 7 => #char "7"; - case 8 => #char "8"; - case 9 => #char "9"; - case 10 => #char "A"; - case 11 => #char "B"; - case 12 => #char "C"; - case 13 => #char "D"; - case 14 => #char "E"; - case 15 => #char "F"; - case #default => #char "X"; + case 0 => '0'; + case 1 => '1'; + case 2 => '2'; + case 3 => '3'; + case 4 => '4'; + case 5 => '5'; + case 6 => '6'; + case 7 => '7'; + case 8 => '8'; + case 9 => '9'; + case 10 => 'A'; + case 11 => 'B'; + case 12 => 'C'; + case 13 => 'D'; + case 14 => 'E'; + case 15 => 'F'; + case _ => 'X'; }; } } @@ -186,24 +186,24 @@ __byte_dump :: (ptr: rawptr, byte_count: u32, bytes_per_line := 8) { // // The v-table for the stream in stdio. 
#local stdio_vtable := io.Stream_Vtable.{ - read = (_: &io.Stream, buffer: [] u8) -> (io.Error, u32) { + read = (_: &io.Stream, buffer: [] u8) -> Result(u32, io.Error) { __flush_stdio(); bytes_read := runtime.platform.__read_from_input(buffer); - if bytes_read <= 0 do return .EOF, 0; + if bytes_read <= 0 do return .{ Err = .EOF }; - return .None, bytes_read; + return .{ Ok = bytes_read }; }, - read_byte = (_: &io.Stream) -> (io.Error, u8) { + read_byte = (_: &io.Stream) -> Result(u8, io.Error) { __flush_stdio(); buf: [1] u8; bytes_read := runtime.platform.__read_from_input(buf); - if bytes_read <= 0 do return .EOF, 0; + if bytes_read <= 0 do return .{ Err = .EOF }; - return .None, buf[0]; + return .{ Ok = buf[0] }; }, - write = (_: &io.Stream, buffer: [] u8) -> (io.Error, u32) { + write = (_: &io.Stream, buffer: [] u8) -> Result(u32, io.Error) { return io.stream_write(&stdio.print_stream, buffer); }, @@ -216,11 +216,11 @@ __byte_dump :: (ptr: rawptr, byte_count: u32, bytes_per_line := 8) { return .None; }, - poll = (_: &io.Stream, ev: io.PollEvent, timeout: i32) -> (io.Error, bool) { + poll = (_: &io.Stream, ev: io.PollEvent, timeout: i32) -> Result(bool, io.Error) { // TODO: This might not be right, as maybe sometimes you can't actually // write because a the output was closed? 
- if ev == .Write do return .None, true; + if ev == .Write do return .{ Ok = true }; - return .None, runtime.platform.__wait_for_input(timeout); + return .{ Ok = runtime.platform.__wait_for_input(timeout) }; } } diff --git a/core/io/stream.onyx b/core/io/stream.onyx index ecea55593..b51cf9220 100644 --- a/core/io/stream.onyx +++ b/core/io/stream.onyx @@ -17,23 +17,23 @@ Stream_Flags :: enum #flags { } Stream_Vtable :: struct { - seek : (s: &Stream, to: i32, whence: SeekFrom) -> Error = null_proc; - tell : (s: &Stream) -> (Error, u32) = null_proc; + seek : (s: &Stream, to: i32, whence: SeekFrom) -> Error = null_proc; + tell : (s: &Stream) -> Result(u32, Error) = null_proc; - read : (s: &Stream, buffer: [] u8) -> (Error, u32) = null_proc; - read_at : (s: &Stream, at: u32, buffer: [] u8) -> (Error, u32) = null_proc; - read_byte : (s: &Stream) -> (Error, u8) = null_proc; + read : (s: &Stream, buffer: [] u8) -> Result(u32, Error) = null_proc; + read_at : (s: &Stream, at: u32, buffer: [] u8) -> Result(u32, Error) = null_proc; + read_byte : (s: &Stream) -> Result(u8, Error) = null_proc; - write : (s: &Stream, buffer: [] u8) -> (Error, u32) = null_proc; - write_at : (s: &Stream, at: u32, buffer: [] u8) -> (Error, u32) = null_proc; - write_byte : (s: &Stream, byte: u8) -> Error = null_proc; + write : (s: &Stream, buffer: [] u8) -> Result(u32, Error) = null_proc; + write_at : (s: &Stream, at: u32, buffer: [] u8) -> Result(u32, Error) = null_proc; + write_byte : (s: &Stream, byte: u8) -> Error = null_proc; - close : (s: &Stream) -> Error = null_proc; - flush : (s: &Stream) -> Error = null_proc; + close : (s: &Stream) -> Error = null_proc; + flush : (s: &Stream) -> Error = null_proc; - size : (s: &Stream) -> i32 = null_proc; + size : (s: &Stream) -> i32 = null_proc; - poll : (s: &Stream, ev: PollEvent, timeout: i32) -> (Error, bool) = null_proc; + poll : (s: &Stream, ev: PollEvent, timeout: i32) -> Result(bool, Error) = null_proc; } PollEvent :: enum { @@ -56,61 +56,65 @@ 
stream_seek :: (use s: &Stream, to: i32, whence: SeekFrom) -> Error { return vtable.seek(s, to, whence); } -stream_tell :: (use s: &Stream) -> (Error, u32) { - if vtable == null do return .NoVtable, 0; - if vtable.tell == null_proc do return .NotImplemented, 0; +stream_tell :: (use s: &Stream) -> Result(u32, Error) { + if vtable == null do return .{ Err = .NoVtable }; + if vtable.tell == null_proc do return .{ Err = .NotImplemented }; return vtable.tell(s); } -stream_read :: (use s: &Stream, buffer: [] u8) -> (Error, u32) { - if vtable == null do return .NoVtable, 0; - if vtable.read == null_proc do return .NotImplemented, 0; +stream_read :: (use s: &Stream, buffer: [] u8) -> Result(u32, Error) { + if vtable == null do return .{ Err = .NoVtable }; + if vtable.read == null_proc do return .{ Err = .NotImplemented }; return vtable.read(s, buffer); } -stream_read_at :: (use s: &Stream, at: u32, buffer: [] u8) -> (Error, u32) { - if vtable == null do return .NoVtable, 0; - if vtable.read_at == null_proc do return .NotImplemented, 0; +stream_read_at :: (use s: &Stream, at: u32, buffer: [] u8) -> Result(u32, Error) { + if vtable == null do return .{ Err = .NoVtable }; + if vtable.read_at == null_proc do return .{ Err = .NotImplemented }; return vtable.read_at(s, at, buffer); } -stream_read_byte :: (use s: &Stream) -> (Error, u8) { - if vtable == null do return .NoVtable, cast(u8) 0; - if vtable.read_byte == null_proc do return .NotImplemented, 0; +stream_read_byte :: (use s: &Stream) -> Result(u8, Error) { + if vtable == null do return .{ Err = .NoVtable }; + if vtable.read_byte == null_proc do return .{ Err = .NotImplemented }; return vtable.read_byte(s); } -stream_read_until_full :: (use s: &Stream, buffer: [] u8) -> (Error, u32) { - if vtable == null do return .NoVtable, 0; - if vtable.read == null_proc do return .NotImplemented, 0; +stream_read_until_full :: (use s: &Stream, buffer: [] u8) -> Result(u32, Error) { + if vtable == null do return .{ Err = .NoVtable }; + 
if vtable.read == null_proc do return .{ Err = .NotImplemented }; bytes_read := 0; while bytes_read < buffer.count { - err, r := vtable.read(s, buffer[bytes_read .. buffer.length]); - bytes_read += r; - - if err != .ReadPending && err != .ReadLater && err != .None { - return err, bytes_read; + switch vtable.read(s, buffer[bytes_read .. buffer.length]) { + case .Ok as r { + bytes_read += r; + } + case .Err as err { + if err != .ReadPending && err != .ReadLater && err != .None { + return .{ Err = err }; + } + } } } - return .None, bytes_read; + return .{ Ok = bytes_read }; } -stream_write :: (use s: &Stream, buffer: [] u8) -> (Error, u32) { - if vtable == null do return .NoVtable, 0; - if vtable.write == null_proc do return .NotImplemented, 0; +stream_write :: (use s: &Stream, buffer: [] u8) -> Result(u32, Error) { + if vtable == null do return .{ Err = .NoVtable }; + if vtable.write == null_proc do return .{ Err = .NotImplemented }; return vtable.write(s, buffer); } -stream_write_at :: (use s: &Stream, at: u32, buffer: [] u8) -> (Error, u32) { - if vtable == null do return .NoVtable, 0; - if vtable.write_at == null_proc do return .NotImplemented, 0; +stream_write_at :: (use s: &Stream, at: u32, buffer: [] u8) -> Result(u32, Error) { + if vtable == null do return .{ Err = .NoVtable }; + if vtable.write_at == null_proc do return .{ Err = .NotImplemented }; return vtable.write_at(s, at, buffer); } @@ -143,18 +147,16 @@ stream_size :: (use s: &Stream) -> i32 { return vtable.size(s); } -#doc """ - Waits until a stream is able to be read from or written to. - - If `timeout` < 0, then there is an indefinite timeout. - - If `timeout` = 0, then there is no timeout, and this function returns immediately. - - If `timeout` > 0, then there is a `timeout` millisecond delay before returning false. 
-""" -stream_poll :: (use s: &Stream, ev: PollEvent, timeout: i32) -> (Error, bool) { - if vtable == null do return .NoVtable, false; - if vtable.poll == null_proc do return .NotImplemented, false; +/// Waits until a stream is able to be read from or written to. +/// +/// If `timeout` < 0, then there is an indefinite timeout. +/// +/// If `timeout` = 0, then there is no timeout, and this function returns immediately. +/// +/// If `timeout` > 0, then there is a `timeout` millisecond delay before returning false. +stream_poll :: (use s: &Stream, ev: PollEvent, timeout: i32) -> Result(bool, Error) { + if vtable == null do return .{ Err = .NoVtable }; + if vtable.poll == null_proc do return .{ Err = .NotImplemented }; return vtable.poll(s, ev, timeout); } @@ -248,14 +250,14 @@ buffer_stream_make :: #match #locked { } } -#match builtin.delete buffer_stream_free +#overload builtin.delete :: buffer_stream_free buffer_stream_free :: (use bs: &BufferStream) { if write_enabled && !fixed { delete(&data); } } -#match core.string.as_str buffer_stream_to_str +#overload str.as_str buffer_stream_to_str buffer_stream_to_str :: (use bs: &BufferStream) -> str { if !write_enabled do return null_str; @@ -276,63 +278,63 @@ buffer_stream_vtable := Stream_Vtable.{ return .None; }, - tell = (use ss: &BufferStream) -> (Error, u32) { - return .None, curr_pos; + tell = (use ss: &BufferStream) -> Result(u32, Error) { + return .{ Ok = curr_pos }; }, - read = (use ss: &BufferStream, buffer: [] u8) -> (Error, u32) { - if curr_pos >= data.count do return .EOF, 0; + read = (use ss: &BufferStream, buffer: [] u8) -> Result(u32, Error) { + if curr_pos >= data.count do return .{ Err = .EOF }; bytes_to_read := math.min(buffer.count, data.count - curr_pos); memory.copy(buffer.data, &data.data[curr_pos], bytes_to_read); curr_pos += bytes_to_read; - return .None, bytes_to_read; + return .{ Ok = bytes_to_read }; }, - read_at = (use ss: &BufferStream, at: u32, buffer: [] u8) -> (Error, u32) { - if at >= 
data.count do return .EOF, 0; + read_at = (use ss: &BufferStream, at: u32, buffer: [] u8) -> Result(u32, Error) { + if at >= data.count do return .{ Err = .EOF }; bytes_to_read := math.min(buffer.count, data.count - at); memory.copy(buffer.data, &data.data[at], bytes_to_read); - return .None, bytes_to_read; + return .{ Ok = bytes_to_read }; }, - read_byte = (use ss: &BufferStream) -> (Error, u8) { - if curr_pos >= data.count do return .EOF, 0; + read_byte = (use ss: &BufferStream) -> Result(u8, Error) { + if curr_pos >= data.count do return .{ Err = .EOF }; defer curr_pos += 1; - return .None, data[curr_pos]; + return .{ Ok = data[curr_pos] }; }, - write = (use dss: &BufferStream, buffer: [] u8) -> (Error, u32) { - if !write_enabled do return .NotImplemented, 0; + write = (use dss: &BufferStream, buffer: [] u8) -> Result(u32, Error) { + if !write_enabled do return .{ Err = .NotImplemented }; if curr_pos + buffer.count >= data.capacity { - if fixed do return .EOF, 0; - if !array.ensure_capacity(&data, curr_pos + buffer.count) do return .EOF, 0; + if fixed do return .{ Err = .EOF }; + if !array.ensure_capacity(&data, curr_pos + buffer.count) do return .{ Err = .EOF }; } memory.copy(&data.data[curr_pos], buffer.data, buffer.count); curr_pos += buffer.count; data.count = math.max(data.count, curr_pos); - return .None, buffer.count; + return .{ Ok = buffer.count }; }, - write_at = (use dss: &BufferStream, at: u32, buffer: [] u8) -> (Error, u32) { - if !write_enabled do return .NotImplemented, 0; + write_at = (use dss: &BufferStream, at: u32, buffer: [] u8) -> Result(u32, Error) { + if !write_enabled do return .{ Err = .NotImplemented }; if at + buffer.count >= data.capacity { - if fixed do return .EOF, 0; - if !array.ensure_capacity(&data, at + buffer.count) do return .EOF, 0; + if fixed do return .{ Err = .EOF }; + if !array.ensure_capacity(&data, at + buffer.count) do return .{ Err = .EOF }; } memory.copy(&data.data[at], buffer.data, buffer.count); data.count = 
math.max(data.count, at + buffer.count); - return .None, buffer.count; + return .{ Ok = buffer.count }; }, write_byte = (use dss: &BufferStream, byte: u8) -> Error { @@ -371,12 +373,12 @@ buffer_stream_vtable := Stream_Vtable.{ return .None; }, - poll = (use dss: &BufferStream, ev: io.PollEvent, timeout: i32) -> (Error, bool) { + poll = (use dss: &BufferStream, ev: io.PollEvent, timeout: i32) -> Result(bool, Error) { if ev == .Write && !write_enabled { - return .None, false; + return .{ Ok = false }; } - return .None, true; + return .{ Ok = true }; } } diff --git a/core/io/writer.onyx b/core/io/writer.onyx index 9dd0fc29b..241b3f3f1 100644 --- a/core/io/writer.onyx +++ b/core/io/writer.onyx @@ -19,6 +19,28 @@ Writer :: struct { buffer_filled: u32; } +Writer.make :: writer_make +Writer.free :: writer_free +Writer.flush :: writer_flush +Writer.consume_error :: writer_consume_error +Writer.remaining_capacity :: writer_remaining_capacity + +Writer.write :: write +Writer.write_byte :: write_byte +Writer.write_str :: write_str +Writer.write_cstr :: write_cstr +Writer.write_i32 :: write_i32 +Writer.write_i64 :: write_i64 +Writer.write_f32 :: write_f32 +Writer.write_f64 :: write_f64 +Writer.write_bool :: write_bool +Writer.write_ptr :: write_ptr +Writer.write_range :: write_range +Writer.write_format :: write_format +Writer.write_format_va :: write_format_va +Writer.write_escaped_str :: write_escaped_str + + writer_make :: (s: &Stream, buffer_size := 4096) -> Writer { assert(s.vtable != null, "Stream vtable was not setup correctly."); @@ -31,8 +53,7 @@ writer_make :: (s: &Stream, buffer_size := 4096) -> Writer { return w; } -// -// Future-proofing the API +#overload delete :: writer_free writer_free :: (w: &Writer) { writer_flush(w); delete(&w.buffer); @@ -41,7 +62,7 @@ writer_free :: (w: &Writer) { writer_flush :: (w: &Writer) { if w.buffer_filled == 0 do return; - err, bytes_wrote := stream_write(w.stream, w.buffer[0 .. 
w.buffer_filled]); + err := stream_write(w.stream, w.buffer[0 .. w.buffer_filled]).Err ?? Error.None; if err != .None { w.error = err; } @@ -83,7 +104,7 @@ write_byte :: (use writer: &Writer, byte: u8) { write_str :: (use writer: &Writer, s: str) { if buffer.count == 0 { - if err := stream_write(stream, s); err != .None { + if err := stream_write(stream, s).Err ?? Error.None; err != .None { writer.error = err; } @@ -93,7 +114,7 @@ write_str :: (use writer: &Writer, s: str) { } else { writer_flush(writer); - if err := stream_write(stream, s); err != .None { + if err := stream_write(stream, s).Err ?? Error.None; err != .None { writer.error = err; } } @@ -159,7 +180,7 @@ write_format_va :: (use writer: &Writer, format: str, va: [] any) { } write_escaped_str :: (use writer: &Writer, s: str) { - write_byte(writer, #char "\""); + write_byte(writer, '"'); while i := 0; i < s.count { defer i += 1; @@ -167,24 +188,24 @@ write_escaped_str :: (use writer: &Writer, s: str) { // @Incomplete switch ch { - case #char "\n" { write_byte(writer, #char "\\"); write_byte(writer, #char "n"); } - case #char "\r" { write_byte(writer, #char "\\"); write_byte(writer, #char "r"); } - case #char "\t" { write_byte(writer, #char "\\"); write_byte(writer, #char "t"); } - case #char "\v" { write_byte(writer, #char "\\"); write_byte(writer, #char "v"); } - case #char "\b" { write_byte(writer, #char "\\"); write_byte(writer, #char "b"); } - case #char "\f" { write_byte(writer, #char "\\"); write_byte(writer, #char "f"); } - case #char "\0" { write_byte(writer, #char "\\"); write_byte(writer, #char "0"); } - case #char "\"" { write_byte(writer, #char "\\"); write_byte(writer, #char "\""); } - case #char "\\" { write_byte(writer, #char "\\"); write_byte(writer, #char "\\"); } - - case #default { + case '\n' { write_byte(writer, '\\'); write_byte(writer, 'n'); } + case '\r' { write_byte(writer, '\\'); write_byte(writer, 'r'); } + case '\t' { write_byte(writer, '\\'); write_byte(writer, 't'); } + case 
'\v' { write_byte(writer, '\\'); write_byte(writer, 'v'); } + case '\b' { write_byte(writer, '\\'); write_byte(writer, 'b'); } + case '\f' { write_byte(writer, '\\'); write_byte(writer, 'f'); } + case '\0' { write_byte(writer, '\\'); write_byte(writer, '0'); } + case '"' { write_byte(writer, '\\'); write_byte(writer, '"'); } + case '\\' { write_byte(writer, '\\'); write_byte(writer, '\\'); } + + case _ { // @Speed write_byte(writer, ch); } } } - write_byte(writer, #char "\""); + write_byte(writer, '"'); } write :: #match { diff --git a/core/js/foreign.onyx b/core/js/foreign.onyx new file mode 100644 index 000000000..821b261fe --- /dev/null +++ b/core/js/foreign.onyx @@ -0,0 +1,53 @@ + +package core.js + +use core.alloc + +#js #order 0 #file "./onyx.js" + +#package { + // Value + #foreign "__syscall" { + __free :: (value: Value) -> void --- + __delete :: (value: Value, prop: str) -> void --- + __new :: (value: Value, args: [] Value) -> Value --- + + __dot :: (value: Value, name: str) -> Value --- + __sub :: (value: Value, idx: i32) -> Value --- + __set :: (value: Value, name: str, v: Value) -> void --- + __set_index :: (value: Value, index: i32, v: Value) -> void --- + __len :: (value: Value) -> i32 --- + + __from_str :: (s: str) -> Value --- + __from_arr :: (a: [] Value) -> Value --- + + // Call with zero-length to just retrieve length. 
+ __to_str :: (v: Value, buf: [] u8) -> i32 --- + + __call :: (func: Value, args: [] Value) -> Value --- + __method :: (value: Value, method: str, args: [] Value) -> Value --- + + __instance_of :: (value: Value, class: Value) -> bool --- + + __new_object :: () -> Value --- + __new_array :: () -> Value --- + + __copy_to_js :: (arraybuf: Value, buf: [] u8) -> i32 --- + __copy_to_onyx :: (buf: [] u8, arraybuf: Value) -> i32 --- + } + + // Func + #foreign "__syscall" { + __make_func :: (f: (Value, [] Value) -> Value) -> Func --- + } + + #export "__allocate_arg_buf" (args: i32) -> rawptr { + return alloc.heap_allocator->alloc(args * sizeof Value); + } + + #export "__free_arg_buf" (buf: rawptr) { + if buf == null do return; + + alloc.heap_allocator->free(buf); + } +} diff --git a/core/js/func.onyx b/core/js/func.onyx new file mode 100644 index 000000000..39ac2ea24 --- /dev/null +++ b/core/js/func.onyx @@ -0,0 +1,31 @@ + +package core.js + +/// Represents an Onyx function that can be called by JavaScript. +Func :: #distinct Value + +func :: Func.from + +/// Creates a JavaScript function that wraps an Onyx function. 
+Func.from :: (f: (this: Value, args: [] Value) -> Value) -> Func { + func := __make_func(f); + __add_to_pool(cast(Value) func); + return func; +} + +Func.release :: (f: Func) { + if __current_pool { + __current_pool->remove(cast(Value) f); + } + + cast(Value, f)->release(); +} + +Func.leak :: (f: Func) -> Func { + if __current_pool { + __current_pool->remove(cast(Value) f); + } + + return f; +} + diff --git a/core/js/onyx.js b/core/js/onyx.js new file mode 100644 index 000000000..db4ff7d26 --- /dev/null +++ b/core/js/onyx.js @@ -0,0 +1,340 @@ +function JsHeap(onyx) { + this.heap = [NaN, 0, null, true, false, globalThis, onyx] + this.mappedValues = new Map([[0,1],[null,2],[true,3],[false,4],[globalThis,5],[onyx,6]]) + this.freelist = new Array() + + return this +} + +JsHeap.prototype = { + get: function(x) { + if ((x | 0) >= this.heap.length) { + console.error("[JsHeap] Accessing outside of heap bounds.") + return null + } + return this.heap[x | 0] + }, + get_or_insert: function(v) { + var index = this.mappedValues.get(v) + if (index === undefined) index = this.insert(v) + return index + }, + insert: function(v) { + if (this.freelist.length > 0) { + var i = this.freelist.pop() + this.heap[i] = v + return i + } else { + this.heap.push(v) + return this.heap.length - 1 + } + }, + free: function(x) { + if ((x | 0) <= 6) return + if ((x | 0) >= this.heap.length) { + console.error("[JsHeap] Deleting outside of heap bounds.") + return + } + if (this.heap[x | 0] !== undefined) { + this.heap[x | 0] = undefined + this.freelist.push(x | 0) + } + } +} + +globalThis.Onyx = function() { + this.memory = null + this.data = null + this.instance = null + this.started = false + this._scratchBuf = new ArrayBuffer(16) + this._scratchBufView = new DataView(this._scratchBuf) + this._textDecoder = new TextDecoder("utf-8") + this._textEncoder = new TextEncoder("utf-8") + this._heap = new JsHeap(this) + + return this +} + +Onyx.modules = {} + +Onyx.register_module = function(name, 
member_getter) { + Onyx.modules[name] = Onyx.modules[name] || [] + Onyx.modules[name].push(member_getter) +} + +Onyx.load = function(source) { + return fetch(source) + .then(function(res) { return res.arrayBuffer() }) + .then(function(code) { return Onyx.create(code) }) +} + +Onyx.create = function(wasm_code) { + var instance = new Onyx() + var import_object = {} + for (var name in Onyx.modules) { + var module_object = {} + for (var i in Onyx.modules[name]) { + Object.assign(module_object, Onyx.modules[name][i](instance)) + } + import_object[name] = module_object + } + + return WebAssembly.instantiate(wasm_code, import_object) + .then(function(wasm_module) { + instance.memory = wasm_module.instance.exports.memory + instance.instance = wasm_module.instance + + return instance + }) +} + + +globalThis.Onyx.prototype = { + start: function() { + if (this.instance.started) return + this.instance.started = true + + this.instance.exports._start() + }, + + invoke: function(name, ...rest) { + this.start() + + var func = this.instance.exports[name] + if (func == null) throw new Error(`no such export '${name}'`) + + var args = rest.map(arg => this.store_value(arg)) + var result = func.call(null, args) + if (result !== undefined) { + const index = this.load_value_index(result) + result = this.load_value(result) + this._heap.free(index) + } + + for (var i in args) { + const index = this.load_value_index(args[i]) + this._heap.free(index) + } + + return result + }, + + extract_string: function(ptr, len) { + const array = new Uint8Array(this.memory.buffer, ptr, len) + const string = this._textDecoder.decode(array) + return string + }, + + load_value: function(value) { + this._scratchBufView.setBigUint64(0, value, true) + const fp_value = this._scratchBufView.getFloat64(0, true) + + if (fp_value === 0) return undefined + if (!isNaN(fp_value)) return fp_value + + const index = Number(value & 0xffffffffn) + return this._heap.get(index) + }, + + load_value_index: function(value) { + 
this._scratchBufView.setBigUint64(0, value, true) + const fp_value = this._scratchBufView.getFloat64(0, true) + + if (fp_value === 0) return 0 + if (!isNaN(fp_value)) return 0 + + const index = Number(value & 0xffffffffn) + return index + }, + + load_slice_of_values: function(addr, len) { + const results = [] + const data = new DataView(this.memory.buffer) + for (var i = 0; i < len; i++) { + results.push( + this.load_value(data.getBigUint64(addr + i * 8, true)) + ) + } + return results + }, + + store_value: function(value) { + const nan_header_bytes = 0x7ff80000 + if (typeof value === "number" && value !== 0) { + if (isNaN(value)) { + this._scratchBufView.setUint32(4, nan_header_bytes, true) + this._scratchBufView.setUint32(0, 0, true) + return this._scratchBufView.getBigUint64(0, true) + } + + this._scratchBufView.setFloat64(0, value, true) + return this._scratchBufView.getBigUint64(0, true) + } + + if (value === undefined) return 0n + + const index = this._heap.get_or_insert(value) + var type_flag = 0 + switch (typeof value) { + case "object": if (value !== null) type_flag = 1; break + case "string": type_flag = 2; break + case "symbol": type_flag = 3; break + case "function": type_flag = 4; break + } + + this._scratchBufView.setUint32(4, nan_header_bytes | type_flag, true) + this._scratchBufView.setUint32(0, index, true) + return this._scratchBufView.getBigUint64(0, true) + } +} + +Onyx.register_module("host", function(instance) { return { + print_str: function(ptr, len) { console.log(instance.extract_string(ptr, len)) }, + time: function() { return BigInt(Date.now()) }, + exit: function() { debugger } +}}) + +Onyx.register_module("__syscall", function(instance) { return { + __new_object: function() { + return instance.store_value({}) + }, + __new_array: function() { + return instance.store_value([]) + }, + __new: function(v, args_ptr, args_len) { + const args = instance.load_slice_of_values(args_ptr, args_len) + const obj = instance.load_value(v) + const 
new_obj = instance.store_value(Reflect.construct(obj, args)) + return new_obj + }, + __delete: function(v, prop_ptr, prop_len) { + const value = instance.load_value(v) + const prop = instance.extract_string(prop_ptr, prop_len) + + Reflect.deleteProperty(value, prop) + }, + __free: function(v) { + const index = instance.load_value_index(v) + instance._heap.free(index) + }, + __dot: function(v, name_ptr, name_len) { + const name = instance.extract_string(name_ptr, name_len) + const value = instance.load_value(v) + const result = instance.store_value(Reflect.get(value, name)) + return result + }, + __sub: function(v, i) { + const value = instance.load_value(v) + const result = instance.store_value(Reflect.get(value, i)) + return result + }, + __set: function(v, name_ptr, name_len, newval) { + const value = instance.load_value(v) + const new_value = instance.load_value(newval) + const name = instance.extract_string(name_ptr, name_len) + Reflect.set(value, name, new_value) + }, + __set_index: function(v, index, newval) { + const value = instance.load_value(v) + const new_value = instance.load_value(newval) + Reflect.set(value, index, new_value) + }, + __len: function(v) { + const value = instance.load_value(v) + return value.length + }, + __from_str: function(ptr, len) { + const s = instance.extract_string(ptr, len) + return instance.store_value(s) + }, + __from_arr: function(ptr, len) { + const arr = instance.load_slice_of_values(ptr, len) + return instance.store_value(arr) + }, + __call: function(f, args_ptr, args_len) { + const args = instance.load_slice_of_values(args_ptr, args_len) + const func = instance.load_value(f) + const result = instance.store_value(Reflect.apply(func, undefined, args)) + return result + }, + __method: function(v, method_ptr, method_len, args_ptr, args_len) { + const args = instance.load_slice_of_values(args_ptr, args_len) + const method = instance.extract_string(method_ptr, method_len) + const value = instance.load_value(v) + const func = 
Reflect.get(value, method) + + const result = instance.store_value(Reflect.apply(func, value, args)) + return result + }, + __instance_of: function(v, c) { + const value = instance.load_value(v) + const base = instance.load_value(c) + + if (typeof base !== "object" && typeof base !== "function") return false + + return value instanceof base + }, + __make_func: function(funcidx, closureptr) { + var wasmFunc = instance.instance.exports.__indirect_function_table.get(funcidx) + + return instance.store_value(function() { + var data = new DataView(instance.memory.buffer) + + var argptr = instance.instance.exports.__allocate_arg_buf(arguments.length) + for (var i = 0; i < arguments.length; i++) { + data.setBigUint64(argptr + i * 8, instance.store_value(arguments[i]), true) + } + + var thisArg = instance.store_value(this) + + instance.instance.exports.__closure_base.value = closureptr + var result = wasmFunc(thisArg, argptr, arguments.length) + + const thisIndex = instance.load_value_index(thisArg) + instance._heap.free(thisIndex) + + instance.instance.exports.__free_arg_buf(argptr) + + return instance.load_value(result) + }) + }, + __to_str: function(v, outptr, outlen) { + const s = instance.load_value(v) + if (typeof s !== "string") return -1 + + const encoded = instance._textEncoder.encode(s) + if (outlen == 0) return encoded.length + + const onyxmem = new Uint8Array(instance.memory.buffer) + onyxmem.set(encoded, outptr) + return encoded.length + }, + __copy_to_js: function(a_ref, bufptr, buflen) { + const a = instance.load_value(a_ref) + if (!(a instanceof Uint8Array || a instanceof Uint8ClampedArray)) { + return -1 + } + + const onyxmem = new Uint8Array(instance.memory.buffer) + const copylen = Math.min(buflen, a.length) + const to_copy = onyxmem.subarray(bufptr, bufptr+copylen) + a.set(to_copy) + + return copylen + }, + __copy_to_onyx: function(bufptr, buflen, a_ref) { + const a = instance.load_value(a_ref) + if (!(a instanceof Uint8Array || a instanceof 
Uint8ClampedArray)) { + return -1 + } + + const onyxmem = new Uint8Array(instance.memory.buffer, bufptr) + const copylen = Math.min(buflen, a.length) + const to_copy = a.subarray(0, copylen) + onyxmem.set(to_copy) + + return copylen + } +}}) + diff --git a/core/js/value.onyx b/core/js/value.onyx new file mode 100644 index 000000000..6fa0cfa29 --- /dev/null +++ b/core/js/value.onyx @@ -0,0 +1,448 @@ + +package core.js + +use core.iter +use core.math +use core.array +use core.alloc + +/// Used to represent a JavaScript value. +Value :: #distinct u64 + +value :: Value.from + +Type :: enum { + Undefined :: 0; + Null :: 1; + Boolean :: 2; + Number :: 3; + String :: 4; + Symbol :: 5; + Object :: 6; + Function :: 7; +} + +Undefined :: Value.{0} +NaN :: Value.{0x7ff8000000000000} +Zero :: Value.{0x7ff8000000000001} +Null :: Value.{0x7ff8000000000002} +True :: Value.{0x7ff8000000000003} +False :: Value.{0x7ff8000000000004} +Global :: Value.{0x7ff8000000000005} +Onyx :: Value.{0x7ff8000000000006} + +/// Creates a new JavaScript object and returns the `Value` handle to it. +Value.new_object :: () -> Value { + v := __new_object(); + __add_to_pool(v); + return v; +} + +/// Creates a new JavaScript array and returns the `Value` handle to it. +Value.new_array :: () -> Value { + v := __new_array(); + __add_to_pool(v); + return v; +} + +/// Converts an Onyx value into a JavaScript `Value`. +/// +/// Currently, these types are supported: `i32`, `u32`, `i64`, `u64`, `f32`, `f64`, `bool`, str, `Value`, `Func`, and `Map(str, $V)`. +Value.from :: #match { + (m: Map(str, $V)) -> ? Value { + result := Value.new_object(); + __add_to_pool(result); + + for &entry in m.entries { + result->set(entry.key, Value.from(entry.value) ?? Value.Undefined); + } + + return result; + }, + + (a: any) -> ? 
Value { + switch a.type { + case i32, u32 { + // Integers are simply upcasted to float64s + v := cast(f64) *cast(&i32, a.data); + if v == 0 do return Zero; + + return *cast(&Value) &v; + } + + case i64, u64 { + // Integers are simply upcasted to float64s, even with a loss of precision + // Gotta love JS. + v := cast(f64) *cast(&i64, a.data); + if v == 0 do return Zero; + + return *cast(&Value) &v; + } + + case f32 { + v := cast(f64) *cast(&f32, a.data); + if v == 0 do return Zero; + + return *cast(&Value) &v; + } + + case f64 { + v := *cast(&f64, a.data); + if v == 0 do return Zero; + + return *cast(&Value) &v; + } + + case bool { + if *cast(&bool, a.data) do return True; + else do return False; + } + } + + if a.type == str { + s := __from_str(*cast(&str) a.data); + __add_to_pool(s); + return s; + } + + if a.type == #type (Value, [] Value) -> Value { + f := *cast(&(Value, [] Value) -> Value) a.data; + v := cast(Value) Func.from(f); + __add_to_pool(v); + return v; + } + + if a.type == Value do return *cast(&Value) a.data; + if a.type == Func do return *cast(&Value) a.data; // Funcs right now are just values + + return .None; + } +} + +/// Converts a `Value` into a `bool`. If the value is not internally of boolean type, `None` is returned. +Value.as_bool :: (v: Value) -> ? bool { + if cast(u64) v == cast(u64) True do return true; + if cast(u64) v == cast(u64) False do return false; + + return .None; +} + +/// Converts a `Value` into a `f64`. If the value is not internally of float type, `None` is returned. +Value.as_float :: (v: Value) -> ? f64 { + v_u64 := cast(u64, v); + v_f64 := *cast(&f64, &v_u64); + if !math.is_nan(v_f64) { + return v_f64; + } + + return .None; +} + +/// Converts a `Value` into a `i32`. If the value is not internally of float type, `None` is returned. +Value.as_int :: (v: Value) -> ? 
i32 { + v_u64 := cast(u64, v); + v_f64 := *cast(&f64, &v_u64); + if !math.is_nan(v_f64) { + return cast(i32) v_f64; + } + + return .None; +} + +/// Converts a `Value` into a `str`. If the value is not internally of str type, `None` is returned. +/// +/// Note that this function returns a string that is allocated on the heap. +/// The caller is responsible for managing the returned string. +Value.as_str :: (v: Value) -> ? str { + len := __to_str(v, .[]); + if len < 0 do return .None; + + ret := make(str, len); + __to_str(v, ret); + return ret; +} + +/// Returns the internal type of the `Value`. +Value.type :: (v: Value) -> Type { + v_u64 := cast(u64, v); + + // TODO: Cleanup these magic constants. + if v_u64 == 0 do return .Undefined; + if v_u64 == 0x7ff8000000000001 do return .Number; + if v_u64 == 0x7ff8000000000002 do return .Null; + if v_u64 == 0x7ff8000000000003 do return .Boolean; + if v_u64 == 0x7ff8000000000004 do return .Boolean; + if v_u64 == 0x7ff8000000000005 do return .Object; + if v_u64 == 0x7ff8000000000006 do return .Object; + + if !math.is_nan(*cast(&f64, &v_u64)) do return .Number; + + type_flag: u32 = ~~((v_u64 >> 32) & 7); + switch type_flag { + case 0 do return .Null; + case 1 do return .Object; + case 2 do return .String; + case 3 do return .Symbol; + case 4 do return .Function; + } + + return .Undefined; +} + +/// Calls a method on a `Value`. +Value.call :: #match { + (v: Value, method: str, args: [] any) -> Value { + transform_args(args, [](__method(v, method, mapped_args))); + }, + + (v: Value, method: str, args: ..any) -> Value { + return Value.call(v, method, cast([] any) args); + } +} + +/// Invokes the `Value` as though it is a JavaScript function. +Value.invoke :: #match { + (v: Value, args: ..any) -> Value { + transform_args(args, [](__call(v, mapped_args))); + }, + + (v: Value, args: ..any) -> Value { + return Value.invoke(v, cast([] any) args); + } +} + +/// Removes the `Value` from current `ValuePool`. 
This means that the `Value` will not be automatically collected, and must be released with `Value.release`. +Value.leak :: (v: Value) -> Value { + if __current_pool { + __current_pool->remove(v); + } + + return v; +} + +/// Releases the `Value` from the JavaScript heap. The `Value` should not be used after this method is called. +Value.release :: (v: Value) { + if __current_pool { + __current_pool->remove(v); + } + + return __free(v); +} + +/// Invokes the JavaScript `delete` operator on the specified property. +Value.delete :: (v: Value, property: str) { + return __delete(v, property); +} + +Value.equals :: (v1: Value, v2: Value) -> bool { + return cast(u64, v1) == cast(u64, v2) && cast(u64, v1) != cast(u64, NaN); +} + +Value.is_null :: (v: Value) -> bool { + return cast(u64) v == cast(u64) Null; +} + +Value.is_undefined :: (v: Value) -> bool { + return cast(u64) v == cast(u64) Undefined; +} + +Value.is_nan :: (v: Value) -> bool { + return cast(u64) v == cast(u64) NaN; +} + +/// Returns the evaluation of the `instanceof` operator in JavaScript. +Value.instance_of :: (v: Value, base: Value) -> bool { + return __instance_of(v, base); +} + +/// Invokes the `new` operator on the `Value`, with arguments `args`. +Value.new :: (v: Value, args: ..any) -> Value { + transform_args(cast([] any) args, [](__new(v, mapped_args))); +} + +/// Retrieves the evaluation of `v[prop]` in JavaScript. +Value.get :: (v: Value, prop: str) -> Value { + r := __dot(v, prop); + __add_to_pool(r); + return r; +} + +/// Retrieves the evaluation of `v[prop] = value` in JavaScript. +Value.set :: #match #locked { + (v: Value, prop: str, value: Value) { + __set(v, prop, value); + }, + + (v: Value, prop: str, value: $T) { + js_value := Value.from(value) ?? Undefined; + __set(v, prop, js_value); + js_value->release(); + }, + + (v: Value, index: i32, value: Value) { + __set_index(v, index, value); + }, + + (v: Value, index: i32, value: $T) { + js_value := Value.from(value) ?? 
Undefined; + __set_index(v, index, js_value); + js_value->release(); + } +} + +/// Special case for `->get("length")`. Because it is required so often, this optimization is quite nice. +Value.length :: (v: Value) -> i32 { + return __len(v); +} + +/// Retrieves the evaluation of `v[i]` in JavaScript. +Value.index :: (v: Value, i: i32) -> Value { + r := __sub(v, i); + __add_to_pool(r); + return r; +} + +/// JavaScript defines a "falsesy" value as undefined, null, false, 0, and "". +/// All other values are "truthy". +Value.truthy :: (v: Value) -> bool { + switch v->type() { + case .Undefined, .Null do return false; + case .Boolean do return cast(u64) v == cast(u64) True; + case .Number do return cast(u64) v != cast(u64) Zero; + case .String do return v->length() != 0; + case _ do return true; + } +} + +/// Copies data from a Uint8Array in JS to a buffer in Onyx. +/// Returns the number of bytes copied, or -1 if the value was not a Uint8Array. +Value.copy_to_onyx :: (v: Value, buf: [] u8) -> i32 { + return __copy_to_onyx(buf, v); +} + + +/// Copies data into a Uint8Array in JS from a buffer in Onyx. +/// Returns the number of bytes copied, or -1 if the value was not a Uint8Array. +Value.copy_to_js :: (v: Value, buf: [] u8) -> i32 { + return __copy_to_js(v, buf); +} + + +#local +transform_args :: macro (args: [] any, $body: Code) { + mapped_args: [..] Value; + args_to_free: [..] Value; + defer { + for to_free in args_to_free { + to_free->release(); + } + + delete(&mapped_args); + delete(&args_to_free); + } + + for arg in args { + if arg.type == Value || arg.type == Func { + mapped_args << *cast(&Value, arg.data); + + } else { + generated := Value.from(arg) ?? 
Undefined; + mapped_args << generated; + args_to_free << generated; + } + } + + res := #unquote body; + __add_to_pool(res); + return res; +} + +#overload +builtin.delete :: (v: Value) { + __free(v); +} + + +#package +#thread_local +__current_pool: &ValuePool + +#package +__add_to_pool :: macro (v: Value) { + if __current_pool { + if cast(u64) v != 0 { + __current_pool->add(v); + } + } +} + + +/// To aid in managing `Value`s that are created over the life time of the program, +/// `ValuePool` collects all of the `Value`s and allows for releasing them all at once. +ValuePool :: struct { + values: [..] Value = make([..] Value, alloc.heap_allocator); +} + +ValuePool.add :: (vp: &ValuePool, v: Value) { + vp.values << v; +} + +ValuePool.remove :: (vp: &ValuePool, v: Value) { + array.filter(&vp.values, [x](cast(u64) x != cast(u64) v)); +} + +ValuePool.release_all :: (vp: &ValuePool) { + for v in vp.values { + __free(v); + } + + array.clear(&vp.values); +} + +ValuePool.destroy :: (vp: &ValuePool) { + vp->release_all(); + delete(&vp.values); +} + +/// Gets the current `ValuePool` in use. +get_pool :: () => __current_pool; + +/// Sets the `ValuePool` to use. +set_pool :: (vp: &ValuePool) { + __current_pool = vp; +} + +/// Creates a new `ValuePool` and uses it. The old `ValuePool` is forgotten. +setup_default_pool :: () { + __current_pool = new(ValuePool); +} + +/// Releases all objects in the current `ValuePool`. +release_pooled_objects :: () { + if __current_pool { + __current_pool->release_all(); + } +} + +/// Helper macro to create a `ValuePool` that is scoped to a block. 
+temp_pool :: #match { + macro (body: Code) -> u32 { + #this_package.temp_pool(); + + #unquote body; + return 0; + }, + + macro () { + __current_pool :: __current_pool + + pool := #this_package.ValuePool.{}; + defer delete(&pool); + + old_pool := __current_pool; + __current_pool = &pool; + defer __current_pool = old_pool; + } +} + + diff --git a/core/math/consts.onyx b/core/math/consts.onyx new file mode 100644 index 000000000..44d9e7bc6 --- /dev/null +++ b/core/math/consts.onyx @@ -0,0 +1,27 @@ +package core.math + +E :: 2.71828182845904523536f +PI :: 3.14159265f +TAU :: 6.28318330f +SQRT_2 :: 1.414213562f + + +i8.MAX :: 127 +i8.MIN :: -128 +u8.MAX :: 255 +u8.MIN :: 0 +i16.MAX :: 32767 +i16.MIN :: -32768 +u16.MAX :: 65536 +u16.MIN :: 0 +i32.MAX :: 2147483647 +i32.MIN :: -2147483648 +u32.MAX :: 4294967296 +u32.MIN :: 0 +i64.MAX :: 9223372036854775807 +i64.MIN :: -9223372036854775807 // This is slightly incorrect because of a compiler bug +u64.MAX :: 18446744073709551616 +u64.MIN :: 0 + + + diff --git a/core/math/math.onyx b/core/math/math.onyx index 76b5b37cb..07ab15131 100644 --- a/core/math/math.onyx +++ b/core/math/math.onyx @@ -15,11 +15,6 @@ use core.intrinsics.wasm // - Dual numbers -E :: 2.71828182845904523536f; -PI :: 3.14159265f; -TAU :: 6.28318330f; -SQRT_2 :: 1.414213562f; - // // Trigonometry // Basic trig functions have been implemented using taylor series approximations. 
The @@ -70,22 +65,22 @@ cos :: (t: f32) -> f32 { } asin :: (t: f32) -> f32 { - assert(false, "asin is not implemented yet!"); + panic("asin is not implemented yet!"); return 0; } acos :: (t: f32) -> f32 { - assert(false, "acos is not implemented yet!"); + panic("acos is not implemented yet!"); return 0; } atan :: (t: f32) -> f32 { - assert(false, "atan is not implemented yet!"); + panic("atan is not implemented yet!"); return 0; } atan2 :: (t: f32) -> f32 { - assert(false, "atan2 is not implemented yet!"); + panic("atan2 is not implemented yet!"); return 0; } diff --git a/core/misc/any_utils.onyx b/core/misc/any_utils.onyx index 66c25c39a..ceee53342 100644 --- a/core/misc/any_utils.onyx +++ b/core/misc/any_utils.onyx @@ -4,6 +4,7 @@ use runtime use core.iter use core.array use core.string +use core.memory use runtime.info { get_type_info, @@ -28,7 +29,7 @@ any_as :: (a: any, $T: type_expr) -> &T { return cast(&T) a.data; } -#doc "Dereference a pointer any." +/// Dereference a pointer any. any_dereference :: (v: any) -> any { t := get_type_info(v.type); if t.kind == .Pointer { @@ -39,7 +40,7 @@ any_dereference :: (v: any) -> any { return v; } -#doc "Unwraps an optional any, if the any is an optional. `? T -> T`" +/// Unwraps an optional any, if the any is an optional. `? T -> T` any_unwrap :: (v: any) -> any { if union_constructed_from(v.type, Optional) { t := v.type->info()->as_union(); @@ -53,7 +54,7 @@ any_unwrap :: (v: any) -> any { } -#doc "Subscript an array-like any." +/// Subscript an array-like any. any_subscript :: (v: any, index: i32) -> any { base_ptr, elem_type, count := any_as_array(v); if index >= count || index < 0 { @@ -66,7 +67,7 @@ any_subscript :: (v: any, index: i32) -> any { }; } -#doc "Select a member from an any." +/// Select a member from an any. 
any_selector :: (v: any, member_name: str) -> any { t := get_type_info(v.type); if t.kind == .Struct { @@ -79,12 +80,12 @@ any_selector :: (v: any, member_name: str) -> any { return .{null, void}; } -#doc "Like `any_selector`, but works with selecting \"foo.bar.joe\"." +/// Like `any_selector`, but works with selecting \"foo.bar.joe\". any_nested_selector :: (v: any, member_name: str) -> any { t := get_type_info(v.type); if t.kind != .Struct do return .{}; - part_name, next_name := string.bisect(member_name, #char "."); + part_name, next_name := string.bisect(member_name, '.'); member := get_struct_member(v.type, part_name); if member { @@ -100,8 +101,6 @@ any_nested_selector :: (v: any, member_name: str) -> any { return .{null, void}; } -#doc """ -""" any_member :: #match #locked { (v: any, member_type: type_expr, member_offset: u32) -> any { return any.{ @@ -118,20 +117,18 @@ any_member :: #match #locked { } } -#doc """ - Convert a structure or pointer to a structure to a Map with - keys representing the fields of the structure, and values - representing the value of each field. - - T :: struct { - x := 123; - y := "test"; - } - - m := any_to_map(T.{}); - - `m` would have two keys, "x" and "y". -""" +/// Convert a structure or pointer to a structure to a Map with +/// keys representing the fields of the structure, and values +/// representing the value of each field. +/// +/// T :: struct { +/// x := 123; +/// y := "test"; +/// } +/// +/// m := any_to_map(T.{}); +/// +/// `m` would have two keys, "x" and "y". any_to_map :: (v: any) -> ? Map(str, any) { vals := v; if vals.type->info().kind == .Pointer { @@ -151,11 +148,11 @@ any_to_map :: (v: any) -> ? Map(str, any) { return out; } -#doc "Creates an iterator out of an array-like any." +/// Creates an iterator out of an array-like any. 
any_iter :: (arr: any) -> Iterator(any) { base_ptr, elem_type, count := any_as_array(arr); if count == 0 { - return .{ null, ((_: rawptr) => any.{}, false) }; + return .{ null, (_: rawptr) => Optional.empty(any) }; } return iter.generator( @@ -167,18 +164,39 @@ any_iter :: (arr: any) -> Iterator(any) { index = 0, }, - (ctx: $T) -> (any, bool) { + (ctx: $T) -> ? any { if ctx.index < ctx.count { defer ctx.index += 1; - return any.{ cast([&] u8) ctx.base_ptr + ctx.elem_size * ctx.index, ctx.elem_type }, true; + return any.{ cast([&] u8) ctx.base_ptr + ctx.elem_size * ctx.index, ctx.elem_type }; } - return .{}, false; + return .None; } ); } +/// Creates a deep copy of an array of `any`s. +/// +/// Copies the slice to a new slice, and all of the `any` data members. +any_deep_copy :: (args: [] any, allocator := context.allocator) -> (result: [] any) { + result = make([] any, args.count, allocator); + + for arg, i in args { + size := get_type_info(arg.type).size; + + new_data := allocator->alloc(size); + if !new_data do continue; + + memory.copy(new_data, arg.data, size); + + result[i] = any.{ new_data, arg.type }; + } + + return; +} + + #local any_as_array :: (arr: any) -> (rawptr, type_expr, u32) { info := get_type_info(arr.type); @@ -194,7 +212,7 @@ any_as_array :: (arr: any) -> (rawptr, type_expr, u32) { return a.data, (cast(&Type_Info_Dynamic_Array) info).of, a.count; } - case #default { + case _ { return null, void, 0; } } diff --git a/core/misc/arg_parse.onyx b/core/misc/arg_parse.onyx index 2a7837c34..8b9eba435 100644 --- a/core/misc/arg_parse.onyx +++ b/core/misc/arg_parse.onyx @@ -6,31 +6,40 @@ use core.conv use core.string use runtime -#doc """ - This is currently a very basic argument parsing library. 
- The options are given through a structure like so: - - Options :: struct { - @"--option_1" - option_1: str; - - @"--option_2", "-o2" - option_2: bool; - } - - main :: (args) => { - o: Options; - arg_parse.arg_parse(args, &o); - } - - Options that are strings and integers expect an argument after - them to specify their value. Options that are bool default to - false and are true if one or more of the option values are present. -""" +/// This is currently a very basic argument parsing library. +/// The options are given through a structure like so: +/// +/// Options :: struct { +/// @"--option_1" +/// option_1: str; +/// +/// @"--option_2", "-o2" +/// option_2: bool; +/// } +/// +/// main :: (args) => { +/// o: Options; +/// arg_parse.arg_parse(args, &o); +/// } +/// +/// Options that are strings and integers expect an argument after +/// them to specify their value. Options that are bool default to +/// false and are true if one or more of the option values are present. +arg_parse :: #match #local {} + +#overload arg_parse :: (c_args: [] cstr, output: any) -> bool { - arg_iter := iter.as_iter(c_args) |> - iter.map(string.from_cstr); - defer arg_iter.close(arg_iter.data); + args := iter.as_iter(c_args) |> + iter.map(string.from_cstr) |> + iter.collect(); + + return arg_parse(args, output); +} + +#overload +arg_parse :: (args: [] str, output: any) -> bool { + arg_iter := Iterator.from(args); + defer Iterator.close(arg_iter); use runtime.info {*}; @@ -56,21 +65,21 @@ arg_parse :: (c_args: [] cstr, output: any) -> bool { } case i32 { - value_str, success := iter.take_one(arg_iter, no_close=true); - if !success do return false; + value_str := iter.next(arg_iter); + if !value_str do return false; - value := conv.str_to_i64(value_str); + value := conv.str_to_i64(value_str->unwrap()); *(cast(&i32) (cast([&] u8) data_base + member.offset)) = ~~value; } case str { - value, success := iter.take_one(arg_iter, no_close=true); - if !success do return false; + value := 
iter.next(arg_iter); + if !value do return false; - *(cast(&str) (cast([&] u8) data_base + member.offset)) = value; + *(cast(&str) (cast([&] u8) data_base + member.offset)) = value->unwrap(); } - case #default { + case _ { printf("Unsupported argument type, {}.\n", output.type); return false; } diff --git a/core/misc/method_ops.onyx b/core/misc/method_ops.onyx index 926a8b184..53c8d6e71 100644 --- a/core/misc/method_ops.onyx +++ b/core/misc/method_ops.onyx @@ -10,15 +10,13 @@ use runtime // // Vec2 :: struct { x, y: f32 } // -// #inject Vec2 { -// __add :: (v1, v2: Vec2) => Vec2.{ v1.x + v2.x, v1.y + v2.y }; -// } +// Vec2.__add :: (v1, v2: Vec2) => Vec2.{ v1.x + v2.x, v1.y + v2.y }; // // This is an optional language feature because it currently significantly // affects compile-time, on average adding 30% to the total compilation time. // To enable this feature, add this somewhere: // -// #inject runtime.vars.Onyx_Enable_Operator_Methods :: true +// runtime.vars.Onyx_Enable_Operator_Methods :: true // #local { diff --git a/core/module.onyx b/core/module.onyx index c8809b158..115a8e117 100644 --- a/core/module.onyx +++ b/core/module.onyx @@ -24,11 +24,16 @@ use runtime #load "./conv/parse" #load "./math/math" +#load "./math/consts" #load "./random/random" #load "./hash/hash" #load "./hash/md5" #load "./hash/sha256" +#load "./hash/sha1" + +#load "./crypto/hmac" +#load "./crypto/keys/jwt" #load "./string/string" #load "./string/buffer" @@ -64,6 +69,7 @@ use runtime #load "./encoding/utf8" #load "./encoding/osad" #load_all "./encoding/json" +#load_all "./encoding/xml" #load "./encoding/kdl/kdl" #load "./runtime/common" @@ -81,6 +87,7 @@ use runtime #if runtime.platform.Supports_Os { #load "./os/os" + #load "./os/args" } #if runtime.platform.Supports_Processes { @@ -118,6 +125,8 @@ use runtime #load "./sync/semaphore" #load "./sync/barrier" #load "./sync/once" + #load "./sync/channel" + #load "./sync/mutex_guard" } @@ -134,5 +143,9 @@ use runtime } #if runtime.runtime 
== .Js { + #load "./js/value" + #load "./js/func" + #load "./js/foreign" + #load "./runtime/platform/js/platform" } diff --git a/core/net/net.onyx b/core/net/net.onyx index 8918d8267..6dc42b489 100644 --- a/core/net/net.onyx +++ b/core/net/net.onyx @@ -19,23 +19,21 @@ Socket :: struct { } // Inject methods for the socket -#inject Socket { - close :: socket_close - option :: socket_option - is_alive :: socket_is_alive - bind :: socket_bind - listen :: socket_listen - accept :: socket_accept - connect :: socket_connect - shutdown :: socket_shutdown - send :: socket_send - sendto :: socket_sendto - sendall :: socket_sendall - recv :: socket_recv - recv_into :: socket_recv_into - recvfrom :: socket_recvfrom - poll :: socket_poll -} +Socket.close :: socket_close +Socket.option :: socket_option +Socket.is_alive :: socket_is_alive +Socket.bind :: socket_bind +Socket.listen :: socket_listen +Socket.accept :: socket_accept +Socket.connect :: socket_connect +Socket.shutdown :: socket_shutdown +Socket.send :: socket_send +Socket.sendto :: socket_sendto +Socket.sendall :: socket_sendall +Socket.recv :: socket_recv +Socket.recv_into :: socket_recv_into +Socket.recvfrom :: socket_recvfrom +Socket.poll :: socket_poll SocketFamily :: enum { Unknown :: 0x00; @@ -101,10 +99,9 @@ SocketAddress :: union { }; } -#inject SocketAddress.addr_as_str :: (this: &SocketAddress, allocator := context.allocator) -> str { return switch *this { - case .Unix as path => string.as_str(cast(cstr) path); + case .Unix as path => str.as_str(cast(cstr) path); case .Inet as inet => do { str_addr := ipv4_to_str(inet.addr); out := make(dyn_str, allocator); @@ -152,6 +149,25 @@ make_unix_address :: (out: &SocketAddress, path: str) { out_path[offset] = 0; } + +dial :: (host: str, port: u16, type: SocketType) -> Result(Socket, io.Error) { + use addrs := resolve(host, port, 10); + + for &addr in addrs { + if addr.type != type do continue; + + sock := socket_create(addr.family, type, .ANY).Ok?; + if err := 
socket_connect(&sock, &addr.addr); err != .None { + return .{ Err = err }; + } + + return .{ Ok = sock }; + } + + return .{ Err = .NotFound }; +} + + socket_create :: (family: SocketFamily, type: SocketType, proto: SocketProto) -> Result(Socket, io.Error) { s: Socket; s.type = type; @@ -372,25 +388,27 @@ socket_recvfrom :: (s: &Socket, buffer: [] u8) -> ? SocketRecvFromResult { #local __net_socket_vtable := io.Stream_Vtable.{ - read = (use s: &Socket, buffer: [] u8) -> (io.Error, u32) { - if cast(i32) handle == 0 do return .BadFile, 0; - if !s->is_alive() do return .EOF, 0; + read = (use s: &Socket, buffer: [] u8) -> Result(u32, io.Error) { + if cast(i32) handle == 0 do return .{ Err = .BadFile }; + if !s->is_alive() do return .{ Err = .EOF }; res := runtime.platform.__net_sock_recv(handle, buffer); - res->ok()->with([bytes_read] { - if bytes_read == 0 do return .EOF, 0; + res.Ok->with([bytes_read] { + if bytes_read == 0 do return .{ Err = .EOF }; - return .None, bytes_read; + return .{ Ok = bytes_read }; }); - res->err()->with([err] { - if err == .NoData do return .ReadLater, 0; + res.Err->with([err] { + if err == .NoData do return .{ Err = .ReadLater }; if err == .EOF { socket_close(s); } - return err, 0; + return .{ Err = err }; }); + + return .{ Ok = 0 } }, write_byte = (use s: &Socket, byte: u8) -> io.Error { @@ -410,9 +428,9 @@ socket_recvfrom :: (s: &Socket, buffer: [] u8) -> ? SocketRecvFromResult { return .BufferFull; }, - write = (use s: &Socket, buffer: [] u8) -> (io.Error, u32) { - if cast(i32) handle == 0 do return .BadFile, 0; - if !s->is_alive() do return .EOF, 0; + write = (use s: &Socket, buffer: [] u8) -> Result(u32, io.Error) { + if cast(i32) handle == 0 do return .{ Err = .BadFile }; + if !s->is_alive() do return .{ Err = .EOF }; res := runtime.platform.__net_sock_send(handle, buffer); res->err()->with([err] { @@ -420,23 +438,23 @@ socket_recvfrom :: (s: &Socket, buffer: [] u8) -> ? 
SocketRecvFromResult { socket_close(s); } - return err, 0; + return .{ Err = err }; }); - return .None, res->ok()->unwrap(); + return .{ Ok = res->ok()->unwrap() }; }, - poll = (use s: &Socket, ev: io.PollEvent, timeout: i32) -> (io.Error, bool) { - if ev == .Write do return .None, true; - if !s->is_alive() do return .None, false; + poll = (use s: &Socket, ev: io.PollEvent, timeout: i32) -> Result(bool, io.Error) { + if ev == .Write do return .{ Ok = true }; + if !s->is_alive() do return .{ Ok = false }; status := socket_poll(s, timeout); if status == .Closed { - return .EOF, false; + return .{ Err = .EOF }; } - return .None, status == .Readable; + return .{ Ok = status == .Readable }; }, close = (use p: &Socket) -> io.Error { @@ -455,7 +473,7 @@ str_to_ipv4 :: (ip: str) -> u32 { res: u32; shift := 0; for 4 { - octet := string.read_until(&ip_, #char "."); + octet := string.read_until(&ip_, '.'); string.advance(&ip_, 1); res |= cast(u32) (conv.str_to_i64(octet) & cast(i64) 0xFF) << shift; @@ -484,3 +502,19 @@ ipv6_to_str :: (addr: [16] u8) -> str { |> string.join(" "); } + + +ResolveResult :: struct { + family: SocketFamily; + type: SocketType; + proto: SocketProto; + addr: SocketAddress; +} + +resolve :: (host: str, port: u16, max_results: i32) -> [] ResolveResult { + addrs := make([] ResolveResult, max_results); + results := runtime.platform.__net_resolve(host, port, addrs); + addrs = addrs[0 .. results]; + return addrs; +} + diff --git a/core/net/tcp.onyx b/core/net/tcp.onyx index b314f3add..95c678341 100644 --- a/core/net/tcp.onyx +++ b/core/net/tcp.onyx @@ -1,7 +1,7 @@ package core.net #if !runtime.platform.Supports_Networking { - #error "Cannot include this file. Platform not supported."; + #error "Cannot include this file. Platform not supported." } use core.thread @@ -15,80 +15,78 @@ use runtime // Should TCP_Connection be an abstraction of both the client and the server? // Or is there not enough shared between them to justify that? 
TCP_Connection :: struct { - socket: Socket; + socket: Socket - event_allocator: Allocator; - events: [..] TCP_Event; - event_cursor := 0; + event_allocator: Allocator + events: [..] TCP_Event + event_cursor := 0 } TCP_Event :: struct { - kind: Kind; - data: rawptr; + kind: Kind + data: rawptr Kind :: enum { - Undefined; - Connection; - Disconnection; - Data; - Ready; + Undefined + Connection + Disconnection + Data + Ready } Connection :: struct { - address : &SocketAddress; + address : &SocketAddress // This is only set when the event is coming from the server. - client : &TCP_Server.Client; + client : &TCP_Server.Client } Disconnection :: struct { - address: &SocketAddress; + address: &SocketAddress // This is only set when the event is coming from the server. - client : &TCP_Server.Client; + client : &TCP_Server.Client } Data :: struct { - address: &SocketAddress; + address: &SocketAddress // This is only set when the event is coming from the server. - client : &TCP_Server.Client; + client : &TCP_Server.Client - contents: [] u8; + contents: [] u8 } Ready :: struct { - address: &SocketAddress; + address: &SocketAddress // This is only set when the event is coming from the server. - client : &TCP_Server.Client; + client : &TCP_Server.Client } } // Iterator implementation for TCP_Connection -#inject TCP_Connection { - iter_open :: (use conn: &TCP_Connection) { - conn.event_cursor = 0; - } +TCP_Connection.iter_open :: (use conn: &TCP_Connection) { + conn.event_cursor = 0 +} - iter_next :: (use conn: &TCP_Connection) -> (TCP_Event, bool) { - if event_cursor == events.count do return .{}, false; +TCP_Connection.iter_next :: (use conn: &TCP_Connection) -> ? 
TCP_Event { + if event_cursor == events.count do return .None - defer event_cursor += 1; - return events[event_cursor], true; - } + defer event_cursor += 1 + return events[event_cursor] +} - iter_close :: (use conn: &TCP_Connection) { - for events { - switch it.kind { - case .Data { - raw_free(event_allocator, (cast(&TCP_Event.Data) it.data).contents.data); - } +TCP_Connection.iter_close :: (use conn: &TCP_Connection) { + for events { + switch it.kind { + case .Data { + raw_free(event_allocator, (cast(&TCP_Event.Data) it.data).contents.data) } - - raw_free(event_allocator, it.data); } - array.clear(&events); + raw_free(event_allocator, it.data) } + + array.clear(&events) } @@ -97,90 +95,104 @@ TCP_Event :: struct { // TCP_Server :: struct { - use connection: TCP_Connection; + use connection: TCP_Connection - client_allocator: Allocator; - clients: [] &Client; + clients: [] ? Client client_count: u32; // max clients is stored as clients.count. - alive := true; - pulse_time_ms := 500; + alive := true + pulse_time_ms := 500 - emit_data_events := true; - emit_ready_event_multiple_times := false; + emit_data_events := true + emit_ready_event_multiple_times := false } -#inject TCP_Server { - listen :: tcp_server_listen - stop :: tcp_server_stop - pulse :: tcp_server_pulse - send :: tcp_server_send - broadcast :: tcp_server_broadcast - handle_events :: tcp_server_handle_events - kill_client :: tcp_server_kill_client +TCP_Server.listen :: tcp_server_listen +TCP_Server.stop :: tcp_server_stop +TCP_Server.pulse :: tcp_server_pulse +TCP_Server.send :: tcp_server_send +TCP_Server.broadcast :: tcp_server_broadcast +TCP_Server.handle_events :: tcp_server_handle_events +TCP_Server.event_iter :: tcp_server_event_iter +TCP_Server.kill_client :: tcp_server_kill_client +TCP_Server.transfer_client :: tcp_server_transfer_client + +TCP_Server.Client :: struct { + use socket : Socket + address : SocketAddress + state : State + server : &TCP_Server + + recv_ready_event_present := false + 
+ State :: enum { + Alive + Being_Killed + Dying + Dead + } } -#inject TCP_Server { - Client :: struct { - use socket : Socket; - address : SocketAddress; - state : State; - - recv_ready_event_present := false; +TCP_Server.Client.read_complete :: (use this: &TCP_Server.Client) { + recv_ready_event_present = false +} - State :: enum { - Alive; - Being_Killed; - Dying; - Dead; - } - } +TCP_Server.Client.transfer :: (use this: &TCP_Server.Client, new_server: &TCP_Server) -> ? TCP_Server.Client { + return tcp_server_transfer_client(server, this, new_server) } -#inject TCP_Server.Client { - read_complete :: (use this: &TCP_Server.Client) { - recv_ready_event_present = false; +TCP_Server.Client.detach :: (use this: &TCP_Server.Client) -> (res: Socket) { + res = this.socket + + for& server.clients { + if it->unwrap_ptr() == this { + *it = .None + server.client_count -= 1 + break + } } + + return } tcp_server_make :: (max_clients := 32, allocator := context.allocator) -> &TCP_Server { maybe_socket := socket_create(.Inet, .Stream, .IP); // IPv6? - if maybe_socket.Err do return null; + if maybe_socket.Err do return null - socket := maybe_socket.Ok->unwrap(); + socket := maybe_socket.Ok->unwrap() - server := new(TCP_Server, allocator=allocator); - server.socket = socket; - server.event_allocator = allocator; + server := new(TCP_Server, allocator=allocator) + server.socket = socket + server.event_allocator = allocator - server.client_count = 0; - server.client_allocator = allocator; - server.clients = make([] &TCP_Server.Client, max_clients, allocator=allocator); - array.fill(server.clients, null); + server.client_count = 0 + server.clients = make([] ? 
TCP_Server.Client, max_clients, allocator=allocator) + array.fill(server.clients, .None) - return server; + return server } tcp_server_listen :: (use server: &TCP_Server, port: u16) -> bool { - sa: SocketAddress; - make_ipv4_address(&sa, "0.0.0.0", port); - if !socket->bind(&sa) do return false; + sa: SocketAddress + make_ipv4_address(&sa, "0.0.0.0", port) + if !socket->bind(&sa) do return false - socket->listen(); - socket->option(.NonBlocking, true); - return true; + socket->listen() + socket->option(.NonBlocking, true) + return true } tcp_server_stop :: (use server: &TCP_Server) { - server.alive = false; + server.alive = false - for clients { - if !it do continue; + for& clients { + if !*it do continue - if it.state == .Alive do server->kill_client(it); + client := it->unwrap_ptr() + if client.state == .Alive do server->kill_client(client) } - server.socket->close(); + server.socket->close() } tcp_server_pulse :: (use server: &TCP_Server) -> bool { @@ -188,28 +200,32 @@ tcp_server_pulse :: (use server: &TCP_Server) -> bool { // Check for new connection if client_count < clients.count { socket->accept().Ok->with([client_data] { - client := new(TCP_Server.Client, allocator=client_allocator); - client.state = .Alive; - client.socket = client_data.socket; - client.address = client_data.addr; + client := Slice.first(clients, [cl](cl.None)) + if !client { + client_data.socket->close() + break + } + + *client = TCP_Server.Client.{} + cl := client->unwrap_ptr() + cl.server = server + cl.state = .Alive + cl.socket = client_data.socket + cl.address = client_data.addr - for& clients do if *it == null { *it = client; break; } - server.client_count += 1; + server.client_count += 1 - conn_event := new(TCP_Event.Connection, allocator=server.event_allocator); - conn_event.address = &client.address; - conn_event.client = client; + conn_event := new(TCP_Event.Connection, allocator=server.event_allocator) + conn_event.address = &cl.address + conn_event.client = cl - server.events 
<< .{ .Connection, conn_event }; - }); + server.events << .{ .Connection, conn_event } + }) } // // Process dead clients - for& clients { - client := *it; - if !client do continue; - + for& maybe_client in clients do maybe_client.*->with([client] { switch client.state { case .Being_Killed { // Before, there was not a "being killed" state and the code made @@ -223,45 +239,44 @@ tcp_server_pulse :: (use server: &TCP_Server) -> bool { // the loop below that checks for dying clients will never see the // dying client. To remedy this, "Being_Killed" was added as another // shutdown phase. TLDR: This is a hack; refactor this. - client.state = .Dying; + client.state = .Dying } case .Dying { - raw_free(server.client_allocator, client); - *it = null; - server.client_count -= 1; + *maybe_client = .None + server.client_count -= 1 } } - } + }) if client_count == 0 { // Wait for a client to connect. - status_buffer: [1] Socket_Poll_Status; - socket_poll_all(.[&socket], status_buffer, -1); - return server.alive; + status_buffer: [1] Socket_Poll_Status + socket_poll_all(.[&socket], status_buffer, -1) + return server.alive - } else do for clients { + } else do for& clients { // If we have some clients, make sure their sockets are still alive. // There were issues detecting this in the poll() function so we do // do it explictly here. 
- if it == null do continue; - if it.state != .Alive do continue; + if !*it do continue + + client := it->unwrap_ptr() + if client.state != .Alive do continue - if !it.socket->is_alive() { - tcp_server_kill_client(server, it); + if !client.socket->is_alive() { + tcp_server_kill_client(server, client) } } - clients_with_messages := wait_to_get_client_messages(server); - defer if clients_with_messages.data != null do cfree(clients_with_messages.data); - - for clients_with_messages { - if it.state != .Alive do continue; + use clients_with_messages := wait_to_get_client_messages(server) + for client in clients_with_messages { + if client.state != .Alive do continue if server.emit_data_events { - msg_buffer: [1024] u8; - bytes_read := it.socket->recv_into(msg_buffer); + msg_buffer: [1024] u8 + bytes_read := client.socket->recv_into(msg_buffer) // If exactly 0 bytes are read from the buffer, it means that the // client has shutdown and future communication should be terminated. @@ -269,66 +284,121 @@ tcp_server_pulse :: (use server: &TCP_Server) -> bool { // If a negative number of bytes are read, then an error has occured // and the client should also be marked as dead. if bytes_read <= 0 { - tcp_server_kill_client(server, it); - continue; + tcp_server_kill_client(server, client) + continue } - data_event := new(TCP_Event.Data, allocator=server.event_allocator); - data_event.client = it; - data_event.address = &it.address; - data_event.contents = memory.copy_slice(msg_buffer[0 .. 
bytes_read], allocator=server.event_allocator); - server.events << .{ .Data, data_event }; - - } elseif !it.recv_ready_event_present { - it.recv_ready_event_present = true; - ready_event := new(TCP_Event.Ready, allocator=server.event_allocator); - ready_event.client = it; - ready_event.address = &it.address; - server.events << .{ .Ready, ready_event }; + data_event := new(TCP_Event.Data, allocator=server.event_allocator) + data_event.client = client + data_event.address = &client.address + data_event.contents = memory.copy_slice(msg_buffer[0 .. bytes_read], allocator=server.event_allocator) + server.events << .{ .Data, data_event } + + } elseif !client.recv_ready_event_present { + client.recv_ready_event_present = true + ready_event := new(TCP_Event.Ready, allocator=server.event_allocator) + ready_event.client = client + ready_event.address = &client.address + server.events << .{ .Ready, ready_event } } } - for clients { - if it == null do continue; - if it.state == .Dying { - disconnect_event := new(TCP_Event.Disconnection, allocator=server.event_allocator); - disconnect_event.client = it; - disconnect_event.address = &it.address; - server.events << .{ .Disconnection, disconnect_event }; + for& clients { + if !*it do continue + + client := it->unwrap_ptr() + if client.state == .Dying { + disconnect_event := new(TCP_Event.Disconnection, allocator=server.event_allocator) + disconnect_event.client = client + disconnect_event.address = &client.address + server.events << .{ .Disconnection, disconnect_event } } } - client_count = array.count_where(clients, [v](v != null)); + client_count = Array.count_where(clients, [v](!v.None)) - return server.alive; + return server.alive } tcp_server_send :: (use server: &TCP_Server, client: &TCP_Server.Client, data: [] u8) { - client.socket->send(data); + client.socket->send(data) } tcp_server_broadcast :: (use server: &TCP_Server, data: [] u8, except: &TCP_Server.Client = null) { - for clients { - if it == null do continue; - if 
it.state != .Alive do continue; - if it == except do continue; + for& clients { + if !*it do continue - it.socket->send(data); + client := it->unwrap_ptr() + if client.state != .Alive do continue + if client == except do continue + + client.socket->send(data) } } tcp_server_handle_events :: macro (server: &TCP_Server, handler: Code) { while server->pulse() { - for iter.as_iter(&server.connection) { - switch it.kind do #unquote handler(it); + for Iterator.from(&server.connection) { + switch it.kind do #unquote handler(it) } } } +tcp_server_event_iter :: (server: &TCP_Server) -> Iterator(TCP_Event) { + return Iterator.generator_no_copy( + new(.{ server = server, inner_iter = Optional.empty(Iterator(TCP_Event)) }) + + (ctx: $C) -> ? TCP_Event { + while true { + if !ctx.inner_iter { + ctx.server->pulse() + + ctx.inner_iter = Iterator.from(&ctx.server.connection) + } + + i := ctx.inner_iter! + next := Iterator.next(i) + + if next do return next + + Iterator.close(i) + ctx.inner_iter = .None + } + + return .None + } + ) +} + tcp_server_kill_client :: (use server: &TCP_Server, client: &TCP_Server.Client) { - client.state = .Being_Killed; - client.socket->shutdown(.ReadWrite); - client.socket->close(); + client.state = .Being_Killed + client.socket->shutdown(.ReadWrite) + client.socket->close() +} + +tcp_server_transfer_client :: (server: &TCP_Server, client: &TCP_Server.Client, other_server: &TCP_Server) -> ? 
TCP_Server.Client { + if other_server.client_count >= other_server.clients.count do return .None + + transferred_client := *client + transferred_client.server = other_server + + for& other_server.clients { + if !*it { + *it = transferred_client + break + } + } + other_server.client_count += 1 + + for& server.clients { + if it->unwrap_ptr() == client { + *it = .None + server.client_count -= 1 + break + } + } + + return transferred_client } @@ -338,7 +408,7 @@ tcp_server_kill_client :: (use server: &TCP_Server, client: &TCP_Server.Client) // TCP_Client :: struct { - use connection: TCP_Connection; + use connection: TCP_Connection } @@ -346,17 +416,18 @@ TCP_Client :: struct { #local wait_to_get_client_messages :: (use server: &TCP_Server) -> [] &TCP_Server.Client { - active_clients := alloc.array_from_stack(&TCP_Server.Client, client_count); - active_clients.count = 0; + active_clients := alloc.array_from_stack(&TCP_Server.Client, client_count + 1) + active_clients.count = 0 - for clients { - if it == null do continue; + for& clients { + if !*it do continue - if it.state == .Alive { - if !it.socket->is_alive() do continue; + client := it->unwrap_ptr() + if client.state == .Alive { + if !client.socket->is_alive() do continue - active_clients[active_clients.count] = it; - active_clients.count += 1; + active_clients[active_clients.count] = client + active_clients.count += 1 } } @@ -370,14 +441,20 @@ wait_to_get_client_messages :: (use server: &TCP_Server) -> [] &TCP_Server.Clien // to immediately return, and enter a sort of complicated "spin loop" // in order to not stall worker threads. if active_clients.count == 0 { - return .{ null, 0 }; + return .{ null, 0 } } - status_buffer := alloc.array_from_stack(Socket_Poll_Status, client_count); - socket_poll_all(cast([] &Socket) active_clients, status_buffer, pulse_time_ms); + // HACK This is making a large assumption about the layout of the TCP_Server.Client. 
+ // This is assuming that the `socket` member is the first thing, and this this + // type punning trick can work. + active_clients[active_clients.count] = ~~ &server.socket + active_clients.count += 1 + + status_buffer := alloc.array_from_stack(Socket_Poll_Status, active_clients.count) + socket_poll_all(cast([] &Socket) active_clients, status_buffer, pulse_time_ms) - recv_clients: [..] &TCP_Server.Client; - for it in client_count { + recv_clients: [..] &TCP_Server.Client + for it in active_clients.count - 1 { if status_buffer[it] == .Readable { // // If there is already a Ready event present for this client, @@ -387,15 +464,15 @@ wait_to_get_client_messages :: (use server: &TCP_Server) -> [] &TCP_Server.Clien // read, the TCP server will enter a spin loop, effectively ignoring // the polling. This shouldn't be too bad because in most cases the // code will immediately parse the response. - if active_clients[it].recv_ready_event_present && !emit_ready_event_multiple_times do continue; + if active_clients[it].recv_ready_event_present && !emit_ready_event_multiple_times do continue - recv_clients << active_clients[it]; + recv_clients << active_clients[it] } if status_buffer[it] == .Closed { - tcp_server_kill_client(server, active_clients[it]); + tcp_server_kill_client(server, active_clients[it]) } } - return recv_clients; + return recv_clients } diff --git a/core/onyx/cbindgen.onyx b/core/onyx/cbindgen.onyx index ba57760fc..1efa24858 100644 --- a/core/onyx/cbindgen.onyx +++ b/core/onyx/cbindgen.onyx @@ -41,7 +41,7 @@ package cbindgen use runtime -#doc "Deprecated. Use `link_name`." +/// Deprecated. Use `link_name`. 
customize :: struct { symbol_name: str; } @@ -91,32 +91,27 @@ Binding_Config :: struct { } generate_c_binding :: (use binding_config: Binding_Config) -> bool { - wrote := false; - for file in os.with_file(output_file, .Write) { - writer := io.writer_make(file); - defer io.writer_free(&writer); + use file := os.open(output_file, .Write)->or_return(false); + use writer := io.writer_make(&file); - fb := runtime.info.get_foreign_block(foreign_block); + fb := runtime.info.get_foreign_block(foreign_block); - write_file_introduction(&writer, preamble, fb.module_name); + write_file_introduction(&writer, preamble, fb.module_name); - for fb.funcs { - for impl in custom_implementations { - if impl.name == it.name { - io.write(&writer, impl.impl); - io.write(&writer, "\n"); - continue continue; - } + for fb.funcs { + for impl in custom_implementations { + if impl.name == it.name { + io.write(&writer, impl.impl); + io.write(&writer, "\n"); + continue continue; } - - write_function_body(&writer, it, cast_map, name_map); } - write_library_block(&writer, fb.funcs); - wrote = true; + write_function_body(&writer, it, cast_map, name_map); } - return wrote; + write_library_block(&writer, fb.funcs); + return true; } // @@ -124,7 +119,7 @@ generate_c_binding :: (use binding_config: Binding_Config) -> bool { module_path :: (file_path: str) -> str { out := file_path; while out.count > 0 { - if out[out.count - 1] == #char "/" || out[out.count - 1] == #char "\\" do break; + if out[out.count - 1] == '/' || out[out.count - 1] == '\\' do break; out.count -= 1; } @@ -163,7 +158,7 @@ compile_c_file :: ( } if flags != "" { - for string.split(flags, #char " ") do args << it; + for string.split(flags, ' ') do args << it; } args << "-o"; @@ -196,6 +191,8 @@ compile_c_file :: ( #if runtime.compiler_os == .Windows { return true; } + + return false; } #local { @@ -334,11 +331,11 @@ compile_c_file :: ( case u64 do string.append(call_signature, "L"); case f32 do string.append(call_signature, "f"); case 
f64 do string.append(call_signature, "d"); - case #default { + case _ { if is_pointer(p) { string.append(call_signature, "p"); } else { - assert(false, tprintf("Unsupported type in function pointer: {}", p)); + panic(tprintf("Unsupported type in function pointer: {}", p)); } } } @@ -376,7 +373,7 @@ compile_c_file :: ( if last_arg_is_return_value { return_type := slice.get(method_info.parameter_types, -1); if return_type->info().kind != .Pointer { - assert(false, tprintf("last_arg_is_return_value requires last parameter to be a pointer. ({} in {})", method_info.parameter_types, method_name)); + panic(tprintf("last_arg_is_return_value requires last parameter to be a pointer. ({} in {})", method_info.parameter_types, method_name)); } return_type = return_type->info()->as_pointer().to; @@ -435,8 +432,8 @@ compile_c_file :: ( case .Multi_Pointer do return "ptr"; case .Array do return "ptr"; - case .Function do return "i32"; // assert(false, "Passing functions between wasm and c is not yet supported."); - case .Slice do assert(false, "Passing a slice from c to wasm is not yet supported."); + case .Function do return "i32"; // panic("Passing functions between wasm and c is not yet supported."); + case .Slice do panic("Passing a slice from c to wasm is not yet supported."); case .Enum do return type_to_wasm_type((cast(&Type_Info_Enum) param_info).backing_type); case .Distinct do return type_to_wasm_type((cast(&Type_Info_Distinct) param_info).base_type); @@ -451,11 +448,11 @@ compile_c_file :: ( return type_to_wasm_type(s_info.members[0].type); } - assert(false, "Passing structures between wasm and c is not yet supported."); + panic("Passing structures between wasm and c is not yet supported."); } case .Union { - assert(false, "Passing unions between wasm and c is not yet supported."); + panic("Passing unions between wasm and c is not yet supported."); } } @@ -490,7 +487,7 @@ compile_c_file :: ( case .Pointer do return "WASM_I32"; // This will also have to depend on the 
pointer size... case .Multi_Pointer do return "WASM_I32"; // This will also have to depend on the pointer size... - case .Function do return "WASM_I32, WASM_I32, WASM_I32"; // assert(false, "Passing functions between wasm and c is not yet supported."); + case .Function do return "WASM_I32, WASM_I32"; // panic("Passing functions between wasm and c is not yet supported."); case .Array do return "WASM_I32"; case .Slice do return "WASM_I32,WASM_I32"; case .Enum do return type_encoding((cast(&Type_Info_Enum) param_info).backing_type); @@ -502,7 +499,7 @@ compile_c_file :: ( return type_encoding(s_info.members[0].type); } - // assert(false, "Passing structures between wasm and c is not yet supported."); + // panic("Passing structures between wasm and c is not yet supported."); return "WASM_I32"; } } diff --git a/core/onyx/compiler_extension.onyx b/core/onyx/compiler_extension.onyx new file mode 100644 index 000000000..a0ba65087 --- /dev/null +++ b/core/onyx/compiler_extension.onyx @@ -0,0 +1,274 @@ +//+optional-semicolons +package onyx.compiler_extension +#allow_stale_code + +use core { + stdio + Result +} +use core.alloc {package, arena} +use core.encoding.osad +use core.io + +// PROTOCOL VERSION 1 +// +// OUTGOING +// +// INIT (0) send info about extension +// version (u32) +// name (str) +// +// REPORT (1) report an error +// position (Position) +// msg (str) +// +// EXPANSION (2) response to ExpandMacro +// id (u32) +// code (Result(str, Reason)) +// +// INJECT (3) add code to top level scope +// code (str) +// +// +// INCOMING +// +// INIT (0) +// version_major (u32) +// version_minor (u32) +// version_patch (u32) +// max_protocol_version (u32) +// +// TERMINATE (1) +// +// EXPANDMACRO (2) +// id (u32) +// location (ExpansionLocation) +// position (Position) +// macro_name (str) +// body (str) +// + +// PROTOCOL VERSION 2 CHANGES +// +// OUTGOING INIT (0) +// added `hooks ([] i32)` after `name` +// +// OUTGOING ACKNOWLEDGEHOOK (4) +// Added +// +// INCOMING HOOK (3) 
+// Added + + +// Types + +IncomingMessage :: union { + Init as 0: struct { + compiler_version_major: u32 + compiler_version_minor: u32 + compiler_version_patch: u32 + + max_protocol_version_supported: u32 + } + + Terminate as 1: struct { + } + + ExpandMacro as 2: ExpansionInfo + + Hook as 3: struct { + id: u32 + info: HookInfo + } +} + +OutgoingMessage :: union { + Init as 0: struct { + protocol_version: u32 + name: str + + hooks: [] i32 + } + + ErrorReport as 1: struct { + position: Position + msg: str + } + + Expansion as 2: struct { + id: u32 + code: Result(str, ExpansionFailureReason) + } + + InjectCode as 3: struct { + code: str + } + + AcknowledgeHook as 4: struct { + id: u32 + } +} + +ExpansionInfo :: struct { + id: u32 + location: ExpansionLocation + position: Position + macro_name: str + body: str +} + +ExpansionLocation :: enum { + Expression + Statement + TopLevelStatement +} + +ExpansionFailureReason :: enum { + NotSupported + BadSyntax +} + +Position :: struct { + filename: str + line: u32 + column: u32 + length: u32 +} + +HookInfo :: union { + None: void + + Stalled: struct {} + + Count: void +} + + +// Code + +ExtensionContext :: struct { + protocol_version: u32 + name: str + proc_macros: Map(str, #type (&ExtensionContext, ExpansionInfo) -> Result(str, ExpansionFailureReason)) + hook_handlers: [cast(i32) HookInfo.tag_enum.Count] (&ExtensionContext, HookInfo) -> void + + arena: arena.Arena + + reader: io.Reader + + running: bool +} + +ExtensionContext.make :: (name: str) -> (res: ExtensionContext) { + res.name = name + res.protocol_version = 2 + + res.proc_macros = make(typeof res.proc_macros) + res.arena = arena.make(context.allocator, 32 * 1024) + res.reader = io.Reader.make(&stdio.stream) + return +} + +ExtensionContext.start :: (self: &#Self, callback: (&#Self, msg: IncomingMessage) -> void) { + self.running = true + + while self.running { + msg: IncomingMessage + if !osad.deserialize(&msg, typeof msg, &self.reader, 
alloc.as_allocator(&self.arena)) { + break + } + + switch msg { + case .Init as &init { + use hooks := make([..] i32) + for h, i in self.hook_handlers { + if h != null_proc { + hooks->push(i) + } + } + + self->send(.{ + Init = .{ + protocol_version = self.protocol_version + name = self.name + hooks = hooks + } + }) + } + + case .Terminate do break break + + case .Hook as &hk { + handler := self.hook_handlers[~~~ hk.info.tag] + + if handler != null_proc { + handler(self, hk.info) + } + + self->send(.{ + AcknowledgeHook = .{ + id = hk.id + } + }) + } + + case .ExpandMacro as em { + if self.proc_macros->has(em.macro_name) { + handler := self.proc_macros->get(em.macro_name)->unwrap() + result := handler(self, em) + self->send(.{ + Expansion = .{ + id = em.id, + code = result + } + }) + + } else { + fallthrough + } + } + + case _ { + callback(self, msg) + } + } + + alloc.arena.clear(&self.arena) + } +} + +ExtensionContext.send :: (self: &#Self, msg: OutgoingMessage) { + use stdout_writer := io.Writer.make(&stdio.stream) + + osad.serialize(msg, &stdout_writer) +} + +ExtensionContext.handle_macro :: (self: &#Self, name: str, func: (&ExtensionContext, ExpansionInfo) -> Result(str, ExpansionFailureReason)) { + self.proc_macros->put(name, func) +} + +ExtensionContext.handle_hook :: (self: &#Self, hook: HookInfo.tag_enum, func: (&ExtensionContext, HookInfo) -> void) { + self.hook_handlers[~~hook] = func +} + + + +map_position :: (base_pos: Position, pos: Position) => { + line := do { + if pos.line == 1 do return base_pos.line; + return pos.line + base_pos.line - 1; + }; + + column := do { + if pos.line == 1 do return base_pos.column + pos.column - 1; + return pos.column; + }; + + return Position.{ + filename = base_pos.filename, + line = line, + column = column, + length = pos.length + }; +} + diff --git a/core/onyx/cptr.onyx b/core/onyx/cptr.onyx index 018a52581..35e3cad61 100644 --- a/core/onyx/cptr.onyx +++ b/core/onyx/cptr.onyx @@ -19,82 +19,80 @@ cptr :: struct (T: 
type_expr) { data: u64; } -#inject cptr { - // - // Creates a new C-pointer from an Onyx pointer. - make :: macro (ptr: &$T) -> cptr(T) { - __cptr_make :: __cptr_make - return .{ __cptr_make(ptr) }; - } +// +// Creates a new C-pointer from an Onyx pointer. +cptr.make :: macro (ptr: &$T) -> cptr(T) { + __cptr_make :: __cptr_make + return .{ __cptr_make(ptr) }; +} +// +// Extract the data out of a C pointer into a buffer in the Onyx memory. +cptr.read :: #match { // - // Extract the data out of a C pointer into a buffer in the Onyx memory. - read :: #match { - // - // Special, error-inducing case for cptr(void) - (this: cptr(void)) -> void { }, - - (this: cptr($T)) -> T { - buf: [sizeof T] u8; - __cptr_read(this.data, ~~buf, sizeof T); - return *cast(&T) buf; - }, - - (this: cptr($T), buffer: [] u8) { - __cptr_read(this.data, ~~buffer.data, buffer.length); - } + // Special, error-inducing case for cptr(void) + (this: cptr(void)) -> void { }, + + (this: cptr($T)) -> T { + buf: [sizeof T] u8; + __cptr_read(this.data, ~~buf, sizeof T); + return *cast(&T) buf; + }, + + (this: cptr($T), buffer: [] u8) { + __cptr_read(this.data, ~~buffer.data, buffer.length); } +} - // - // Helper procedures for quickly reading an integer of various sizes. - read_u8 :: (this: cptr(u8)) => __cptr_read_u8(this.data); - read_u16 :: (this: cptr(u16)) => __cptr_read_u16(this.data); - read_u32 :: (this: cptr(u32)) => __cptr_read_u32(this.data); - read_u64 :: (this: cptr(u64)) => __cptr_read_u64(this.data); - read_i8 :: (this: cptr(i8)) => cast(i8) __cptr_read_u8(this.data); - read_i16 :: (this: cptr(i16)) => cast(i16) __cptr_read_u16(this.data); - read_i32 :: (this: cptr(i32)) => cast(i32) __cptr_read_u32(this.data); - read_i64 :: (this: cptr(i64)) => cast(i64) __cptr_read_u64(this.data); +// +// Helper procedures for quickly reading an integer of various sizes. 
+cptr.read_u8 :: (this: cptr(u8)) => __cptr_read_u8(this.data); +cptr.read_u16 :: (this: cptr(u16)) => __cptr_read_u16(this.data); +cptr.read_u32 :: (this: cptr(u32)) => __cptr_read_u32(this.data); +cptr.read_u64 :: (this: cptr(u64)) => __cptr_read_u64(this.data); +cptr.read_i8 :: (this: cptr(i8)) => cast(i8) __cptr_read_u8(this.data); +cptr.read_i16 :: (this: cptr(i16)) => cast(i16) __cptr_read_u16(this.data); +cptr.read_i32 :: (this: cptr(i32)) => cast(i32) __cptr_read_u32(this.data); +cptr.read_i64 :: (this: cptr(i64)) => cast(i64) __cptr_read_u64(this.data); - // - // When given a non-zero-sized dest, this procedure - // fills the dest buffer with the contents of the string - // up to the number bytes in the dest buffer. This - // procedure returns the length of the string as given - // by strlen(). This exists because iterating byte by byte - // using __cptr_read_u8 would be slow compared to strlen(). - extract_str :: (this: cptr(u8), dest: [] u8) => __cptr_extract_str(this.data, dest); +// +// When given a non-zero-sized dest, this procedure +// fills the dest buffer with the contents of the string +// up to the number bytes in the dest buffer. This +// procedure returns the length of the string as given +// by strlen(). This exists because iterating byte by byte +// using __cptr_read_u8 would be slow compared to strlen(). +cptr.extract_str :: (this: cptr(u8), dest: [] u8) => __cptr_extract_str(this.data, dest); - // - // This procedure attempts to convert a C-pointer back into an - // Onyx pointer, if the pointer lives with the Onyx memory space. - to_rawptr :: (this: cptr($T)) -> &T { - // I'm treating NULL as more of a concept, than as an actual value here, - // because if something returns a NULL pointer, it should logically map - // to the same idea as 'null' in Onyx. 
- if this.data == 0 do return null; - - use core.intrinsics.wasm - // Using 1 instead of 0 because a null pointer (0) converts - // to the memory address 0, not the base address for the WASM - // memory. - mem_base_ptr := __cptr_make(cast(rawptr) 1); - assert(mem_base_ptr <= this.data + 1 && (this.data + 1) >> 16 <= mem_base_ptr + ~~(wasm.memory_size()), "Invalid conversion from cptr to rawptr: pointer value out of Onyx memory range."); - return ~~(this.data - mem_base_ptr + 1); - } +// +// This procedure attempts to convert a C-pointer back into an +// Onyx pointer, if the pointer lives with the Onyx memory space. +cptr.to_rawptr :: (this: cptr($T)) -> &T { + // I'm treating NULL as more of a concept, than as an actual value here, + // because if something returns a NULL pointer, it should logically map + // to the same idea as 'null' in Onyx. + if this.data == 0 do return null; + + use core.intrinsics.wasm + // Using 1 instead of 0 because a null pointer (0) converts + // to the memory address 0, not the base address for the WASM + // memory. 
+ mem_base_ptr := __cptr_make(cast(rawptr) 1); + assert(mem_base_ptr <= this.data + 1 && (this.data + 1) >> 16 <= mem_base_ptr + ~~(wasm.memory_size()), "Invalid conversion from cptr to rawptr: pointer value out of Onyx memory range."); + return ~~(this.data - mem_base_ptr + 1); +} - as_unsafe :: (this: cptr($T), $new_type: type_expr) -> cptr(new_type) { - return .{ this.data }; - } +cptr.as_unsafe :: (this: cptr($T), $new_type: type_expr) -> cptr(new_type) { + return .{ this.data }; +} - at :: (this: cptr($T), index: i32) -> T { - elem := this + index; - return elem->read(); - } +cptr.at :: (this: cptr($T), index: i32) -> T { + elem := this + index; + return elem->read(); +} - format :: (output: &conv.Format_Output, format: &conv.Format, p: &cptr($T)) { - conv.format(output, "cptr({})[0x{b16}]", T, p.data); - } +cptr.format :: (output: &conv.Format_Output, format: &conv.Format, p: &cptr($T)) { + conv.format(output, "cptr({})[0x{b16}]", T, p.data); } diff --git a/core/os/args.onyx b/core/os/args.onyx new file mode 100644 index 000000000..6a8d0084b --- /dev/null +++ b/core/os/args.onyx @@ -0,0 +1,16 @@ +package core.os + +use runtime +use core.iter + +args :: (allocator := context.allocator) -> (result: [] str) { + args := runtime.platform.__args(allocator) + + result = make([] str, args.count, allocator) + for a, i in args { + result[i] = args[i] |> str.as_str() + } + + return +} + diff --git a/core/os/dir.onyx b/core/os/dir.onyx index 697ae3151..797aec772 100644 --- a/core/os/dir.onyx +++ b/core/os/dir.onyx @@ -17,10 +17,16 @@ DirectoryEntry :: struct { identifier : u32; name_length : u32; name_data : [256] u8; - - name :: (use dir: &DirectoryEntry) => str.{ ~~name_data, name_length }; } +/// Returns the `str` of the name, pointing into the `DirectoryEntry` for data. +/// The string is valid for the lifetime of the `DirectoryEntry`. 
+DirectoryEntry.name :: (use dir: &DirectoryEntry) => str.{ ~~name_data, name_length }; + +/// Copies the name of the item into the provided allocator and returns it +DirectoryEntry.name_copy :: (use dir: DirectoryEntry, allocator := context.allocator) => + str.copy(.{ ~~name_data, name_length }, allocator) + dir_open :: (path: str) -> (Directory, bool) { dir: Directory; success := fs.__dir_open(path, &dir); @@ -49,15 +55,15 @@ list_directory :: (path: str) -> Iterator(DirectoryEntry) { opened := false; } - next :: (use c: &Context) -> (DirectoryEntry, bool) { - if !opened do return .{}, false; + next :: (use c: &Context) -> ? DirectoryEntry { + if !opened do return .None; entry: DirectoryEntry; if !dir_read(dir, &entry) { - return .{}, false; + return .None; } - return entry, true; + return entry; } close :: (use c: &Context) { diff --git a/core/os/file.onyx b/core/os/file.onyx index 965046fd9..62cf0adf6 100644 --- a/core/os/file.onyx +++ b/core/os/file.onyx @@ -27,6 +27,8 @@ OpenMode :: enum { Append :: 0x03; } +FileData :: fs.FileData; + File :: struct { use stream : io.Stream; data : fs.FileData; @@ -65,7 +67,7 @@ get_contents_from_file :: (file: &File) -> str { data := cast([&] u8) raw_alloc(context.allocator, size); - _, prev_loc := io.stream_tell(file); + prev_loc := io.stream_tell(file).Ok ?? 0; io.stream_seek(file, 0, .Start); io.stream_read(file, .{ data, size }); @@ -75,6 +77,13 @@ get_contents_from_file :: (file: &File) -> str { return data[0 .. 
size]; } +from_fd :: (fd: FileData) -> File { + file: File; + file.data = fd; + file.vtable = &fs.__file_stream_vtable; + return file; +} + open :: (path: str, mode := OpenMode.Read) -> Result(File, os.FileError) { file := File.{ stream = .{ vtable = null }, @@ -94,6 +103,11 @@ close :: (file: &File) { file.stream.vtable = null; } +#overload +__dispose_used_local :: macro (file: &File) { + #this_package.close(file); +} + get_contents :: #match { get_contents_from_file, @@ -105,25 +119,6 @@ get_contents :: #match { } } -with_file :: (path: str, mode := OpenMode.Read) -> Iterator(&File) { - file_stream: ? File; - if fs := open(path, mode); fs { - file_stream = fs->ok(); - } - - if file_stream { - return iter.single( - file_stream->unwrap() |> alloc.on_heap(), - file => { - close(file); - cfree(file); - }); - - } else { - return iter.empty(&File); - } -} - is_file :: (path: str) -> bool { s: FileStat; if !file_stat(path, &s) do return false; diff --git a/core/os/path.onyx b/core/os/path.onyx index 58c2818fc..b9c7ba0a7 100644 --- a/core/os/path.onyx +++ b/core/os/path.onyx @@ -12,17 +12,15 @@ use core.conv PATH_SEP :: '/' } -#doc """ - Removes: - - Stray '.' in the path - - Stray '..' - - Repeated '/' - - Trailing '/' - - Modifies the string in place, as the length will never be longer. -""" +/// Removes: +/// - Stray '.' in the path +/// - Stray '..' +/// - Repeated '/' +/// - Trailing '/' +/// +/// Modifies the string in place, as the length will never be longer. path_clean :: (path: str, allocator := Path_Allocator) -> str { - if path == "" do return string.alloc_copy(".", Path_Allocator); + if path == "" do return string.copy(".", Path_Allocator); rooted := path[0] == PATH_SEP; n := path.length; @@ -73,11 +71,9 @@ path_clean :: (path: str, allocator := Path_Allocator) -> str { return out; } -#doc """ - Concatenates path elements, and returns cleaned output. - - This uses the temporary allocator, so a copy may be needed. 
-""" +/// Concatenates path elements, and returns cleaned output. +/// +/// This uses the temporary allocator, so a copy may be needed. path_join :: (path: ..str) -> str { out := make(dyn_str, allocator=context.temp_allocator); @@ -88,23 +84,19 @@ path_join :: (path: ..str) -> str { return path_clean(out); } -#doc """ - Returns everything but the last element in the path. - - This is then cleaned and copied into the temporary allocator. -""" +/// Returns everything but the last element in the path. +/// +/// This is then cleaned and copied into the temporary allocator. path_directory :: (path: str) -> str { dir, _ := path_split(path); return path_clean(dir); } -#doc """ - Returns the extension of the file on the end of the path, if present. - - path_extension("foo.txt") -> "txt" - path_extension("foo/bar") -> "" - path_extension("foo/bar.txt") -> "txt" -""" +/// Returns the extension of the file on the end of the path, if present. +/// +/// path_extension("foo.txt") -> "txt" +/// path_extension("foo/bar") -> "" +/// path_extension("foo/bar.txt") -> "txt" path_extension :: (path: str) -> str { for i in range.{ path.length - 1, 0, -1 } { if path[i] == PATH_SEP do break; @@ -113,12 +105,10 @@ path_extension :: (path: str) -> str { return ""; } -#doc """ - Returns the last element of the path, sans its extension. - - path_basename("foo.txt") -> "foo" - path_basename("test/bar.txt") -> "bar" -""" +/// Returns the last element of the path, sans its extension. +/// +/// path_basename("foo.txt") -> "foo" +/// path_basename("test/bar.txt") -> "bar" path_basename :: (path: str) -> str { if path == "" do return "."; @@ -127,10 +117,8 @@ path_basename :: (path: str) -> str { return path[start + 1 .. end]; } -#doc """ - Splits the last path element off. -""" -path_split :: (path: str) -> (parent: str, child: str) { +/// Splits the last path element off. +path_split :: (path: str) -> (str, str) { index := string.last_index_of(path, PATH_SEP); return path[0 .. 
index], path[index+1 .. path.length]; } diff --git a/core/os/process.onyx b/core/os/process.onyx index b6405530f..d861ca2ac 100644 --- a/core/os/process.onyx +++ b/core/os/process.onyx @@ -19,17 +19,13 @@ use runtime.platform { ProcessData } -#doc """ - Represents a spawned OS process. -""" +/// Represents a spawned OS process. Process :: struct { use stream: io.Stream; process_handle: ProcessData; } -#doc """ - Represents options for process creation. -""" +/// Represents options for process creation. ProcessSpawnOpts :: struct { capture_io: bool; non_blocking_io: bool; @@ -38,11 +34,9 @@ ProcessSpawnOpts :: struct { environment: [] Pair(str, str); } -#doc """ - Spawns a new OS process. This operation is always assumed to succeed. - To determine if the operation failed, use `process_wait` and look for the - `FailedToRun` state. -""" +/// Spawns a new OS process. This operation is always assumed to succeed. +/// To determine if the operation failed, use `process_wait` and look for the +/// `FailedToRun` state. process_spawn :: #match #local -> Process {} #overload @@ -80,17 +74,17 @@ process_spawn :: (path: str, args: [] str, opts: &ProcessSpawnOpts) -> Process { }; } -#doc "Force kills a subprocess." +/// Force kills a subprocess. process_kill :: (use p: &Process) -> bool { return __process_kill(process_handle); } -#doc "Waits for a process to exit." +/// Waits for a process to exit. process_wait :: (use p: &Process) => { return __process_wait(process_handle); } -#doc "Frees internal resources used by a process. Should be called after process_kill or process_wait." +/// Frees internal resources used by a process. Should be called after process_kill or process_wait. 
process_destroy :: (use p: &Process) => { __process_destroy(process_handle); } @@ -102,27 +96,27 @@ process_destroy :: (use p: &Process) => { } #local process_stream_vtable := io.Stream_Vtable.{ - read = (use p: &Process, buffer: [] u8) -> (io.Error, u32) { + read = (use p: &Process, buffer: [] u8) -> Result(u32, io.Error) { // Read from the process stdout - if cast(i64) process_handle == 0 do return .BadFile, 0; + if cast(i64) process_handle == 0 do return .{ Err = .BadFile }; bytes_read := __process_read(process_handle, buffer); if bytes_read < 0 { switch cast(Process_Read_Error) -bytes_read { - case .None do return .None, 0; - case .Process_Dead do return .EOF, 0; - case .Unknown do return .BadFile, 0; + case .None do return .{ Ok = 0 }; + case .Process_Dead do return .{ Err = .EOF }; + case .Unknown do return .{ Err = .BadFile }; } } - return .None, bytes_read; + return .{ Ok = bytes_read }; }, - write = (use p: &Process, buffer: [] u8) -> (io.Error, u32) { + write = (use p: &Process, buffer: [] u8) -> Result(u32, io.Error) { // Write to the process stdin - if cast(i64) process_handle == 0 do return .BadFile, 0; + if cast(i64) process_handle == 0 do return .{ Err = .BadFile }; bytes_written := __process_write(process_handle, buffer); - return .None, bytes_written; + return .{ Ok = bytes_written }; }, close = (use p: &Process) -> io.Error { @@ -131,11 +125,9 @@ process_destroy :: (use p: &Process) => { } } -#doc """ - Represents exit states of a process. - - This is not the best format for this data, as it is impossible to get the exit status. -""" +/// Represents exit states of a process. +/// +/// This is not the best format for this data, as it is impossible to get the exit status. ProcessResult :: enum { Success :: 0x00; FailedToRun :: 0x01; @@ -143,9 +135,7 @@ ProcessResult :: enum { InternalErr :: 0x03; } -#doc """ - Represents the exit state and output of a process. -""" +/// Represents the exit state and output of a process. 
ProcessResultOutput :: struct { result: ProcessResult; output: str; @@ -156,9 +146,7 @@ ProcessResultOutput :: struct { // Builder pattern for processes // -#doc """ - Stores configuration used by the builder pattern for processes. -""" +/// Stores configuration used by the builder pattern for processes. Command :: struct { _path: str; _args: [..] str; @@ -169,103 +157,99 @@ Command :: struct { _opts: ProcessSpawnOpts; } -#doc """ - Produces a new `Command` for the process builder. - - os.command() - ->path("executable_path") - ->args(.["argument", "list"]) - ->run(); -""" +/// Produces a new `Command` for the process builder. +/// +/// os.command() +/// ->path("executable_path") +/// ->args(.["argument", "list"]) +/// ->run(); command :: () -> &Command { return new(Command); } -#inject Command { - #doc "Sets the path of the command." - path :: (cmd: &Command, path: str) -> &Command { - cmd._path = path; - return cmd; - } +/// Sets the path of the command. +Command.path :: (cmd: &Command, path: str) -> &Command { + cmd._path = path; + return cmd; +} - #doc "Appends arguments to the argument array of the command." - args :: (cmd: &Command, args: [] str) -> &Command { - array.concat(&cmd._args, args); - return cmd; - } +/// Appends arguments to the argument array of the command. +Command.args :: (cmd: &Command, args: [] str) -> &Command { + array.concat(&cmd._args, args); + return cmd; +} - #doc "Sets an environment variable." - env :: (cmd: &Command, key, value: str) -> &Command { - cmd._env << .{ key, value }; - return cmd; - } +/// Sets an environment variable. +Command.env :: (cmd: &Command, key, value: str) -> &Command { + cmd._env << .{ key, value }; + return cmd; +} - #doc "Sets the working directory of the command." - dir :: (cmd: &Command, dir: str) -> &Command { - cmd._dir = dir; - return cmd; - } +/// Sets the working directory of the command. 
+Command.dir :: (cmd: &Command, dir: str) -> &Command { + cmd._dir = dir; + return cmd; +} - #doc "Runs the command, wait until it completes, and capture output." - output :: (cmd: &Command) -> Result(str, ProcessResultOutput) { - if !cmd._process { - cmd._opts.capture_io = true; - cmd._opts.non_blocking_io = false; - cmd._opts.detach = false; +/// Runs the command, wait until it completes, and capture output. +Command.output :: (cmd: &Command) -> Result(str, ProcessResultOutput) { + if !cmd._process { + cmd._opts.capture_io = true; + cmd._opts.non_blocking_io = false; + cmd._opts.detach = false; - cmd->start(); - if !cmd._process do return .{ Err = .{ .Error, "" } }; - } + cmd->start(); + if !cmd._process do return .{ Err = .{ .Error, "Failed to spawn process" } }; + } - r := io.reader_make(cmd._process->unwrap_ptr()); - output := io.read_all(&r); - io.reader_free(&r); + r := io.reader_make(cmd._process->unwrap_ptr()); + output := io.read_all(&r); + io.reader_free(&r); - res := cmd->wait(); - if res != .Success { - return .{ Err = .{ res, output } }; - } - - return .{ Ok = output }; + res := cmd->wait(); + if res != .Success { + return .{ Err = .{ res, output } }; } - #doc "Runs the command and waits until it completes." - run :: (cmd: &Command) -> ProcessResult { - cmd->start(); - return cmd->wait(); - } + return .{ Ok = output }; +} - #doc "Starts the command with the mapped I/O." - start_with_output :: (cmd: &Command) -> &Command { - cmd._opts.capture_io = true; - return cmd->start(); - } +/// Runs the command and waits until it completes. +Command.run :: (cmd: &Command) -> ProcessResult { + cmd->start(); + return cmd->wait(); +} - #doc "Starts the command." - start :: (cmd: &Command) -> &Command { - cmd._opts.environment = cmd._env; - cmd._opts.dir = cmd._dir; +/// Starts the command with the mapped I/O. 
+Command.start_with_output :: (cmd: &Command) -> &Command { + cmd._opts.capture_io = true; + return cmd->start(); +} - cmd._process = process_spawn(cmd._path, cmd._args, &cmd._opts); - return cmd; - } +/// Starts the command. +Command.start :: (cmd: &Command) -> &Command { + cmd._opts.environment = cmd._env; + cmd._opts.dir = cmd._dir; - #doc "Wait for the command to complete." - wait :: (cmd: &Command) -> ProcessResult { - if !cmd._process do return .Error; + cmd._process = process_spawn(cmd._path, cmd._args, &cmd._opts); + return cmd; +} - res := process_wait(cmd._process->unwrap_ptr()); - process_destroy(cmd._process->unwrap_ptr()); +/// Wait for the command to complete. +Command.wait :: (cmd: &Command) -> ProcessResult { + if !cmd._process do return .Error; - cmd->destroy(); - return res; - } + res := process_wait(cmd._process->unwrap_ptr()); + process_destroy(cmd._process->unwrap_ptr()); - #doc "Destroys all internal information of a command. Automatically called by `Command.wait`." - destroy :: (cmd: &Command) { - delete(&cmd._args); - delete(&cmd._env); - cfree(cmd); - } + cmd->destroy(); + return res; +} + +/// Destroys all internal information of a command. Automatically called by `Command.wait`. 
+Command.destroy :: (cmd: &Command) { + delete(&cmd._args); + delete(&cmd._env); + cfree(cmd); } diff --git a/core/os/tty.onyx b/core/os/tty.onyx index 4609ae458..be5c643e4 100644 --- a/core/os/tty.onyx +++ b/core/os/tty.onyx @@ -42,11 +42,11 @@ tty_set :: (state: &TTY_State) -> bool { } else { tty_get :: () -> TTY_State { - assert(false, "core.os.tty_get not supported on this platform."); + panic("core.os.tty_get not supported on this platform."); } tty_set :: (state: &TTY_State) -> bool { - assert(false, "core.os.tty_get not supported on this platform."); + panic("core.os.tty_get not supported on this platform."); return false; } diff --git a/core/random/random.onyx b/core/random/random.onyx index 2423fb32f..bb0af91e6 100644 --- a/core/random/random.onyx +++ b/core/random/random.onyx @@ -3,68 +3,62 @@ package core.random use core use runtime -#doc "The state of a random number generator." +/// The state of a random number generator. Random :: struct { seed: i64; } -#inject Random { - #doc """ - Creates a new random number generator. - - An initial seed can be passed in, otherwise the current UNIX time is used. - """ - make :: (seed: i64 = (#unquote __initial_value)) -> Random { - return .{ seed }; - } +/// Creates a new random number generator. +/// +/// An initial seed can be passed in, otherwise the current UNIX time is used. +Random.make :: (seed: i64 = (#unquote __initial_value)) -> Random { + return .{ seed }; +} - #doc "Sets the seed of the random number generator." - set_seed :: #match { - (self: &Random, s: u32) { self.seed = ~~s; }, - (self: &Random, s: u64) { self.seed = s; }, - } +/// Sets the seed of the random number generator. +Random.set_seed :: #match { + (self: &Random, s: u32) { self.seed = ~~s; }, + (self: &Random, s: u64) { self.seed = s; }, +} - #doc "Generates a random 32-bit integer." 
- int :: (self: &Random) -> u32 { - s := self.seed * RANDOM_MULTIPLIER + RANDOM_INCREMENT; - defer self.seed = s; - return cast(u32) ((s >> 16) & ~~0xffffffff); - } +/// Generates a random 32-bit integer. +Random.int :: (self: &Random) -> u32 { + s := self.seed * RANDOM_MULTIPLIER + RANDOM_INCREMENT; + defer self.seed = s; + return cast(u32) ((s >> 16) & ~~0xffffffff); +} - #doc "Generates a random 32-bit integer between `lo` and `hi`, inclusive." - between :: (self: &Random, lo: i32, hi: i32) -> i32 { - return self->int() % (hi + 1 - lo) + lo; - } +/// Generates a random 32-bit integer between `lo` and `hi`, inclusive. +Random.between :: (self: &Random, lo: i32, hi: i32) -> i32 { + return self->int() % (hi + 1 - lo) + lo; +} - #doc "Generates a random floating point number between `lo` and `hi`." - float :: (self: &Random, lo := 0.0f, hi := 1.0f) -> f32 { - return (cast(f32) (self->int() % (1 << 23)) / cast(f32) (1 << 23)) * (hi - lo) + lo; - } +/// Generates a random floating point number between `lo` and `hi`. +Random.float :: (self: &Random, lo := 0.0f, hi := 1.0f) -> f32 { + return (cast(f32) (self->int() % (1 << 23)) / cast(f32) (1 << 23)) * (hi - lo) + lo; +} - #doc "Returns a random element from a slice." - choice :: (self: &Random, a: [] $T) -> T { - return a[self->between(0, a.count - 1)]; - } +/// Returns a random element from a slice. +Random.choice :: (self: &Random, a: [] $T) -> T { + return a[self->between(0, a.count - 1)]; +} - #doc """ - Returns a random byte-array of length `bytes_long`. - - If `alpha_numeric` is true, then the string will only consist of alpha-numeric characters. 
- """ - string :: (self: &Random, bytes_long: u32, alpha_numeric := false, allocator := context.allocator) -> str { - use core.memory - - s := memory.make_slice(u8, bytes_long, allocator=allocator); - for& s { - if alpha_numeric { - #persist alpha_numeral := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; - *it = self->choice(alpha_numeral); - } else { - *it = ~~(self->between(32, 127)); - } +/// Returns a random byte-array of length `bytes_long`. +/// +/// If `alpha_numeric` is true, then the string will only consist of alpha-numeric characters. +Random.string :: (self: &Random, bytes_long: u32, alpha_numeric := false, allocator := context.allocator) -> str { + use core.memory + + s := memory.make_slice(u8, bytes_long, allocator=allocator); + for& s { + if alpha_numeric { + #persist alpha_numeral := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; + *it = self->choice(alpha_numeral); + } else { + *it = ~~(self->between(32, 127)); } - return s; } + return s; } // @@ -77,33 +71,31 @@ Random :: struct { // The global random state. global_random := Random.{ 8675309 }; -#doc "Sets the seed for the global random number generator." +/// Sets the seed for the global random number generator. set_seed :: #match { (s: u32) { global_random->set_seed(~~s); }, (s: u64) { global_random->set_seed(s); }, } -#doc "Generates a random integer." +/// Generates a random integer. int :: () => global_random->int(); -#doc "Generates a random integer, between `lo` and `hi` inclusively." +/// Generates a random integer, between `lo` and `hi` inclusively. between :: (lo: i32, hi: i32) => global_random->between(lo, hi); -#doc "Generates a random floating point number." +/// Generates a random floating point number. float :: (lo := 0.0f, hi := 1.0f) => global_random->float(lo, hi); -#doc "Chooses a random element out of a slice." +/// Chooses a random element out of a slice. 
choice :: (a: [] $T) => global_random->choice(a); -#doc """ - Returns a random byte-array of length `bytes_long`. - - If `alpha_numeric` is true, then the string will only consist of alpha-numeric characters. -""" +/// Returns a random byte-array of length `bytes_long`. +/// +/// If `alpha_numeric` is true, then the string will only consist of alpha-numeric characters. string :: (bytes_long: u32, alpha_numeric := false, allocator := context.allocator) => global_random->string(bytes_long, alpha_numeric, allocator); diff --git a/core/runtime/common.onyx b/core/runtime/common.onyx index 62cedd00b..02377b980 100644 --- a/core/runtime/common.onyx +++ b/core/runtime/common.onyx @@ -122,12 +122,9 @@ __thread_initialize :: () { func(data); __flush_stdio(); - - core.thread.__exited(id); } _thread_exit :: (id: i32) { - // raw_free(alloc.heap_allocator, __tls_base); - // core.thread.__exited(id); + core.thread.__exited(id); } } diff --git a/core/runtime/info/helper.onyx b/core/runtime/info/helper.onyx index a3ce05da5..4fb8a026e 100644 --- a/core/runtime/info/helper.onyx +++ b/core/runtime/info/helper.onyx @@ -203,7 +203,7 @@ size_of :: (t: type_expr) -> u32 { return size_of(d.base_type); } - case #default => 0; + case _ => 0; }; } @@ -317,9 +317,9 @@ get_struct_by_name :: (name: str) -> type_expr { index := 0; for type_table { defer index += 1; - if it.kind != .Struct do continue; + if it.info.kind != .Struct do continue; - if (cast(&Type_Info_Struct) it).name == name do return cast(type_expr) index; + if (cast(&Type_Info_Struct) it.info).name == name do return cast(type_expr) index; } return void; @@ -354,8 +354,8 @@ for_all_types :: macro (body: Code) { use runtime for runtime.info.type_table.count { - type_info := runtime.info.type_table[it]; - type_idx : type_expr = ~~ it; + type_info := runtime.info.type_table[it].info; + type_idx := runtime.info.type_table[it].type; #unquote body(type_info, type_idx); } diff --git a/core/runtime/info/stack_trace.onyx 
b/core/runtime/info/stack_trace.onyx index 8b2e00413..09dbb76db 100644 --- a/core/runtime/info/stack_trace.onyx +++ b/core/runtime/info/stack_trace.onyx @@ -16,7 +16,6 @@ Stack_Node :: struct { file: str; line: u32; func_name: str; - func_type: type_expr; } Stack_Trace :: struct { @@ -32,10 +31,20 @@ Stack_Frame :: struct { #if runtime.Stack_Trace_Enabled { -get_stack_trace :: () -> [..] Stack_Frame { +get_stack_trace :: #match #local {} + +#overload +get_stack_trace :: (offset := 0) -> [..] Stack_Frame { trace := make([..] Stack_Frame, 8, alloc.temp_allocator); walker := __stack_trace.prev; + + for offset { + if walker { + walker = walker.prev + } + } + while walker { trace << .{ walker.data, walker.current_line }; walker = walker.prev; @@ -44,10 +53,38 @@ get_stack_trace :: () -> [..] Stack_Frame { return trace; } +#overload +get_stack_trace :: (buf: [] Stack_Frame, offset := 0) -> [] Stack_Frame { + walker := __stack_trace.prev + + for offset { + if walker { + walker = walker.prev + } + } + + i := 0 + while walker && i < buf.count { + buf[i] = .{ walker.data, walker.current_line } + walker = walker.prev + i += 1 + } + + return buf[0 .. i] +} + } else { -get_stack_trace :: () -> [] Stack_Frame { - return .[]; +get_stack_trace :: #match #local {} + +#overload +get_stack_trace :: (offset := 0) -> [] Stack_Frame { + return .[] +} + +#overload +get_stack_trace :: (buf: [] Stack_Frame, offset := 0) -> [] Stack_Frame { + return buf[0 .. 
0] } } diff --git a/core/runtime/info/types.onyx b/core/runtime/info/types.onyx index 4d2afdcf4..0b5f13f52 100644 --- a/core/runtime/info/types.onyx +++ b/core/runtime/info/types.onyx @@ -3,7 +3,12 @@ package runtime.info -type_table : [] &Type_Info; +Type_Entry :: struct { + type: type_expr + info: &Type_Info +} + +type_table : [] Type_Entry Type_Info :: struct { // This must match the order of the elements in onyxtypes.h @@ -223,35 +228,48 @@ Type_Info_Polymorphic_Union :: struct { } get_type_info :: (t: type_expr) -> &Type_Info { - // Grossness to get around the fact that type_exprs are not technically comparable, because in most - // cases you should not compare them as the number assigned to them is arbitrary. - if ~~t < cast(i32) 0 || ~~t >= cast(i32) type_table.count do return null; + ti := cast(i32) t - return type_table[cast(i32) t]; -} + lo := 0 + hi := type_table.count - 1 + mid := (hi + lo) >> 1 + while lo <= hi { + x := cast(i32) type_table[mid].type + if ti == x { + return type_table[mid].info + } + if x < ti { + lo = mid + 1 + } else { + hi = mid - 1 + } -#inject type_expr { - info :: get_type_info -} + mid = (hi + lo) >> 1 + } -#inject Type_Info { - as_basic :: (t: &Type_Info) => cast(&Type_Info_Basic, t); - as_pointer :: (t: &Type_Info) => cast(&Type_Info_Pointer, t); - as_multi_pointer :: (t: &Type_Info) => cast(&Type_Info_Multi_Pointer, t); - as_function :: (t: &Type_Info) => cast(&Type_Info_Function, t); - as_array :: (t: &Type_Info) => cast(&Type_Info_Array, t); - as_slice :: (t: &Type_Info) => cast(&Type_Info_Slice, t); - as_dynamic_array :: (t: &Type_Info) => cast(&Type_Info_Dynamic_Array, t); - as_variadic_argument :: (t: &Type_Info) => cast(&Type_Info_Variadic_Argument, t); - as_enum :: (t: &Type_Info) => cast(&Type_Info_Enum, t); - as_struct :: (t: &Type_Info) => cast(&Type_Info_Struct, t); - as_poly_struct :: (t: &Type_Info) => cast(&Type_Info_Polymorphic_Struct, t); - as_union :: (t: &Type_Info) => cast(&Type_Info_Union, t); - 
as_poly_union :: (t: &Type_Info) => cast(&Type_Info_Polymorphic_Union, t); - as_distinct :: (t: &Type_Info) => cast(&Type_Info_Distinct, t); - as_compound :: (t: &Type_Info) => cast(&Type_Info_Compound, t); + return null } + +type_expr.info :: get_type_info + +Type_Info.as_basic :: (t: &Type_Info) => cast(&Type_Info_Basic, t); +Type_Info.as_pointer :: (t: &Type_Info) => cast(&Type_Info_Pointer, t); +Type_Info.as_multi_pointer :: (t: &Type_Info) => cast(&Type_Info_Multi_Pointer, t); +Type_Info.as_function :: (t: &Type_Info) => cast(&Type_Info_Function, t); +Type_Info.as_array :: (t: &Type_Info) => cast(&Type_Info_Array, t); +Type_Info.as_slice :: (t: &Type_Info) => cast(&Type_Info_Slice, t); +Type_Info.as_dynamic_array :: (t: &Type_Info) => cast(&Type_Info_Dynamic_Array, t); +Type_Info.as_variadic_argument :: (t: &Type_Info) => cast(&Type_Info_Variadic_Argument, t); +Type_Info.as_enum :: (t: &Type_Info) => cast(&Type_Info_Enum, t); +Type_Info.as_struct :: (t: &Type_Info) => cast(&Type_Info_Struct, t); +Type_Info.as_poly_struct :: (t: &Type_Info) => cast(&Type_Info_Polymorphic_Struct, t); +Type_Info.as_union :: (t: &Type_Info) => cast(&Type_Info_Union, t); +Type_Info.as_poly_union :: (t: &Type_Info) => cast(&Type_Info_Polymorphic_Union, t); +Type_Info.as_distinct :: (t: &Type_Info) => cast(&Type_Info_Distinct, t); +Type_Info.as_compound :: (t: &Type_Info) => cast(&Type_Info_Compound, t); + + diff --git a/core/runtime/platform/js/platform.onyx b/core/runtime/platform/js/platform.onyx index 09449209e..388b81c5d 100644 --- a/core/runtime/platform/js/platform.onyx +++ b/core/runtime/platform/js/platform.onyx @@ -38,16 +38,21 @@ __time :: () -> i64 #foreign "host" "time" --- __wait_for_input :: (timeout: i32) => true; +__args :: (a: Allocator) -> [] cstr do return .[]; + __futex_wait :: (addr: rawptr, expected: i32, timeout: i32) -> i32 { use core.intrinsics.atomics {__atomic_wait} if context.thread_id != 0 { __atomic_wait(cast(&i32) addr, expected, ~~timeout); + return 
0; } + return 1; } __futex_wake :: (addr: rawptr, maximum: i32) -> i32 { use core.intrinsics.atomics {__atomic_notify} __atomic_notify(addr, maximum); + return 1; } PollDescription :: struct { diff --git a/core/runtime/platform/onyx/fs.onyx b/core/runtime/platform/onyx/fs.onyx index 0e9e45947..b392b81b9 100644 --- a/core/runtime/platform/onyx/fs.onyx +++ b/core/runtime/platform/onyx/fs.onyx @@ -85,40 +85,45 @@ __file_stream_vtable := io.Stream_Vtable.{ return (.None) if now >= 0 else .BadFile; }, - tell = (use fs: &os.File) -> (io.Error, u32) { - return .None, __file_tell(data); + tell = (use fs: &os.File) -> Result(u32, io.Error) { + return .{ Ok = __file_tell(data) }; }, - read = (use fs: &os.File, buffer: [] u8) -> (io.Error, u32) { + read = (use fs: &os.File, buffer: [] u8) -> Result(u32, io.Error) { bytes_read: u64; error := __file_read(data, buffer, &bytes_read); - return error, ~~bytes_read; + if error != .None do return .{ Err = error }; + return .{ Ok = ~~bytes_read }; }, - read_at = (use fs: &os.File, at: u32, buffer: [] u8) -> (io.Error, u32) { + read_at = (use fs: &os.File, at: u32, buffer: [] u8) -> Result(u32, io.Error) { __file_seek(data, at, .Start); bytes_read: u64; error := __file_read(data, buffer, &bytes_read); - return error, ~~bytes_read; + if error != .None do return .{ Err = error }; + return .{ Ok = ~~bytes_read }; }, - read_byte = (use fs: &os.File) -> (io.Error, u8) { + read_byte = (use fs: &os.File) -> Result(u8, io.Error) { byte: u8; error := __file_read(data, ~~ cast([1] u8) &byte, null); - return error, byte; + if error != .None do return .{ Err = error }; + return .{ Ok = byte }; }, - write = (use fs: &os.File, buffer: [] u8) -> (io.Error, u32) { + write = (use fs: &os.File, buffer: [] u8) -> Result(u32, io.Error) { bytes_wrote: u64; error := __file_write(data, buffer, &bytes_wrote); - return error, ~~bytes_wrote; + if error != .None do return .{ Err = error }; + return .{ Ok = ~~bytes_wrote }; }, - write_at = (use fs: &os.File, at: 
u32, buffer: [] u8) -> (io.Error, u32) { + write_at = (use fs: &os.File, at: u32, buffer: [] u8) -> Result(u32, io.Error) { __file_seek(data, at, .Start); bytes_wrote: u64; error := __file_write(data, buffer, &bytes_wrote); - return error, ~~bytes_wrote; + if error != .None do return .{ Err = error }; + return .{ Ok = ~~bytes_wrote }; }, write_byte = (use fs: &os.File, byte: u8) -> io.Error { @@ -141,7 +146,7 @@ __file_stream_vtable := io.Stream_Vtable.{ return __file_size(data); }, - poll = (use fs: &os.File, ev: io.PollEvent, timeout: i32) -> (io.Error, bool) { + poll = (use fs: &os.File, ev: io.PollEvent, timeout: i32) -> Result(bool, io.Error) { fds: [1] PollDescription; fds[0] = .{ fd = ~~data, @@ -151,10 +156,10 @@ __file_stream_vtable := io.Stream_Vtable.{ __poll(fds, timeout); if fds[0].out_events == .Closed { - return .EOF, ev == .Closed; + return .{ Err = .EOF }; } - return .None, fds[0].out_events == ev; + return .{ Ok = fds[0].out_events == ev }; } }; diff --git a/core/runtime/platform/onyx/net.onyx b/core/runtime/platform/onyx/net.onyx index 8828cd5f1..2f95e59e3 100644 --- a/core/runtime/platform/onyx/net.onyx +++ b/core/runtime/platform/onyx/net.onyx @@ -7,7 +7,8 @@ use core.net { SocketOption, SocketAddress, SocketShutdown, - SocketStatus + SocketStatus, + ResolveResult } use core {Result, string, io} @@ -53,7 +54,7 @@ __net_sock_bind :: (s: SocketData, addr: &SocketAddress) -> bool { return __net_bind_host(s, host.host, host.port); } - case #default do return false; + case _ do return false; } } @@ -108,7 +109,7 @@ __net_sock_connect :: (s: SocketData, addr: &SocketAddress) -> io.Error { translate_error :: (s: SocketError) => switch s { case .None => io.Error.None; case .ConnectFailed => .ConnectFailed; - case #default => .OperationFailed; + case _ => .OperationFailed; } } @@ -175,7 +176,7 @@ __net_sock_recv :: (s: SocketData, buf: [] u8) -> Result(i32, io.Error) { __net_sock_send :: (s: SocketData, buf: [] u8) -> Result(i32, io.Error) { sent := 
__net_send(s, buf); - if sent == 0 || sent == -1 + if sent == -1 { // If there was an error sending data, call the connection closed. return .{ Err = .EOF }; @@ -203,8 +204,45 @@ __net_sock_close :: (s: SocketData) -> void { __net_close_socket(s); } -__net_resolve :: (host: str, port: u16, out_addrs: [] SocketAddress) -> i32 { +__net_resolve :: (host: str, port: u16, out_addrs: [] ResolveResult) -> i32 { + host_str := string.to_cstr_on_stack(host); + original_info := __net_resolve_start(host_str, port); + + data: [512] u8; + + out_len := 0; + + info := original_info; + while out_len < out_addrs.count; info != 0 { + info = __net_resolve_next(info, data); + + family := *cast(& SocketFamily) &data[0]; // Do we need to transfer the family? + type := *cast(& SocketType) &data[4]; + proto := *cast(& SocketProto) &data[8]; + + addr := switch family { + case .Inet => do { + addr_in: &sockaddr_in_t = ~~&data[12]; + return SocketAddress.{ Inet = .{ beu16_to_leu16(addr_in.port), addr_in.addr } }; + } + + case .Inet6 => do { + addr_in6: &sockaddr_in6_t = ~~&data[12]; + return SocketAddress.{ Inet6 = .{ beu16_to_leu16(addr_in6.port), addr_in6.addr } }; + } + + case .Unix => do { + addr_un: &sockaddr_un_t = ~~&data[12]; + return SocketAddress.{ Unix = addr_un.path }; + } + }; + out_addrs[out_len] = .{ family, type, proto, addr }; + out_len += 1; + } + + __net_resolve_end(original_info); + return out_len; } @@ -249,7 +287,7 @@ __net_resolve :: (host: str, port: u16, out_addrs: [] SocketAddress) -> i32 { *out = .{ Inet = .{ beu16_to_leu16(addr_in.port), addr_in.addr } }; } - case 10 { + case 10, 30 { addr_in6: &sockaddr_in6_t = ~~addr; *out = .{ Inet6 = .{ beu16_to_leu16(addr_in6.port), addr_in6.addr } }; } @@ -283,6 +321,10 @@ __net_resolve :: (host: str, port: u16, out_addrs: [] SocketAddress) -> i32 { __net_recvfrom :: (handle: SocketData, data: [] u8, out_buf: rawptr, out_len: &i32) -> i32 --- __net_setting_flag :: (handle: SocketData, setting: SocketOption, value: bool) -> 
void --- + + __net_resolve_start :: (host: cstr, port: u16) -> u64 --- + __net_resolve_next :: (info: u64, buf: [] u8) -> u64 --- + __net_resolve_end :: (original_info: u64) -> void --- } beu16_to_leu16 :: (x: u16) -> u16 { diff --git a/core/runtime/platform/onyx/platform.onyx b/core/runtime/platform/onyx/platform.onyx index 862b4f081..6c0f86ef6 100644 --- a/core/runtime/platform/onyx/platform.onyx +++ b/core/runtime/platform/onyx/platform.onyx @@ -13,9 +13,9 @@ use runtime { #load "./env" #load "./net" -#load "core/onyx/cptr" -#load "core/onyx/cbindgen" -// #load "core/onyx/fault_handling" +#load "core:onyx/cptr" +#load "core:onyx/cbindgen" +// #load "core:onyx/fault_handling" // Platform supports Supports_Files :: true @@ -40,26 +40,37 @@ Supports_TTY :: true } __output_string :: (s: str) -> u32 { - err, wrote := io.stream_write(&__stdout, s); + wrote := io.stream_write(&__stdout, s).Ok ?? 0; return wrote; } __output_error :: (s: str) -> u32 { - err, wrote := io.stream_write(&__stderr, s); + wrote := io.stream_write(&__stderr, s).Ok ?? 
0; return wrote; } __read_from_input :: (buffer: [] u8) -> i32 { - err, read := io.stream_read(&__stdin, buffer); - if err == .ReadPending do return 0; - if err != .None do return -1; - return read; + switch io.stream_read(&__stdin, buffer) { + case .Err as err { + if err == .ReadPending do return 0; + return -1; + } + + case .Ok as read { + return read; + } + } } __wait_for_input :: (timeout: i32) -> bool { - err, ready := io.stream_poll(&__stdin, .Read, timeout); - if err != .None do return false; - return ready; + switch io.stream_poll(&__stdin, .Read, timeout) { + case .Err as err { + return false; + } + case .Ok as ready{ + return ready; + } + } } @@ -96,6 +107,18 @@ ProcessData :: #distinct u64 __random_get :: (buf: [] u8) -> void --- } +__args :: (allocator: Allocator) -> [] cstr { + args : [] cstr; + argv_buf_size : i32; + __args_sizes_get(&args.count, &argv_buf_size); + + args = make([] cstr, args.count, allocator); + argv_buf := cast([&] u8) allocator->alloc(argv_buf_size); + __args_get(args.data, argv_buf); + + return args; +} + #if !#defined(runtime.vars.no_entrypoint) { use main #local MAIN_FUNCTION :: main.main @@ -127,23 +150,16 @@ __start :: () { __runtime_initialize(); context.thread_id = 0; - #if #defined(runtime.vars.MEMDEBUG) { - use core.alloc.memdebug - memdebug.enable_in_scope(context.allocator); + #if #defined(runtime.vars.MEMWATCH) { + use core.alloc.memwatch + memwatch.enable_in_scope(context.allocator); } #if (typeof MAIN_FUNCTION) == #type () -> void { // An error on this line means no 'main.main' was found in your program. 
MAIN_FUNCTION(); } else { - args : [] cstr; - argv_buf_size : i32; - __args_sizes_get(&args.count, &argv_buf_size); - - args = memory.make_slice(cstr, args.count); - argv_buf := cast(cstr) calloc(argv_buf_size); - __args_get(args.data, argv_buf); - + args := __args(context.allocator); MAIN_FUNCTION(args); } diff --git a/core/runtime/platform/wasi/platform.onyx b/core/runtime/platform/wasi/platform.onyx index 57d142be4..a278b51c1 100644 --- a/core/runtime/platform/wasi/platform.onyx +++ b/core/runtime/platform/wasi/platform.onyx @@ -210,38 +210,30 @@ __poll :: (fds: [] PollDescription, timeout: i32) -> void { } } +__args :: (allocator: Allocator) -> [] cstr { + args : [] cstr; + argv_buf_size : i32; + args_sizes_get(&args.count, &argv_buf_size); + + args = make([] cstr, args.count, allocator); + argv_buf := cast([&] u8) allocator->alloc(argv_buf_size); + args_get(args.data, argv_buf); + + return args; +} + + // Sets up everything needed for execution. __start :: () { __runtime_initialize(); context.thread_id = 0; - #if (typeof MAIN_PKG.main) == #type () -> void { + #if (typeof MAIN_PKG.main) == (#type () -> void) { MAIN_PKG.main(); } else { - args : [] cstr; - argv_buf_size : Size; - args_sizes_get(&args.count, &argv_buf_size); - - args = core.memory.make_slice(cstr, args.count); - argv_buf := cast(cstr) calloc(argv_buf_size); - args_get(args.data, argv_buf); - - - // This post processing of the argv array needs to happen if the target is using - // 32-bit pointers, instead of 64-bits. Right now, Onyx pointers take up 64-bits, - // but in most circumstances, only the lower 32-bits are used. When webassembly - // standardizes the 64-bit address space, it will be an easy conversion over. - // But for right now, WASI will give the argv array 32-bit pointers, instead of - // 64-bit pointers. This loops expands the 32-bit pointers into 64-bit pointers - // while not clobbering any of them. 
- while i := cast(i32) (args.count - 1); i >= 0 { - defer i -= 1; - - args[i] = cast(cstr) (cast([&] u32) args.data)[i]; - } - + args := __args(context.allocator); MAIN_PKG.main(args); } diff --git a/core/runtime/platform/wasi/wasi_defs.onyx b/core/runtime/platform/wasi/wasi_defs.onyx index 16688d93f..eb042ba3b 100644 --- a/core/runtime/platform/wasi/wasi_defs.onyx +++ b/core/runtime/platform/wasi/wasi_defs.onyx @@ -405,16 +405,15 @@ path_filestat_get :: (fd: FileDescriptor, flags: LookupFlags, path: str, b path_filestat_set_times :: (fd: FileDescriptor, flags: LookupFlags, path: str, atim: Timestamp, mtim: Timestamp, fst_flags: FSTFlags) -> Errno #foreign WASI_VERSION "path_filestat_set_times" --- path_link :: (fd: FileDescriptor, old_flags: LookupFlags, old_path: str, new_fd: FileDescriptor, new_path: str) -> Errno #foreign WASI_VERSION "path_link" --- -path_open :: (fd: FileDescriptor - , dirflags: LookupFlags - , path: str - , oflags: OFlags - , fs_rights_base: Rights - , fs_rights_inherting: Rights - , fdflags: FDFlags - , opened_fd: &FileDescriptor - ) -> Errno - #foreign WASI_VERSION "path_open" --- +path_open :: (fd: FileDescriptor, + dirflags: LookupFlags, + path: str, + oflags: OFlags, + fs_rights_base: Rights, + fs_rights_inherting: Rights, + fdflags: FDFlags, + opened_fd: &FileDescriptor, + ) -> Errno #foreign WASI_VERSION "path_open" --- path_readlink :: (fd: FileDescriptor, path: str, buf: &u8, buf_len: Size, bufused: &Size) -> Errno #foreign WASI_VERSION "path_readlink" --- path_remove_directory :: (fd: FileDescriptor, path: str) -> Errno #foreign WASI_VERSION "path_remove_directory" --- diff --git a/core/runtime/platform/wasi/wasi_fs.onyx b/core/runtime/platform/wasi/wasi_fs.onyx index 487d11f00..6674fbc93 100644 --- a/core/runtime/platform/wasi/wasi_fs.onyx +++ b/core/runtime/platform/wasi/wasi_fs.onyx @@ -26,80 +26,54 @@ FileData :: struct { __file_open :: (path: str, mode := os.OpenMode.Read) -> (FileData, os.FileError) { // Requesting all of the 
rights because why not. - rights := - Rights.DataSync - | Rights.Read - | Rights.Seek - | Rights.FdStatSetFlags - | Rights.Sync - | Rights.Tell - | Rights.Write - | Rights.Advise - | Rights.Allocate - | Rights.PathCreateDirectory - | Rights.PathCreateFile - | Rights.PathLinkSource - | Rights.PathLinkTarget - | Rights.PathOpen - | Rights.ReadDir - | Rights.PathReadlink - | Rights.PathRenameSource - | Rights.PathRenameTarget - | Rights.PathFilestatGet - | Rights.PathFilestateSetSize - | Rights.PathFilestateSetTimes - | Rights.FilestatGet - | Rights.FilestatSetSize - | Rights.FilestatSetTimes - | Rights.PathSymlink - | Rights.PathRemoveDirectory - | Rights.PathUnlinkFile - | Rights.PollFDReadWrite; - - open_flags := cast(OFlags) 0; - fd_flags := FDFlags.Sync; + rights := wasi.Rights.{} + open_flags := cast(OFlags) 0 + fd_flags := cast(FDFlags) 0 switch mode { + case .Append { + open_flags |= OFlags.Creat; + } + case .Write { open_flags |= OFlags.Creat | OFlags.Trunc; - rights |= Rights.Write | Rights.Seek | Rights.Tell; } + } + switch mode { case .Append { - open_flags |= OFlags.Creat; - rights |= Rights.Write | Rights.Seek | Rights.Tell; - fd_flags |= FDFlags.Append; + fd_flags |= FDFlags.Append; + fallthrough; + } + + case .Write { + rights |= Rights.Write | Rights.Seek | Rights.Tell; + rights |= Rights.DataSync | Rights.FdStatSetFlags; + rights |= Rights.Sync | Rights.Allocate | Rights.Advise; + rights |= Rights.FilestatSetSize | Rights.FilestatSetTimes; } case .Read { - rights |= Rights.Read | Rights.Seek | Rights.Tell; + rights |= Rights.Read | Rights.Seek | Rights.Tell | Rights.FilestatGet; } } - file := FileData.{ fd = -1 }; + file := FileData.{ fd = 0 }; file.mode = mode; file.rights = rights; file.flags = fd_flags; - // Currently the directory's file descriptor appears to always be 3 - // However, this is not necessarily correct, so also try a preopened directory - for DIR_FD in .[ 3, 4 ] { - if err := wasi.path_open( - DIR_FD, - .SymLinkFollow, - path, - 
open_flags, - rights, - rights, - fd_flags, - &file.fd); - err == .Success { - return file, .None; - } + fd, resolved_path := resolve_fd_and_path(path) + err := wasi.path_open(fd, .{}, resolved_path, open_flags, rights, rights, fd_flags, &file.fd); + + if err == .Success { + return file, .None; } - // @TODO // provide a better error code. - return file, .NotFound; + return .{}, switch err { + // TODO: Return better error statuses. + case _ => .NotFound + } } __file_close :: (file: FileData) -> os.FileError { @@ -114,21 +88,22 @@ __file_stat :: (path: str, out: &os.FileStat) -> bool { fs: wasi.FileStat; exists := false; - for .[3, 4] { // Trying both preopened directories - err := wasi.path_filestat_get(it, .SymLinkFollow, path, &fs); - if err == .Success { - exists = true; - out.size = ~~ fs.size; - out.change_time = fs.ctim / 1000000; - out.accessed_time = fs.atim / 1000000; - out.modified_time = fs.mtim / 1000000; - - switch fs.filetype { - case .RegularFile do out.type = .RegularFile; - case .Directory do out.type = .Directory; - case .SymLink do out.type = .SymLink; - case #default do out.type = .Unknown; - } + + fd, resolved_path := resolve_fd_and_path(path) + + err := wasi.path_filestat_get(fd, .SymLinkFollow, resolved_path, &fs); + if err == .Success { + exists = true; + out.size = ~~ fs.size; + out.change_time = fs.ctim / 1000000; + out.accessed_time = fs.atim / 1000000; + out.modified_time = fs.mtim / 1000000; + + switch fs.filetype { + case .RegularFile do out.type = .RegularFile; + case .Directory do out.type = .Directory; + case .SymLink do out.type = .SymLink; + case _ do out.type = .Unknown; } } @@ -139,30 +114,28 @@ __file_exists :: (path: str) -> bool { fs: wasi.FileStat; exists := false; - for .[3, 4] { // Trying both preopened directories - err := wasi.path_filestat_get(it, .SymLinkFollow, path, &fs); - if err == .Success do exists = true; - } + fd, resolved_path := resolve_fd_and_path(path) + err := wasi.path_filestat_get(fd, .SymLinkFollow, 
resolved_path, &fs); + if err == .Success do exists = true; return exists; } __file_remove :: (path: str) -> bool { removed := false; - for .[3, 4] { // Trying both preopened directories - err := wasi.path_unlink_file(it, path); - if err == .Success do removed = true; - } + fd, resolved_path := resolve_fd_and_path(path) + err := wasi.path_unlink_file(fd, resolved_path); + if err == .Success do removed = true; return removed; } __file_rename :: (old_path: str, new_path: str) -> bool { renamed := false; - for .[3, 4] { // Trying both preopened directories - err := wasi.path_rename(it, old_path, it, new_path); - if err == .Success do renamed = true; - } + fd, resolved_path := resolve_fd_and_path(old_path) + new_fd, new_resolved_path := resolve_fd_and_path(new_path) + err := wasi.path_rename(fd, resolved_path, new_fd, new_resolved_path); + if err == .Success do renamed = true; return renamed; } @@ -177,62 +150,62 @@ __file_stream_vtable := io.Stream_Vtable.{ return .None; }, - tell = (use fs: &os.File) -> (io.Error, u32) { + tell = (use fs: &os.File) -> Result(u32, io.Error) { location : wasi.Filesize; error := wasi.fd_tell(data.fd, &location); - if error != .Success do return .BadFile, 0; + if error != .Success do return .{ Err = .BadFile }; - return .None, ~~location; + return .{ Ok = ~~location }; }, - read = (use fs: &os.File, buffer: [] u8) -> (io.Error, u32) { + read = (use fs: &os.File, buffer: [] u8) -> Result(u32, io.Error) { bytes_read : wasi.Size; vec := IOVec.{ buf = cast(u32) buffer.data, len = buffer.count }; error := wasi.fd_read(data.fd, &vec, 1, &bytes_read); - if error != .Success do return .BadFile, 0; + if error != .Success do return .{ Err = .BadFile }; - return .None, bytes_read; + return .{ Ok = bytes_read }; }, - read_at = (use fs: &os.File, at: u32, buffer: [] u8) -> (io.Error, u32) { + read_at = (use fs: &os.File, at: u32, buffer: [] u8) -> Result(u32, io.Error) { bytes_read : wasi.Size; vec := IOVec.{ buf = cast(u32) buffer.data, len = 
buffer.count }; error := wasi.fd_pread(data.fd, &vec, 1, ~~at, &bytes_read); // FIX: Maybe report io.Error.OutOfBounds if the 'at' was out of bounds? - if error != .Success do return .BadFile, 0; + if error != .Success do return .{ Err = .BadFile }; - return .None, bytes_read; + return .{ Ok = bytes_read }; }, - read_byte = (use fs: &os.File) -> (io.Error, u8) { + read_byte = (use fs: &os.File) -> Result(u8, io.Error) { bytes_read : wasi.Size; byte : u8; vec := IOVec.{ buf = cast(u32) &byte, len = 1}; error := wasi.fd_read(data.fd, &vec, 1, &bytes_read); - if error != .Success do return .BadFile, 0; + if error != .Success do return .{ Err = .BadFile }; - return .None, byte; + return .{ Ok = byte }; }, - write = (use fs: &os.File, buffer: [] u8) -> (io.Error, u32) { + write = (use fs: &os.File, buffer: [] u8) -> Result(u32, io.Error) { bytes_written : wasi.Size; vec := IOVec.{ buf = cast(u32) buffer.data, len = buffer.count }; error := wasi.fd_write(data.fd, &vec, 1, &bytes_written); - if error != .Success do return .BadFile, 0; + if error != .Success do return .{ Err = .BadFile }; - return .None, bytes_written; + return .{ Ok = bytes_written }; }, - write_at = (use fs: &os.File, at: u32, buffer: [] u8) -> (io.Error, u32) { + write_at = (use fs: &os.File, at: u32, buffer: [] u8) -> Result(u32, io.Error) { bytes_written : wasi.Size; vec := IOVec.{ buf = cast(u32) buffer.data, len = buffer.count }; error := wasi.fd_pwrite(data.fd, &vec, 1, ~~at, &bytes_written); // FIX: Maybe report io.Error.OutOfBounds if the 'at' was out of bounds? 
- if error != .Success do return .BadFile, 0; + if error != .Success do return .{ Err = .BadFile }; - return .None, bytes_written; + return .{ Ok = bytes_written }; }, write_byte = (use fs: &os.File, byte: u8) -> io.Error { @@ -262,7 +235,7 @@ __file_stream_vtable := io.Stream_Vtable.{ return ~~ file_stat.size; }, - poll = (use fs: &os.File, ev: io.PollEvent, timeout: i32) -> (io.Error, bool) { + poll = (use fs: &os.File, ev: io.PollEvent, timeout: i32) -> Result(bool, io.Error) { p: [1] PollDescription = .[.{ fd = data.fd, in_events = ev, @@ -270,7 +243,7 @@ __file_stream_vtable := io.Stream_Vtable.{ runtime.platform.__poll(p, timeout); - return .None, p[0].out_events == ev; + return .{ Ok = p[0].out_events == ev }; } } @@ -285,22 +258,20 @@ DirectoryData :: &WasiDirectory; __dir_open :: (path: str, dir: &DirectoryData) -> bool { dir_fd: FileDescriptor; - DIR_PERMS := Rights.PathOpen | .ReadDir | .PathReadlink | .FilestatGet | .PathFilestatGet; - FILE_PERMS := Rights.Read | .Seek | .Tell | .FilestatGet | .PollFDReadWrite; + DIR_PERMS := Rights.PathOpen | .ReadDir | .PathReadlink | .FilestatGet; - for .[3, 4] { - err := wasi.path_open(it, .SymLinkFollow, path, .Directory, DIR_PERMS, FILE_PERMS, .Sync, &dir_fd); - if err != .Success do continue; - - d := new(WasiDirectory); - d.dir_fd = dir_fd; - d.last_cookie = 0; - - *dir = d; - return true; + fd, resolved_path := resolve_fd_and_path(path) + err := wasi.path_open(fd, .SymLinkFollow, resolved_path, .Directory, DIR_PERMS, .{}, .{}, &dir_fd); + if err != .Success { + return false; } - return false; + d := new(WasiDirectory); + d.dir_fd = dir_fd; + d.last_cookie = 0; + + *dir = d; + return true; } __dir_close :: (dir: DirectoryData) { @@ -323,7 +294,7 @@ __dir_read :: (dir: DirectoryData, out_entry: &os.DirectoryEntry) -> bool { case .Directory do out_entry.type = .Directory; case .RegularFile do out_entry.type = .RegularFile; case .SymLink do out_entry.type = .SymLink; - case #default do out_entry.type = .Other; + 
case _ do out_entry.type = .Other; } out_entry.identifier = ~~dirent.d_ino; @@ -337,25 +308,93 @@ __dir_read :: (dir: DirectoryData, out_entry: &os.DirectoryEntry) -> bool { __dir_create :: (path: str) -> bool { created := false; - for .[3, 4] { // Trying both preopened directories - err := wasi.path_create_directory(it, path); - if err == .Success do created = true; - } + fd, resolved_path := resolve_fd_and_path(path) + err := wasi.path_create_directory(fd, resolved_path); + if err == .Success do created = true; return created; } __dir_remove :: (path: str) -> bool { removed := false; - for .[3, 4] { // Trying both preopened directories - err := wasi.path_remove_directory(it, path); - if err == .Success do removed = true; - } + fd, resolved_path := resolve_fd_and_path(path) + err := wasi.path_remove_directory(fd, resolved_path); + if err == .Success do removed = true; return removed; } +#local { + preopened_dirs: [..] Pair(u32, str) + + #init () { + names := make([..] Pair(u32, str)) + + while true { + fd := names.length + 3 + prestat: wasi.PrestatTagged + switch wasi.fd_prestat_get(fd, &prestat) { + case .Success {} + case .NotSupported, .BadFile { + preopened_dirs = names + return + } + case _ { + log(.Error, "Error loading pre-opened directories") + return + } + } + + name_len := prestat.u.dir.pr_name_len + name := make([] u8, name_len) + switch wasi.fd_prestat_dir_name(fd, name) { + case .Success {} + case _ { + log(.Error, "Error loading pre-opened directories") + return + } + } + + // WASI preopens are not suppose to have a null-terminator, + // but some implementation do anyway, *and* report it in + // name length + while name[name.length - 1] == '\0' { + name.length -= 1 + } + + names->push(.{fd, name}) + } + } + + resolve_fd_and_path :: (path: str) -> (fd: u32, out: str) { + for preopened_dirs { + if path->starts_with(it.second) { + fd = it.first + + len := it.second.length + if it.second != "/" { + len += 1 + } + + out = str.advance(path, len) + + if 
out->empty() do out = "." + return + } + } + + fd = 3 + out = path + if out->empty() { + out = "." + } + + return + } +} + + #if !#defined(runtime.vars.WASIX) { __chdir :: (path: str) => false __getcwd :: () => "" diff --git a/core/runtime/platform/wasi/wasix_defs.onyx b/core/runtime/platform/wasi/wasix_defs.onyx index e839238e6..4d4d802c2 100644 --- a/core/runtime/platform/wasi/wasix_defs.onyx +++ b/core/runtime/platform/wasi/wasix_defs.onyx @@ -175,5 +175,5 @@ SockOption :: enum { sock_recv_from :: (fd: FileDescriptor, data: &IOVec, data_len: Size, flags: RIFlags, written: &Size, oflags: &ROFlags, addr: &AddrPort) -> Errno --- sock_send_to :: (fd: FileDescriptor, data: &IOVec, data_len: Size, flags: SIFlags, addr: &AddrPort, sent: &Size) -> Errno --- - resolve :: (host: cstr, port: u16, addr: [&] Addr, addr_len: Size, retaddrs: &Size) -> Errno --- -} \ No newline at end of file + resolve :: (host: str, port: u16, addr: [&] Addr, addr_len: Size, retaddrs: &Size) -> Errno --- +} diff --git a/core/runtime/platform/wasi/wasix_net.onyx b/core/runtime/platform/wasi/wasix_net.onyx index 230a1a7d5..40afa67dc 100644 --- a/core/runtime/platform/wasi/wasix_net.onyx +++ b/core/runtime/platform/wasi/wasix_net.onyx @@ -8,7 +8,8 @@ use core.net { SocketOption, SocketAddress, SocketShutdown, - SocketStatus + SocketStatus, + ResolveResult } use core {Result, string, io} @@ -54,7 +55,7 @@ __net_sock_opt_flag :: (s: SocketData, sockopt: SocketOption, flag: bool) -> boo opt := switch sockopt { case .Broadcast => wasi.SockOption.Broadcast; case .ReuseAddress => wasi.SockOption.ReuseAddr; - case #default => wasi.SockOption.Noop; + case _ => wasi.SockOption.Noop; }; return wasi.sock_set_opt_flag(s, opt, flag) == .Success; } @@ -84,7 +85,7 @@ __net_sock_accept :: (s: SocketData, out: &SocketAddress) -> Result(SocketData, switch wasi.sock_accept_v2(s, 0, &new_sock, &addr) { case .Success --- case .Again do return .{ Err = .NoData }; - case #default do return .{ Err = .OperationFailed }; 
+ case _ do return .{ Err = .OperationFailed }; } wasi_addr_to_socket_address(&addr, out); @@ -100,10 +101,9 @@ __net_sock_connect :: (s: SocketData, addr: &SocketAddress) -> io.Error { case .Success => .None; case .ConnRefused, .ConnAborted, - .ConnReset - => .ConnectFailed; + .ConnReset => .ConnectFailed; - case #default => .OperationFailed; + case _ => .OperationFailed; }; } @@ -119,7 +119,7 @@ __net_sock_recv_from :: (s: SocketData, buf: [] u8, out: &SocketAddress) -> Resu return Result(i32, io.Error).{ Ok = out_len }; }; case .Again => .{ Err = .NoData }; - case #default => .{ Err = .EOF }; + case _ => .{ Err = .EOF }; }; } @@ -132,7 +132,7 @@ __net_sock_send_to :: (s: SocketData, buf: [] u8, addr: &SocketAddress) -> Resul return switch wasi.sock_send_to(s, &vec, 1, 0, &target, &out_len) { case .Success => Result(i32, io.Error).{ Ok = out_len }; case .Again => Result(i32, io.Error).{ Err = .NoData }; - case #default => Result(i32, io.Error).{ Err = .EOF }; + case _ => Result(i32, io.Error).{ Err = .EOF }; }; } @@ -144,7 +144,7 @@ __net_sock_recv :: (s: SocketData, buf: [] u8) -> Result(i32, io.Error) { return switch wasi.sock_recv(s, &vec, 1, 0, &out_len, &out_flags) { case .Success => Result(i32, io.Error).{ Ok = out_len }; case .Again => Result(i32, io.Error).{ Err = .NoData }; - case #default => Result(i32, io.Error).{ Err = .EOF }; + case _ => Result(i32, io.Error).{ Err = .EOF }; }; } @@ -155,7 +155,7 @@ __net_sock_send :: (s: SocketData, buf: [] u8) -> Result(i32, io.Error) { return switch wasi.sock_send(s, &vec, 1, 0, &out_len) { case .Success => Result(i32, io.Error).{ Ok = out_len }; case .Again => Result(i32, io.Error).{ Err = .NoData }; - case #default => Result(i32, io.Error).{ Err = .EOF }; + case _ => Result(i32, io.Error).{ Err = .EOF }; }; } @@ -168,7 +168,7 @@ __net_sock_shutdown :: (s: SocketData, how: SocketShutdown) -> io.Error { return switch wasi.sock_shutdown(s, sd) { case .Success => .None; - case #default => .OperationFailed; + case _ 
=> .OperationFailed; }; } @@ -176,7 +176,28 @@ __net_sock_close :: (s: SocketData) -> void { wasi.fd_close(s); } -__net_resolve :: (host: str, port: u16, out_addrs: [] SocketAddress) -> i32 { +__net_resolve :: (host: str, port: u16, out_addrs: [] ResolveResult) -> i32 { + use addrbuf := make([] wasi.Addr, out_addrs.count); + + results := 0; + err := wasi.resolve(host, port, addrbuf.data, addrbuf.count, &results); + if err != .Success do return 0; + + for &in_addr, index in addrbuf[0..results] { + out_addr: ResolveResult; + out_addr.family = switch in_addr { + case .Ipv4 => .Inet + case .Ipv6 => .Inet6 + case .Unix => .Unix + case _ => .Unknown + }; + + wasi_addr_no_port_to_socket_address(in_addr, port, &out_addr.addr); + + out_addrs[index] = out_addr; + } + + return results; } @@ -218,4 +239,23 @@ __net_resolve :: (host: str, port: u16, out_addrs: [] SocketAddress) -> i32 { } } } + + wasi_addr_no_port_to_socket_address :: (input: &wasi.Addr, port: u16, out: &SocketAddress) { + switch *input { + case .Unspec --- + + case .Ipv4 as &inet { + *out = .{ Inet = .{ port, *cast(&u32) inet } }; + } + + case .Ipv6 as &inet { + *out = .{ Inet6 = .{ port, *cast(&[16] u8) inet } }; + } + + case .Unix as &unix { + p: [256] u8 = *cast(&[256] u8) unix; + *out = .{ Unix = p }; + } + } + } } diff --git a/core/string/char_utils.onyx b/core/string/char_utils.onyx index d6b3c2cfb..3e11b198d 100644 --- a/core/string/char_utils.onyx +++ b/core/string/char_utils.onyx @@ -1,40 +1,38 @@ package core.string -#inject u8 { - is_alpha :: (c: u8) -> bool { - return (c >= #char "A" && c <= #char "Z") || - (c >= #char "a" && c <= #char "z"); - } - - is_num :: (c: u8) -> bool { - return (c >= #char "0" && c <= #char "9"); - } - - is_lower :: (c: u8) -> bool { - return (c >= #char "a" && c <= #char "z"); - } - - is_upper :: (c: u8) -> bool { - return (c >= #char "A" && c <= #char "Z"); - } - - is_alphanum :: (c: u8) -> bool { - return c->is_alpha() || c->is_num(); - } - - is_whitespace :: (c: u8) -> 
bool { - return c == #char " " || c == #char "\n" || c == #char "\t" || c == #char "\v"; - } - - to_upper :: (c: u8) -> u8 { - if c >= 'a' && c <= 'z' do return c - 32; - return c; - } - - to_lower :: (c: u8) -> u8 { - if c >= 'A' && c <= 'Z' do return c + 32; - return c; - } +u8.is_alpha :: (c: u8) -> bool { + return (c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z'); +} + +u8.is_num :: (c: u8) -> bool { + return (c >= '0' && c <= '9'); +} + +u8.is_lower :: (c: u8) -> bool { + return (c >= 'a' && c <= 'z'); +} + +u8.is_upper :: (c: u8) -> bool { + return (c >= 'A' && c <= 'Z'); +} + +u8.is_alphanum :: (c: u8) -> bool { + return c->is_alpha() || c->is_num(); +} + +u8.is_whitespace :: (c: u8) -> bool { + return c == ' ' || c == '\n' || c == '\t' || c == '\v'; +} + +u8.to_upper :: (c: u8) -> u8 { + if c >= 'a' && c <= 'z' do return c - 32; + return c; +} + +u8.to_lower :: (c: u8) -> u8 { + if c >= 'A' && c <= 'Z' do return c + 32; + return c; } diff --git a/core/string/string.onyx b/core/string/string.onyx index 565f8f4b5..d583f0854 100644 --- a/core/string/string.onyx +++ b/core/string/string.onyx @@ -2,8 +2,9 @@ package core.string use core {package, *} -#doc "Generic procedure for turning something into a string." -as_str :: #match -> str {} + +/// Generic procedure for turning something into a string. 
+str.as_str :: #match -> str {} #local HasAsStrMethod :: interface (T: type_expr) { t as T; @@ -12,55 +13,53 @@ as_str :: #match -> str {} } #overload #order 10000 -as_str :: macro (t: $T/HasAsStrMethod) -> str { +str.as_str :: macro (t: $T/HasAsStrMethod) -> str { return T.as_str(t); } -free :: #match #locked { +str.free :: #match #locked { (s: str, allocator := context.allocator) { raw_free(allocator, s.data); }, - (s: &dyn_str) { core.array.free(s); }, + (s: &dyn_str) { s->free(); }, } -alloc_copy :: (original: str, allocator := context.allocator) -> str { +str.copy :: (original: str, allocator := context.allocator) -> str { if original.count == 0 do return .{}; new_str : str; new_str.data = raw_alloc(allocator, sizeof u8 * original.count); new_str.count = original.count; - copy(original, new_str); + str.copy_into(original, new_str); return new_str; } -temp_copy :: (original: str) -> str { +str.temp_copy :: (original: str) -> str { if original.count == 0 do return .{}; new_str := make([] u8, original.count, allocator=context.temp_allocator); - copy(original, new_str); + str.copy_into(original, new_str); return new_str; } -copy :: (orig: str, dest: str) { +str.copy_into :: (orig: str, dest: str) { len := orig.count; if dest.count < len do len = dest.count; memory.copy(dest.data, orig.data, len); } -#match as_str from_cstr +#overload str.as_str :: from_cstr from_cstr :: (s: cstr) -> str { return .{ data = s, count = length(s) }; } -#doc """ - Converts a `str` into a `cstr` by copying the memory of the string to the stack, - with an additional byte at the end that is set to 0, to correctly for a C-string. - - This only needs to be done when the string does not contain a `\0` byte on the end, - which is most of the time. If you know that the string has a `\0` byte, simply use `s.data`. 
-""" -to_cstr_on_stack :: macro (s_: str) -> cstr { +/// Converts a `str` into a `cstr` by copying the memory of the string to the stack, +/// with an additional byte at the end that is set to 0, to correctly for a C-string. +/// +/// This only needs to be done when the string does not contain a `\0` byte on the end, +/// which is most of the time. If you know that the string has a `\0` byte, simply use `s.data`. +str.to_cstr_on_stack :: macro (s_: str) -> cstr { use core.alloc use core.memory @@ -73,6 +72,7 @@ to_cstr_on_stack :: macro (s_: str) -> cstr { } +// Does this still need to exist? length :: #match #local {} #overload @@ -82,7 +82,7 @@ length :: (s: str) => s.count; length :: (s: cstr) -> u32 { len := 0; c: [&] u8 = s; - while c[0] != #char "\0" { + while c[0] != '\0' { len += 1; c += 1; } @@ -91,12 +91,12 @@ length :: (s: cstr) -> u32 { } -concat :: #match #local {} +str.concat :: #match #local {} #overload -concat :: (s1: str, s2: str, allocator := context.allocator) -> str { - len1 := length(s1); - len2 := length(s2); +str.concat :: (s1: str, s2: str, allocator := context.allocator) -> str { + len1 := s1.count; + len2 := s2.count; data := cast([&] u8) raw_alloc(allocator, len1 + len2); memory.copy(data, s1.data, len1); @@ -111,7 +111,7 @@ concat :: (s1: str, s2: str, allocator := context.allocator) -> str { // is only due to the languages constraints however. This // could easily be changed since there is no ambiguity. #overload -concat :: (allocator: Allocator, strings: ..str) -> str { +str.concat :: (allocator: Allocator, strings: ..str) -> str { total_length := 0; for s in strings do total_length += s.count; @@ -126,7 +126,7 @@ concat :: (allocator: Allocator, strings: ..str) -> str { } #overload -concat :: (buffer: [] u8, strings: ..str) -> str { +str.concat :: (buffer: [] u8, strings: ..str) -> str { total_copied := 0; for s in strings { // Should never greater than, but better safe than sorry. 
@@ -141,7 +141,7 @@ concat :: (buffer: [] u8, strings: ..str) -> str { } #overload -concat :: (into: &[..] u8, strings: ..str) -> str { +str.concat :: (into: &dyn_str, strings: ..str) -> str { for s in strings { array.ensure_capacity(into, into.count + s.count); memory.copy(into.data + into.count, s.data, s.count); @@ -151,7 +151,7 @@ concat :: (into: &[..] u8, strings: ..str) -> str { } #overload -concat :: (into: &[..] u8, chars: ..u8) -> str { +str.concat :: (into: &dyn_str, chars: ..u8) -> str { array.ensure_capacity(into, into.count + chars.count); for c in chars { memory.copy(into.data + into.count, cast(rawptr) &.[c], 1); @@ -161,16 +161,16 @@ concat :: (into: &[..] u8, chars: ..u8) -> str { } -contains :: #match #local {} +str.contains :: #match #local {} #overload -contains :: (s: str, c: u8) -> bool { +str.contains :: (s: str, c: u8) -> bool { for ch in s do if ch == c do return true; return false; } #overload -contains :: (s: str, substr: str) -> bool { +str.contains :: (s: str, substr: str) -> bool { while i := 0; i < s.count { defer i += 1; @@ -189,7 +189,7 @@ contains :: (s: str, substr: str) -> bool { } -join :: (strs: [] str, sep: str, allocator := context.allocator) -> str { +str.join :: (strs: [] str, sep: str, allocator := context.allocator) -> str { if strs.count == 0 do return ""; len_sum := array.fold(strs, 0, [v, acc](acc + v.length)); @@ -213,7 +213,7 @@ join :: (strs: [] str, sep: str, allocator := context.allocator) -> str { // @TODO // Check this for edge cases and other bugs. I'm not confident // it will work perfectly yet. 
- brendanfh 2020/12/21 -compare :: (str1: str, str2: str) -> i32 { +str.compare :: (str1: str, str2: str) -> i32 { i := 0; while i < str1.count && i < str2.count { if str1[i] == str2[i] do i += 1; @@ -224,7 +224,7 @@ compare :: (str1: str, str2: str) -> i32 { return ~~(str1[i] - str2[i]); } -equal :: (str1: str, str2: str) -> bool { +str.equal :: (str1: str, str2: str) -> bool { if str1.count != str2.count do return false; while i := 0; i < str1.count { if str1[i] != str2[i] do return false; @@ -233,7 +233,7 @@ equal :: (str1: str, str2: str) -> bool { return true; } -equal_insensitive :: (s1, s2: str) -> bool { +str.equal_insensitive :: (s1, s2: str) -> bool { if s1.count != s2.count do return false; while i := 0; i < s1.count { defer i += 1; @@ -241,17 +241,17 @@ equal_insensitive :: (s1, s2: str) -> bool { c1 := s1[i]; c2 := s2[i]; - if c1 >= #char "A" && c1 <= #char "Z" do c1 += 32; - if c2 >= #char "A" && c2 <= #char "Z" do c2 += 32; + if c1 >= 'A' && c1 <= 'Z' do c1 += 32; + if c2 >= 'A' && c2 <= 'Z' do c2 += 32; if c1 != c2 do return false; } return true; } -#operator == equal +#operator == str.equal #operator != macro (s1: str, s2: str) => !(s1 == s2); -starts_with :: (s: str, prefix: str) -> bool { +str.starts_with :: (s: str, prefix: str) -> bool { if s.count < prefix.count do return false; while i := 0; i < prefix.count { if s[i] != prefix[i] do return false; @@ -260,7 +260,7 @@ starts_with :: (s: str, prefix: str) -> bool { return true; } -ends_with :: (s: str, suffix: str) -> bool { +str.ends_with :: (s: str, suffix: str) -> bool { if s.count < suffix.count do return false; while i := 0; i < suffix.count { if s[s.count - 1 - i] != suffix[suffix.count - 1 - i] do return false; @@ -269,16 +269,16 @@ ends_with :: (s: str, suffix: str) -> bool { return true; } -empty :: (s: str) => s.count == 0 || s.data == null; +str.empty :: (s: str) => s.count == 0 || s.data == null; is_empty :: (s: str) -> bool #deprecated "Use 'string.empty' instead." 
{ - s.count == 0 || s.data == null; + return s.count == 0 || s.data == null; } -index_of :: #match #local {} +str.index_of :: #match #local {} #overload -index_of :: (s: str, c: u8) -> i32 { +str.index_of :: (s: str, c: u8) -> i32 { for s.count { if s[it] == c do return it; } @@ -286,7 +286,7 @@ index_of :: (s: str, c: u8) -> i32 { } #overload -index_of :: (s: str, substr: str) -> i32 { +str.index_of :: (s: str, substr: str) -> i32 { while i := 0; i < s.count { defer i += 1; @@ -304,7 +304,7 @@ index_of :: (s: str, substr: str) -> i32 { return -1; } -last_index_of :: (s: str, c: u8) -> i32 { +str.last_index_of :: (s: str, c: u8) -> i32 { for range.{s.count-1, 0, -1} { if s[it] == c do return it; } @@ -312,64 +312,64 @@ last_index_of :: (s: str, c: u8) -> i32 { } -strip_whitespace :: #match #local {} +str.strip_whitespace :: #match #local {} #overload -strip_whitespace :: (s: &str) { +str.strip_whitespace :: (s: &str) { strip_leading_whitespace(s); strip_trailing_whitespace(s); } #overload -strip_whitespace :: (s: str) => +str.strip_whitespace :: (s: str) => s |> strip_leading_whitespace() |> strip_trailing_whitespace() -strip_leading_whitespace :: #match #local {} +str.strip_leading_whitespace :: #match #local {} #overload -strip_leading_whitespace :: (s: &str) { +str.strip_leading_whitespace :: (s: &str) { while s.count > 0 do switch s.data[0] { - case #char " ", #char "\t", #char "\n", #char "\r" { + case ' ', '\t', '\n', '\r' { s.data += 1; s.count -= 1; } - case #default do return; + case _ do return; } } #overload -strip_leading_whitespace :: (s: str) -> str { +str.strip_leading_whitespace :: (s: str) -> str { out := s; strip_leading_whitespace(&out); return out; } -strip_trailing_whitespace :: #match #local {} +str.strip_trailing_whitespace :: #match #local {} #overload -strip_trailing_whitespace :: (s: &str) { +str.strip_trailing_whitespace :: (s: &str) { while s.count >= 1 do switch s.data[s.count - 1] { - case #char " ", #char "\t", #char "\n", #char "\r" 
{ + case ' ', '\t', '\n', '\r' { s.count -= 1; } - case #default do return; + case _ do return; } } #overload -strip_trailing_whitespace :: (s: str) -> str { +str.strip_trailing_whitespace :: (s: str) -> str { out := s; strip_trailing_whitespace(&out); return out; } -to_uppercase :: (s: str) -> str { +str.to_uppercase :: (s: str) -> str { for& ch in s { - if *ch >= #char "a" && *ch <= #char "z" { + if *ch >= 'a' && *ch <= 'z' { *ch -= 32; } } @@ -377,9 +377,9 @@ to_uppercase :: (s: str) -> str { return s; } -to_lowercase :: (s: str) -> str { +str.to_lowercase :: (s: str) -> str { for& ch in s { - if *ch >= #char "A" && *ch <= #char "Z" { + if *ch >= 'A' && *ch <= 'Z' { *ch += 32; } } @@ -388,10 +388,10 @@ to_lowercase :: (s: str) -> str { } -trim_start :: #match #local {} +str.trim_start :: #match #local {} #overload -trim_start :: (s: &str, char: u8) { +str.trim_start :: (s: &str, char: u8) { while s.data[0] == char { s.data += 1; s.count -= 1; @@ -399,34 +399,34 @@ trim_start :: (s: &str, char: u8) { } #overload -trim_start :: (s: str, char: u8) -> str { +str.trim_start :: (s: str, char: u8) -> str { out := s; trim_start(&out, char); return out; } -trim_end :: #match #local {} +str.trim_end :: #match #local {} #overload -trim_end :: (s: &str, char: u8) { +str.trim_end :: (s: &str, char: u8) { while s.data[s.count - 1] == char { s.count -= 1; } } #overload -trim_end :: (s: str, char: u8) -> str { +str.trim_end :: (s: str, char: u8) -> str { out := s; trim_end(&out, char); return out; } -advance :: #match #local {} +str.advance :: #match #local {} #overload -advance :: (s: &str, chars := 1) { +str.advance :: (s: &str, chars := 1) { chars = math.min(chars, s.count); s.data += chars; @@ -434,7 +434,7 @@ advance :: (s: &str, chars := 1) { } #overload -advance :: (s: str, chars := 1) -> str { +str.advance :: (s: str, chars := 1) -> str { chars = math.min(chars, s.count); out := s; @@ -444,16 +444,16 @@ advance :: (s: str, chars := 1) -> str { return out; } -replace :: 
(s: str, to_replace: u8, replace_with: u8) { +str.replace :: (s: str, to_replace: u8, replace_with: u8) { for &c in s { if *c == to_replace do *c = replace_with; } } -read_until :: #match #local {} +str.read_until :: #match #local {} #overload -read_until :: (s: &str, upto: u8, skip := 0) -> str { +str.read_until :: (s: &str, upto: u8, skip := 0) -> str { if s.count == 0 do return ""; out : str; @@ -477,7 +477,7 @@ read_until :: (s: &str, upto: u8, skip := 0) -> str { } #overload -read_until :: (s: &str, upto: str, skip := 0) -> str { +str.read_until :: (s: &str, upto: str, skip := 0) -> str { if s.count == 0 do return ""; out := str.{ data = s.data }; @@ -518,7 +518,7 @@ read_until :: (s: &str, upto: str, skip := 0) -> str { return out; } -read_alphanum :: (s: &str) -> str { +str.read_alphanum :: (s: &str) -> str { if s.count == 0 do return ""; out : str; @@ -527,13 +527,13 @@ read_alphanum :: (s: &str) -> str { for ch in *s { switch ch { - case #char "a" .. #char "z", - #char "A" .. #char "Z", - #char "0" .. #char "9" { + case 'a' ..= 'z', + 'A' ..= 'Z', + '0' ..= '9' { out.count += 1; } - case #default { + case _ { break break; } } @@ -545,7 +545,7 @@ read_alphanum :: (s: &str) -> str { return out; } -read_until_any :: (s: &str, skip: u32, uptos: ..u8) -> str { +str.read_until_any :: (s: &str, skip: u32, uptos: ..u8) -> str { if s.count == 0 do return ""; out : str; @@ -570,17 +570,17 @@ read_until_any :: (s: &str, skip: u32, uptos: ..u8) -> str { return out; } -advance_line :: (s: &str) { +str.advance_line :: (s: &str) { if s.count == 0 do return; adv := 0; - while s.data[adv] != #char "\n" && adv <= s.count do adv += 1; + while s.data[adv] != '\n' && adv <= s.count do adv += 1; s.data += adv + 1; s.count -= adv + 1; } -split :: (s: str, delim: u8, allocator := context.allocator) -> []str { +str.split :: (s: str, delim: u8, allocator := context.allocator) -> []str { delim_count := 0; for i in 0 .. 
s.count do if s[i] == delim do delim_count += 1; @@ -602,38 +602,38 @@ split :: (s: str, delim: u8, allocator := context.allocator) -> []str { return strarr[0 .. delim_count + 1]; } -split_iter :: #match #local {} +str.split_iter :: #match #local {} #overload -split_iter :: (s: str, delim: u8) -> Iterator(str) { +str.split_iter :: (s: str, delim: u8) -> Iterator(str) { return iter.generator( &.{ s = s, delim = delim }, - (ctx: &$T) -> (str, bool) { + (ctx: &$T) -> ? str { if string.empty(ctx.s) { - return "", false; + return .None; } ret: str; ret, ctx.s = bisect(ctx.s, ctx.delim); - return ret, true; + return ret; } ); } #overload -split_iter :: (s: str, delim: str) -> Iterator(str) { +str.split_iter :: (s: str, delim: str) -> Iterator(str) { return iter.generator( &.{ s = s, delim = delim }, - (ctx: &$T) -> (str, bool) { + (ctx: &$T) -> ? str { if string.empty(ctx.s) { - return "", false; + return .None; } ret: str; ret, ctx.s = bisect(ctx.s, ctx.delim); - return ret, true; + return ret; } ); } @@ -645,10 +645,10 @@ split_iter :: (s: str, delim: str) -> Iterator(str) { // character occurs at the very beginning or end of // the string, or if it does not occur at all. // -bisect :: #match #local {} +str.bisect :: #match #local {} #overload -bisect :: (s: str, c: u8) -> (str, str) { +str.bisect :: (s: str, c: u8) -> (str, str) { index := index_of(s, c); if index == -1 { return s, ""; @@ -658,7 +658,7 @@ bisect :: (s: str, c: u8) -> (str, str) { } #overload -bisect :: (s: str, substr: str) -> (str, str) { +str.bisect :: (s: str, substr: str) -> (str, str) { index := index_of(s, substr); if index == -1 { return s, ""; @@ -667,21 +667,46 @@ bisect :: (s: str, substr: str) -> (str, str) { return s[0 .. index], s[index+substr.length .. s.length]; } + +str.tokenize :: (s: str, charset: [] u8) => { + Token :: struct { + text: str + is_separator: bool + } + + next :: (use _: &$C) -> ? 
Token { + if start >= s.length { + return .None + } + + i := start + is_separator := Slice.some(charset, [a](a == s[i])) + while i < s.length { + c := s[i] + sep := Slice.some(charset, [a](a == c)) + + if sep != is_separator do break + + i += 1 + } + + defer start = i + return Token.{ s[start .. i], is_separator } + } + + return Iterator.generator(&.{ s = s, start = 0, charset = charset }, next) +} + // // Used by dyn_str // -to_dyn_str :: (x: str, allocator := context.allocator) -> dyn_str { +str.to_dyn_str :: (x: str, allocator := context.allocator) -> dyn_str { use core.array return array.make(x, allocator); } -delete :: macro (x: &dyn_str, idx: u32) -> u8 { - use core.array - return array.delete(x, idx); -} - -append :: #match { +dyn_str.append :: #match { macro (x: &dyn_str, other: str) { use core.array array.concat(x, other); @@ -693,24 +718,56 @@ append :: #match { }, } -clear :: macro (x: &dyn_str) { - use core.array - array.clear(x); -} - -retreat :: macro (x: &dyn_str, chars := 1) { +dyn_str.retreat :: macro (x: &dyn_str, chars := 1) { use core.array array.pop(x, chars); } -insert :: #match #locked { - macro (x: &dyn_str, idx: u32, new_str: str) -> bool { - use core.array - return array.insert(x, idx, new_str); - }, - macro (x: &dyn_str, idx: u32, ch: u8) -> bool { - use core.array - return array.insert(x, idx, ch); - } -} +// These definitions exist to allow you to still say `string.XYZ`, instead of `str.XYZ` +// That being said, `str.XYZ` should be the preferred way for the future. 
+ +as_str :: str.as_str +free :: str.free +copy :: str.copy +temp_copy :: str.temp_copy +copy_into :: str.copy_into +to_cstr_on_stack :: str.to_cstr_on_stack +concat :: str.concat +join :: str.join +compare :: str.compare +contains :: str.contains +equal :: str.equal +equal_insensitive :: str.equal_insensitive +starts_with :: str.starts_with +ends_with :: str.ends_with +empty :: str.empty +index_of :: str.index_of +last_index_of :: str.last_index_of +strip_whitespace :: str.strip_whitespace +strip_leading_whitespace :: str.strip_leading_whitespace +strip_trailing_whitespace :: str.strip_trailing_whitespace +to_lowercase :: str.to_lowercase +to_uppercase :: str.to_uppercase +trim_start :: str.trim_start +trim_end :: str.trim_end +advance :: str.advance +replace :: str.replace +read_until :: str.read_until +read_alphanum :: str.read_alphanum +read_until_any :: str.read_until_any +advance_line :: str.advance_line +split :: str.split +split_iter :: str.split_iter +bisect :: str.bisect + +to_dyn_str :: str.to_dyn_str +delete :: dyn_str.delete +append :: dyn_str.append +clear :: dyn_str.clear +retreat :: dyn_str.retreat +insert :: dyn_str.insert + +// DEPRECATED: Use `string.copy` instead +alloc_copy :: str.copy + diff --git a/core/string/string_pool.onyx b/core/string/string_pool.onyx index 3822ed315..b7f7eb13f 100644 --- a/core/string/string_pool.onyx +++ b/core/string/string_pool.onyx @@ -4,39 +4,35 @@ use core.alloc use core.alloc.arena use core.memory -// -// Many times, storing strings is annoying because you need -// to keep the data alive, while moving pointers around and -// changing them. -// -// To remedy this, a StringPool is a simple wrapper around -// an arena allocator that enables you to quickly copy a -// string to the pool. From there, you can use the string -// until the pool is cleared or freed. -// +/// +/// Many times, storing strings is annoying because you need +/// to keep the data alive, while moving pointers around and +/// changing them. 
+/// +/// To remedy this, a StringPool is a simple wrapper around +/// an arena allocator that enables you to quickly copy a +/// string to the pool. From there, you can use the string +/// until the pool is cleared or freed. +/// StringPool :: struct { arena: arena.Arena; } -#inject StringPool { - add :: pool_add; - flush :: pool_flush; - free :: pool_free; -} +StringPool.make :: pool_make +StringPool.add :: pool_add; +StringPool.flush :: pool_flush; +StringPool.free :: pool_free; -// -// Creates a StringPool capable of storing a string of at -// most `maximum_string_length` bytes. +/// +/// Creates a StringPool. pool_make :: (maximum_string_length := 16384, allocator := context.allocator) => StringPool.{ arena.make(allocator, maximum_string_length) } -// -// Copies a string into the pool, returning the copied string. +/// +/// Copies a string into the pool, returning the copied string. pool_add :: (sp: &StringPool, s: str) -> str { - if s.count > sp.arena.arena_size do return ""; - allocator := alloc.as_allocator(&sp.arena); new_str := make(str, s.count, allocator); @@ -44,14 +40,14 @@ pool_add :: (sp: &StringPool, s: str) -> str { return new_str; } -// -// Clears all entries in the pool. +/// +/// Clears all entries in the pool. pool_flush :: (sp: &StringPool) { arena.clear(&sp.arena); } -// -// Completely frees all memory in the pool. +/// +/// Completely frees all memory in the pool. pool_free :: (sp: &StringPool) { arena.free(&sp.arena); } diff --git a/core/sync/barrier.onyx b/core/sync/barrier.onyx index d64115fc9..77d42cd84 100644 --- a/core/sync/barrier.onyx +++ b/core/sync/barrier.onyx @@ -13,10 +13,8 @@ package core.sync // continue processing. // -#doc """ - Represents a generational barrier, so the same barrier - can be used safely multiple times. -""" +/// Represents a generational barrier, so the same barrier +/// can be used safely multiple times. 
Barrier :: struct { mutex : Mutex; cond : Condition_Variable; @@ -27,7 +25,7 @@ Barrier :: struct { } -#doc "Initializes a new generational barrier with `thread_count` threads." +/// Initializes a new generational barrier with `thread_count` threads. barrier_init :: (b: &Barrier, thread_count: i32) { mutex_init(&b.mutex); condition_init(&b.cond); @@ -38,16 +36,14 @@ barrier_init :: (b: &Barrier, thread_count: i32) { } -#doc "Destroys a generational barrier." +/// Destroys a generational barrier. barrier_destroy :: (b: &Barrier) { mutex_destroy(&b.mutex); condition_destroy(&b.cond); } -#doc """ - Signals that a thread has reached the barrier. - The last thread to reach the barrier will wake up all other threads. -""" +/// Signals that a thread has reached the barrier. +/// The last thread to reach the barrier will wake up all other threads. barrier_wait :: (b: &Barrier) { mutex_lock(&b.mutex); defer mutex_unlock(&b.mutex); diff --git a/core/sync/channel.onyx b/core/sync/channel.onyx new file mode 100644 index 000000000..5822f89a2 --- /dev/null +++ b/core/sync/channel.onyx @@ -0,0 +1,65 @@ +package core.sync + +use core.iter +use core.array + +Channel :: struct (T: type_expr) { + _buf: [..] T; + _mutex: Mutex; + _condvar: Condition_Variable; + + _is_open: bool; +} + +Channel.make :: ($T: type_expr, allocator := context.allocator) -> (chan: Channel(T)) { + chan._buf = make([..] T, allocator); + mutex_init(&chan._mutex); + condition_init(&chan._condvar); + chan._is_open = true; + return; +} + +Channel.close :: (chan: &Channel) { + chan._is_open = false; + condition_broadcast(&chan._condvar); +} + +Channel.send :: (chan: &Channel, msg: chan.T) { + if !chan._is_open do return; + + critical_section(&chan._mutex) { + array.push(&chan._buf, msg); + condition_broadcast(&chan._condvar); + } +} + +Channel.poll :: (chan: &Channel) -> bool { + return chan._buf.length > 0; +} + +Channel.recv :: (chan: &Channel) -> ? 
chan.T { + scoped_mutex(&chan._mutex); + + while chan._buf.length == 0 && chan._is_open { + condition_wait(&chan._condvar, &chan._mutex); + } + + if chan._buf.length == 0 { + return .None; + } + + res := chan._buf[0]; + array.delete(&chan._buf, 0); + + return res; +} + +Channel.as_iter :: (chan: &Channel) -> Iterator(chan.T) { + return iter.generator( + &.{chan = chan}, + ctx => { + return ctx.chan->recv(); + } + ); +} + diff --git a/core/sync/condition_variable.onyx b/core/sync/condition_variable.onyx index d53f00c8f..5c7bc12c4 100644 --- a/core/sync/condition_variable.onyx +++ b/core/sync/condition_variable.onyx @@ -2,22 +2,20 @@ package core.sync // TODO: Free the semaphores after they are used. -#doc """ - A condition variable is used to implement a queue of threads - waiting for a condition to be true. Each thread joins the queue - using `condition_wait`. Then, another thread can signal that - the condition has changed and can "wake up" the first thread in - the queue using `condition_signal`. Alternatively, all threads - can be woken up using `condition_broadcast`. - - Condition variables are generally used to prevent spin checking - a condition and waiting for it to change. Instead, the thread - joins a wait-queue, and leave it up to another thread to wake - it up to continue processing. However sadly, in WebAssembly this - is not possible because with the atomic_wait and atomic_notify - instructions, which currently are not supported by any runtime - outside of the browser. -""" +/// A condition variable is used to implement a queue of threads +/// waiting for a condition to be true. Each thread joins the queue +/// using `condition_wait`. Then, another thread can signal that +/// the condition has changed and can "wake up" the first thread in +/// the queue using `condition_signal`. Alternatively, all threads +/// can be woken up using `condition_broadcast`. 
+/// +/// Condition variables are generally used to prevent spin checking +/// a condition and waiting for it to change. Instead, the thread +/// joins a wait-queue, and leave it up to another thread to wake +/// it up to continue processing. However sadly, in WebAssembly this +/// is not possible because with the atomic_wait and atomic_notify +/// instructions, which currently are not supported by any runtime +/// outside of the browser. Condition_Variable :: struct { Node :: struct { semaphore : Semaphore; @@ -28,24 +26,22 @@ Condition_Variable :: struct { queue: &Node; } -#doc "Initializes a new condition variable." +/// Initializes a new condition variable. condition_init :: (c: &Condition_Variable) { mutex_init(&c.mutex); c.queue = null; } -#doc "Destroys a condition variable." +/// Destroys a condition variable. condition_destroy :: (c: &Condition_Variable) { if c.queue != null do condition_broadcast(c); mutex_destroy(&c.mutex); } -#doc """ - Enters the thread in the wait-queue of the condition variable. - If `m` is not null, the mutex will first be released before - entering the queue, and then relocked before returning. -""" +/// Enters the thread in the wait-queue of the condition variable. +/// If `m` is not null, the mutex will first be released before +/// entering the queue, and then relocked before returning. condition_wait :: (c: &Condition_Variable, m: &Mutex) { node: Condition_Variable.Node; @@ -61,7 +57,7 @@ condition_wait :: (c: &Condition_Variable, m: &Mutex) { } -#doc "Wakes up one thread from the wait-queue." +/// Wakes up one thread from the wait-queue. condition_signal :: (c: &Condition_Variable) { scoped_mutex(&c.mutex); @@ -72,7 +68,7 @@ condition_signal :: (c: &Condition_Variable) { } -#doc "Wakes up all threads from the wait-queue." +/// Wakes up all threads from the wait-queue. 
condition_broadcast :: (c: &Condition_Variable) { scoped_mutex(&c.mutex); diff --git a/core/sync/mutex.onyx b/core/sync/mutex.onyx index 2c6dfc9be..4e88b5f59 100644 --- a/core/sync/mutex.onyx +++ b/core/sync/mutex.onyx @@ -5,51 +5,47 @@ use core use core.intrinsics.atomics {*} use core.thread { Thread_ID } -#doc """ - A mutex represents a resource that can only be held by one - thread at a time. It is used to create sections of code that - only one thread can be in at a time. - - Mutexes in WebAssembly are very cheap, because they simply - use the atomic_cmpxchg intrinsic to operate. This only uses - memory, so no real resource allocation is necessary. - - `lock` has two states: 0, and 1. - 0 means unlocked, 1 means locked - - To lock it: - Try to store 1 if the value was already 0. - Otherwise, if it was already 1, wait until it goes to 0. - - To unlock it: - Atomically set it to 0. - Notify at most 1 other thread about this change. -""" +/// A mutex represents a resource that can only be held by one +/// thread at a time. It is used to create sections of code that +/// only one thread can be in at a time. +/// +/// Mutexes in WebAssembly are very cheap, because they simply +/// use the atomic_cmpxchg intrinsic to operate. This only uses +/// memory, so no real resource allocation is necessary. +/// +/// `lock` has two states: 0, and 1. +/// 0 means unlocked, 1 means locked +/// +/// To lock it: +/// Try to store 1 if the value was already 0. +/// Otherwise, if it was already 1, wait until it goes to 0. +/// +/// To unlock it: +/// Atomically set it to 0. +/// Notify at most 1 other thread about this change. Mutex :: struct { lock : i32; owner : Thread_ID; } -#doc "Initializes a new mutex." +/// Initializes a new mutex. mutex_init :: (m: &Mutex) { m.lock = 0; m.owner = -1; } -#doc "Destroys a mutex." +/// Destroys a mutex. mutex_destroy :: (m: &Mutex) { m.lock = -1; m.owner = -1; } -#doc """ - Locks a mutex. 
If the mutex is currently held by another thread, - this function enters a spin loop until the mutex is unlocked. - In a JavaScript based implementation, the __atomic_wait intrinsic - is used to avoid having to spin loop. -""" +/// Locks a mutex. If the mutex is currently held by another thread, +/// this function enters a spin loop until the mutex is unlocked. +/// In a JavaScript based implementation, the __atomic_wait intrinsic +/// is used to avoid having to spin loop. mutex_lock :: (m: &Mutex) { while __atomic_cmpxchg(&m.lock, 0, 1) == 1 { if m.owner == context.thread_id do return; @@ -64,11 +60,9 @@ mutex_lock :: (m: &Mutex) { m.owner = context.thread_id; } -#doc """ - Unlocks a mutex, if the calling thread currently holds the mutex. - In a JavaScript based implementation, the __atomic_notify intrinsic - is used to wake up one waiting thread. -""" +/// Unlocks a mutex, if the calling thread currently holds the mutex. +/// In a JavaScript based implementation, the __atomic_notify intrinsic +/// is used to wake up one waiting thread. mutex_unlock :: (m: &Mutex) { if m.owner != context.thread_id do return; @@ -80,17 +74,15 @@ mutex_unlock :: (m: &Mutex) { } } -#doc """ - Helpful macro for making a particular block be protected by a macro. - - m: sync.Mutx; - sync.mutex_init(&m); - - { - sync.scoped_mutex(&m); - // Everything here is done by one thread at a time. - } -""" +/// Helpful macro for making a particular block be protected by a macro. +/// +/// m: sync.Mutx; +/// sync.mutex_init(&m); +/// +/// { +/// sync.scoped_mutex(&m); +/// // Everything here is done by one thread at a time. +/// } scoped_mutex :: macro (m: &Mutex) { ml :: mutex_lock mu :: mutex_unlock @@ -99,17 +91,15 @@ scoped_mutex :: macro (m: &Mutex) { defer mu(m); } -#doc """ - Abstracts the pattern decribed in scoped_mutex by automatically - calling scoped_mutex in the block of code given. 
- - m: sync.Mutx; - sync.mutex_init(&m); - - sync.critical_section(&m) { - // Everything here is done by one thread at a time. - } -""" +/// Abstracts the pattern decribed in scoped_mutex by automatically +/// calling scoped_mutex in the block of code given. +/// +/// m: sync.Mutx; +/// sync.mutex_init(&m); +/// +/// sync.critical_section(&m) { +/// // Everything here is done by one thread at a time. +/// } critical_section :: macro (m: &Mutex, body: Code) -> i32 { scoped_mutex :: scoped_mutex; scoped_mutex(m); diff --git a/core/sync/mutex_guard.onyx b/core/sync/mutex_guard.onyx new file mode 100644 index 000000000..dc89e2d19 --- /dev/null +++ b/core/sync/mutex_guard.onyx @@ -0,0 +1,50 @@ +package core.sync + +/// Represents a "guarded" value, i.e. one that is protected by a mutex. +/// +/// The only way to access the value inside is by using the `with` method and +/// passing in a code block that accepts a pointer to the value. This way, +/// there is no way to access the value without locking a mutex. (Unless of +/// course, you store the pointer somewhere else, but then you are just being +/// a bad citizen of the programming language ;) ). 
+MutexGuard :: struct (T: type_expr) { + _: [(sizeof T) + (sizeof Mutex)] u8; +} + +MutexGuard.make :: #match #local {} + +#overload +MutexGuard.make :: ($T: type_expr) -> MutexGuard(T) { + mg: _MutexGuard(T); + mutex_init(&mg.mutex); + return *cast(&MutexGuard(T)) &mg; +} + +#overload +MutexGuard.make :: (v: $T) -> MutexGuard(T) { + mg: _MutexGuard(T); + mutex_init(&mg.mutex); + mg.value = v; + return *cast(&MutexGuard(T)) &mg; +} + +MutexGuard.with :: macro (__guard: &MutexGuard, body: Code) -> u32 { + _unwrap_mutex_guard :: _unwrap_mutex_guard + + __unwrapped := _unwrap_mutex_guard(__guard); + sync.scoped_mutex(&__unwrapped.mutex); + #unquote body(&__unwrapped.value); + + return 0; +} + + +#local +_MutexGuard :: struct (T: type_expr) { + mutex: Mutex; + value: T; +} + +#local _unwrap_mutex_guard :: macro (m: &MutexGuard($T)) -> &_MutexGuard(T) { + return ~~m; +} diff --git a/core/sync/once.onyx b/core/sync/once.onyx index 1e7119518..ce270edaf 100644 --- a/core/sync/once.onyx +++ b/core/sync/once.onyx @@ -5,15 +5,15 @@ package core.sync // function only once. It is simply a flag with a mutex. // -#doc "Represents something will only happen once." +/// Represents something will only happen once. Once :: struct { done: bool; mutex: Mutex; } -#inject Once.exec :: #match #local {} +Once.exec :: #match #local {} -#doc "Run a function with no arguments once." +/// Run a function with no arguments once. #overload Once.exec :: (o: &Once, f: () -> $R) { scoped_mutex(&o.mutex); @@ -23,7 +23,7 @@ Once.exec :: (o: &Once, f: () -> $R) { f(); } -#doc "Run a function with one argument once." +/// Run a function with one argument once. 
#overload Once.exec :: (o: &Once, ctx: $Ctx, f: (Ctx) -> $R) { scoped_mutex(&o.mutex); diff --git a/core/sync/semaphore.onyx b/core/sync/semaphore.onyx index d389f57f0..3a60af490 100644 --- a/core/sync/semaphore.onyx +++ b/core/sync/semaphore.onyx @@ -3,29 +3,27 @@ package core.sync use runtime use core -#doc """ - A semaphore represents a counter that can only be incremented - and decremented by one thread at a time. "Waiting" on a semaphore - means decrementing the counter by 1 if it is greater than 0, otherwise - waiting until the counter is incremented. "Posting" on a semaphore - means incrementing the counter by a certain value, in turn releasing - other threads that might have been waiting for the value to change. - - Semaphores are generally used for controlling access to shared - resources. For a contrived example, say only 4 threads can use - a given network connection at a time. A semaphore would be created - with a value of 4. When a thread wants to use the network connection, - it would use `semaphore_wait` to obtain the resource, or wait if - the network is currently available. When it is done using the - network, it would call `semaphore_post` to release the resource, - allowing another thread to use it. -""" +/// A semaphore represents a counter that can only be incremented +/// and decremented by one thread at a time. "Waiting" on a semaphore +/// means decrementing the counter by 1 if it is greater than 0, otherwise +/// waiting until the counter is incremented. "Posting" on a semaphore +/// means incrementing the counter by a certain value, in turn releasing +/// other threads that might have been waiting for the value to change. +/// +/// Semaphores are generally used for controlling access to shared +/// resources. For a contrived example, say only 4 threads can use +/// a given network connection at a time. A semaphore would be created +/// with a value of 4. 
When a thread wants to use the network connection, +/// it would use `semaphore_wait` to obtain the resource, or wait if +/// the network is currently available. When it is done using the +/// network, it would call `semaphore_post` to release the resource, +/// allowing another thread to use it. Semaphore :: struct { mutex : Mutex; counter : i32; } -#doc "Initializes a semaphore with the specified value." +/// Initializes a semaphore with the specified value. semaphore_init :: (s: &Semaphore, value: i32) { s.counter = value; @@ -33,13 +31,13 @@ semaphore_init :: (s: &Semaphore, value: i32) { } -#doc "Destroys a semaphore." +/// Destroys a semaphore. semaphore_destroy :: (s: &Semaphore) { mutex_destroy(&s.mutex); } -#doc "Increment the counter in a semaphore by `count`." +/// Increment the counter in a semaphore by `count`. semaphore_post :: (s: &Semaphore, count := 1) { if count == 0 do return; @@ -54,7 +52,7 @@ semaphore_post :: (s: &Semaphore, count := 1) { } } -#doc "Waits until the thread is able to decrement one from the semaphore." +/// Waits until the thread is able to decrement one from the semaphore. semaphore_wait :: (s: &Semaphore) { while true { mutex_lock(&s.mutex); diff --git a/core/test/testing.onyx b/core/test/testing.onyx index 24c93dc5e..c683ecc9c 100644 --- a/core/test/testing.onyx +++ b/core/test/testing.onyx @@ -8,32 +8,29 @@ use runtime use core {printf} -#doc """ - Test tag. Use this to mark a function as a test. - - You can either use just the type name: - - @core.test.test - (t: &core.test.T) { - } - - Or you can specify a name using the full struct literal: - - @core.test.test.{"Important test name"} - (t: &core.test.T) { - } -""" +/// Test tag. Use this to mark a function as a test. 
+/// +/// You can either use just the type name: +/// +/// @core.test.test +/// (t: &core.test.T) { +/// } +/// +/// Or you can specify a name using the full struct literal: +/// +/// @core.test.test.{"Important test name"} +/// (t: &core.test.T) { +/// } test :: struct { name: str; } -#doc "Testing context" +/// Testing context T :: struct { current_test_case: &Test_Case; } -#inject T.assert :: (t: &T, cond: bool, name := "", site := #callsite) { t.current_test_case.assertions << .{ name, cond, site @@ -46,10 +43,8 @@ T.assert :: (t: &T, cond: bool, name := "", site := #callsite) { -#doc """ - Runs all test cases in the provide packages. - If no packages are provided, ALL package tests are run. -""" +/// Runs all test cases in the provide packages. +/// If no packages are provided, ALL package tests are run. run_tests :: (packages: [] package_id = .[], log := true) -> bool { ctx: T; diff --git a/core/threads/thread.onyx b/core/threads/thread.onyx index 079f20572..0ea65fbc9 100644 --- a/core/threads/thread.onyx +++ b/core/threads/thread.onyx @@ -11,25 +11,24 @@ use core.intrinsics.atomics {*} } -#doc "An id of a thread." +/// An id of a thread. Thread_ID :: #type i32 -#doc """ - Represents a thread. Currently, this is very simple; just the id - of the thread and whether or not it is alive. -""" +/// Represents a thread. Currently, this is very simple; just the id +/// of the thread and whether or not it is alive. Thread :: struct { id : Thread_ID; alive : bool; + + stack_base : rawptr + tls_base : rawptr } -#doc """ - Spawns a new thread using the runtime.__spawn_thread function. - The primary job of this function is to create the thread-local - storage and stack for the new thread, and pass those on. - Currently the stack size is not controllable, but that could - be remedied. -""" +/// Spawns a new thread using the runtime.__spawn_thread function. 
+/// The primary job of this function is to create the thread-local +/// storage and stack for the new thread, and pass those on. +/// Currently the stack size is not controllable, but that could +/// be remedied. spawn :: (t: &Thread, data: &$T, func: (&T) -> void) { sync.scoped_mutex(&thread_mutex); @@ -39,19 +38,18 @@ spawn :: (t: &Thread, data: &$T, func: (&T) -> void) { thread_map->put(t.id, t); - tls_base := raw_alloc(alloc.heap_allocator, __tls_size); - memory.set(tls_base, 0, __tls_size); + t.tls_base = raw_alloc(alloc.heap_allocator, __tls_size); + memory.set(t.tls_base, 0, __tls_size); - stack_base := raw_alloc(alloc.heap_allocator, 1 << 20); + t.stack_base = raw_alloc(alloc.heap_allocator, 1 << 20); + stack_top := memory.ptr_add(t.stack_base, 1 << 20); - runtime.platform.__spawn_thread(t.id, tls_base, stack_base, func, data); + runtime.platform.__spawn_thread(t.id, t.tls_base, stack_top, func, data); } -#doc """ - Waits for a thread to finish before returning. - If the thread was not alive in the first place, - immediately return. -""" +/// Waits for a thread to finish before returning. +/// If the thread was not alive in the first place, +/// immediately return. join :: (t: &Thread) { while t.alive { #if runtime.platform.Supports_Futexes { @@ -63,10 +61,8 @@ join :: (t: &Thread) { } } -#doc """ - Forcefully kill a thread using runtime.__kill_thread. - Does nothing if the thread was not alive. -""" +/// Forcefully kill a thread using runtime.__kill_thread. +/// Does nothing if the thread was not alive. kill :: (t: &Thread) -> i32 { if !t.alive do return -1; @@ -76,29 +72,29 @@ kill :: (t: &Thread) -> i32 { return 1; } -#doc """ - Special procedure that should only be called once globally - that initialize the map of thread ids to thread data. -""" +/// Special procedure that should only be called once globally +/// that initialize the map of thread ids to thread data. 
__initialize :: () { thread_map->init(); } -#doc """ - Special procedure that is called when a thread exits, - or by kill() above. -""" +/// Special procedure that is called when a thread exits, +/// or by kill() above. __exited :: (id: i32) { - sync.scoped_mutex(&thread_mutex); + sync.scoped_mutex(&thread_mutex) - thread := thread_map->get(id) ?? null; + thread := thread_map->get(id) ?? null if thread != null { - thread.alive = false; + raw_free(alloc.heap_allocator, thread.stack_base) + raw_free(alloc.heap_allocator, thread.tls_base) + + thread.alive = false + #if runtime.platform.Supports_Futexes { - runtime.platform.__futex_wake(&thread.id, 1); + runtime.platform.__futex_wake(&thread.id, 1) } - thread_map->delete(id); + thread_map->delete(id) } } diff --git a/core/time/date.onyx b/core/time/date.onyx index d7d166d61..6e3817d47 100644 --- a/core/time/date.onyx +++ b/core/time/date.onyx @@ -12,50 +12,50 @@ Date :: struct { #local _month_durations := u32.[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]; -#inject Date { - month_duration_in_days :: (year: i32, month: i32) => { - d := _month_durations[month]; - - // February leap year case - if month == 1 { - if year % 4 == 0 && (year % 100 != 0 || year % 400 == 0) { - d += 1; - } - } +Date.month_duration_in_days :: (year: i32, month: i32) => { + d := _month_durations[month]; - return d; + // February leap year case + if month == 1 { + if year % 4 == 0 && (year % 100 != 0 || year % 400 == 0) { + d += 1; + } } + return d; +} - make :: (year, month, day: i32) -> Date { - return .{ year, month - 1, day }; - } - today :: () -> Date { - return now()->as_date(); - } +Date.make :: (year, month, day: i32) -> Date { + return .{ year, month - 1, day }; +} - add_months :: (d: Date, months: i32) -> Date { - nd := d; +Date.today :: () -> Date { + return now()->as_date(); +} - nd.month += months; - while nd.month >= 12 { - nd.month -= 12; - nd.year += 1; - } +Date.add_months :: (d: Date, months: i32) -> Date { + nd := d; - while 
nd.month < 0 { - nd.month += 12; - nd.year -= 1; - } + nd.month += months; + while nd.month >= 12 { + nd.month -= 12; + nd.year += 1; + } - return nd; + while nd.month < 0 { + nd.month += 12; + nd.year -= 1; } - add_days :: (d: Date, days: i32) -> Date { - nd := d; - nd.day += days; + return nd; +} + +Date.add_days :: (d: Date, days: i32) -> Date { + nd := d; + nd.day += days; + if days >= 0 { while true { duration := Date.month_duration_in_days(nd.year, nd.month); if nd.day <= duration { @@ -70,64 +70,74 @@ _month_durations := u32.[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]; nd.year += 1; } } + } else { + while nd.day < 1 { + nd.month -= 1 + if nd.month < 0 { + nd.month = 11 + nd.year -= 1 + } - return nd; + duration := Date.month_duration_in_days(nd.year, nd.month) + nd.day += duration + } } - day_of_week :: (d: Date) -> i32 { - if d.year < 1700 do return -1; + return nd; +} - #persist month_key := i32.[ - 0, 3, 3, 6, 1, 4, 6, 2, 5, 0, 3, 5 - ]; +Date.day_of_week :: (d: Date) -> i32 { + if d.year < 1700 do return -1; - #persist century_key := i32.[ - 4, 2, 0, 6, 4, 2, 0 - ]; + #persist month_key := i32.[ + 0, 3, 3, 6, 1, 4, 6, 2, 5, 0, 3, 5 + ]; - dig := d.year % 100; - dig += dig / 4; - dig += month_key[d.month]; - dig += century_key[(d.year / 100) - 17]; - dig += d.day; + #persist century_key := i32.[ + 4, 2, 0, 6, 4, 2, 0 + ]; - if d.year % 4 == 0 && (d.year % 100 != 0 || d.year % 400 == 0) { - if d.month == 0 || d.month == 1 { - dig -= 1; - } - } + dig := d.year % 100; + dig += dig / 4; + dig += month_key[d.month]; + dig += century_key[(d.year / 100) - 17]; + dig += d.day; - return dig % 7; + if d.year % 4 == 0 && (d.year % 100 != 0 || d.year % 400 == 0) { + if d.month == 0 || d.month == 1 { + dig -= 1; + } } - start_of_month :: (d: Date) -> Date { - return .{ - year = d.year, - month = d.month, - day = 1, - }; - } + return dig % 7; +} - end_of_month :: (d: Date) -> Date { - return .{ - year = d.year, - month = d.month, - day = 
Date.month_duration_in_days(d.year, d.month), - }; - } +Date.start_of_month :: (d: Date) -> Date { + return .{ + year = d.year, + month = d.month, + day = 1, + }; +} - is_before :: (d1, d2: Date) -> bool { - if d1.year != d2.year do return d1.year < d2.year; - if d1.month != d2.month do return d1.month < d2.month; - return d1.day < d2.day; - } +Date.end_of_month :: (d: Date) -> Date { + return .{ + year = d.year, + month = d.month, + day = Date.month_duration_in_days(d.year, d.month), + }; +} - is_after :: (d1, d2: Date) -> bool { - if d1.year != d2.year do return d1.year > d2.year; - if d1.month != d2.month do return d1.month > d2.month; - return d1.day > d2.day; - } +Date.is_before :: (d1, d2: Date) -> bool { + if d1.year != d2.year do return d1.year < d2.year; + if d1.month != d2.month do return d1.month < d2.month; + return d1.day < d2.day; +} +Date.is_after :: (d1, d2: Date) -> bool { + if d1.year != d2.year do return d1.year > d2.year; + if d1.month != d2.month do return d1.month > d2.month; + return d1.day > d2.day; } @conv.Custom_Format_Proc.{ Date } @@ -137,9 +147,9 @@ _month_durations := u32.[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]; @conv.Custom_Parse_Proc.{ Date } (d: &Date, text: str, _: Allocator) -> bool { - year, t := string.bisect(text, #char "-"); - month, t~ := string.bisect(t, #char "-"); - day, t~ := string.bisect(t, #char "-"); + year, t := string.bisect(text, '-'); + month, t~ := string.bisect(t, '-'); + day, t~ := string.bisect(t, '-'); d.year = ~~ conv.str_to_i64(year); d.month = ~~ (conv.str_to_i64(month) - 1); diff --git a/core/time/time.onyx b/core/time/time.onyx index 904392838..ccbba11eb 100644 --- a/core/time/time.onyx +++ b/core/time/time.onyx @@ -6,11 +6,9 @@ use core.string use core.conv use runtime -#doc """ - Represents a timestamp broken down by month, day, year, hour, minute, and seconds. 
- - *This structure does not represent or store timezone information.* -""" +/// Represents a timestamp broken down by month, day, year, hour, minute, and seconds. +/// +/// *This structure does not represent or store timezone information.* Timestamp :: struct #size (sizeof u32 * 12) { sec: i32; min: i32; @@ -23,28 +21,26 @@ Timestamp :: struct #size (sizeof u32 * 12) { isdst: i32; } -#inject Timestamp { - #doc "Converts a Date into a Timestamp." - from_date :: (d: Date) -> Timestamp { - return .{ - year = d.year - 1900, - mday = d.day, - mon = d.month - }; - } +/// Converts a Date into a Timestamp. +Timestamp.from_date :: (d: Date) -> Timestamp { + return .{ + year = d.year - 1900, + mday = d.day, + mon = d.month + }; +} - #doc "Converts the month, day and year fields into a `Date`." - as_date :: (t: Timestamp) -> Date { - return Date.make(t.year + 1900, t.mon + 1, t.mday); - } +/// Converts the month, day and year fields into a `Date`. +Timestamp.as_date :: (t: Timestamp) -> Date { + return Date.make(t.year + 1900, t.mon + 1, t.mday); +} - to_epoch :: to_epoch +Timestamp.to_epoch :: to_epoch - #doc "Formats a timestamp into a string." - format :: (t: Timestamp, format := "%Y-%m-%d %H:%M:%S") -> str { - t_ := t; - return strftime(format, &t_); - } +/// Formats a timestamp into a string. +Timestamp.format :: (t: Timestamp, format := "%Y-%m-%d %H:%M:%S") -> str { + t_ := t; + return strftime(format, &t_); } #tag conv.Custom_Format_Proc.{ Timestamp } @@ -60,9 +56,7 @@ Timestamp :: struct #size (sizeof u32 * 12) { return strptime(data, "%Y-%m-%d %H:%M:%S", time); } -#doc """ - Returns the current system time at UTC-0. -""" +/// Returns the current system time at UTC-0. now :: () -> Timestamp { current_time: i64; #if runtime.platform.Supports_Time { @@ -79,11 +73,9 @@ now :: () -> Timestamp { to_epoch :: tm_to_time -#doc """ - Converts UNIX epoch time to a timestamp, relative to the current timezone. 
- - *Note, this function is currently not implemented correctly as there is no reliable way to get the current system timezone. It is currently equivalent to `gmtime`* -""" +/// Converts UNIX epoch time to a timestamp, relative to the current timezone. +/// +/// *Note, this function is currently not implemented correctly as there is no reliable way to get the current system timezone. It is currently equivalent to `gmtime`* localtime :: #match #local {} #overload @@ -97,11 +89,9 @@ localtime :: (seconds: u64) -> Timestamp { } -#doc """ - Converts UNIX epoch time to a timestamp, relative to the Greenich mean time. - - *Note, this function is currently not implemented correctly as there is no reliable way to get the current system timezone. It is currently equivalent to `gmtime`* -""" +/// Converts UNIX epoch time to a timestamp, relative to the Greenich mean time. +// +/// *Note, this function is currently not implemented correctly as there is no reliable way to get the current system timezone. It is currently equivalent to `gmtime`* gmtime :: #match #local {} #overload @@ -116,51 +106,49 @@ gmtime :: (seconds: u64) -> Timestamp { strftime :: #match #local {} -#doc """ - Formats a timestamp into a string, using the format specified. - - The follow format specifiers are supported: - - **%A** Day of the week (Sunday, Monday, ...) - - **%a** Short day of the week (Sun, Mon, ...) - - **%B** Month (January, February, ...) - - **%b %h** Short month (Jan, Feb, ...) - - **%d %e** Day of the month (01, ..., 31) - - **%D** Full day (08/31/2023) - - **%H** Hour (00, ..., 23) - - **%I** 12-hour Hour (12, 01, ..., 11, 12) - - **%j** Day of the year (0, ..., 364) - - **%m** Month number (01, 02, ... 
12) - - **%M** Minute (00, ..., 59) - - **%p** AM/PM signifier - - **%r** 12-hour time of day (12:15:39 pm) - - **%R** 24-hour time of day (without seconds) (15:37) - - **%S** Seconds (00, ..., 59) - - **%T** 24-hour time of day (15:37:40) - - **%w** Numeric day of week (0, ..., 6) - - **%Y** Year - - **%y** 2-digit year - - **%%** Percent-sign -""" +/// Formats a timestamp into a string, using the format specified. +/// +/// The follow format specifiers are supported: +/// +/// **%A** Day of the week (Sunday, Monday, ...) +/// +/// **%a** Short day of the week (Sun, Mon, ...) +/// +/// **%B** Month (January, February, ...) +/// +/// **%b %h** Short month (Jan, Feb, ...) +/// +/// **%d %e** Day of the month (01, ..., 31) +/// +/// **%D** Full day (08/31/2023) +/// +/// **%H** Hour (00, ..., 23) +/// +/// **%I** 12-hour Hour (12, 01, ..., 11, 12) +/// +/// **%j** Day of the year (0, ..., 364) +/// +/// **%m** Month number (01, 02, ... 12) +/// +/// **%M** Minute (00, ..., 59) +/// +/// **%p** AM/PM signifier +/// +/// **%r** 12-hour time of day (12:15:39 pm) +/// +/// **%R** 24-hour time of day (without seconds) (15:37) +/// +/// **%S** Seconds (00, ..., 59) +/// +/// **%T** 24-hour time of day (15:37:40) +/// +/// **%w** Numeric day of week (0, ..., 6) +/// +/// **%Y** Year +/// +/// **%y** 2-digit year +/// +/// **%%** Percent-sign #overload strftime :: (format_: [] u8, tm: &Timestamp) -> str { s := io.buffer_stream_make(); @@ -168,7 +156,7 @@ strftime :: (format_: [] u8, tm: &Timestamp) -> str { strftime(&w, format_, tm); - return string.as_str(&s); + return str.as_str(&s); } #overload @@ -178,7 +166,7 @@ strftime :: (buf: [] u8, format_: [] u8, tm: &Timestamp) -> str { strftime(&w, format_, tm); - return string.as_str(&s); + return str.as_str(&s); } #local weekdays := str.[ "sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday" ]; @@ -240,7 +228,7 @@ strftime :: (w: &io.Writer, format_: [] u8, tm: &Timestamp) { } } - case #default { + case 
_ { io.write(w, c); } } @@ -257,9 +245,7 @@ strftime :: (w: &io.Writer, format_: [] u8, tm: &Timestamp) { strptime :: #match #local {} -#doc """ - Parses a string into a `Timestamp`. -""" +/// Parses a string into a `Timestamp`. #overload strptime :: (buf: [] u8, format: [] u8) -> ? Timestamp { t: Timestamp; @@ -271,9 +257,7 @@ strptime :: (buf: [] u8, format: [] u8) -> ? Timestamp { return .None; } -#doc """ - Parses a string into a `Timestamp`. Returns `true` if the parsing was successful. -""" +/// Parses a string into a `Timestamp`. Returns `true` if the parsing was successful. #overload strptime :: (buf_: [] u8, format_: [] u8, tm: &Timestamp) -> bool { use core {*} @@ -285,10 +269,10 @@ strptime :: (buf_: [] u8, format_: [] u8, tm: &Timestamp) -> bool { while working && buf.length > 0 && format.length > 0 { c := format[0]; switch c { - case #char "%" { + case '%' { string.advance(&format); switch format[0] { - case #char "a", #char "A" { + case 'a', 'A' { for i in weekdays.count { w := weekdays[i]; if string.equal_insensitive(w, buf[0 .. w.length]) { @@ -307,7 +291,7 @@ strptime :: (buf_: [] u8, format_: [] u8, tm: &Timestamp) -> bool { working = false; } - case #char "b", #char "B", #char "h" { + case 'b', 'B', 'h' { for i in monthnames.count { m := monthnames[i]; if string.equal_insensitive(m, buf[0 .. 
m.length]) { @@ -326,20 +310,20 @@ strptime :: (buf_: [] u8, format_: [] u8, tm: &Timestamp) -> bool { working = false; } - case #char "d", #char "e" { + case 'd', 'e' { working = parse_number_and_advance(&buf, &tm.mday, 1, 31, 0); } - case #char "D" { + case 'D' { working = parse_number_and_advance(&buf, &tm.mon, 1, 12, -1); if !working do break; - if buf[0] == #char "/" { + if buf[0] == '/' { string.advance(&buf); working = parse_number_and_advance(&buf, &tm.mday, 1, 31, 0); if !working do break; - if buf[0] == #char "/" { + if buf[0] == '/' { string.advance(&buf); working = parse_number_and_advance(&buf, &tm.year, 0, 99, 0); if working && tm.year < 69 { @@ -349,14 +333,14 @@ strptime :: (buf_: [] u8, format_: [] u8, tm: &Timestamp) -> bool { } } - case #char "H" do working = parse_number_and_advance(&buf, &tm.hour, 0, 23, 0); - case #char "I" do working = parse_number_and_advance(&buf, &tm.hour, 1, 12, 0); - case #char "j" do working = parse_number_and_advance(&buf, &tm.yday, 1, 366, -1); - case #char "m" do working = parse_number_and_advance(&buf, &tm.mon, 1, 12, -1); - case #char "M" do working = parse_number_and_advance(&buf, &tm.min, 0, 59, 0); - case #char "n", #char "t" do string.strip_leading_whitespace(&buf); + case 'H' do working = parse_number_and_advance(&buf, &tm.hour, 0, 23, 0); + case 'I' do working = parse_number_and_advance(&buf, &tm.hour, 1, 12, 0); + case 'j' do working = parse_number_and_advance(&buf, &tm.yday, 1, 366, -1); + case 'm' do working = parse_number_and_advance(&buf, &tm.mon, 1, 12, -1); + case 'M' do working = parse_number_and_advance(&buf, &tm.min, 0, 59, 0); + case 'n', 't' do string.strip_leading_whitespace(&buf); - case #char "p" { + case 'p' { if string.equal_insensitive(buf[0 .. 
2], "am") { if tm.hour == 12 do tm.hour = 0; string.advance(&buf, 2); @@ -370,17 +354,17 @@ strptime :: (buf_: [] u8, format_: [] u8, tm: &Timestamp) -> bool { } } - case #char "r" { + case 'r' { working = parse_number_and_advance(&buf, &tm.hour, 1, 12, 0); if !working do break; - if buf[0] == #char ":" { + if buf[0] == ':' { string.advance(&buf); working = parse_number_and_advance(&buf, &tm.min, 0, 59, 0); if !working do break; - if buf[0] == #char ":" { + if buf[0] == ':' { string.advance(&buf); working = parse_number_and_advance(&buf, &tm.sec, 0, 59, 0); @@ -403,30 +387,30 @@ strptime :: (buf_: [] u8, format_: [] u8, tm: &Timestamp) -> bool { } } - case #char "R" { + case 'R' { working = parse_number_and_advance(&buf, &tm.hour, 1, 12, 0); if !working do break; - if buf[0] == #char ":" { + if buf[0] == ':' { string.advance(&buf); working = parse_number_and_advance(&buf, &tm.min, 0, 59, 0); } } - case #char "S" do working = parse_number_and_advance(&buf, &tm.sec, 0, 59, 0); + case 'S' do working = parse_number_and_advance(&buf, &tm.sec, 0, 59, 0); - case #char "T" { + case 'T' { working = parse_number_and_advance(&buf, &tm.hour, 1, 12, 0); if !working do break; - if buf[0] == #char ":" { + if buf[0] == ':' { string.advance(&buf); working = parse_number_and_advance(&buf, &tm.min, 0, 59, 0); if !working do break; - if buf[0] == #char ":" { + if buf[0] == ':' { string.advance(&buf); working = parse_number_and_advance(&buf, &tm.sec, 0, 59, 0); @@ -434,34 +418,34 @@ strptime :: (buf_: [] u8, format_: [] u8, tm: &Timestamp) -> bool { } } - case #char "w" do working = parse_number_and_advance(&buf, &tm.wday, 0, 6, 0); - case #char "Y" do working = parse_number_and_advance(&buf, &tm.year, 1900, 65535, -1900); + case 'w' do working = parse_number_and_advance(&buf, &tm.wday, 0, 6, 0); + case 'Y' do working = parse_number_and_advance(&buf, &tm.year, 1900, 65535, -1900); - case #char "y" { + case 'y' { working = parse_number_and_advance(&buf, &tm.year, 0, 99, 0); if working 
&& tm.year < 69 { tm.year += 100; } } - case #char "%" { - if buf[0] != #char "%" { + case '%' { + if buf[0] != '%' { working = false; } string.advance(&buf); } - case #default { + case _ { working = false; } } } - case #char " ", #char "\t", #char "\r", #char "\n", #char "\f", #char "\v" { + case ' ', '\t', '\r', '\n', '\f', '\v' { string.strip_leading_whitespace(&buf); } - case #default { + case _ { if c != buf[0] { working = false; @@ -484,12 +468,12 @@ strptime :: (buf_: [] u8, format_: [] u8, tm: &Timestamp) -> bool { n := 0; while buf.count > 0 { c := buf.data[0]; - if c < #char "0" || c > #char "9" { + if c < '0' || c > '9' { break; } n *= 10; - n += ~~(c - #char "0"); + n += ~~(c - '0'); string.advance(buf); } diff --git a/docs/ideas/platform_layer.md b/docs/ideas/platform_layer.md index fd234c0c9..43ff74e01 100644 --- a/docs/ideas/platform_layer.md +++ b/docs/ideas/platform_layer.md @@ -90,6 +90,7 @@ this document will serve as that "header file" ### Procedures - `__exit(code: i32) -> void` - `__sleep(milliseconds: i32) -> void` +- `__args(allocator: Allocator) -> [] cstr` ### Values diff --git a/examples/01_hello_world.onyx b/examples/01_hello_world.onyx index 1ef644c52..3a8d6ddee 100644 --- a/examples/01_hello_world.onyx +++ b/examples/01_hello_world.onyx @@ -14,7 +14,7 @@ package main // option to disable loading the standard library, if that is needed. For the sake // of completeness, the following line manually includes the standard library, but // this is not necessary. -#load "core/module" + // To use packages, use the 'use' keyword. 
The 'core' package houses all of the diff --git a/examples/02_variables.onyx b/examples/02_variables.onyx index 52d461117..73b1f0da7 100644 --- a/examples/02_variables.onyx +++ b/examples/02_variables.onyx @@ -1,4 +1,4 @@ -// Notice this time, we are not adding, 'package main' or '#load "core/module"' +// Notice this time, we are not adding, 'package main' or '' // to the top of the file, since every file is automatically part of the // main package unless specified otherwise, and every compilations includes // the standard library. diff --git a/examples/04_fixed_arrays.onyx b/examples/04_fixed_arrays.onyx index 972a62f3b..1045cae26 100644 --- a/examples/04_fixed_arrays.onyx +++ b/examples/04_fixed_arrays.onyx @@ -15,7 +15,7 @@ // This file will give examples of all of these things, as well as some of the gotchas // you need to be aware of. -#load "core/module" + use core {*} diff --git a/examples/05_slices.onyx b/examples/05_slices.onyx index 218a4ac84..99b1e7665 100644 --- a/examples/05_slices.onyx +++ b/examples/05_slices.onyx @@ -5,7 +5,7 @@ // is a powerful construct to have. In fact, strings in Onyx, i.e. // the 'str' type, is actually just a slice of u8. -#load "core/module" + use core {*} diff --git a/examples/06_dynamic_arrays.onyx b/examples/06_dynamic_arrays.onyx index 3c609a53a..c5831a49b 100644 --- a/examples/06_dynamic_arrays.onyx +++ b/examples/06_dynamic_arrays.onyx @@ -7,7 +7,7 @@ // Dynamic arrays in Onyx are easy to use on purpose, because I // know how useful they are in almost every program I write. -#load "core/module" + use core {*} diff --git a/examples/07_structs.onyx b/examples/07_structs.onyx index 25e9888ce..44002f215 100644 --- a/examples/07_structs.onyx +++ b/examples/07_structs.onyx @@ -5,7 +5,7 @@ // 'structs' in Onyx are very similar to structs in C and C++, with a couple // of additional capabilities to make using them even easier. 
-#load "core/module" + use core {*} diff --git a/examples/08_enums.onyx b/examples/08_enums.onyx index 1ae2a7358..0beb09d39 100644 --- a/examples/08_enums.onyx +++ b/examples/08_enums.onyx @@ -4,7 +4,7 @@ // expressions. This is going to improve in the future, but for the moment, // enums are rather limited. -#load "core/module" + use core {*} diff --git a/examples/09_for_loops.onyx b/examples/09_for_loops.onyx index 9d024c83d..293352343 100644 --- a/examples/09_for_loops.onyx +++ b/examples/09_for_loops.onyx @@ -4,7 +4,7 @@ // in Onyx are extremely simple to use by design and should make programming very // enjoyable. But I digress, let's look at some examples. -#load "core/module" + use core {package, *} diff --git a/examples/10_switch_statements.onyx b/examples/10_switch_statements.onyx index 29aa997a0..8898383b3 100644 --- a/examples/10_switch_statements.onyx +++ b/examples/10_switch_statements.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -14,7 +14,7 @@ main :: (args: [] cstr) { println("Multiple statments go between '{}'."); } - case #default { + case _ { println("Default case reached."); } } @@ -35,7 +35,7 @@ main :: (args: [] cstr) { fallthrough; // Now we fallthrough } - case #default { + case _ { println("Default case reached."); } } @@ -57,7 +57,7 @@ main :: (args: [] cstr) { println("This statement will never be reached."); } - case #default { + case _ { println("Default case reached."); } } diff --git a/examples/11_map.onyx b/examples/11_map.onyx index 19620095d..872180090 100644 --- a/examples/11_map.onyx +++ b/examples/11_map.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/examples/12_varargs.onyx b/examples/12_varargs.onyx index 96668e4f5..745743eda 100644 --- a/examples/12_varargs.onyx +++ b/examples/12_varargs.onyx @@ -15,7 +15,7 @@ // - 'variadic argument' is shortened to 'vararg' in many situtations -#load "core/module" + use core {*} diff --git a/examples/13_use_keyword.onyx b/examples/13_use_keyword.onyx 
index 7301a58b9..4b0646d81 100644 --- a/examples/13_use_keyword.onyx +++ b/examples/13_use_keyword.onyx @@ -14,7 +14,7 @@ // most cases, but can easily break and require you to write the code without it. This // is being address and hopefully in the next couple months it will be much more robust. -#load "core/module" + use core {*} diff --git a/examples/14_overloaded_procs.onyx b/examples/14_overloaded_procs.onyx index b37a61cdf..4703f91ee 100644 --- a/examples/14_overloaded_procs.onyx +++ b/examples/14_overloaded_procs.onyx @@ -32,7 +32,7 @@ // Let's look at some examples of overloaded procedures and how they are resolved. -#load "core/module" + use core {*} diff --git a/examples/15_polymorphic_procs.onyx b/examples/15_polymorphic_procs.onyx index 02641dee9..b5a5caeb0 100644 --- a/examples/15_polymorphic_procs.onyx +++ b/examples/15_polymorphic_procs.onyx @@ -57,7 +57,7 @@ compose :: (a: $A, f: (A) -> $B, g: (B) -> $C) -> C { } -#load "core/module" + use core {*} diff --git a/examples/16_pipe_operator.onyx b/examples/16_pipe_operator.onyx index c82e7c852..f985a1cc4 100644 --- a/examples/16_pipe_operator.onyx +++ b/examples/16_pipe_operator.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core { println } diff --git a/examples/17_operator_overload.onyx b/examples/17_operator_overload.onyx index a2a84c76e..40fc1cb76 100644 --- a/examples/17_operator_overload.onyx +++ b/examples/17_operator_overload.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} // Operator overloading allows you to define what it means to perform diff --git a/examples/18_macros.onyx b/examples/18_macros.onyx index 04d6c3140..1c7ecc997 100644 --- a/examples/18_macros.onyx +++ b/examples/18_macros.onyx @@ -134,5 +134,5 @@ main :: (args: [] cstr) { // } } -#load "core/module" + use core {*} diff --git a/examples/19_do_blocks.onyx b/examples/19_do_blocks.onyx index ba62cb8ae..659d0b172 100644 --- a/examples/19_do_blocks.onyx +++ b/examples/19_do_blocks.onyx @@ -39,7 +39,7 @@ main :: (args: [] 
cstr) { #if false { left, right := do { - parts := string.split(line, #char "|"); + parts := string.split(line, '|'); defer memory.free_slice(^parts); return parts[0], parts[1]; }; @@ -52,7 +52,7 @@ main :: (args: [] cstr) { #if false { left, right: str; { - parts := string.split(line, #char "|"); + parts := string.split(line, '|'); left, right = parts[0], parts[1]; memory.free_slice(^parts); } @@ -65,5 +65,5 @@ main :: (args: [] cstr) { } -#load "core/module" + use core {*} diff --git a/examples/20_auto_return.onyx b/examples/20_auto_return.onyx index 9451735d5..ddf467438 100644 --- a/examples/20_auto_return.onyx +++ b/examples/20_auto_return.onyx @@ -6,7 +6,7 @@ main :: (args: [] cstr) { // If you don't want to explicitly write the return type // of a function, you can use #auto: - f :: (x: i32) -> #auto { + f :: (x: i32) -> _ { return x * 2; } @@ -27,7 +27,7 @@ main :: (args: [] cstr) { // come from? The only way to do this is use an auto-return // type and let the compiler fill it in. - consume :: (it: $T) -> #auto where iter.Iterable(T) { + consume :: (it: $T) -> _ where iter.Iterable(T) { consume_inner :: macro (it: Iterator($V)) -> [] V { arr: [..] 
V; for v in it do arr << v; @@ -42,6 +42,6 @@ main :: (args: [] cstr) { println(arr); } -#load "core/module" + use core {*} diff --git a/examples/21_quick_functions.onyx b/examples/21_quick_functions.onyx index 5d794f979..8d7211201 100644 --- a/examples/21_quick_functions.onyx +++ b/examples/21_quick_functions.onyx @@ -3,7 +3,7 @@ // determine a return type with #auto, you can write a completely type free // procedure like so: -no_types :: (x: $__type_x, y: $__type_y) -> #auto { +no_types :: (x: $__type_x, y: $__type_y) -> _ { return x + ~~ y; } @@ -44,5 +44,5 @@ main :: (args) => { println(val); } -#load "core/module" + use core {*} diff --git a/examples/22_interfaces.onyx b/examples/22_interfaces.onyx index ca83830bd..9677fb5ab 100644 --- a/examples/22_interfaces.onyx +++ b/examples/22_interfaces.onyx @@ -140,6 +140,6 @@ main :: (args) => { overloaded_procedure_example(); } -#load "core/module" + use core {*} use core.intrinsics.onyx {*} diff --git a/examples/50_misc.onyx b/examples/50_misc.onyx index 2eb514329..e4605e647 100644 --- a/examples/50_misc.onyx +++ b/examples/50_misc.onyx @@ -66,5 +66,5 @@ main :: (args) => { multiple_declaration_improvements(); } -#load "core/module" + use core {*} diff --git a/misc/vscode/extension.ts b/misc/vscode/extension.ts index 275344402..15c5ca698 100644 --- a/misc/vscode/extension.ts +++ b/misc/vscode/extension.ts @@ -4,6 +4,7 @@ import * as fs from "fs"; import * as vslc2 from "vscode-languageclient"; import * as vslc from "vscode-languageclient/node"; +import * as cp from "child_process"; let client: vslc.LanguageClient; @@ -86,12 +87,15 @@ export async function activate(context: vscode.ExtensionContext) { executable = `${onyx_path}/onyx`; } + console.appendLine(`Onyx executable is: ${executable}`); + if (onyx_path) { - let serverOptions: vslc.ServerOptions = { - command: executable, - args: ["lsp"], - transport: vslc.TransportKind.stdio, - }; + let serverOptions = async () => { + return cp.spawn(executable, ["lsp"], { + 
detached: true, + cwd: vscode.workspace.workspaceFolders[0].uri.fsPath + }); + } let clientOptions: vslc.LanguageClientOptions = { documentSelector: [ @@ -100,11 +104,14 @@ export async function activate(context: vscode.ExtensionContext) { connectionOptions: { cancellationStrategy: null, maxRestartCount: 5 + }, + uriConverters: { + code2Protocol: (x) => x.fsPath, + protocol2Code: (x) => vscode.Uri.parse(decodeURIComponent(x)) } }; client = new vslc.LanguageClient("onyx-lsp", serverOptions, clientOptions); - client.start(); } diff --git a/misc/vscode/onyxlang-0.1.12.vsix b/misc/vscode/onyxlang-0.1.12.vsix new file mode 100644 index 000000000..7e32efbd8 Binary files /dev/null and b/misc/vscode/onyxlang-0.1.12.vsix differ diff --git a/misc/vscode/onyxlang-0.1.13.vsix b/misc/vscode/onyxlang-0.1.13.vsix new file mode 100644 index 000000000..df362c5f0 Binary files /dev/null and b/misc/vscode/onyxlang-0.1.13.vsix differ diff --git a/misc/vscode/onyxlang-0.1.14.vsix b/misc/vscode/onyxlang-0.1.14.vsix new file mode 100644 index 000000000..8bdb8b722 Binary files /dev/null and b/misc/vscode/onyxlang-0.1.14.vsix differ diff --git a/misc/vscode/out/extension.js b/misc/vscode/out/extension.js index 2be9d6946..4caf73ea3 100644 --- a/misc/vscode/out/extension.js +++ b/misc/vscode/out/extension.js @@ -14,6 +14,7 @@ const vscode = require("vscode"); const vsctmls = require("vscode-textmate-languageservice"); const fs = require("fs"); const vslc = require("vscode-languageclient/node"); +const cp = require("child_process"); let client; function get_onyx_path() { let onyx_path = process.env['ONYX_PATH']; @@ -84,12 +85,14 @@ function activate(context) { // Windows distributions are different executable = `${onyx_path}/onyx`; } + console.appendLine(`Onyx executable is: ${executable}`); if (onyx_path) { - let serverOptions = { - command: executable, - args: ["lsp"], - transport: vslc.TransportKind.stdio, - }; + let serverOptions = () => __awaiter(this, void 0, void 0, function* () { + 
return cp.spawn(executable, ["lsp"], { + detached: true, + cwd: vscode.workspace.workspaceFolders[0].uri.fsPath + }); + }); let clientOptions = { documentSelector: [ { scheme: "file", language: "onyx" }, @@ -97,6 +100,10 @@ function activate(context) { connectionOptions: { cancellationStrategy: null, maxRestartCount: 5 + }, + uriConverters: { + code2Protocol: (x) => x.fsPath, + protocol2Code: (x) => vscode.Uri.parse(decodeURIComponent(x)) } }; client = new vslc.LanguageClient("onyx-lsp", serverOptions, clientOptions); diff --git a/misc/vscode/out/ovmDebug.js b/misc/vscode/out/ovmDebug.js index 440bd4566..a75f9cd6c 100644 --- a/misc/vscode/out/ovmDebug.js +++ b/misc/vscode/out/ovmDebug.js @@ -15,6 +15,8 @@ const EventEmitter = require("node:events"); const await_notify_1 = require("await-notify"); const net = require("node:net"); const child_process = require("node:child_process"); +const fs = require("node:fs"); +const logger_1 = require("@vscode/debugadapter/lib/logger"); class OVMDebugSession extends debugadapter_1.LoggingDebugSession { constructor() { super("ovm-debug-log.txt"); @@ -202,14 +204,26 @@ class OVMDebugSession extends debugadapter_1.LoggingDebugSession { } launchRequest(response, args, request) { return __awaiter(this, void 0, void 0, function* () { + let debugSocketPath = "/tmp/onyx-debug-socket"; + if (fs.existsSync(debugSocketPath)) { + fs.unlinkSync(debugSocketPath); + } + this.sendEvent(new logger_1.LogOutputEvent(`Spawning Onyx debug session\nSocket: ${debugSocketPath}\nWorking Dir: ${args.workingDir}\n`, logger_1.LogLevel.Log)); + let onyx_path = `${args.onyxPath}/bin/onyx`; if (args.wasmFile) { - this.running_process = child_process.spawn("onyx-run", ["--debug", args.wasmFile], { + this.running_process = child_process.spawn(onyx_path, ["run", "--debug", "--debug-socket", debugSocketPath, args.wasmFile], { "cwd": args.workingDir, + "env": { + "ONYX_PATH": args.onyxPath, + } }); } else if (args.onyxFiles) { - this.running_process = 
child_process.spawn("onyx", ["run", "--debug", ...args.onyxFiles], { + this.running_process = child_process.spawn(onyx_path, ["run", "--debug", "--debug-socket", debugSocketPath, ...args.onyxFiles], { "cwd": args.workingDir, + "env": { + "ONYX_PATH": args.onyxPath, + } }); } else { @@ -219,18 +233,57 @@ class OVMDebugSession extends debugadapter_1.LoggingDebugSession { }); return; } + if (!this.running_process.pid) { + this.sendErrorResponse(response, { + format: "Failed to spawn Onyx debug session.", + id: 1 + }); + return; + } this.running_process.stdout.setEncoding("utf-8"); this.running_process.stdout.on("data", (chunk) => { this.sendEvent(new debugadapter_1.OutputEvent(chunk, "console")); }); - this.attachRequest(response, { "socketPath": "/tmp/ovm-debug.0000", "stopOnEntry": args.stopOnEntry }); + this.sendEvent(new logger_1.LogOutputEvent(`Process is spawned: ${this.running_process.pid}\n`, logger_1.LogLevel.Log)); + let done_hack = false; + let success = Promise.race([ + new Promise((res, rej) => setTimeout(() => res(false), 2000)), + new Promise((res, rej) => __awaiter(this, void 0, void 0, function* () { + while (!done_hack) { + if (fs.existsSync(debugSocketPath)) { + res(true); + } + yield new Promise((res, rej) => setTimeout(res, 100)); + } + })) + ]); + done_hack = true; + if (!success) { + this.sendErrorResponse(response, { + format: "Failed to spawn Onyx debug session.", + id: 1 + }); + return; + } + // This "sleep" is very hacky and needs to be replaced. The problem + // is the we need to wait until the socket exists. 
+ yield this.attachRequest(response, { "socketPath": debugSocketPath, "stopOnEntry": args.stopOnEntry }); }); } attachRequest(response, args, request) { var _a; return __awaiter(this, void 0, void 0, function* () { + this.sendEvent(new logger_1.LogOutputEvent(`Connecting to process\n`, logger_1.LogLevel.Log)); yield this._configurationDone.wait(1000); - yield this.debugger.connect(args.socketPath); + try { + yield this.debugger.connect(args.socketPath); + } + catch (e) { + this.sendEvent(new logger_1.LogOutputEvent(`Error connecting to session: ${e.toString()}`, logger_1.LogLevel.Error)); + this.sendErrorResponse(response, 41); + return; + } + this.sendEvent(new logger_1.LogOutputEvent(`Connected to process`, logger_1.LogLevel.Log)); this._clientConnected = true; this._clientConnectedNotifier.notify(); this.stopOnEntry = (_a = args.stopOnEntry) !== null && _a !== void 0 ? _a : false; @@ -357,6 +410,7 @@ class OVMDebugger extends EventEmitter { }); return new Promise((res, rej) => { this.client.on("connect", res); + this.client.on("error", rej); }); } pause(thread_id = 0xffffffff) { diff --git a/misc/vscode/ovmDebug.ts b/misc/vscode/ovmDebug.ts index d09127580..717b4b048 100644 --- a/misc/vscode/ovmDebug.ts +++ b/misc/vscode/ovmDebug.ts @@ -12,8 +12,9 @@ import { Subject } from "await-notify"; import * as net from "node:net"; import * as child_process from "node:child_process"; import { ChildProcess } from 'node:child_process'; -import { openStdin } from 'node:process'; - +import * as fs from "node:fs"; +import { LogOutputEvent, LogLevel } from '@vscode/debugadapter/lib/logger'; +import { debug } from 'node:console'; interface IOVMAttachRequestArguments extends DebugProtocol.AttachRequestArguments { socketPath?: string; @@ -23,6 +24,7 @@ interface IOVMAttachRequestArguments extends DebugProtocol.AttachRequestArgument interface IOVMLaunchRequestArguments extends DebugProtocol.AttachRequestArguments { wasmFile?: string; onyxFiles?: [string]; + onyxPath: string; 
workingDir: string; stopOnEntry?: boolean; } @@ -290,14 +292,29 @@ export class OVMDebugSession extends LoggingDebugSession { } protected async launchRequest(response: DebugProtocol.LaunchResponse, args: IOVMLaunchRequestArguments, request?: DebugProtocol.Request): Promise { + let debugSocketPath = "/tmp/onyx-debug-socket"; + if (fs.existsSync(debugSocketPath)) { + fs.unlinkSync(debugSocketPath); + } + + this.sendEvent(new LogOutputEvent(`Spawning Onyx debug session\nSocket: ${debugSocketPath}\nWorking Dir: ${args.workingDir}\n`, LogLevel.Log)); + + let onyx_path = `${args.onyxPath}/bin/onyx` + if (args.wasmFile) { - this.running_process = child_process.spawn("onyx-run", ["--debug", args.wasmFile], { + this.running_process = child_process.spawn(onyx_path, ["run", "--debug", "--debug-socket", debugSocketPath, args.wasmFile], { "cwd": args.workingDir, + "env": { + "ONYX_PATH": args.onyxPath, + } }); } else if (args.onyxFiles) { - this.running_process = child_process.spawn("onyx", ["run", "--debug", ...args.onyxFiles], { + this.running_process = child_process.spawn(onyx_path, ["run", "--debug", "--debug-socket", debugSocketPath,...args.onyxFiles], { "cwd": args.workingDir, + "env": { + "ONYX_PATH": args.onyxPath, + } }); } else { @@ -309,18 +326,66 @@ export class OVMDebugSession extends LoggingDebugSession { return; } + if (!this.running_process.pid) { + this.sendErrorResponse(response, { + format: "Failed to spawn Onyx debug session.", + id: 1 + } as DebugProtocol.Message); + + return; + } + this.running_process.stdout.setEncoding("utf-8"); this.running_process.stdout.on("data", (chunk) => { this.sendEvent(new OutputEvent(chunk, "console")); }); - this.attachRequest(response, {"socketPath": "/tmp/ovm-debug.0000", "stopOnEntry": args.stopOnEntry}); + this.sendEvent(new LogOutputEvent(`Process is spawned: ${this.running_process.pid}\n`, LogLevel.Log)); + + let done_hack = false; + + let success = Promise.race([ + new Promise((res, rej) => setTimeout(() => res(false), 
2000)), + new Promise(async (res, rej) => { + while (!done_hack) { + if (fs.existsSync(debugSocketPath)) { + res(true); + } + + await new Promise((res, rej) => setTimeout(res, 100)); + } + }) + ]) + + done_hack = true; + + if (!success) { + this.sendErrorResponse(response, { + format: "Failed to spawn Onyx debug session.", + id: 1 + } as DebugProtocol.Message); + return; + } + + // This "sleep" is very hacky and needs to be replaced. The problem + // is the we need to wait until the socket exists. + + await this.attachRequest(response, {"socketPath": debugSocketPath, "stopOnEntry": args.stopOnEntry}); } protected async attachRequest(response: DebugProtocol.AttachResponse, args: IOVMAttachRequestArguments, request?: DebugProtocol.Request): Promise { + this.sendEvent(new LogOutputEvent(`Connecting to process\n`, LogLevel.Log)); await this._configurationDone.wait(1000); - await this.debugger.connect(args.socketPath); + try { + await this.debugger.connect(args.socketPath); + } catch (e) { + this.sendEvent(new LogOutputEvent(`Error connecting to session: ${e.toString()}`, LogLevel.Error)); + this.sendErrorResponse(response, 41); + return; + } + + this.sendEvent(new LogOutputEvent(`Connected to process`, LogLevel.Log)); this._clientConnected = true; this._clientConnectedNotifier.notify(); @@ -522,6 +587,7 @@ class OVMDebugger extends EventEmitter { return new Promise((res, rej) => { this.client.on("connect", res); + this.client.on("error", rej); }); } diff --git a/misc/vscode/package-lock.json b/misc/vscode/package-lock.json index ee87c3f6f..489a1faf6 100644 --- a/misc/vscode/package-lock.json +++ b/misc/vscode/package-lock.json @@ -1,21 +1,21 @@ { - "name": "onyx", - "version": "0.1.7", + "name": "onyxlang", + "version": "0.1.12", "lockfileVersion": 2, "requires": true, "packages": { "": { - "name": "onyx", - "version": "0.1.7", + "name": "onyxlang", + "version": "0.1.12", "license": "BSD-2-Clause", "dependencies": { - "@types/node": "^18.6.4", "@vscode/debugadapter": 
"^1.57.0", "await-notify": "^1.0.1", "vscode-languageclient": "^8.0.2", "vscode-textmate-languageservice": "0.2.1" }, "devDependencies": { + "@types/node": "^20.12.4", "@types/vscode": "^1.1.37", "vscode-debugadapter": "^1.51.0" }, @@ -56,9 +56,13 @@ } }, "node_modules/@types/node": { - "version": "18.11.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.18.tgz", - "integrity": "sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA==" + "version": "20.12.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.4.tgz", + "integrity": "sha512-E+Fa9z3wSQpzgYQdYmme5X3OTuejnnTx88A6p6vkkJosR3KBz+HpE3kqNm98VE6cfLFcISx7zW7MsJkH6KwbTw==", + "dev": true, + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/vscode": { "version": "1.74.0", @@ -417,6 +421,12 @@ "node": ">=8" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, "node_modules/vscode-debugadapter": { "version": "1.51.0", "resolved": "https://registry.npmjs.org/vscode-debugadapter/-/vscode-debugadapter-1.51.0.tgz", @@ -547,9 +557,13 @@ } }, "@types/node": { - "version": "18.11.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.18.tgz", - "integrity": "sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA==" + "version": "20.12.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.4.tgz", + "integrity": "sha512-E+Fa9z3wSQpzgYQdYmme5X3OTuejnnTx88A6p6vkkJosR3KBz+HpE3kqNm98VE6cfLFcISx7zW7MsJkH6KwbTw==", + "dev": true, + "requires": { + "undici-types": "~5.26.4" + } }, "@types/vscode": { "version": "1.74.0", @@ -830,6 +844,12 @@ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", "integrity": 
"sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==" }, + "undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, "vscode-debugadapter": { "version": "1.51.0", "resolved": "https://registry.npmjs.org/vscode-debugadapter/-/vscode-debugadapter-1.51.0.tgz", diff --git a/misc/vscode/package.json b/misc/vscode/package.json index c6b398173..8ca816744 100644 --- a/misc/vscode/package.json +++ b/misc/vscode/package.json @@ -2,7 +2,7 @@ "name": "onyxlang", "displayName": "Onyx Programming Language", "description": "Onyx syntax highlighting and debugger support.", - "version": "0.1.11", + "version": "0.1.14", "publisher": "onyxlang", "license": "BSD-2-Clause", "engines": { @@ -93,6 +93,11 @@ }, "launch": { "properties": { + "onyxPath": { + "type": "string", + "description": "The path to your Onyx installation. 
Generally $HOME/.onyx", + "default": "" + }, "wasmFile": { "type": "string", "description": "The WASM file for debugging, compiled with the --debug flag.", @@ -122,7 +127,7 @@ "type": "onyx", "request": "attach", "stopOnEntry": true, - "socketPath": "/tmp/ovm-debug.0000" + "socketPath": "${command:pickSession}" }, { "name": "Onyx Launch", @@ -140,13 +145,13 @@ "./textmate-configuration.json" ], "dependencies": { - "@types/node": "^18.6.4", "@vscode/debugadapter": "^1.57.0", "await-notify": "^1.0.1", "vscode-languageclient": "^8.0.2", "vscode-textmate-languageservice": "0.2.1" }, "devDependencies": { + "@types/node": "^20.12.4", "@types/vscode": "^1.1.37", "vscode-debugadapter": "^1.51.0" } diff --git a/misc/vscode/syntaxes/onyx.tmLanguage b/misc/vscode/syntaxes/onyx.tmLanguage index 5e59cd050..9c0cdc15a 100644 --- a/misc/vscode/syntaxes/onyx.tmLanguage +++ b/misc/vscode/syntaxes/onyx.tmLanguage @@ -594,6 +594,18 @@ name keyword.tag.onyx + + match + \b(\b[[:alpha:]_]+[[:alnum:]_\.]*\b)\s*!\{ + captures + + 1 + + name + keyword.tag.onyx + + + diff --git a/runtime/build.sh b/runtime/build.sh index 8b0a2fea2..2158fa953 100755 --- a/runtime/build.sh +++ b/runtime/build.sh @@ -20,6 +20,7 @@ echo "Compiling onyx_runtime.$suffix" $ONYX_CC -shared -fpic -w -O2 \ -o onyx_runtime.$suffix \ $FLAGS \ + -Wno-incompatible-pointer-types \ -I ../shared/include -I ../compiler/include \ ./onyx_runtime.c \ -lpthread diff --git a/runtime/onyx_runtime.c b/runtime/onyx_runtime.c index 1107fc49c..bd4b57d67 100644 --- a/runtime/onyx_runtime.c +++ b/runtime/onyx_runtime.c @@ -19,6 +19,7 @@ #include #include #include + #include #include #include #include @@ -120,6 +121,9 @@ ONYX_LIBRARY { ONYX_FUNC(__net_sendto_host) ONYX_FUNC(__net_recv) ONYX_FUNC(__net_recvfrom) + ONYX_FUNC(__net_resolve_start) + ONYX_FUNC(__net_resolve_next) + ONYX_FUNC(__net_resolve_end) ONYX_FUNC(__cptr_make) ONYX_FUNC(__cptr_read) diff --git a/runtime/src/ort_net_linux.h b/runtime/src/ort_net_linux.h index 
85a656e02..78360d9a9 100644 --- a/runtime/src/ort_net_linux.h +++ b/runtime/src/ort_net_linux.h @@ -13,7 +13,7 @@ struct onyx_socket_addr { unsigned int addr; }; -static inline int onyx_socket_domain(int i) { +static inline int onyx_socket_family(int i) { // :EnumDependent switch (i) { case 1: return AF_INET; @@ -23,7 +23,7 @@ static inline int onyx_socket_domain(int i) { } } -static inline int onyx_socket_protocol(int i) { +static inline int onyx_socket_socktype(int i) { // :EnumDependent switch (i) { case 0: return SOCK_STREAM; @@ -32,16 +32,35 @@ static inline int onyx_socket_protocol(int i) { } } +static inline int socket_family_to_onyx(int i) { + // :EnumDependent + switch (i) { + case AF_INET: return 1; + case AF_INET6: return 2; + case AF_UNIX: return 3; + default: return -1; + } +} + +static inline int socket_socktype_to_onyx(int i) { + // :EnumDependent + switch (i) { + case SOCK_STREAM: return 0; + case SOCK_DGRAM: return 1; + default: return -1; + } +} + ONYX_DEF(__net_create_socket, (WASM_I32, WASM_I32, WASM_I32, WASM_I32), (WASM_I32)) { - int domain = onyx_socket_domain(params->data[1].of.i32); - if (domain == -1) goto bad_settings; + int family = onyx_socket_family(params->data[1].of.i32); + if (family == -1) goto bad_settings; - int type = onyx_socket_protocol(params->data[2].of.i32); + int type = onyx_socket_socktype(params->data[2].of.i32); if (type == -1) goto bad_settings; int proto = params->data[3].of.i32; - int sock = socket(domain, type, proto); + int sock = socket(family, type, proto); if (sock >= 0) { *((int *) ONYX_PTR(params->data[0].of.i32)) = sock; @@ -389,7 +408,7 @@ ONYX_DEF(__net_recv, (WASM_I32, WASM_I32, WASM_I32), (WASM_I32)) { if (received < 0) { if (errno == EAGAIN || errno == EWOULDBLOCK) { - results->data[0] = WASM_I32_VAL(received); + results->data[0] = WASM_I32_VAL(-2); } } @@ -412,3 +431,49 @@ ONYX_DEF(__net_recvfrom, (WASM_I32, WASM_I32, WASM_I32, WASM_I32, WASM_I32), (WA return NULL; } + +ONYX_DEF(__net_resolve_start, 
(WASM_I32, WASM_I32), (WASM_I64)) { + char *hostname = ONYX_PTR(params->data[0].of.i32); + char portstr[8] = { 0 }; + bh_snprintf(portstr, 8, "%d", params->data[1].of.i32); + + struct addrinfo *result; + int err = getaddrinfo(hostname, portstr, NULL, &result); + if (err < 0) { + results->data[0] = WASM_I64_VAL(0); + } else { + results->data[0] = WASM_I64_VAL((u64) result); + } + + return NULL; +} + +ONYX_DEF(__net_resolve_next, (WASM_I64, WASM_I32, WASM_I32), (WASM_I64)) { + struct addrinfo *info = (struct addrinfo *) params->data[0].of.i64; + char *buf = ONYX_PTR(params->data[1].of.i32); + + if (!info) { + results->data[0] = WASM_I64_VAL(0); + return NULL; + } + + *(i32 *) &buf[0] = socket_family_to_onyx(info->ai_family); + *(i32 *) &buf[4] = socket_socktype_to_onyx(info->ai_socktype); + *(i32 *) &buf[8] = info->ai_protocol; + memcpy(&buf[12], info->ai_addr, bh_min(info->ai_addrlen, params->data[2].of.i32 - 12)); + + results->data[0] = WASM_I64_VAL((u64) info->ai_next); + return NULL; +} + +ONYX_DEF(__net_resolve_end, (WASM_I64), ()) { + struct addrinfo *info = (struct addrinfo *) params->data[0].of.i64; + + if (!info) return NULL; + + freeaddrinfo(info); + + return NULL; +} + + diff --git a/runtime/src/ort_net_windows.h b/runtime/src/ort_net_windows.h index 4daaa957f..f106a0592 100644 --- a/runtime/src/ort_net_windows.h +++ b/runtime/src/ort_net_windows.h @@ -87,3 +87,15 @@ ONYX_DEF(__net_recv, (WASM_I32, WASM_I32, WASM_I32), (WASM_I32)) { ONYX_DEF(__net_recvfrom, (WASM_I32, WASM_I32, WASM_I32, WASM_I32, WASM_I32), (WASM_I32)) { return NULL; } + +ONYX_DEF(__net_resolve_start, (WASM_I32, WASM_I32), (WASM_I64)) { + return NULL; +} + +ONYX_DEF(__net_resolve_next, (WASM_I64, WASM_I32, WASM_I32), (WASM_I64)) { + return NULL; +} + +ONYX_DEF(__net_resolve_end, (WASM_I64), ()) { + return NULL; +} diff --git a/runtime/src/ort_os.h b/runtime/src/ort_os.h index f2c0c9bfd..8f63a4c12 100644 --- a/runtime/src/ort_os.h +++ b/runtime/src/ort_os.h @@ -159,11 +159,11 @@ 
ONYX_DEF(__futex_wait, (WASM_PTR, WASM_I32, WASM_I32), (WASM_I32)) { struct timespec *t = NULL; if (params->data[2].of.i32 >= 0) { delay.tv_sec = params->data[2].of.i32 / 1000; - delay.tv_nsec = params->data[2].of.i32 * 1000000; + delay.tv_nsec = (params->data[2].of.i32 % 1000) * 1000000; t = &delay; } - int res = syscall(SYS_futex, addr, FUTEX_WAIT, params->data[1].of.i32, t, NULL, 0); + int res = syscall(SYS_futex, addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, params->data[1].of.i32, t, NULL, 0); if (res == 0) { if (*addr == params->data[1].of.i32) results->data[0] = WASM_I32_VAL(0); @@ -203,7 +203,7 @@ ONYX_DEF(__futex_wake, (WASM_PTR, WASM_I32), (WASM_I32)) { int *addr = ONYX_PTR(params->data[0].of.i32); #if defined(_BH_LINUX) - int res = syscall(SYS_futex, addr, FUTEX_WAKE, params->data[1].of.i32, NULL, NULL, 0); + int res = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, params->data[1].of.i32, NULL, NULL, 0); results->data[0] = WASM_I32_VAL(res); #endif @@ -233,7 +233,7 @@ ONYX_DEF(__futex_wake, (WASM_PTR, WASM_I32), (WASM_I32)) { -#if defined(_BH_LINUX) || defined(_BH_DARWIN) +#if defined(_BH_LINUX) static wasm_func_t *wasm_cleanup_func; static void unix_signal_handler(int signo, siginfo_t *info, void *context) { @@ -244,7 +244,7 @@ static void unix_signal_handler(int signo, siginfo_t *info, void *context) { #endif ONYX_DEF(__register_cleanup, (WASM_I32, WASM_I32), (WASM_I32)) { - #if defined(_BH_LINUX) || defined(_BH_DARWIN) + #if defined(_BH_LINUX) int len = (127 < params->data[1].of.i32 ? 
127 : params->data[1].of.i32); char name[128]; diff --git a/scripts/core_tests.onyx b/scripts/core_tests.onyx index eecc5061a..216811e6e 100644 --- a/scripts/core_tests.onyx +++ b/scripts/core_tests.onyx @@ -1,12 +1,10 @@ -#load "core/module" + use core {package, test} -#inject core { - Running_Tests :: true -} +core.Running_Tests :: true main :: () { test.run_tests(); diff --git a/scripts/default.json b/scripts/default.json index 6955e0b79..6b5035938 100644 --- a/scripts/default.json +++ b/scripts/default.json @@ -19,13 +19,13 @@ }, "files": { ".gitignore": "*.wasm\nlib\nbin", - "onyx-lsp.ini": "[lsp]\nmode=project\nonyxFiles=src/main.onyx\nworkingDir=.", - "onyx-pkg.ini": "[metadata]\nname={{name}}\ndescription={{description}}\nurl={{url}}\nauthor={{author}}\nversion=0.0.1\n", + "onyx-pkg.kdl": "package {\n name \"{{name}}\"\n author \"{{author}}\"\n url \"{{url}}\"\n description \"{{description}}\"\n version \"0.0.1\"\n}\n\nconfig {\n dependency_source_path \"./lib\"\n dependency_binary_path \"./bin\"\n}\n\nlsp {\n mode \"project\"\n source_files \"src/main.onyx\"\n include_dirs \"\"\n working_dir \".\"\n}", "src": { - "main.onyx": "#load \"lib/packages\"\n\nuse core {*}\n\nmain :: () {\n println(\"Hello Onyx!\");\n}\n" + "main.onyx": "#load \"./../lib/packages\"\n\nuse core {println}\n\nmain :: () {\n println(\"Hello Onyx!\");\n}\n" } }, "commands": [ - "git init" + "git init", + "onyx sync" ] } diff --git a/scripts/lsp.wasm b/scripts/lsp.wasm new file mode 100644 index 000000000..66e2d1371 Binary files /dev/null and b/scripts/lsp.wasm differ diff --git a/scripts/onyx-pkg.onyx b/scripts/onyx-pkg.onyx index 6438c8078..3d521d562 100644 --- a/scripts/onyx-pkg.onyx +++ b/scripts/onyx-pkg.onyx @@ -1,6 +1,6 @@ -#load "core/module" -#load "core/encoding/ini" + +#load "core:encoding/ini" // // A list of repository URLs to get packages from. @@ -27,623 +27,782 @@ Protocols :: str.[ // // NOTE: This returns a string that ends with the path separator. 
Template_Directory :: () -> str { - return os.path_join(os.path_directory(#file), "pkg_templates") - |> string.alloc_copy(); + return os.path_directory(#file) + |> os.path_join("pkg_templates") + |> str.copy() } -Version :: SemVer.{0, 1, 8} +Version :: SemVer.{0, 1, 13} use core {package, *} -use core.encoding {kdl} +use core.encoding {kdl, json} use runtime global_arguments: struct { - #tag "--config-file" - config_file := "./onyx-pkg.kdl"; -} = .{}; - -main :: (args: [] cstr) { - config = .{}; + @"--config-file" + config_file := "./onyx-pkg.kdl" +} = .{} - arg_parse.arg_parse(args, &global_arguments); +main :: () { + args := os.args() + config = .{} - command := args[0] |> string.as_str(); - arguments := args[1..args.count]; + arg_parse.arg_parse(args, &global_arguments) - if command == "init" { - run_init_command(arguments); - store_config_file(); - return; - } + command := args[0] + arguments := args[1 .. args.count] - loaded_config_file := false; - defer if loaded_config_file do store_config_file(); + loaded_config_file := false + defer if loaded_config_file do store_config_file() - command_procedures := runtime.info.get_procedures_with_tag(Command); - defer delete(&command_procedures); + command_procedures := runtime.info.get_procedures_with_tag(Command) + defer delete(&command_procedures) - command_tag := array.first(command_procedures, [p](p.tag.command == command)); + command_tag := Array.first(command_procedures, [p](p.tag.command == command)) if !command_tag { - run_help_command(arguments); - return; + run_help_command(arguments) + return + } + + if arguments.count > 0 { + if arguments[0] == "--help" { + show_help_for_command(command_tag.tag) + os.exit(1) + } } if command_tag.tag.require_config_file { if !load_config_file() { - error_print("Failed to open {}.\n", global_arguments.config_file); - info_print("", "Is this the root directory of an Onyx project?\n"); - os.exit(1); + error_print("Failed to open {}.\n", global_arguments.config_file) + 
info_print("", "Is this the root directory of an Onyx project?\n") + os.exit(1) } - loaded_config_file = true; + loaded_config_file = true } - assert(command_tag.type == #type ([] cstr) -> void, "BAD TYPE FOR COMMAND PROCEDURE!"); - (*cast(&([] cstr) -> void) &command_tag.func)(arguments); + assert(command_tag.type == #type ([] str) -> void, "BAD TYPE FOR COMMAND PROCEDURE!") + (*cast(&([] str) -> void) &command_tag.func)(arguments) } Command :: struct { - command: str; - description: str; - arguments: str; + command: str + description: str + arguments: str - argument_descriptions: str = ""; + argument_descriptions: str = "" - require_config_file := true; + require_config_file := true } -#tag Command.{ "help", "Show help.", "", require_config_file=false } -run_help_command :: (args: [] cstr) { - printf("onyx pkg version {}\n", Version); - printf("Package dependency resolver and synchronizer for Onyx.\n\nUsage in\n"); +@Command.{ "help", "Show this help message", "", require_config_file=false } +run_help_command :: (args: [] str) { + printf("Onyx toolchain version {}\n", Version) + printf("\n") + printf("Package dependency resolver and synchronizer for Onyx.\n") + printf("\n") + printf("Usage: onyx pkg cmd [..flags]\n") + printf("\n") + printf("Commands:\n") - command_procedures := runtime.info.get_procedures_with_tag(Command); - defer delete(&command_procedures); - for command_procedures { - printf("{}\n", it.tag.description); - printf(" onyx pkg {} {}\n", it.tag.command, it.tag.arguments); + command_procedures := runtime.info.get_procedures_with_tag(Command) + defer delete(&command_procedures) - if it.tag.argument_descriptions.count > 0 { - lines := string.split(it.tag.argument_descriptions, #char "\n", context.temp_allocator); + max_command_width := + iter.as_iter(command_procedures) + |> iter.map(x => x.tag.command.length) + |> iter.fold(0, (x, y) => math.max(x, y)) - print("\n"); - for line in lines { - if line.count == 0 do continue; - printf(" {}\n", line); - 
} - print("\n"); - } + command_fmt := tprintf("{{w{}}}", max_command_width + 2) - print("\n"); + for command_procedures { + color_print( + .{ .White, " " }, + .{ .Blue, tprintf(command_fmt, it.tag.command) }, + .{ .White, " " }, + .{ .White, it.tag.description }, + .{ .White, "\n" }, + ) } } -#tag Command.{ "init", "Initialize a new project.", "", require_config_file=false } -run_init_command :: (args: [] cstr) { +show_help_for_command :: (cmd: &Command) { + printf("Onyx toolchain version {}\n", Version) + printf("\n") + printf("Package dependency resolver and synchronizer for Onyx.\n") + printf("\n") + color_print( + .{ .White, "Usage: " }, + .{ .Blue, "onyx pkg " }, + .{ .Green, cmd.command }, + .{ .White, " " }, + .{ .Yellow, cmd.arguments }, + .{ .White, "\n\n" }, + ) + + printf("Description: {}\n", cmd.description) + + println(cmd.argument_descriptions) +} + + +@Command.{ "init", "Initialize a new project in the current directory", "", require_config_file=false } +run_init_command :: (args: [] str) { if os.file_exists(global_arguments.config_file) { - error_print("Config file present; project already initialized.\n"); - return; + error_print("Config file present; project already initialized.\n") + return } - printf("Creating new project manifest in {}.\n\n", global_arguments.config_file); + printf("Creating new project manifest in {}.\n\n", global_arguments.config_file) read_field :: macro (f: str, dest: &$T, default: T) { while true { - print(f); + print(f) line := r->read_line(consume_newline=true, allocator=context.temp_allocator) - |> string.strip_whitespace(); + |> str.strip_whitespace() if !line { - *dest = default; - break; + *dest = default + break } - if conv.parse_any(dest, T, line, context.allocator) do break; + if conv.parse_any(dest, T, line, context.allocator) do break if T == str { - *cast(&str) dest = string.alloc_copy(line); - break; + *cast(&str) dest = str.copy(line) + break } } } - config.dependency_binary_path = "./bin"; - 
config.dependency_source_path = "./lib"; + config.dependency_binary_path = "./bin" + config.dependency_source_path = "./lib" // @TODO // Validation for these fields. - r := io.reader_make(&stdio.stream); - read_field("Package name: ", &config.package_name, ""); - read_field("Package description: ", &config.package_description, ""); - read_field("Package url: ", &config.package_url, ""); - read_field("Package author: ", &config.package_author, ""); - read_field("Package version (0.0.1): ", &config.package_version, .{0, 0, 1}); + r := io.reader_make(&stdio.stream) + read_field("Package name: ", &config.package_name, "") + read_field("Package description: ", &config.package_description, "") + read_field("Package url: ", &config.package_url, "") + read_field("Package author: ", &config.package_author, "") + read_field("Package version (0.0.1): ", &config.package_version, .{0, 0, 1}) + + store_config_file() } -#tag Command.{ "add", "Add a new dependency to the project.", "package-url [version]", +@Command.{ "add", "Add a new dependency to the project", "package-url [version]", """ -package-url Git repository to clone for the package. This can be anything that - git knows how to clone. -verion Semantic version number (Major.Minor.Patch). If omitted, the most recent - version is used. + package-url Git repository to clone for the package. This can be anything that + git knows how to clone. + + verion Semantic version number (Major.Minor.Patch). If omitted, the most recent + version is used. 
""" } -run_add_command :: (args: [] cstr) { +run_add_command :: (args: [] str) { if args.count < 1 { - error_print("Expected package URL"); - return; + error_print("Expected package URL") + return } - dep_name := string.as_str(args[0]); - dep_repo := Git.get_full_repo_uri(dep_name); + dep_name := args[0] + dep_repo := Git.get_full_repo_uri(dep_name) - version: SemVer; + version: SemVer if args.count > 1 { - if !conv.parse_any(&version, string.as_str(args[1])) { - error_print("Failed to parse version number given: {}\n", string.as_str(args[1])); - return; + if !conv.parse_any(&version, args[1]) { + error_print("Failed to parse version number given: {}\n", args[1]) + return } } else { - version = Git.get_latest_version(dep_repo); + version = Git.get_latest_version(dep_repo) } if config.dependencies->has(dep_name) { - error_print("Dependency '{}' already specified at version '{}'.\n", dep_name, config.dependencies[dep_name]?.version); + error_print("Dependency '{}' already specified at version '{}'.\n", dep_name, config.dependencies[dep_name]?.target) } elseif version->is_zero() { - error_print("Unable to find latest version of '{}'\n", string.as_str(args[0])); + error_print("Unable to find latest version of '{}'\n", args[0]) } else { config.dependencies[dep_name] = .{ name = dep_name, - version = version, + target = .{ version = version }, source = .{ Git = dep_repo } - }; - info_print("Added", "'{}' version {}\n", dep_name, version); + } + info_print("Added", "'{}' version {}\n", dep_name, version) } } -#tag Command.{ "remove", "Remove a dependency.", "package-or-url", +@Command.{ "remove", "Remove a dependency", "package-or-url", """ -package-or-url Git repository name or package name on disk to remove. + package-or-url Git repository name or package name on disk to remove. 
""" } -run_remove_command :: (args: [] cstr) { +run_remove_command :: (args: [] str) { if args.count < 1 { - error_print("Expected package name."); - return; + error_print("Expected package name.\n") + return } - dep := string.as_str(args[0]); + dep := args[0] if config.dependencies->has(dep) { - version := config.dependencies->get(dep)->unwrap().version; - config.dependencies->delete(dep); - info_print("Removed", "'{}' version {}\n", dep, version); - return; + version := config.dependencies->get(dep)->unwrap().target.version + config.dependencies->delete(dep) + info_print("Removed", "'{}' version {}\n", dep, version) + return } - error_print("Dependency '{}' is not currently used.\n", dep); + error_print("Dependency '{}' is not currently used.\n", dep) } -#tag Command.{ "show", "Show dependencies and versions.", "" } -run_show_command :: (args: [] cstr) { - printf("Package name : {}\n", config.package_name); - printf("Package description : {}\n", config.package_description); - printf("Package url : {}\n", config.package_url); - printf("Package author : {}\n", config.package_author); - printf("Package version : {}\n", config.package_version); - print("\n"); +@Command.{ "show", "Show dependencies and versions", "" } +run_show_command :: (args: [] str) { + printf("Package name : {}\n", config.package_name) + printf("Package description : {}\n", config.package_description) + printf("Package url : {}\n", config.package_url) + printf("Package author : {}\n", config.package_author) + printf("Package version : {}\n", config.package_version) + print("\n") max_width := array.fold(config.dependencies.entries, 0, (d, w) => { - return math.max(d.key.count, w); - }); - format_str := tprintf(" {{w{}}} | {{}}\n", max_width); + return math.max(d.key.count, w) + }) + format_str := tprintf(" {{w{}}} | {{}}\n", max_width) - print("Dependencies:\n"); + print("Dependencies:\n") for config.dependencies.entries { - printf(format_str, it.key, it.value); + printf(format_str, it.key, 
it.value) } - print("\n"); + print("\n") } -#tag Command.{ "update", "Update dependencies to newest compatible versions.", "" } -// @Feature // Add "locked" dependencies that will never update? -run_update_command :: (args: [] cstr) { - info_print("Info", "Updating dependencies to newest compatible versions.\n"); +@Command.{ "update", "Update dependencies to newest compatible versions", "" } +run_update_command :: (args: [] str) { + info_print("Info", "Updating dependencies to newest compatible versions.\n") for& config.dependencies.entries { - repo := it.value.source.Git ?? [] { continue; }; - new_version := Git.get_latest_compatible_version(repo, it.value.version); + if it.value.locked { + info_print("Skipping", "{} because it is locked.\n", it.key) + continue + } + + if it.value.target.branch { + info_print("Skipping", "{} because it references a branch, not a version.\n", it.key) + continue + } - if it.value.version != new_version { - info_print("Update", "{} {} -> {}\n", it.key, it.value.version, new_version); + repo := it.value.source.Git ?? [] { continue; } + version := it.value.target.version! + new_version := Git.get_latest_compatible_version(repo, version) + + if version != new_version { + info_print("Update", "{} {} -> {}\n", it.key, version, new_version) } - it.value.version = new_version; + it.value.target = .{ version = new_version } } } -#tag Command.{ "sync", "Synchronize local dependency folder.", "[--clean]", +@Command.{ "sync", "Synchronize local dependency folder", "[..flags]", """ ---clean Remove directories of unneeded dependencies. This is not the default - behavior, as it could break builds. - ---skip-native Skips compiling native libraries during synchronization. +Flags: + --clean Remove directories of unneeded dependencies. This is not the default + behavior, as it could break builds. + + --skip-native Skips compiling native libraries during synchronization. 
""" } -run_sync_command :: (args: [] cstr) { +run_sync_command :: (args: [] str) { Sync_Options :: struct { - #tag "--clean" - clean := false; + @"--clean" + clean := false - #tag "--skip-native" - skip_native := false; + @"--skip-native" + skip_native := false } - options: Sync_Options; - arg_parse.arg_parse(args, &options); + options: Sync_Options + arg_parse.arg_parse(args, &options) if options.clean { - info_print("Cleaning", "Removing {} directory\n", config.dependency_source_path); - os.remove_directory(config.dependency_source_path); + info_print("Cleaning", "Removing {} directory\n", config.dependency_source_path) + os.remove_directory(config.dependency_source_path) } To_Install :: struct { - use pack: Package; - downgrade_if_necessary: bool; + use pack: Package + downgrade_if_necessary: bool } - dependencies_to_install := make([..] To_Install); - dependencies_installed := make(Map(str, SemVer)); - defer { - delete(&dependencies_to_install); - delete(&dependencies_installed); - } + use dependencies_to_install := make([..] To_Install) + use dependencies_installed := make(Map(str, Dependency.Target)) for& config.dependencies.entries { dependencies_to_install << .{ - .{it.value.source, it.value.version}, true - }; + .{it.value.source, it.value.target}, true + } } while dependencies_to_install.count > 0 { - alloc.clear_temp_allocator(); - to_install := array.delete(&dependencies_to_install, 0); + alloc.clear_temp_allocator() + to_install := array.delete(&dependencies_to_install, 0) - repo := to_install.source.Git ?? [] { continue; }; + repo := to_install.source.Git ?? 
[] { continue; } if dependencies_installed->has(repo) { - continue; + continue } - success, installed_folder := install_package(to_install.pack, to_install.downgrade_if_necessary, options.skip_native); + success, installed_folder := install_package(to_install.pack, to_install.downgrade_if_necessary, options.skip_native) if !success { - error_print("Aborting sync.\n"); - return; + error_print("Aborting sync.\n") + return } inner_config := read_config_from_installed_dependency(installed_folder) ?? [] { - error_print("Misconfigured onyx-pkg.kdl in '{}'. Omitting.\n", repo); - continue; - }; + error_print("Misconfigured onyx-pkg.kdl in '{}'. Omitting.\n", repo) + continue + } if inner_config.package_version->is_zero() { - error_print("Expected a version for '{}' that is not '0.0.0'.\n", repo); - continue; + error_print("Expected a version for '{}' that is not '0.0.0'.\n", repo) + continue } for& inner_config.dependencies.entries { - key := it.value.source.Git ?? [] { continue; }; - dep := dependencies_installed[key]; - if dep { + key := it.value.source.Git ?? [] { continue; } + dep := dependencies_installed[key] + if !dep || it.value.target.branch { + dependencies_to_install << .{ .{ it.value.source, it.value.target }, false } + } elseif dep { + version := it.value.target.version ?? [] { continue } + // TODO : Check if this is right? Could this accidentally forcefully upgrade a package? - if it.value.version->is_newer(dep->unwrap()) { - uninstall_package(.{it.value.source, it.value.version}); - dependencies_installed->delete(key); - dependencies_to_install << .{ .{it.value.source, it.value.version}, false }; + if version->is_newer(dep!.version!) 
{ + uninstall_package(.{it.value.source, it.value.target}) + dependencies_installed->delete(key) + dependencies_to_install << .{ .{it.value.source, it.value.target}, false } - } elseif !(it.value.version->is_compatible(dep->unwrap())) { + } elseif !(version->is_compatible(dep!.version!)) { // TODO: Explain this more - error_print("Different major versions of '{}' being used!\n", it.key); - os.exit(1); + error_print("Different major versions of '{}' being used!\n", it.key) + os.exit(1) } - } else { - dependencies_to_install << .{ .{it.value.source, it.value.version}, false }; } } - dependencies_installed[repo] = to_install.version; + dependencies_installed->put(repo, to_install.target) + } + + if !run_tool_installation(".") { + error_print("Aborting sync.\n") + return } - build_package_file_to_load(); + build_package_file_to_load() } -#tag Command.{ "rebuild", "Rebuild native library for a package", "package-or-url", +@Command.{ "rebuild", "Rebuild native library for a package", "package-or-url", """ -package-or-url Git repository name or package name on disk to remove. + package-or-url Git repository name or package name on disk to rebuild. 
""" } -run_rebuild_command :: (args: [] cstr) { +run_rebuild_command :: (args: [] str) { if args.count < 1 { - error_print("Expected package name."); - return; + error_print("Expected package name.\n") + return } - dep := string.as_str(args[0]); + dep := args[0] if config.dependencies->has(dep) { - dep = config.dependencies->get(dep)->unwrap().source.Git->unwrap(); + dep = config.dependencies->get(dep)->unwrap().source.Git->unwrap() } - info_print("Rebuild", "{}\n", dep); + info_print("Rebuild", "{}\n", dep) if success, err := rebuild_native_library(dep); success { - info_print("Rebuilt", "{}\n", dep); + info_print("Rebuilt", "{}\n", dep) } else { - error_print("Rebuild failed.\n", dep); - println(err); - os.exit(1); + error_print("Rebuild failed.\n", dep) + println(err) + os.exit(1) } } -#tag Command.{ "publish", "Bump version number and create a publishable version of the package", "" } -run_publish_command :: (args: [] cstr) { +@Command.{ "publish", "Create a published version of this package", "" } +run_publish_command :: (args: [] str) { // @TODO // Better error handling and reporting, as this is a delicate process. if !os.dir_exists(".git") { - error_print("Not in Git repository.\n"); - printf("It does not look like you are in a Git repository. In order to publish packages\n"); - printf("with onyx-pkg, you have to initialize a Git repository in the current directory.\n\n"); - return; + error_print("Not in Git repository.\n") + printf("It does not look like you are in a Git repository. In order to publish packages\n") + printf("with onyx-pkg, you have to initialize a Git repository in the current directory.\n\n") + return } - r := io.reader_make(&stdio.stream); + r := io.reader_make(&stdio.stream) while true { - printf("Is this a m[a]jor, m[i]nor, or [p]atch release? or [c]ancel? (a/i/p/c) "); + printf("Is this a m[a]jor, m[i]nor, or [p]atch release? or [c]ancel? 
(a/i/p/c) ") input := r->read_line(consume_newline=true, inplace=true) - |> string.strip_whitespace() - |> string.to_lowercase(); + |> str.strip_whitespace() + |> str.to_lowercase() switch input { case "a" { // Major version bump - config.package_version->bump_major(); + config.package_version->bump_major() } case "i" { // Minor version bump - config.package_version->bump_minor(); + config.package_version->bump_minor() } case "p" { // Patch version bump - config.package_version->bump_patch(); + config.package_version->bump_patch() } case "c" { - return; + return } - case #default do continue; + case _ do continue } - break; + break } - store_config_file(); + store_config_file() - info_print("Publishing", "Creating new published version\n"); + info_print("Publishing", "Creating new published version\n") if Git.publish_version() { - info_print("Published", "Successfully published new version.\n"); + info_print("Published", "Successfully published new version.\n") } else { - error_print("Failed to publish new version.\n"); + error_print("Failed to publish new version.\n") } } -#tag Command.{ "list-versions", "List all installable versions of a remote package.", "[package-url]", require_config_file=false } -run_list_versions :: (args: [] cstr) { +@Command.{ "list-versions", "List all installable versions of a remote package", "[package-url]", require_config_file=false } +run_list_versions :: (args: [] str) { if args.count < 1 { - return; + return } - pack := string.as_str(args[0]); + pack := args[0] - pack = Git.get_full_repo_uri(pack); - versions := Git.get_available_versions(pack); - defer delete(&versions); + pack = Git.get_full_repo_uri(pack) + versions := Git.get_available_versions(pack) + defer delete(&versions) - array.sort(versions, SemVer.compare); + array.sort(versions, SemVer.compare) for versions { - printf("{}\n", it); + printf("{}\n", it) } } -#tag Command.{ - "migrate", "Migrate an old onyx-pkg.ini to the new onyx-pkg.kdl", "", +@Command.{ + "migrate", 
"Migrate an old Onyx package", "", require_config_file = false, } -run_migrate_command :: (args: [] cstr) { - config = read_old_config("./onyx-pkg.ini")->unwrap(); - store_config_file(); +run_migrate_command :: (args: [] str) { + config = read_old_config("./onyx-pkg.ini")->unwrap() + store_config_file() } -#tag Command.{ - "new", "Create a new project from a template in the current directory.", "(template_name | --list) [directory]", +@Command.{ + "new", "Create a new project from a template in the specified directory", "(template_name | --list | --create) [directory]", """ -template_name Template name to create. - -directory Directory in which to place the new package. Defaults to '.'. +Arguments: + template_name Template name to create. + directory Directory in which to place the new package. Defaults to '.'. """, require_config_file = false } -run_new_command :: (args: [] cstr) { +run_new_command :: (args: [] str) { if args.count >= 1 { - if string.as_str(args[0]) == "--list" { - printf("List of installed templates\n"); + if args[0] == "--list" { + printf("List of installed templates\n") for os.list_directory(Template_Directory()) { - if !string.ends_with(it->name(), ".json") do continue; + if !str.ends_with(it->name(), ".json") do continue + + printf(" {}\n", os.path_basename(it->name())) + } + return + } - printf(" {}\n", os.path_basename(it->name())); + if args[0] == "--create" { + destination := do { + if args.count == 1 do return "template.json" + return args[1] } - return; + + printf("Creating '{}' from the current directory\n", destination) + + files: Map(str, str) + to_process := make([..] 
str) + to_process->push(".") + + for folder in Iterator.from(&to_process) { + for entry in os.list_directory(folder) { + name := entry->name() + fullpath := os.path_join(folder, name) + + if entry.type == .Directory { + if name == ".git" || name == "lib" || name == "bin" { + info_print("Skipping", "Skipping folder {}\n", name) + continue + } + + to_process->push(fullpath) + } + + if entry.type == .RegularFile { + files->put( + fullpath + os.get_contents(fullpath) + ) + } + } + } + + use output := os.open(destination, .Write)->expect("Failed to open output file") + use writer := io.Writer.make(&output, 0) + + writer->write("{\"variables\":{},") + writer->write("\"commands\":[],") + writer->write("\"files\":") + json.encode(&writer, files) + writer->write("}") + + return } } - template_name := "default"; - directory := "."; + template_name := "default" + directory := "." if args.count >= 1 { - template_name = string.as_str(args[0]); + template_name = args[0] } if args.count >= 2 { - directory = string.as_str(args[1]); + directory = args[1] } if os.list_directory(directory)->count(x => true) > 0 { - error_print("Refusing to initialize project in non-empty directory.\n"); - return; + error_print("Refusing to initialize project in non-empty directory.\n") + return } - template_dir := Template_Directory(); - template_file := os.path_join(template_dir, tprintf("{}.json", template_name)); + template_dir := Template_Directory() + template_file := os.path_join(template_dir, tprintf("{}.json", template_name)) if !os.file_exists(template_file) { - error_print("Template '{}' not found in {}\n", template_name, template_dir); - return; + error_print("Template '{}' not found in {}\n", template_name, template_dir) + return } use core.encoding {json} - template, err := json.decode_with_error(os.get_contents(template_file)); - if err->has_error() { - error_print("Failed to parse template file.\n"); - print(err->message()); - return; + template_res := 
json.decode_with_result(os.get_contents(template_file)) + if template_res.Err { + error_print("Failed to parse template file.\n") + print(template_res.Err!) + return } - vars := make(Map(str, str)); - input := io.reader_make(&stdio.stream); + vars := make(Map(str, str)) + input := io.reader_make(&stdio.stream) - template_variables := template.root["variables"]->as_map(); + template := template_res.Ok! + template_variables := template.root["variables"]->as_map() for template_variables->as_iter() { - assert(it.value["type"]->as_str() == "string", "Only string types are supported right now in template variables."); + assert(it.value["type"]->as_str() == "string", "Only string types are supported right now in template variables.") - printf("{}: ", it.value["description"]->as_str()); + printf("{}: ", it.value["description"]->as_str()) line := input->read_line(consume_newline=true, allocator=context.temp_allocator) - |> string.strip_whitespace(); + |> str.strip_whitespace() - vars->put(it.key, line); + vars->put(it.key, line) } if !os.dir_exists(directory) { - os.dir_create(directory); + os.dir_create(directory) } - populate_directory(directory, template.root["files"], &vars); + populate_directory(directory, template.root["files"], &vars) for template.root["commands"]->as_array_iter() { - command := it->as_str(); - info_print("Running", "{}\n", command); + command := it->as_str() + info_print("Running", "{}\n", command) - args := string.split(command, ' ', context.temp_allocator); - cmd := args[0]; - args = args[1 .. args.count]; + args := command->split(' ', context.temp_allocator) + cmd := args[0] + args = args[1 .. 
args.count] - run_proc := os.process_spawn(cmd, args, starting_directory=directory); - run_result := os.process_wait(&run_proc); + run_proc := os.process_spawn(cmd, args, starting_directory=directory) + run_result := os.process_wait(&run_proc) if run_result != .Success { - error_print("Failed to run '{}'\n", command); + error_print("Failed to run '{}'\n", command) } } populate_directory :: (dir: str, files: json.Value, vars: &Map(str, str)) { for files->as_map_iter() { - destination := os.path_join(dir, it.first); + destination := os.path_join(dir, it.first) switch it.second->type() { case .String { - info_print("Creating", tprintf("{}\n", destination)); - - contents := process_contents(it.second->as_str(), vars); - for os.with_file(destination, .Write) { - io.stream_write(it, contents); + info_print("Creating", tprintf("{}\n", destination)) + + { + folders := it.first->split('/') + folders = folders[0 .. folders.count - 1] + target := dir + for folder in folders { + target = os.path_join(target, folder) + if !os.dir_exists(target) { + os.dir_create(target) + } + } } + + contents := process_contents(it.second->as_str(), vars) + use file := os.open(destination, .Write).Ok? + io.stream_write(&file, contents) } case .Object { - os.dir_create(destination); - populate_directory(destination, it.second, vars); + os.dir_create(destination) + populate_directory(destination, it.second, vars) } } } } process_contents :: (contents: str, vars: &Map(str, str)) -> str { - output: dyn_str; + output: dyn_str - to_process := contents; + to_process := contents while to_process { - to_output, to_process~ := string.bisect(to_process, "{{"); - string.append(&output, to_output); + to_output, to_process~ := str.bisect(to_process, "{{") + dyn_str.append(&output, to_output) - var_name, to_process~ := string.bisect(to_process, "}}"); - string.append(&output, vars->get(var_name) ?? ""); + var_name, to_process~ := str.bisect(to_process, "}}") + dyn_str.append(&output, vars->get(var_name) ?? 
"") } - return output; + return output } } -#tag Command.{ - "build", "Builds the project according to the build configuration specified the package file.", "[build_config]", +@Command.{ + "build", "Builds the package using the build configuration specified the package file", "[build_config]", """ -build_config The name of the configuration to use (defaults to 'default'). +Arguments: + build_config The name of the configuration to use (defaults to 'default'). """ } -run_build_command :: (args: [] cstr) { - build_config := "default"; +run_build_command :: (args: [] str) { + build_config := "default" if args.count >= 1 { - build_config = string.as_str(args[0]); + build_config = args[0] } - maybe_bc := config.build_configs[build_config]; + run_build_configuration(build_config) +} + +run_build_configuration :: #match #local {} + +#overload +run_build_configuration :: (build_config: str) -> bool { + maybe_bc := config.build_configs[build_config] if !maybe_bc { - error_print("Unrecognized build configuration '{}'.\n", build_config); - return; + error_print("Unrecognized build configuration '{}'\n", build_config) + return false } - info_print("Building", "Compiling with build configuration '{}'.\n", build_config); + bc := maybe_bc->unwrap() + return run_build_configuration(bc, build_config) +} - bc := maybe_bc->unwrap(); +#overload +run_build_configuration :: (bc: BuildConfig, build_config := "") -> bool { + switch bc { + case .CompileOnyx as c { + info_print("Building", "Compiling '{}'\n", c.target) - args: [..] 
str; - args << "build"; + command := os.command() + command->path("onyx") - for bc.sources do args << it; - for bc.include { args << "-I"; args << it; } - for bc.defines do args << tprintf("-D{}", it); - for bc.args do args << it; - - args << "-r"; - args << bc.runtime; - - args << "-o"; - args << bc.target; - - p := os.process_spawn("onyx", args); - r := io.reader_make(&p); - output := io.read_all(&r); - switch os.process_wait(&p) { - case .Success { - info_print("Built", "Successfully compiled with build configuration '{}'.\n", build_config); + if c.working_dir { + command->dir(c.working_dir) + } + + command->args(.[ "build" ]) + for c.include do command->args(.["-I", it]) + for c.defines do command->args(.[tprintf("-D{}", it)]) + + command->args(c.args) + + command->args(.["-r", c.runtime]) + command->args(.["-o", c.target]) + + command->args(c.sources) + + switch command->output() { + case .Ok as output { + info_print("Built", "Compiled '{}'\n", c.target) + return true + } + + case .Err as e { + error_print("Failed to compile '{}'\n", c.target) + println(e.output) + return false + } + } } - case #default { - error_print("Failed to compile with build configuration '{}'.\n", build_config); - println(output); + case .RunCommands as cmds { + for cmd in cmds { + cmd_str := str.join(cmd, " ") + info_print("Executing", "{}\n", cmd_str) + + command := os.command() + command->path(cmd[0]) + command->args(cmd[1 .. 
cmd.length]) + + switch command->output() { + case .Ok as output { + info_print("Executed", "{}\n", cmd_str) + } + + case .Err as e { + error_print("Failed to run '{}'\n", str.join(cmd, " ")) + println(e.output) + return false + } + } + } + + return true + } + + case .Collection as steps { + info_print("Running", "Running collection '{}'\n", build_config) + for step in steps { + if !run_build_configuration(step) { + return false + } + } + + return true } } } @@ -662,307 +821,353 @@ run_build_command :: (args: [] cstr) { } -install_package :: (pack: Package, downgrade_if_necessary := false, skip_native_compilation := false) -> (bool, installed_folder: str) { +install_package :: (pack: Package, downgrade_if_necessary := false, skip_native_compilation := false) -> (bool, str) { // // Currently this only supports Git-based packages. repo := pack.source.Git ?? [] { - return return false, ""; - }; - package_folder := get_install_path_of_repo(repo); + return return false, "" + } + package_folder := get_install_path_of_repo(repo) - if os.file_exists(package_folder) { - installed_version := get_installed_version_of_package(repo); + switch pack.target { + case .version as version { + if os.file_exists(package_folder) { + installed_version := get_installed_version_of_package(repo) - if installed_version == pack.version { - info_print("Exists", "{} {}\n", repo, installed_version); + if installed_version == version { + info_print("Exists", "{} {}\n", repo, installed_version) - success := true; - if !native_library_is_up_to_date(package_folder) { - success = run_native_library_installation(package_folder); + success := true + if !native_library_is_up_to_date(package_folder) { + success = run_native_library_installation(package_folder) + } + + return success, package_folder } - return success, package_folder; - } + if installed_version->is_newer(version) && !downgrade_if_necessary { + error_print("Refusing to downgrade '{}' from {} to {}.\n", repo, installed_version, version) + 
return false, "" + } - if installed_version->is_newer(pack.version) && !downgrade_if_necessary { - error_print("Refusing to downgrade '{}' from {} to {}.\n", repo, installed_version, pack.version); - return false, ""; + // :PRETTY + verb := "Upgrading" if version->is_newer(installed_version) else "Downgrading" + info_print(verb, "{} {} -> {}\n", repo, installed_version, version) + uninstall_package(pack) } - // :PRETTY - verb := "Upgrading" if pack.version->is_newer(installed_version) else "Downgrading"; - info_print(verb, "{} {} -> {}\n", repo, installed_version, pack.version); - uninstall_package(pack); + if !Git.clone_version(repo, version) { + error_print("Failed to fetch {} version {}.\n", repo, version) + return false, "" + } } - if !Git.clone_version(repo, pack.version) { - error_print("Failed to fetch {} version {}.\n", repo, pack.version); - return false, ""; + case .branch as branch { + if !Git.clone_branch(repo, branch) { + error_print("Failed to fetch {} on branch {}.\n", repo, branch) + return false, "" + } } - - if skip_native_compilation do return true, package_folder; + } + + if skip_native_compilation do return true, package_folder - install_success := run_native_library_installation(package_folder); - return install_success, package_folder; + native_install_success := run_native_library_installation(package_folder) + tool_install_success := run_tool_installation(package_folder) + + return native_install_success && tool_install_success, package_folder } uninstall_package :: (pack: Package) -> bool { - repo := pack.source.Git?; - folder_name := strip_protocol_and_www_from_repo(repo); - package_folder := os.path_join(config.dependency_source_path, folder_name); + repo := pack.source.Git? + folder_name := strip_protocol_and_www_from_repo(repo) + package_folder := os.path_join(config.dependency_source_path, folder_name) if os.file_exists(package_folder) { // Should this check if the version to be deleted is the one that is actually installed? 
- attempt_remove_native_library(package_folder); - os.remove_directory(package_folder); + attempt_remove_native_library(package_folder) + os.remove_directory(package_folder) // This should maybe cleanup the parent directory if it is now empty. - return true; + return true } - return false; + return false } attempt_remove_native_library :: (package_folder: str) -> bool { - inner_config := read_config_from_installed_dependency(package_folder)?; + inner_config := read_config_from_installed_dependency(package_folder)? - if !inner_config.native_library do return false; + if !inner_config.native_library do return false - target := os.path_join(config.dependency_binary_path, tprintf("{}{}", inner_config.native_library->unwrap(), native_library_suffix)); - os.remove_file(target); - return true; + target := os.path_join(config.dependency_binary_path, tprintf("{}{}", inner_config.native_library->unwrap(), native_library_suffix)) + os.remove_file(target) + return true } rebuild_native_library :: (folder: str) -> (bool, str) { - cleaned_folder := get_install_path_of_repo(folder); + cleaned_folder := get_install_path_of_repo(folder) - attempt_remove_native_library(cleaned_folder); + attempt_remove_native_library(cleaned_folder) - success, build_error := run_native_library_installation(cleaned_folder); - return success, build_error; + success, build_error := run_native_library_installation(cleaned_folder) + return success, build_error } get_installed_version_of_package :: (package_path: str) -> SemVer { - inner_config := read_config_from_installed_dependency(get_install_path_of_repo(package_path)); - return inner_config?.package_version; + inner_config := read_config_from_installed_dependency(get_install_path_of_repo(package_path)) + return inner_config?.package_version } read_config_from_installed_dependency :: (dependency_folder: str) -> ? 
Config { - return load_config(tprintf("{}/onyx-pkg.kdl", dependency_folder)); + return load_config(tprintf("{}/onyx-pkg.kdl", dependency_folder)) } strip_protocol_and_www_from_repo :: (repo: str) -> str { - to_return := repo; + to_return := repo - if string.contains(to_return, "://") { - _, to_return~ := string.bisect(to_return, "://"); + if str.contains(to_return, "://") { + _, to_return~ := str.bisect(to_return, "://") } - if string.starts_with(to_return, "www.") { - to_return = to_return["www.".count .. to_return.count]; + if str.starts_with(to_return, "www.") { + to_return = to_return["www.".count .. to_return.count] } - if string.ends_with(to_return, ".git") { - to_return = to_return[0 .. to_return.count - ".git".count]; + if str.ends_with(to_return, ".git") { + to_return = to_return[0 .. to_return.count - ".git".count] } #if runtime.compiler_os == .Windows { - to_return = string.alloc_copy(to_return); + to_return = str.copy(to_return) - string.replace(to_return, '/', '\\'); + str.replace(to_return, '/', '\\') } - return to_return; + return to_return } get_install_path_of_repo :: (repo: str) -> str { - return os.path_join(config.dependency_source_path, strip_protocol_and_www_from_repo(repo)); + return os.path_join(config.dependency_source_path, strip_protocol_and_www_from_repo(repo)) } run_native_library_installation :: (folder: str) -> (bool, str) { inner_config := read_config_from_installed_dependency(folder) ?? [] { - error_print("Failed to parse onyx-pkg.kdl in '{}'.\n", folder); - return return false, ""; - }; + error_print("Failed to parse onyx-pkg.kdl in '{}'.\n", folder) + return return false, "" + } - if !inner_config.native_library_build do return true, ""; + if !inner_config.native_library_build do return true, "" info_print("Install", "Running installation of '{}'\n", folder); - args := string.split(inner_config.native_library_build->unwrap(), #char " ", context.temp_allocator); - cmd := args[0]; - args = args[1 .. 
args.count]; + args := str.split(inner_config.native_library_build->unwrap(), ' ', context.temp_allocator) + cmd := args[0] + args = args[1 .. args.count] { - build_proc := os.process_spawn(cmd, args, starting_directory=folder); - build_result := os.process_wait(&build_proc); + build_proc := os.process_spawn(cmd, args, starting_directory=folder) + build_result := os.process_wait(&build_proc) - build_reader := io.reader_make(&build_proc); - defer io.reader_free(&build_reader); - build_info := build_reader->read_all(); + build_reader := io.reader_make(&build_proc) + defer io.reader_free(&build_reader) + build_info := build_reader->read_all() if build_result != .Success { - error_print("Failed to build native library in {}.\n", folder); - return false, build_info; + error_print("Failed to build native library in {}.\n", folder) + return false, build_info } } if !os.dir_exists(config.dependency_binary_path) { if !os.dir_create(config.dependency_binary_path) { - error_print("Failed to create native library directory, {}.\n", config.dependency_binary_path); - return false, ""; + error_print("Failed to create native library directory, {}.\n", config.dependency_binary_path) + return false, "" } } - source_path := tprintf("{}/{}{}", folder, inner_config.native_library->unwrap(), native_library_suffix); - dest_path := tprintf("{}/{}{}", config.dependency_binary_path, inner_config.native_library->unwrap(), native_library_suffix); - success := os.rename_file(source_path, dest_path); + source_path := tprintf("{}/{}{}", folder, inner_config.native_library->unwrap(), native_library_suffix) + dest_path := tprintf("{}/{}{}", config.dependency_binary_path, inner_config.native_library->unwrap(), native_library_suffix) + success := os.rename_file(source_path, dest_path) if !success { - error_print("Failed to move native library to final destination.\n {} -> {}\n", source_path, dest_path); + error_print("Failed to move native library to final destination.\n {} -> {}\n", source_path, 
dest_path) } - return success, ""; + return success, "" } native_library_is_up_to_date :: (folder: str) -> bool { - inner_config := read_config_from_installed_dependency(folder)?; + inner_config := read_config_from_installed_dependency(folder)? // If no native library, no worries. - if !inner_config.native_library do return true; + if !inner_config.native_library do return true + + target := os.path_join(config.dependency_binary_path, tprintf("{}{}", inner_config.native_library->unwrap(), native_library_suffix)) + inner_package_file := tprintf("{}/onyx-pkg.kdl", folder) + + target_stat, package_stat: os.FileStat + if !os.file_stat(target, &target_stat) do return false + if !os.file_stat(inner_package_file, &package_stat) do return false + + return target_stat.modified_time >= package_stat.modified_time +} + +run_tool_installation :: (folder: str) -> bool { + inner_config := read_config_from_installed_dependency(folder) ?? [] { + error_print("Failed to parse onyx-pkg.kdl in '{}'.\n", folder) + return #from_proc false + } + + if inner_config.tools->empty() do return true + + if !os.dir_exists(".onyx") { + if !os.dir_create(".onyx") { + error_print("Failed to create tool directory, '.onyx'.\n") + return false + } + } - target := os.path_join(config.dependency_binary_path, tprintf("{}{}", inner_config.native_library->unwrap(), native_library_suffix)); - inner_package_file := tprintf("{}/onyx-pkg.kdl", folder); + for Iterator.from(inner_config.tools) { + bc := it.value + bc = .{ + CompileOnyx = .{ + ..bc.CompileOnyx!, + target = tprintf(".onyx/{}.wasm", it.key), + working_dir = folder, + args = Array.make(.["--no-compiler-extensions"]) + } + } - target_stat, package_stat: os.FileStat; - if !os.file_stat(target, &target_stat) do return false; - if !os.file_stat(inner_package_file, &package_stat) do return false; + if !run_build_configuration(bc) { + error_print("Failed to install tool '{}' from '{}'.\n", it.key, inner_config.package_name) + return false + } + } - return 
target_stat.modified_time >= package_stat.modified_time; + return true } run_command_and_forward_output :: (cmd: str) => { - args := string.split(cmd, #char " ", context.temp_allocator); - prog := args[0]; - args = args[1 .. args.count]; + args := str.split(cmd, ' ', context.temp_allocator) + prog := args[0] + args = args[1 .. args.count] - run_proc := os.process_spawn(prog, args); - r := io.reader_make(&run_proc); + run_proc := os.process_spawn(prog, args) + r := io.reader_make(&run_proc) while !r->empty() { - line := r->read_line(consume_newline=true); - print(line); + line := r->read_line(consume_newline=true) + print(line) } - return os.process_wait(&run_proc); + return os.process_wait(&run_proc) } build_package_file_to_load :: () { if !os.dir_exists(config.dependency_source_path) { - os.dir_create(config.dependency_source_path); + os.dir_create(config.dependency_source_path) } - filepath := os.path_join(config.dependency_source_path, "packages.onyx"); + filepath := os.path_join(config.dependency_source_path, "packages.onyx") if os.file_exists(filepath) { - os.remove_file(filepath); + os.remove_file(filepath) } - for os.with_file(filepath, .Write) { - w := io.writer_make(it); - defer io.writer_free(&w); + use file := os.open(filepath, .Write).Ok? + use w := io.writer_make(&file) - io.write(&w, """ + io.write(&w, """ // // THIS FILE WAS AUTOMATICALLY GENERATED BY onyx pkg. // DO NOT MODIFY UNLESS YOU KNOW WHAT YOU ARE DOING. // // PACKAGE LOADING -"""); +""") - for config.dependencies->as_iter() { - dependency_repo := it.value.source.Git ?? [] { continue; }; - dependency_folder := strip_protocol_and_www_from_repo(dependency_repo); + for config.dependencies->as_iter() { + dependency_repo := it.value.source.Git ?? 
[] { continue; } + dependency_folder := strip_protocol_and_www_from_repo(dependency_repo) - io.write_format(&w, - "#load \"./{}/module.onyx\"\n", - dependency_folder); - } + io.write_format(&w, + "#load \"./{}/module.onyx\"\n", + dependency_folder) + } - io.write(&w, "\n\n// NATIVE LIBRARY PATH\n"); + io.write(&w, "\n\n// NATIVE LIBRARY PATH\n") - io.write_format(&w, "#library_path \"{}\"\n", config.dependency_binary_path); - } + io.write_format(&w, "#library_path \"{}\"\n", config.dependency_binary_path) } -#tag conv.Custom_Parse.{parse} -#tag conv.Custom_Format.{format} +@conv.Custom_Parse.{parse} +@conv.Custom_Format.{format} SemVer :: struct { - major, minor, patch: i32; + major, minor, patch: i32 format :: (output: &conv.Format_Output, formatting: &conv.Format, semver: &SemVer) { - conv.format(output, "{}.{}.{}", semver.major, semver.minor, semver.patch); + conv.format(output, "{}.{}.{}", semver.major, semver.minor, semver.patch) } parse :: (semver: &SemVer, to_parse_: str, _: Allocator) -> bool { - to_parse := to_parse_; + to_parse := to_parse_ - major := string.read_until(&to_parse, #char ".") |> conv.str_to_i64(); - string.advance(&to_parse); - minor := string.read_until(&to_parse, #char ".") |> conv.str_to_i64(); - string.advance(&to_parse); - patch := string.read_until(&to_parse, #char ".") |> conv.str_to_i64(); + major := str.read_until(&to_parse, '.') |> conv.str_to_i64() + str.advance(&to_parse) + minor := str.read_until(&to_parse, '.') |> conv.str_to_i64() + str.advance(&to_parse) + patch := str.read_until(&to_parse, '.') |> conv.str_to_i64() - if major == 0 && minor == 0 && patch == 0 do return false; + if major == 0 && minor == 0 && patch == 0 do return false - semver.major = ~~ major; - semver.minor = ~~ minor; - semver.patch = ~~ patch; - return true; + semver.major = ~~ major + semver.minor = ~~ minor + semver.patch = ~~ patch + return true } - is_zero :: (use this: SemVer) => major == 0 && minor == 0 && patch == 0; + is_zero :: (use this: 
SemVer) => major == 0 && minor == 0 && patch == 0 // -1 if a < b // 0 if a == b // 1 if a > b compare :: (a, b: SemVer) -> i32 { - if a.major != b.major do return math.sign(b.major - a.major); - if a.minor != b.minor do return math.sign(b.minor - a.minor); - return math.sign(b.patch - a.patch); + if a.major != b.major do return math.sign(b.major - a.major) + if a.minor != b.minor do return math.sign(b.minor - a.minor) + return math.sign(b.patch - a.patch) } - is_newer :: macro (from, to: SemVer) => from->compare(to) == -1; + is_newer :: macro (from, to: SemVer) => from->compare(to) == -1 is_compatible :: (from, to: SemVer) -> bool { - return from.major == to.major; + return from.major == to.major } bump_major :: (use this: &SemVer) { - major += 1; - minor = 0; - patch = 0; + major += 1 + minor = 0 + patch = 0 } bump_minor :: (use this: &SemVer) { - minor += 1; - patch = 0; + minor += 1 + patch = 0 } bump_patch :: (use this: &SemVer) { - patch += 1; + patch += 1 } } -#operator == macro (s1, s2: SemVer) => s1.major == s2.major && s1.minor == s2.minor && s1.patch == s2.patch; -#operator != macro (s1, s2: SemVer) => !(s1 == s2); +#operator == macro (s1, s2: SemVer) => s1.major == s2.major && s1.minor == s2.minor && s1.patch == s2.patch +#operator != macro (s1, s2: SemVer) => !(s1 == s2) Package :: struct { - source: DependencySource; - version: SemVer; + source: DependencySource + target: Dependency.Target } #if runtime.compiler_os == .Linux || runtime.compiler_os == .MacOS { @@ -976,363 +1181,491 @@ Git :: struct { get_full_repo_uri :: (package_search: str) -> str { for Known_Repositories { for proto in Protocols { - r := tprintf("{}{}", proto, tprintf(it, package_search)); - git_proc := os.process_spawn(git_path, .["ls-remote", "--tags", r]); + r := tprintf("{}{}", proto, tprintf(it, package_search)) + git_proc := os.process_spawn(git_path, .["ls-remote", "--tags", r]) if os.process_wait(&git_proc) == .Success { - return r |> string.alloc_copy(); + return r |> 
str.copy() } } } - return ""; + return "" } get_available_versions :: (repo: str) -> [] SemVer { - versions := make([..] SemVer); + versions := make([..] SemVer) - git_proc := os.process_spawn(git_path, .["ls-remote", "--tags", repo]); - r := io.reader_make(&git_proc); + git_proc := os.process_spawn(git_path, .["ls-remote", "--tags", repo]) + r := io.reader_make(&git_proc) for r->lines(inplace=true) { - last_slash := string.last_index_of(it, #char "/"); - tag_name := it[last_slash+1 .. it.count-1]; + last_slash := str.last_index_of(it, '/') + tag_name := it[last_slash+1 .. it.count-1] - if tag_name[0] != #char "v" do continue; - string.advance(&tag_name); + if tag_name[0] != 'v' do continue + str.advance(&tag_name) - version: SemVer; + version: SemVer if conv.parse_any(&version, tag_name) { - versions << version; + versions << version } } - os.process_wait(&git_proc); + os.process_wait(&git_proc) - return versions; + return versions } get_latest_version :: (repo: str) -> SemVer { - versions := get_available_versions(repo); + versions := get_available_versions(repo) if versions.count == 0 { - return .{0, 0, 0}; + return .{0, 0, 0} } - defer delete(&versions); + defer delete(&versions) - array.sort(versions, SemVer.compare); - latest := versions[0]; - return latest; + array.sort(versions, SemVer.compare) + latest := versions[0] + return latest } get_latest_compatible_version :: (repo: str, current_version: SemVer) -> SemVer { - versions := get_available_versions(repo); + versions := get_available_versions(repo) if versions.count == 0 { - return .{0, 0, 0}; + return .{0, 0, 0} } - defer delete(&versions); + defer delete(&versions) - array.sort(versions, SemVer.compare); + array.sort(versions, SemVer.compare) for versions { - if current_version->is_compatible(it) do return it; + if current_version->is_compatible(it) do return it } - return .{0, 0, 0}; + return .{0, 0, 0} } clone_version :: (repo: str, version: SemVer) -> bool { - info_print("Fetch", "{} {}\n", repo, 
version); + info_print("Fetch", "{} {}\n", repo, version) - version_str := tprintf("v{}", version); - temporary_dest := os.path_join(config.dependency_source_path, ".cloned"); + version_str := tprintf("v{}", version) + temporary_dest := os.path_join(config.dependency_source_path, ".cloned") - os.remove_directory(temporary_dest); + os.remove_directory(temporary_dest) successfully_cloned := do -> bool { for proto in Protocols { // Use 'git clone' to clone the bare minimum amount to get the released version. - proto_repo := tprintf("{}{}", proto, repo); - git_proc := os.process_spawn(git_path, .["clone", "--single-branch", "--depth", "1", "-b", version_str, proto_repo, temporary_dest]); - result := os.process_wait(&git_proc); + proto_repo := tprintf("{}{}", proto, repo) + git_proc := os.process_spawn(git_path, .["clone", "--single-branch", "--depth", "1", "-b", version_str, proto_repo, temporary_dest]) + result := os.process_wait(&git_proc) - if result == .Success do return true; + if result == .Success do return true } - return false; - }; + return false + } if successfully_cloned { - install_dest := strip_protocol_and_www_from_repo(repo); - - // Move the cloned repository to its permanent location. - actual_dest := os.path_join(config.dependency_source_path, install_dest); - if os.dir_exists(actual_dest) { - error_print("Expected {} to not exist when fetching '{}'.\n", actual_dest, repo); - os.remove_directory(temporary_dest); - return false; - } + return _move_to_permanent_storage(repo, temporary_dest) + } - rolling_parent := make(dyn_str); - path := string.split(actual_dest, os.PATH_SEP); - for path[0 .. 
path.length-1] { - string.append(&rolling_parent, it); - string.append(&rolling_parent, os.PATH_SEP); + return successfully_cloned + } - if !os.dir_exists(rolling_parent) { - os.dir_create(rolling_parent); - } + clone_branch :: (repo: str, branch: str) -> bool { + info_print("Fetch", "{} {}\n", repo, branch) + + temporary_dest := os.path_join(config.dependency_source_path, ".cloned") + os.remove_directory(temporary_dest) + + successfully_cloned := do -> bool { + for proto in Protocols { + // Use 'git clone' to clone the bare minimum amount to get the released version. + proto_repo := tprintf("{}{}", proto, repo) + git_proc := os.process_spawn(git_path, .["clone", "--single-branch", "--depth", "1", "-b", branch, proto_repo, temporary_dest]) + result := os.process_wait(&git_proc) + + if result == .Success do return true } - if !os.dir_rename(temporary_dest, actual_dest) { - error_print("Failed to move temporary package to final destination when fetching '{}'.\n", repo); - os.remove_directory(temporary_dest); - return false; + error_print("Failed to clone {}/{}\n", repo, branch) + return false + } + + if successfully_cloned { + return _move_to_permanent_storage(repo, temporary_dest, true) + } + + return successfully_cloned + } + + _move_to_permanent_storage :: (repo: str, temporary_dest: str, overwrite := false) -> bool { + install_dest := strip_protocol_and_www_from_repo(repo) + + // Move the cloned repository to its permanent location. + actual_dest := os.path_join(config.dependency_source_path, install_dest) + if os.dir_exists(actual_dest) { + if overwrite { + os.remove_directory(actual_dest) + + } else { + error_print("Expected {} to not exist when fetching '{}'.\n", actual_dest, repo) + os.remove_directory(temporary_dest) + return false } + } + + rolling_parent := make(dyn_str) + path := str.split(actual_dest, os.PATH_SEP) + for path[0 .. 
path.length-1] { + dyn_str.append(&rolling_parent, it) + dyn_str.append(&rolling_parent, os.PATH_SEP) - // Remove the .git folder, as it is unneeded. - unnecessary_git_dir := os.path_join(actual_dest, ".git"); - if !os.remove_directory(unnecessary_git_dir) { - error_print("Failed to delete .git folder of '{}'.\n", repo); - return false; + if !os.dir_exists(rolling_parent) { + os.dir_create(rolling_parent) } } - return successfully_cloned; + if !os.dir_rename(temporary_dest, actual_dest) { + error_print("Failed to move temporary package to final destination when fetching '{}'.\n", repo) + os.remove_directory(temporary_dest) + return false + } + + // Remove the .git folder, as it is unneeded. + unnecessary_git_dir := os.path_join(actual_dest, ".git") + if !os.remove_directory(unnecessary_git_dir) { + error_print("Failed to delete .git folder of '{}'.\n", repo) + return false + } + + return true } publish_version :: () -> bool { run_command :: macro (cmd: str, args: [] str) { - p := os.process_spawn(cmd, args); + p := os.process_spawn(cmd, args) if os.process_wait(&p) != .Success { - return false; + return false } } - run_command(git_path, .["add", global_arguments.config_file]); - run_command(git_path, .["commit", "-m", tprintf("version {}", config.package_version)]); - run_command(git_path, .["tag", tprintf("v{}", config.package_version)]); - run_command(git_path, .["push", "--tags"]); - run_command(git_path, .["push"]); - return true; + run_command(git_path, .["add", global_arguments.config_file]) + run_command(git_path, .["commit", "-m", tprintf("version {}", config.package_version)]) + run_command(git_path, .["tag", tprintf("v{}", config.package_version)]) + run_command(git_path, .["push", "--tags"]) + run_command(git_path, .["push"]) + return true } } -config: Config; +config: Config Config :: struct { - package_name: str; - package_description: str; - package_url: str; - package_author: str; - package_version: SemVer; - package_license: str; + package_name: str 
+ package_description: str + package_url: str + package_author: str + package_version: SemVer + package_license: str + + dependency_source_path: str + dependency_binary_path: str - dependency_source_path: str; - dependency_binary_path: str; + native_library: ? str + native_library_build: ? str - native_library: ? str; - native_library_build: ? str; + build_configs: Map(str, BuildConfig) - build_configs: Map(str, BuildConfig); + tools: Map(str, BuildConfig) - dependencies: Map(str, Dependency); + dependencies: Map(str, Dependency) - _source_doc: ? kdl.Document; + _source_doc: ? kdl.Document } Dependency :: struct { - name: str; - version: SemVer; - source: DependencySource; + name: str + Target :: union { + version: SemVer + branch: str + } + target: Target + locked: bool + source: DependencySource } DependencySource :: union { - Unknown: void; - Git: str; + Unknown: void + Git: str } -BuildConfig :: struct { - include: [..] str; - args: [..] str; - defines: [..] str; - sources: [..] str; - runtime: str; - target: str; +BuildConfig :: union { + CompileOnyx: struct { + include: [..] str + args: [..] str + defines: [..] str + sources: [..] str + runtime: str + target: str + working_dir: str + } + RunCommands: [] [] str + Collection: [] str } load_config_file :: () -> bool { - _config := load_config(global_arguments.config_file); + _config := load_config(global_arguments.config_file) if !_config { - return false; + return false } - config = _config->unwrap(); - return true; + config = _config->unwrap() + return true } store_config_file :: () -> bool { - return store_config(global_arguments.config_file); + return store_config(global_arguments.config_file) } load_config :: (path: str) -> ? Config { - contents := os.get_contents(path); - if !contents do return .{}; + contents := os.get_contents(path) + if !contents do return .{} - defer delete(&contents); - doc := kdl.parse(contents).Ok?; + defer delete(&contents) + doc := kdl.parse(contents).Ok? 
- c: Config; - c._source_doc = doc; - c.dependency_source_path = "./lib"; - c.dependency_binary_path = "./bin"; + c: Config + c._source_doc = doc + c.dependency_source_path = "./lib" + c.dependency_binary_path = "./bin" doc->query("top() > package")->with([p] { - pack := p; + pack := p - load_string(pack, "name", &c.package_name); - load_string(pack, "author", &c.package_author); - load_string(pack, "description", &c.package_description); - load_string(pack, "url", &c.package_url); - load_string(pack, "license", &c.package_license); + load_string(pack, "name", &c.package_name) + load_string(pack, "author", &c.package_author) + load_string(pack, "description", &c.package_description) + load_string(pack, "url", &c.package_url) + load_string(pack, "license", &c.package_license) - version: str; - load_string(pack, "version", &version); + version: str + load_string(pack, "version", &version) - conv.parse_any(&c.package_version, version); - }); + conv.parse_any(&c.package_version, version) + }) doc->query("top() > config")->with([p] { - load_string(p, "dependency_source_path", &c.dependency_source_path); - load_string(p, "dependency_binary_path", &c.dependency_binary_path); - }); + load_string(p, "dependency_source_path", &c.dependency_source_path) + load_string(p, "dependency_binary_path", &c.dependency_binary_path) + }) doc->query("top() > native")->with([p] { - load_string(p, "library", &c.native_library); - load_string(p, "build", &c.native_library_build); - }); + load_string(p, "library", &c.native_library) + load_string(p, "build", &c.native_library_build) + }) - for doc->query_all("top() > build > []") { - b: BuildConfig; - b.runtime = "onyx"; - b.target = "out.wasm"; + for doc->query_all("top() > tools > []") { + sources: [..] 
str - load_string(it, "runtime", &b.runtime); - load_string(it, "target", &b.target); - - for it->query_all("include") { - array.concat(&b.include, - iter.as_iter(it.values)->flatten(x => (*x)->as_str())); + for it->query_all("source") { + Array.concat(&sources, + Iterator.from(it.values)->flatten(x => (*x)->as_str())) } - for it->query_all("define") { - array.concat(&b.defines, - iter.as_iter(it.values)->flatten(x => (*x)->as_str())); + c.tools[it.node] = .{ + CompileOnyx = .{ + sources = sources + } } + } - for it->query_all("args") { - array.concat(&b.args, - iter.as_iter(it.values)->flatten(x => (*x)->as_str())); - } + for doc->query_all("top() > build > []") { + b: BuildConfig - for it->query_all("source") { - array.concat(&b.sources, - iter.as_iter(it.values)->flatten(x => (*x)->as_str())); - } + kind := it->value_or_null()->as_str() ?? "compile" + switch kind { + case "compile" { + runtime := "onyx" + target := "out.wasm" + + include, defines, args, sources: [..] str + + load_string(it, "runtime", &runtime) + load_string(it, "target", &target) + + for it->query_all("include") { + array.concat(&include, + iter.as_iter(it.values)->flatten(x => (*x)->as_str())) + } + + for it->query_all("define") { + array.concat(&defines, + iter.as_iter(it.values)->flatten(x => (*x)->as_str())) + } + + for it->query_all("args") { + array.concat(&args, + iter.as_iter(it.values)->flatten(x => (*x)->as_str())) + } + + for it->query_all("source") { + array.concat(&sources, + iter.as_iter(it.values)->flatten(x => (*x)->as_str())) + } + + c.build_configs[it.node] = .{ + CompileOnyx = .{ + include, args, defines, sources, runtime, target, working_dir = "" + } + } + } + + case "shell" { + commands := it->query_all("run") + |> Iterator.map(x => { + return cast([] str, + Iterator.from(x.values) + |> Iterator.flatten(y => y.*->as_str()) + |> Iterator.collect() + ) + }) + |> Iterator.collect() + + c.build_configs[it.node] = .{ + RunCommands = commands + } + } + + case "collection" { + 
steps := it->query_all("build") + |> Iterator.flatten(x => x->value_or_null()->as_str()) + |> Iterator.collect() - c.build_configs[it.node] = b; + c.build_configs[it.node] = .{ + Collection = steps + } + } + } } for doc->query_all("top() > dependencies > []") { - d: Dependency; - d.name = it.node; - - version_str := it->value_or_null()->as_str() ?? ""; - conv.parse_any(&d.version, version_str); + d: Dependency + d.name = it.node + + if it.values.length == 0 { + it.props->get("branch")->with([branch] { + d.target = .{ branch = branch->as_str() ?? "" } + }) + + } else { + version_str := it->value_or_null()->as_str() ?? "" + version: SemVer + conv.parse_any(&version, version_str) + d.target = .{ version = version } + } it.props->get("git")->with([src] { - d.source = .{ Git = src->as_str() ?? "" }; - }); + d.source = .{ Git = src->as_str() ?? "" } + }) + + it.props->get("locked")->with([locked] { + d.locked = locked->as_bool() ?? false + }) - c.dependencies[d.name] = d; + c.dependencies[d.name] = d } - return c; + return c load_string :: (p: &kdl.Node, field: str, target: &$T) { p->query(field)->with([n] { n->value_or_null()->as_str()->with([s] { - *target = s; - }); - }); + *target = s + }) + }) } } store_config :: (path: str) -> bool { - doc := kdl.new_doc(); + doc := kdl.new_doc() - package_node := doc->create_node("package"); - doc.nodes << package_node; + package_node := doc->create_node("package") + doc.nodes << package_node { - name_node := doc->create_node("name"); - name_node->add_value(.{ String = config.package_name }); + name_node := doc->create_node("name") + name_node->add_value(.{ String = config.package_name }) - author_node := doc->create_node("author"); - author_node->add_value(.{ String = config.package_author }); + author_node := doc->create_node("author") + author_node->add_value(.{ String = config.package_author }) - url_node := doc->create_node("url"); - url_node->add_value(.{ String = config.package_url }); + url_node := doc->create_node("url") 
+ url_node->add_value(.{ String = config.package_url }) - description_node := doc->create_node("description"); - description_node->add_value(.{ String = config.package_description }); + description_node := doc->create_node("description") + description_node->add_value(.{ String = config.package_description }) - version_node := doc->create_node("version"); - version_node->add_value(.{ String = tprintf("{}", config.package_version) }); + version_node := doc->create_node("version") + version_node->add_value(.{ String = tprintf("{}", config.package_version) }) - license_node := doc->create_node("license"); - license_node->add_value(.{ String = config.package_license }); + license_node := doc->create_node("license") + license_node->add_value(.{ String = config.package_license }) array.concat(&package_node.children, .[ name_node, author_node, url_node, description_node, version_node - ]); + ]) } - config_node := doc->create_node("config"); - doc.nodes << config_node; + config_node := doc->create_node("config") + doc.nodes << config_node { - source_path_node := doc->create_node("dependency_source_path"); - source_path_node->add_value(.{ String = config.dependency_source_path }); - config_node.children << source_path_node; + source_path_node := doc->create_node("dependency_source_path") + source_path_node->add_value(.{ String = config.dependency_source_path }) + config_node.children << source_path_node - binary_path_node := doc->create_node("dependency_binary_path"); - binary_path_node->add_value(.{ String = config.dependency_binary_path }); - config_node.children << binary_path_node; + binary_path_node := doc->create_node("dependency_binary_path") + binary_path_node->add_value(.{ String = config.dependency_binary_path }) + config_node.children << binary_path_node } if config.native_library { - native_node := doc->create_node("native"); - doc.nodes << native_node; + native_node := doc->create_node("native") + doc.nodes << native_node - library_node := 
doc->create_node("library"); - library_node->add_value(.{ String = config.native_library->unwrap() }); - native_node.children << library_node; + library_node := doc->create_node("library") + library_node->add_value(.{ String = config.native_library->unwrap() }) + native_node.children << library_node - build_node := doc->create_node("build"); - build_node->add_value(.{ String = config.native_library_build ?? "" }); - native_node.children << build_node; + build_node := doc->create_node("build") + build_node->add_value(.{ String = config.native_library_build ?? "" }) + native_node.children << build_node } if !config.dependencies->empty() { - dependency_node := doc->create_node("dependencies"); - doc.nodes << dependency_node; + dependency_node := doc->create_node("dependencies") + doc.nodes << dependency_node for config.dependencies->as_iter() { - dep_node := doc->create_node(it.key); - dependency_node.children << dep_node; + dep_node := doc->create_node(it.key) + dependency_node.children << dep_node - dep_node->add_value(.{ String = tprintf("{}", it.value.version) }); + switch it.value.target { + case .version as ver { + dep_node->add_value(.{ String = tprintf("{}", ver) }) + } + + case .branch as br { + dep_node.props->put("branch", .{ data = .{ String = br } }) + } + } switch it.value.source { case .Git as s { - dep_node.props["git"] = .{ data = .{ String = s } }; + dep_node.props["git"] = .{ data = .{ String = s } } } - case #default --- + case _ --- + } + + if it.value.locked { + dep_node.props["locked"] = .{ data = .{ Boolean = true } } } } } @@ -1340,230 +1673,226 @@ store_config :: (path: str) -> bool { config._source_doc->with([source] { for source->query_all("top() > []") { if !array.contains(str.["package", "config", "native", "dependencies"], it.node) { - doc.nodes << it; + doc.nodes << it } } - }); + }) - file := os.open(path, .Write)->or_return(false); - defer os.close(&file); + file := os.open(path, .Write)->or_return(false) + defer os.close(&file) - w 
:= io.writer_make(&file); - defer io.writer_free(&w); + w := io.writer_make(&file) + defer io.writer_free(&w) - kdl.write(&doc, &w); + kdl.write(&doc, &w) - return true; + return true } // Old INI config code read_old_config :: (path: str) -> ? Config { - for os.with_file(path) { - r := io.reader_make(it); - defer io.reader_free(&r); + use file := os.open(path).Ok? + use r := io.reader_make(&file) - inner_config: IniConfig; - result, error := encoding.ini.parse_ini_file(&r, &inner_config); + inner_config: IniConfig + result, error := encoding.ini.parse_ini_file(&r, &inner_config) - if result != .Success { - return .{}; - } - - c: Config; + if result != .Success { + return .{} + } - c.package_name = inner_config.metadata.name; - c.package_description = inner_config.metadata.description; - c.package_url = inner_config.metadata.url; - c.package_author = inner_config.metadata.author; - c.package_version = inner_config.metadata.version; + c: Config - c.dependency_binary_path = inner_config.config.lib_bin_directory; - c.dependency_source_path = inner_config.config.lib_source_directory; + c.package_name = inner_config.metadata.name + c.package_description = inner_config.metadata.description + c.package_url = inner_config.metadata.url + c.package_author = inner_config.metadata.author + c.package_version = inner_config.metadata.version - if inner_config.native_library.library { - c.native_library = inner_config.native_library.library; - c.native_library_build = inner_config.native_library.build_cmd; - } + c.dependency_binary_path = inner_config.config.lib_bin_directory + c.dependency_source_path = inner_config.config.lib_source_directory - for inner_config.dependencies.dependencies.entries { - c.dependencies->put(it.key[string.last_index_of(it.key, '/')+1 .. 
it.key.length], .{ - name = it.key, - version = it.value, - source = .{ Git = it.key } - }); - } + if inner_config.native_library.library { + c.native_library = inner_config.native_library.library + c.native_library_build = inner_config.native_library.build_cmd + } - return c; + for inner_config.dependencies.dependencies.entries { + c.dependencies->put(it.key[str.last_index_of(it.key, '/')+1 .. it.key.length], .{ + name = it.key, + target = .{ version = it.value }, + source = .{ Git = it.key } + }) } + + return c } IniConfig :: struct { Metadata :: struct { - name: str; - description: str; - url: str; - author: str; - version: SemVer; + name: str + description: str + url: str + author: str + version: SemVer } - metadata: Metadata; + metadata: Metadata Config :: struct { - lib_source_directory: str = "./lib"; - lib_bin_directory: str = "./bin"; - run_cmd: str; - debug_cmd: str; - test_cmd: str; + lib_source_directory: str = "./lib" + lib_bin_directory: str = "./bin" + run_cmd: str + debug_cmd: str + test_cmd: str } - config: Config = .{}; + config: Config = .{} Native_Library :: struct { - build_cmd: str; - library: str; + build_cmd: str + library: str } - native_library: Native_Library; + native_library: Native_Library Dependencies :: struct { - dependencies: Map(str, SemVer); + dependencies: Map(str, SemVer) - parse_ini :: parse_dependencies; - write_ini :: write_dependencies; + parse_ini :: parse_dependencies + write_ini :: write_dependencies } - dependencies: Dependencies; + dependencies: Dependencies Dependency_Folders :: struct { // Dependency to folder - folders: Map(str, str); + folders: Map(str, str) - parse_ini :: parse_dependency_folders; - write_ini :: write_dependency_folders; + parse_ini :: parse_dependency_folders + write_ini :: write_dependency_folders } - dependency_folders: Dependency_Folders; + dependency_folders: Dependency_Folders } #local parse_dependencies :: (dependencies: &IniConfig.Dependencies, r: &io.Reader) -> bool { while true { - 
r->skip_whitespace(); - if r->is_empty() do return true; - if p, _ := r->peek_byte(); p == #char "[" do return true; + r->skip_whitespace() + if r->is_empty() do return true + if p, _ := r->peek_byte(); p == '[' do return true - dep := r->read_until(#char "=") |> string.strip_trailing_whitespace(); - r->read_byte(); - r->skip_whitespace(); + dep := r->read_until('=') |> str.strip_trailing_whitespace() + r->read_byte() + r->skip_whitespace() - version_str := r->read_until(#char "\n") |> string.strip_trailing_whitespace(); - version: SemVer; - conv.parse_any(&version, version_str); - dependencies.dependencies[dep] = version; + version_str := r->read_until('\n') |> str.strip_trailing_whitespace() + version: SemVer + conv.parse_any(&version, version_str) + dependencies.dependencies[dep] = version } - return true; + return true } #local write_dependencies :: (dependencies: &IniConfig.Dependencies, w: &io.Writer) -> bool { for& dependencies.dependencies.entries { - io.write_format(w, "{}={}\n", it.key, it.value); + io.write_format(w, "{}={}\n", it.key, it.value) } - return true; + return true } #local parse_dependency_folders :: (dependencies: &IniConfig.Dependency_Folders, r: &io.Reader) -> bool { while true { - r->skip_whitespace(); - if r->is_empty() do return true; - if p, _ := r->peek_byte(); p == #char "[" do return true; + r->skip_whitespace() + if r->is_empty() do return true + if p, _ := r->peek_byte(); p == '[' do return true - dep := r->read_until(#char "=") |> string.strip_trailing_whitespace(); - r->read_byte(); - r->skip_whitespace(); + dep := r->read_until('=') |> str.strip_trailing_whitespace() + r->read_byte() + r->skip_whitespace() - folder := r->read_until(#char "\n") |> string.strip_trailing_whitespace(); - dependencies.folders[dep] = folder; + folder := r->read_until('\n') |> str.strip_trailing_whitespace() + dependencies.folders[dep] = folder } - return true; + return true } #local write_dependency_folders :: (dependencies: 
&IniConfig.Dependency_Folders, w: &io.Writer) -> bool { for& dependencies.folders.entries { - io.write_format(w, "{}={}\n", it.key, it.value); + io.write_format(w, "{}={}\n", it.key, it.value) } - return true; + return true } load_old_config_file :: () -> bool { - file_data := os.get_contents(global_arguments.config_file); - if string.empty(file_data) { - return false; + file_data := os.get_contents(global_arguments.config_file) + if str.empty(file_data) { + return false } - reader, stream := io.reader_from_string(file_data); - defer cfree(stream); + reader, stream := io.reader_from_string(file_data) + defer cfree(stream) - result, error := encoding.ini.parse_ini_file(&reader, &config); + result, error := encoding.ini.parse_ini_file(&reader, &config) if result != .Success { - eprintf("{w5} | {}\n", error.line, error.msg); - return false; + eprintf("{w5} | {}\n", error.line, error.msg) + return false } - return true; + return true } store_old_config_file :: () -> bool { - for os.with_file(global_arguments.config_file, .Write) { - writer := io.writer_make(it); - defer io.writer_free(&writer); + use file := os.open(global_arguments.config_file, .Write)->or_return(false) + use writer := io.writer_make(&file) - return encoding.ini.write_ini_file(&writer, config); - } + return encoding.ini.write_ini_file(&writer, config) } Color_Print :: struct { Color :: enum { - Black; - Red; - Green; - Yellow; - Blue; - Purple; - Cyan; - White; - __Unused; - Default; - } - - color: Color; - text: str; + Black + Red + Green + Yellow + Blue + Purple + Cyan + White + __Unused + Default + } + + color: Color + text: str } color_print :: (segments: ..Color_Print) { for segments { - printf("\x1b[3{}m{}", cast(u32) it.color, it.text); + printf("\x1b[3{}m{}", cast(u32) it.color, it.text) } - print("\x1b[0m"); + print("\x1b[0m") } error_print :: (text: str, va: ..any) { - buf: [1024] u8; + buf: [1024] u8 color_print( .{ .Red, " Error " }, .{ .Default, conv.format_va(buf, text, cast([] any) va) 
} - ); + ) } info_print :: (verb: str, text: str, va: ..any) { - buf: [1024] u8; + buf: [1024] u8 // HACK - for 12 - cast(i32) verb.length do print(" "); + for 12 - cast(i32) verb.length do print(" ") color_print( .{ .Green, tprintf("{} ", verb) }, .{ .Default, conv.format_va(buf, text, cast([] any) va) } - ); + ) } diff --git a/scripts/run_tests.onyx b/scripts/run_tests.onyx index f8c7a50de..25ffc1cde 100644 --- a/scripts/run_tests.onyx +++ b/scripts/run_tests.onyx @@ -18,7 +18,7 @@ print_color :: (color: Color, format: str, args: ..any) { buffer: [2048] u8; output := conv.str_format_va(buffer, format, args); - if runtime.compiler_os == .Linux && !settings.no_color { + if (runtime.compiler_os == .Linux || runtime.compiler_os == .MacOS) && !settings.no_color { color_code: str; switch color { case .Red do color_code = "\x1b[91m"; @@ -26,7 +26,7 @@ print_color :: (color: Color, format: str, args: ..any) { case .Yellow do color_code = "\x1b[93m"; case .Blue do color_code ="\x1b[94m"; case .White do fallthrough; - case #default do color_code = "\x1b[97m"; + case _ do color_code = "\x1b[97m"; } printf("{}{}\x1b[0m", color_code, output); @@ -47,7 +47,7 @@ find_onyx_files :: (root: str, cases: &[..] Test_Case) { for os.list_directory(root) { path_buffer: [512] u8; if string.ends_with(it->name(), ".onyx") { - test_case := string.concat(path_buffer, root, "/", it->name()) |> string.alloc_copy(); + test_case := string.concat(path_buffer, root, "/", it->name()) |> string.copy(); expected_file := test_case[0 .. 
(test_case.count - 5)]; if !os.file_exists(expected_file) { @@ -129,7 +129,7 @@ main :: (args) => { cmd->args(.["build", it.source_file]); } else { printf("[{}] Running test {}...\n", context.thread_id, it.source_file); - cmd->args(.["run", it.source_file, "--generate-method-info"]); + cmd->args(.["run", "--generate-method-info", it.source_file]); } output := cmd->output(); @@ -153,19 +153,18 @@ main :: (args) => { program_output := output.Ok->unwrap(); - for expected_file in os.with_file(it.expected_file) { - expected_reader := io.reader_make(expected_file); - expected_output := io.read_all(&expected_reader); + use expected_file := os.open(it.expected_file)->unwrap(); + use expected_reader := io.reader_make(&expected_file); + expected_output := io.read_all(&expected_reader); - if program_output != expected_output { - print_color(.Red, "[{}] Output did not match for {}.\n", context.thread_id, it.source_file); - printf("Expected:\n{}\n", expected_output); - printf("Got:\n{}\n", program_output); - thread_data.at_least_one_test_failed = true; + if program_output != expected_output { + print_color(.Red, "[{}] Output did not match for {}.\n", context.thread_id, it.source_file); + printf("Expected:\n{}\n", expected_output); + printf("Got:\n{}\n", program_output); + thread_data.at_least_one_test_failed = true; - sync.critical_section(&thread_data.failed_tests_mutex) { - array.push(thread_data.failed_tests, it.source_file); - } + sync.critical_section(&thread_data.failed_tests_mutex) { + array.push(thread_data.failed_tests, it.source_file); } } } diff --git a/settings.sh b/settings.sh index b4307b7b2..d203f0205 100644 --- a/settings.sh +++ b/settings.sh @@ -8,8 +8,8 @@ export ONYX_CC='gcc' # The architecture of your system. If your not sure, leave this alone. 
export ONYX_ARCH="$(uname | tr '[:upper:]' '[:lower:]')_$(uname -m)" -# export ONYX_RUNTIME_LIBRARY="ovmwasm" -export ONYX_RUNTIME_LIBRARY="wasmer" +export ONYX_RUNTIME_LIBRARY="ovmwasm" +# export ONYX_RUNTIME_LIBRARY="wasmer" # Enable Dynamic call export ONYX_USE_DYNCALL=1 diff --git a/shared/include/bh.h b/shared/include/bh.h index 73979feab..582ba68b5 100644 --- a/shared/include/bh.h +++ b/shared/include/bh.h @@ -53,6 +53,8 @@ #if defined(_BH_DARWIN) #include + #include + #include #endif #include @@ -188,6 +190,14 @@ static inline const char* bh_num_suffix(u64 i) { } } +static inline u32 bh_clz(u32 i) { + #ifdef _BH_WINDOWS + return __lzcnt(i); + #else + return __builtin_clz(i); + #endif +} + @@ -357,6 +367,7 @@ b32 bh_str_ends_with(char* str, char* end); b32 bh_str_contains(char *str, char *needle); u32 bh_str_last_index_of(char *str, char needle); char* bh_strdup(bh_allocator a, char* str); +char* bh_strdup_len(bh_allocator a, char* str, i32 len); @@ -458,9 +469,18 @@ char* bh_path_get_full_name(char const* filename, bh_allocator a); char* bh_path_get_parent(char const* filename, bh_allocator a); char* bh_path_convert_separators(char* path); + +typedef struct bh_mapped_folder { + char *name; + char *folder; +} bh_mapped_folder; + // This function returns a volatile pointer. Do not store it without copying! // `included_folders` is bh_arr(const char *). -char* bh_lookup_file(char* filename, char* relative_to, char *suffix, b32 add_suffix, const char ** included_folders, b32 search_included_folders); +// 'mapped_folders' is bh_arr(bh_mapped_folder). 
+char* bh_lookup_file(char* filename, char* relative_to, char *suffix, const char ** included_folders, bh_mapped_folder* mapped_folders, bh_allocator allocator); + +char* bh_search_for_mapped_file(char* filename, char* relative_to, char *suffix, bh_mapped_folder* mapped_folders, bh_allocator allocator); #define bh_file_read_contents(allocator_, x) _Generic((x), \ bh_file*: bh_file_read_contents_bh_file, \ @@ -506,7 +526,7 @@ void bh_dir_close(bh_dir dir); -#ifdef _BH_LINUX +#if defined(_BH_LINUX) typedef struct bh_file_watch { int inotify_fd; int kill_pipe[2]; @@ -514,8 +534,14 @@ void bh_dir_close(bh_dir dir); fd_set fds; } bh_file_watch; #endif -#if defined(_BH_WINDOWS) || defined(_BH_DARWIN) - // TODO: Make these work on Windows and MacOS +#if defined(_BH_DARWIN) + typedef struct bh_file_watch { + int kqueue_fd; + struct kevent *listeners; + } bh_file_watch; +#endif +#if defined(_BH_WINDOWS) + // TODO: Make these work on Windows typedef u32 bh_file_watch; #endif @@ -649,6 +675,7 @@ typedef struct bh__arr { #define bh__arrhead(arr) (((bh__arr *)(arr)) - 1) #define bh_arr_allocator(arr) (arr ? bh__arrhead(arr)->allocator : BH_INTERNAL_ALLOCATOR) +#define bh_arr_allocator_assert(arr) (arr ? bh__arrhead(arr)->allocator : (assert(0 && "UNSET ALLOCATOR"), ((bh_allocator) {0}))) #define bh_arr_length(arr) (arr ? bh__arrhead(arr)->length : 0) #define bh_arr_capacity(arr) (arr ? bh__arrhead(arr)->capacity : 0) #define bh_arr_size(arr) (arr ? bh__arrhead(arr)->capacity * sizeof(*(arr)) : 0) @@ -677,6 +704,10 @@ typedef struct bh__arr { bh_arr_length(arr) + 1 > bh_arr_capacity(arr) ? bh__arr_grow(bh_arr_allocator(arr), (void **) &(arr), sizeof(*(arr)), bh_arr_length(arr) + 1) : 0, \ arr[bh__arrhead(arr)->length++] = value) +#define bh_arr_push_unsafe(arr, value) ( \ + bh_arr_length(arr) + 1 > bh_arr_capacity(arr) ? 
bh__arr_grow(bh_arr_allocator_assert(arr), (void **) &(arr), sizeof(*(arr)), bh_arr_length(arr) + 1) : 0, \ + arr[bh__arrhead(arr)->length++] = value) + #define bh_arr_set_at(arr, n, value) ( \ bh__arr_grow(bh_arr_allocator(arr), (void **) &(arr), sizeof(*(arr)), (n) + 1), \ bh_arr_set_length((arr), bh_max(bh_arr_length(arr), (i32) (n) + 1)), \ @@ -983,7 +1014,7 @@ ptr bh_alloc(bh_allocator a, isize size) { ptr bh_alloc_aligned(bh_allocator a, isize size, isize alignment) { ptr ret = a.proc(a.data, bh_allocator_action_alloc, size, alignment, NULL, 0); - if (ret) memset(ret, 0, size); + if (ret != 0) memset(ret, 0, size); return ret; } @@ -1578,6 +1609,24 @@ char* bh_strdup(bh_allocator a, char* str) { return buf; } +char* bh_strdup_len(bh_allocator a, char* str, i32 len) { + if (!str) return NULL; + + if (len < 0) { + len = strlen(str); + } + + char* buf = bh_alloc(a, len + 1); + + char* t = buf; + while (len-- > 0) { + *t++ = *str++; + } + + *t = '\0'; + return buf; +} + @@ -1896,10 +1945,12 @@ bh_file_contents bh_file_read_contents_bh_file(bh_allocator alloc, bh_file* file } bh_file_contents bh_file_read_contents_direct(bh_allocator alloc, const char* filename) { - bh_file file; - bh_file_open(&file, filename); - bh_file_contents fc = bh_file_read_contents(alloc, &file); - bh_file_close(&file); + bh_file file = {0}; + bh_file_contents fc = {0}; + if (bh_file_open(&file, filename) == BH_FILE_ERROR_NONE) { + fc = bh_file_read_contents(alloc, &file); + bh_file_close(&file); + } return fc; } @@ -2006,7 +2057,14 @@ char* bh_path_get_parent(char const* filename, bh_allocator a) { } // This function returns a volatile pointer. Do not store it without copying! 
-char* bh_lookup_file(char* filename, char* relative_to, char *suffix, b32 add_suffix, bh_arr(const char *) included_folders, b32 search_included_folders) { +char* bh_lookup_file( + char* filename, + char* relative_to, + char *suffix, + bh_arr(const char *) included_folders, + bh_arr(bh_mapped_folder) mapped_folders, + bh_allocator allocator +) { assert(relative_to != NULL); static char path[512]; @@ -2015,13 +2073,18 @@ char* bh_lookup_file(char* filename, char* relative_to, char *suffix, b32 add_su static char fn[256]; fori (i, 0, 256) fn[i] = 0; - if (!bh_str_ends_with(filename, suffix) && add_suffix) { + if (suffix && !bh_str_ends_with(filename, suffix)) { bh_snprintf(fn, 256, "%s%s", filename, suffix); } else { bh_snprintf(fn, 256, "%s", filename); } - fori (i, 0, 256) if (fn[i] == '/') fn[i] = DIR_SEPARATOR; + b32 contains_colon = 0; + + fori (i, 0, 256) { + if (fn[i] == ':') contains_colon = 1; + if (fn[i] == '/') fn[i] = DIR_SEPARATOR; + } if (bh_str_starts_with(filename, "./")) { if (relative_to[strlen(relative_to) - 1] != DIR_SEPARATOR) @@ -2029,23 +2092,133 @@ char* bh_lookup_file(char* filename, char* relative_to, char *suffix, b32 add_su else bh_snprintf(path, 512, "%s%s", relative_to, fn + 2); - if (bh_file_exists(path)) return bh_path_get_full_name(path, BH_INTERNAL_ALLOCATOR); + if (bh_file_exists(path)) return bh_path_get_full_name(path, allocator); return path; } - if (search_included_folders) { + if (contains_colon && mapped_folders) { + char *source_name = fn; + char *subpath = NULL; + + fori (i, 0, 256) { + if (fn[i] == ':') { + fn[i] = '\0'; + subpath = &fn[i + 1]; + break; + } + } + + assert(subpath); + + bh_arr_each(bh_mapped_folder, folder, mapped_folders) { + if (!strncmp(source_name, folder->name, 256)) { + if (folder->folder[strlen(folder->folder) - 1] != DIR_SEPARATOR) + bh_snprintf(path, 512, "%s%c%s", folder->folder, DIR_SEPARATOR, subpath); + else + bh_snprintf(path, 512, "%s%s", folder->folder, subpath); + + if 
(bh_file_exists(path)) + return bh_path_get_full_name(path, allocator); + + break; + } + } + } + + else if (included_folders) { bh_arr_each(const char *, folder, included_folders) { if ((*folder)[strlen(*folder) - 1] != DIR_SEPARATOR) bh_snprintf(path, 512, "%s%c%s", *folder, DIR_SEPARATOR, fn); else bh_snprintf(path, 512, "%s%s", *folder, fn); - if (bh_file_exists(path)) return bh_path_get_full_name(path, BH_INTERNAL_ALLOCATOR); + if (bh_file_exists(path)) return bh_path_get_full_name(path, allocator); } } - return fn; + return bh_path_get_full_name(fn, allocator); +} + +char* bh_search_for_mapped_file(char* filename, char* relative_to, char *suffix, bh_mapped_folder* mapped_folders, bh_allocator allocator) { + assert(relative_to != NULL); + + static char path[512]; + fori (i, 0, 512) path[i] = 0; + + static char fn[256]; + fori (i, 0, 256) fn[i] = 0; + + if (suffix && !bh_str_ends_with(filename, suffix)) { + bh_snprintf(fn, 256, "%s%s", filename, suffix); + } else { + bh_snprintf(fn, 256, "%s", filename); + } + + b32 contains_colon = 0; + + fori (i, 0, 256) { + if (fn[i] == ':') contains_colon = 1; + if (fn[i] == '/') fn[i] = DIR_SEPARATOR; + } + + // Absolute path + #ifdef _BH_WINDOWS + if (contains_colon && fn[1] == ':') { // Handle C:\... 
+ if (bh_file_exists(fn)) { + return bh_path_get_full_name(fn, allocator); + } + } + #endif + + if (fn[0] == '/') { + if (bh_file_exists(fn)) { + return bh_path_get_full_name(fn, allocator); + } + } + + // mapped_folder:filename + if (contains_colon) { + char *source_name = fn; + char *subpath = NULL; + + fori (i, 0, 256) { + if (fn[i] == ':') { + fn[i] = '\0'; + subpath = &fn[i + 1]; + break; + } + } + + assert(subpath); + + bh_arr_each(bh_mapped_folder, folder, mapped_folders) { + if (!strncmp(source_name, folder->name, 256)) { + if (folder->folder[strlen(folder->folder) - 1] != DIR_SEPARATOR) + bh_snprintf(path, 512, "%s%c%s", folder->folder, DIR_SEPARATOR, subpath); + else + bh_snprintf(path, 512, "%s%s", folder->folder, subpath); + + if (bh_file_exists(path)) + return bh_path_get_full_name(path, allocator); + + break; + } + } + + return NULL; + } + + // Fallback to relative to, "relative_to" + if (relative_to[strlen(relative_to) - 1] != DIR_SEPARATOR) + bh_snprintf(path, 512, "%s%c%s", relative_to, DIR_SEPARATOR, fn); + else + bh_snprintf(path, 512, "%s%s", relative_to, fn); + + if (bh_file_exists(path)) + return bh_path_get_full_name(path, allocator); + + return NULL; } // @@ -2158,7 +2331,7 @@ void bh_dir_close(bh_dir dir) { #undef DIR_SEPARATOR -#ifdef _BH_LINUX +#if defined(_BH_LINUX) bh_file_watch bh_file_watch_new() { // TODO: Proper error checking @@ -2207,6 +2380,52 @@ void bh_file_watch_stop(bh_file_watch *w) { #endif // ifdef _BH_LINUX +#if defined(_BH_DARWIN) + +bh_file_watch bh_file_watch_new() { + bh_file_watch w; + + w.kqueue_fd = kqueue(); + + w.listeners = NULL; + bh_arr_new(bh_heap_allocator(), w.listeners, 4); + + return w; +} + +void bh_file_watch_free(bh_file_watch *w) { + bh_arr_each(struct kevent, ev, w->listeners) { + close(ev->ident); + } + + bh_arr_free(w->listeners); + close(w->kqueue_fd); +} + +void bh_file_watch_add(bh_file_watch *w, const char *filename) { + int new_fd = open(filename, O_EVTONLY); + + struct kevent new_event; + 
EV_SET(&new_event, new_fd, EVFILT_VNODE, EV_ADD | EV_ENABLE | EV_CLEAR, + NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, NULL); + + bh_arr_push(w->listeners, new_event); +} + +b32 bh_file_watch_wait(bh_file_watch *w) { + struct kevent events; + + int nev = kevent(w->kqueue_fd, w->listeners, bh_arr_length(w->listeners), &events, 1, NULL); + if (nev == -1) return 0; + + return 1; +} + +void bh_file_watch_stop(bh_file_watch *w) { +} + +#endif // ifdef _BH_DARWIN + #endif // ifndef BH_NO_FILE diff --git a/shared/include/onyx.h b/shared/include/onyx.h new file mode 100644 index 000000000..785a10e07 --- /dev/null +++ b/shared/include/onyx.h @@ -0,0 +1,182 @@ +#ifndef ONYX_H +#define ONYX_H + +#include + +#if defined(_MSC_VER) + #define API __declspec(dllexport) +#elif defined(__GNUC__) + #define API __attribute__((visibility("default"))) +#else + #define API + #pragma warning Unknown dynamic link import/export semantics. +#endif + + +typedef struct onyx_context_t onyx_context_t; + +typedef enum onyx_option_t { + ONYX_OPTION_NO_OP, + ONYX_OPTION_CURRENT_DIRECTORY, + + ONYX_OPTION_POST_MVP_FEATURES, + ONYX_OPTION_MULTI_THREADING, + + ONYX_OPTION_GENERATE_FOREIGN_INFO, + ONYX_OPTION_GENERATE_TYPE_INFO, + ONYX_OPTION_GENERATE_METHOD_INFO, + ONYX_OPTION_GENERATE_DEBUG_INFO, + ONYX_OPTION_GENERATE_STACK_TRACE, + ONYX_OPTION_GENERATE_NAME_SECTION, + ONYX_OPTION_GENERATE_SYMBOL_INFO, + ONYX_OPTION_GENERATE_LSP_INFO, + ONYX_OPTION_GENERATE_DOC_INFO, + ONYX_OPTION_DISABLE_CORE, + ONYX_OPTION_DISABLE_STALE_CODE, + + ONYX_OPTION_OPTIONAL_SEMICOLONS, + + ONYX_OPTION_DISABLE_FILE_CONTENTS, + ONYX_OPTION_DISABLE_EXTENSIONS, + + ONYX_OPTION_COLLECT_PERF, + + ONYX_OPTION_PLATFORM, +} onyx_option_t; + +typedef enum onyx_pump_t { + ONYX_PUMP_CONTINUE, + ONYX_PUMP_DONE, + ONYX_PUMP_ERRORED, +} onyx_pump_t; + +typedef enum onyx_platform_t { + ONYX_PLATFORM_ONYX = 1, + ONYX_PLATFORM_WASI = 2, + ONYX_PLATFORM_JS = 3, + ONYX_PLATFORM_CUSTOM = 4, +} 
onyx_platform_t; + +typedef enum onyx_error_t { + ONYX_ERROR_WARNING = 2, + ONYX_ERROR_WAITING = 3, + ONYX_ERROR_CRITICAL = 4, + ONYX_ERROR_CLI = 5, +} onyx_error_t; + +typedef enum onyx_output_type_t { + ONYX_OUTPUT_TYPE_WASM = 0, + ONYX_OUTPUT_TYPE_JS = 1, + ONYX_OUTPUT_TYPE_ODOC = 2, + ONYX_OUTPUT_TYPE_OSYM = 3, +} onyx_output_type_t; + +typedef enum onyx_stat_t { + ONYX_STAT_FILE_COUNT = 1, + ONYX_STAT_LINE_COUNT = 2, + ONYX_STAT_TOKEN_COUNT = 3, +} onyx_stat_t; + +typedef enum onyx_event_type_t { + ONYX_EVENT_UNKNOWN = 0, + ONYX_EVENT_LOG = 1, + ONYX_EVENT_PHASE_START = 2, + ONYX_EVENT_SYMBOL_DEFINED = 3, + ONYX_EVENT_ALL_TYPES_CHECKED = 4, +} onyx_event_type_t; + + +// +// Metadata +// + +API int32_t onyx_version_major(); +API int32_t onyx_version_minor(); +API int32_t onyx_version_patch(); +API char *onyx_version_suffix(); +API char *onyx_version_build_time(); +API char *onyx_version_runtime(); + + + +// +// Lifecycle +// + +API onyx_context_t *onyx_context_create(); +API void onyx_context_free(onyx_context_t *ctx); + +/// Call after all options have been set and before the first `onyx_pump`. 
+API void onyx_options_ready(onyx_context_t *ctx); +API onyx_pump_t onyx_pump(onyx_context_t *ctx); + + +// +// Events +// + +API int32_t onyx_event_count(onyx_context_t *ctx); +API onyx_event_type_t onyx_event_type(onyx_context_t *ctx, int event_idx); +API int32_t onyx_event_field_int(onyx_context_t *ctx, int event_idx, char *field); +API const char *onyx_event_field_str(onyx_context_t *ctx, int event_idx, char *field); + + +// +// Options +// +API int32_t onyx_set_option_cstr(onyx_context_t *ctx, onyx_option_t opt, char *value); +API int32_t onyx_set_option_bytes(onyx_context_t *ctx, onyx_option_t opt, char *value, int32_t length); +API int32_t onyx_set_option_int(onyx_context_t *ctx, onyx_option_t opt, int32_t value); +API void onyx_add_defined_var(onyx_context_t *ctx, char *variable, int32_t variable_length, char *value, int32_t value_length); + +// +// Loading code +// + +/// Adds a file to the compilation, following typical `#load` rules. +/// 1. `foo:file.onyx` will search in the `foo` mapped folder. +/// 2. `file.onyx` will search in the current directory for `file.onyx`. 
+API void onyx_include_file(onyx_context_t *ctx, char *filename, int32_t length); +API void onyx_add_mapped_dir(onyx_context_t *ctx, char *mapped_name, int32_t mapped_length, char *dir, int32_t dir_length); + +/// Directly injects Onyx code as a new compilation unit +API void onyx_inject_code(onyx_context_t *ctx, uint8_t *code, int32_t length); + +// +// Errors +// + +API int32_t onyx_error_count(onyx_context_t *ctx); +API const char *onyx_error_message(onyx_context_t *ctx, int32_t error_idx); +API const char *onyx_error_filename(onyx_context_t *ctx, int32_t error_idx); +API int32_t onyx_error_line(onyx_context_t *ctx, int32_t error_idx); +API int32_t onyx_error_column(onyx_context_t *ctx, int32_t error_idx); +API int32_t onyx_error_length(onyx_context_t *ctx, int32_t error_idx); +API int32_t onyx_error_line_text(onyx_context_t *ctx, int32_t error_idx, char *line_buffer, int max_length); +API onyx_error_t onyx_error_rank(onyx_context_t *ctx, int32_t error_idx); + + +// +// Code generation +// + +API int32_t onyx_output_length(onyx_context_t *ctx, onyx_output_type_t type); +API void onyx_output_write(onyx_context_t *ctx, onyx_output_type_t type, void *buffer); + +// +// Compilation Info +// + +API int64_t onyx_stat(onyx_context_t *ctx, onyx_stat_t stat); +API const char *onyx_stat_filepath(onyx_context_t *ctx, int32_t file_index); + + +// +// Running WASM +// + +API void onyx_run_wasm(void *buffer, int32_t buffer_length, int argc, char **argv); +API void onyx_run_wasm_with_debug(void *buffer, int32_t buffer_length, int argc, char **argv, char *socket_path); + +#endif + diff --git a/shared/lib/darwin_arm64/lib/libovmwasm.a b/shared/lib/darwin_arm64/lib/libovmwasm.a deleted file mode 100644 index 515623ffb..000000000 Binary files a/shared/lib/darwin_arm64/lib/libovmwasm.a and /dev/null differ diff --git a/tests/any_tests b/tests/any_tests new file mode 100644 index 000000000..bbeb4b963 --- /dev/null +++ b/tests/any_tests @@ -0,0 +1,4 @@ +Foo { name = "Test", numbers = 
[ 2, 3, 5, 7, 11 ] } +Foo { name = "Test", numbers = [ 2, 3, 5, 7, 11 ] } +DELAYED: Foo { name = "Test", numbers = [ 2, 3, 5, 7, 11 ] } +DELAYED: works diff --git a/tests/any_tests.onyx b/tests/any_tests.onyx new file mode 100644 index 000000000..f56566ba9 --- /dev/null +++ b/tests/any_tests.onyx @@ -0,0 +1,39 @@ +use core {*} + +Foo :: struct { + name: str; + numbers: [] i32; +} + +print_anys :: (args: ..any) { + for arg in args { + stdio.print_writer->write_format_va("{}\n", .[ arg ]); + } + + stdio.stream->flush(); +} + +delayed_print_anys :: (args: ..any) -> (#type () -> void) { + copied_args := misc.any_deep_copy(cast([] any) args); + + return () use (copied_args) { + for arg in copied_args { + printf("DELAYED: {a}\n", arg); + } + }; +} + +main :: () { + f := Foo.{ + "Test", + Slice.copy(.[2, 3, 5, 7, 11]) + }; + + println(f); + + print_anys(f); + + func := delayed_print_anys(f, "works"); + func(); +} + diff --git a/tests/aoc-2020/day1.onyx b/tests/aoc-2020/day1.onyx index 82da7edd9..e9bfa9bce 100644 --- a/tests/aoc-2020/day1.onyx +++ b/tests/aoc-2020/day1.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core use core.io diff --git a/tests/aoc-2020/day10.onyx b/tests/aoc-2020/day10.onyx index 464be807e..8934f655b 100644 --- a/tests/aoc-2020/day10.onyx +++ b/tests/aoc-2020/day10.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/aoc-2020/day11.onyx b/tests/aoc-2020/day11.onyx index 24d6e940b..9ffe8b577 100644 --- a/tests/aoc-2020/day11.onyx +++ b/tests/aoc-2020/day11.onyx @@ -74,11 +74,11 @@ main :: (args: [] cstr) { gos.height = 0; while !string.empty(file) { - line, file~ := string.bisect(file, #char "\n"); + line, file~ := string.bisect(file, '\n'); for ch in line do switch ch { - case #char "." do array.push(&gos.seats, SeatState.Floor); - case #char "L" do array.push(&gos.seats, SeatState.Empty); - case #char "#" do array.push(&gos.seats, SeatState.Occupied); + case '.' 
do array.push(&gos.seats, SeatState.Floor); + case 'L' do array.push(&gos.seats, SeatState.Empty); + case '#' do array.push(&gos.seats, SeatState.Occupied); } gos.width = line.count; diff --git a/tests/aoc-2020/day12.onyx b/tests/aoc-2020/day12.onyx index 08dcaf0b1..48856c0e3 100644 --- a/tests/aoc-2020/day12.onyx +++ b/tests/aoc-2020/day12.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -45,22 +45,22 @@ main :: (args: [] cstr) { string.advance_line(&file); switch dir { - case #char "N" do ship.fy -= val; - case #char "E" do ship.fx += val; - case #char "S" do ship.fy += val; - case #char "W" do ship.fx -= val; - case #char "F" { + case 'N' do ship.fy -= val; + case 'E' do ship.fx += val; + case 'S' do ship.fy += val; + case 'W' do ship.fx -= val; + case 'F' { ship.x += ship.fx * val; ship.y += ship.fy * val; } - case #char "L" do switch val { + case 'L' do switch val { case 90 do rotate_left(&ship); case 180 do turn_around(&ship); case 270 do rotate_right(&ship); } - case #char "R" do switch val { + case 'R' do switch val { case 90 do rotate_right(&ship); case 180 do turn_around(&ship); case 270 do rotate_left(&ship); diff --git a/tests/aoc-2020/day13.onyx b/tests/aoc-2020/day13.onyx index 101037a45..89d7cdede 100644 --- a/tests/aoc-2020/day13.onyx +++ b/tests/aoc-2020/day13.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -51,7 +51,7 @@ main :: (args: [] cstr) { offset: i64 = 0; while !string.empty(file) { - if file.data[0] == #char "x" { + if file.data[0] == 'x' { string.advance(&file, 2); } else { bus := conv.parse_int(&file); diff --git a/tests/aoc-2020/day14.onyx b/tests/aoc-2020/day14.onyx index aa435d0d6..f32c59782 100644 --- a/tests/aoc-2020/day14.onyx +++ b/tests/aoc-2020/day14.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -30,20 +30,20 @@ BitmaskIter :: struct { } bitmask_p2 :: (mask: Bitmask, val: u64) -> Iterator(u64) { - iterator_next :: (data: rawptr) -> (u64, bool) { + iterator_next :: (data: rawptr) -> ? 
u64 { bmi := cast(&BitmaskIter) data; - if bmi.done do return 0, false; + if bmi.done do return .None; for ind in bmi.floating_indicies { is_set := (bmi.val & (1 << cast(u64) ind)) != 0; bmi.val ^= 1 << cast(u64) ind; - if !is_set do return bmi.val, true; + if !is_set do return bmi.val; } bmi.done = true; - return bmi.val, true; + return bmi.val; } iterator_close :: (data: rawptr) { @@ -89,12 +89,12 @@ main :: (args: [] cstr) { string.advance(&file, 3); i := 35; - m, file~ := string.bisect(file, #char "\n"); + m, file~ := string.bisect(file, '\n'); for ch in m { switch ch { - case #char "0" do mask[i] = 0; - case #char "1" do mask[i] = 1; - case #char "X" do mask[i] = 2; + case '0' do mask[i] = 0; + case '1' do mask[i] = 1; + case 'X' do mask[i] = 2; } i -= 1; diff --git a/tests/aoc-2020/day15.onyx b/tests/aoc-2020/day15.onyx index 8f8b303e3..483c32f6b 100644 --- a/tests/aoc-2020/day15.onyx +++ b/tests/aoc-2020/day15.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/aoc-2020/day16.onyx b/tests/aoc-2020/day16.onyx index 6a4c3790d..0eae12f48 100644 --- a/tests/aoc-2020/day16.onyx +++ b/tests/aoc-2020/day16.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -26,7 +26,7 @@ read_ticket_and_validate :: (file: &str, fields: [..] 
Field, ticket_store: [&] u n := cast(u32, conv.parse_int(file)); ticket_store[i] = n; - if file.data[0] == #char "," do string.advance(file, 1); + if file.data[0] == ',' do string.advance(file, 1); valid_count := 0; for &field in fields { @@ -52,9 +52,9 @@ main :: (args: [] cstr) { defer array.free(&fields); // Read until the first empty line - while file.data[0] != #char "\n" { + while file.data[0] != '\n' { field := Field.{}; - field.name, file = string.bisect(file, #char ":"); + field.name, file = string.bisect(file, ':'); string.advance(&file, 1); field.lower0 = ~~ conv.parse_int(&file); diff --git a/tests/aoc-2020/day17.onyx b/tests/aoc-2020/day17.onyx index f30695a0b..4024167f4 100644 --- a/tests/aoc-2020/day17.onyx +++ b/tests/aoc-2020/day17.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {package, *} @@ -14,22 +14,20 @@ CubePos :: struct { x, y, z, w : i32; } -#inject CubePos { - hash :: (c: CubePos) => { - hash: u32 = 7; - hash += hash << 5 + core.hash.to_u32(c.x); - hash += hash << 5 + core.hash.to_u32(c.y); - hash += hash << 5 + core.hash.to_u32(c.z); - hash += hash << 5 + core.hash.to_u32(c.w); - return hash; - } +CubePos.hash :: (c: CubePos) => { + hash: u32 = 7; + hash += hash << 5 + core.hash.to_u32(c.x); + hash += hash << 5 + core.hash.to_u32(c.y); + hash += hash << 5 + core.hash.to_u32(c.z); + hash += hash << 5 + core.hash.to_u32(c.w); + return hash; +} - equals :: (a, b: CubePos) => { - return (a.x == b.x) && - (a.y == b.y) && - (a.z == b.z) && - (a.w == b.w); - } +CubePos.equals :: (a, b: CubePos) => { + return (a.x == b.x) && + (a.y == b.y) && + (a.z == b.z) && + (a.w == b.w); } @@ -62,11 +60,11 @@ main :: (args: [] cstr) { z := 0; while !string.empty(file) { - line, file~ := string.bisect(file, #char "\n"); + line, file~ := string.bisect(file, '\n'); x := 0; for ch in line { - if ch == #char "#" do map.put(&cubes, .{ x, 0, z, 0 }, .{ alive = true }); + if ch == '#' do map.put(&cubes, .{ x, 0, z, 0 }, .{ alive = true }); x += 1; } diff 
--git a/tests/aoc-2020/day18.onyx b/tests/aoc-2020/day18.onyx index 87c7b5d37..4415d1b4f 100644 --- a/tests/aoc-2020/day18.onyx +++ b/tests/aoc-2020/day18.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -6,11 +6,11 @@ parse_factor :: (file: &str) -> u64 { string.strip_leading_whitespace(file); switch file.data[0] { - case #char "0" .. #char "9" { + case '0' ..= '9' { return conv.parse_int(file); } - case #char "(" { + case '(' { string.advance(file, 1); value := parse_expression_mul(file); @@ -31,7 +31,7 @@ parse_expression_add :: (file: &str) -> u64 { left := parse_factor(file); string.strip_leading_whitespace(file); - while file.data[0] == #char "+" { + while file.data[0] == '+' { op := file.data[0]; string.advance(file, 1); @@ -51,7 +51,7 @@ parse_expression_mul :: (file: &str) -> u64 { left := parse_expression_add(file); string.strip_leading_whitespace(file); - while file.data[0] == #char "*" { + while file.data[0] == '*' { op := file.data[0]; string.advance(file, 1); diff --git a/tests/aoc-2020/day19.onyx b/tests/aoc-2020/day19.onyx index 8fe239225..e357c3a23 100644 --- a/tests/aoc-2020/day19.onyx +++ b/tests/aoc-2020/day19.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -113,12 +113,12 @@ main :: (args: [] cstr) { grammar_init(&grammar); defer grammar_free(&grammar); - while file[0] != #char "\n" { + while file[0] != '\n' { nt0 := cast(u32, conv.parse_int(&file)); string.advance(&file, 2); // ': ' - if file[0] == #char "\"" { + if file[0] == '"' { string.advance(&file, 1); // '"' t := file[0]; string.advance(&file, 1); @@ -129,23 +129,23 @@ main :: (args: [] cstr) { while true { nt1 := cast(u32, conv.parse_int(&file)); - if file[0] == #char "\n" { + if file[0] == '\n' { array.push(&grammar.unit_rules, Unit.{ nt0, nt1 }); break; } else { string.advance(&file, 1); // ' ' - if next_ch := file[0]; next_ch >= #char "0" && next_ch <= #char "9" { + if next_ch := file[0]; next_ch >= '0' && next_ch <= '9' { nt2 := cast(u32, 
conv.parse_int(&file)); array.push(&grammar.production_rules, Prod.{ nt0, nt1, nt2 }); - if file[0] == #char " " do string.advance(&file, 1); + if file[0] == ' ' do string.advance(&file, 1); } else { array.push(&grammar.unit_rules, Unit.{ nt0, nt1 }); } - if file[0] == #char "|" { + if file[0] == '|' { string.advance(&file, 1); // ' |' } else { break; @@ -162,7 +162,7 @@ main :: (args: [] cstr) { valid_count := 0; string.advance_line(&file); while !string.empty(file) { - line, file~ := string.bisect(file, #char "\n"); + line, file~ := string.bisect(file, '\n'); if cyk_algorithm(&grammar, line) do valid_count += 1; } diff --git a/tests/aoc-2020/day2.onyx b/tests/aoc-2020/day2.onyx index 393f2260b..9de8bd768 100644 --- a/tests/aoc-2020/day2.onyx +++ b/tests/aoc-2020/day2.onyx @@ -1,6 +1,6 @@ package main -#load "core/module" + use core {*} diff --git a/tests/aoc-2020/day20.onyx b/tests/aoc-2020/day20.onyx index 979f27856..9ac597d97 100644 --- a/tests/aoc-2020/day20.onyx +++ b/tests/aoc-2020/day20.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -189,7 +189,7 @@ index_square_with_orientation :: (data: [&] $T, ori: TO, size: i32, x: i32, y: i case TO.FR90 => &data[y + x * size]; case TO.FR180 => &data[(size - 1 - x) + y * size]; case TO.FR270 => &data[(size - 1 - y) + (size - 1 - x) * size]; - case #default => null + case _ => null }; } @@ -210,8 +210,8 @@ scan_for_monsters :: (forest: [&] u8, ori: TO, width: u32, height: u32) -> bool for my in 0 .. sea_monster_height { for mx in 0 .. sea_monster_width { - if sea_monster[mx + my * sea_monster_width] != #char "#" do continue; - if *index_square_with_orientation(forest, ori, width, x + mx, y + my) != #char "." do continue; + if sea_monster[mx + my * sea_monster_width] != '#' do continue; + if *index_square_with_orientation(forest, ori, width, x + mx, y + my) != '.' 
do continue; is_monster = false; break break; @@ -221,10 +221,10 @@ scan_for_monsters :: (forest: [&] u8, ori: TO, width: u32, height: u32) -> bool if is_monster { for my in 0 .. sea_monster_height { for mx in 0 .. sea_monster_width { - if sea_monster[mx + my * sea_monster_width] != #char "#" do continue; - if *index_square_with_orientation(forest, ori, width, x + mx, y + my) != #char "#" do continue; + if sea_monster[mx + my * sea_monster_width] != '#' do continue; + if *index_square_with_orientation(forest, ori, width, x + mx, y + my) != '#' do continue; - *index_square_with_orientation(forest, ori, width, x + mx, y + my) = #char "o"; + *index_square_with_orientation(forest, ori, width, x + mx, y + my) = 'o'; } } @@ -266,10 +266,10 @@ main :: (args: [] cstr) { td := cast([&] bool) raw_alloc(tile_allocator, sizeof TileData); for y in 0 .. 10 { - line, file~ := string.bisect(file, #char "\n"); + line, file~ := string.bisect(file, '\n'); for x in 0 .. 10 { - td[x + y * TILE_DATA_WIDTH] = (line[x] == #char "#"); + td[x + y * TILE_DATA_WIDTH] = (line[x] == '#'); } } @@ -353,8 +353,8 @@ main :: (args: [] cstr) { for fx in 0 .. 8 { res := *index_square_with_orientation(cast([&] bool) tile.data.data, tile.orientation, 10, fx + 1, fy + 1); loc := (y * 12 * 8 * 8) + (fy * 12 * 8) + (x * 8) + fx; - if res do forest[loc] = #char "#"; - else do forest[loc] = #char "."; + if res do forest[loc] = '#'; + else do forest[loc] = '.'; } } } @@ -365,7 +365,7 @@ main :: (args: [] cstr) { } safe_count := 0; - for c in forest do if c == #char "#" do safe_count += 1; + for c in forest do if c == '#' do safe_count += 1; printf("Safe count: {}\n", safe_count); } diff --git a/tests/aoc-2020/day21.onyx b/tests/aoc-2020/day21.onyx index d5b733e0b..0097caa1e 100644 --- a/tests/aoc-2020/day21.onyx +++ b/tests/aoc-2020/day21.onyx @@ -1,12 +1,12 @@ -#load "core/module" + use core {*} /* - What questions the data layout needs to answer easily: - 1. What are the lists that this item appears on? 
- 2. What allergens are on each list? - 3. What are the lists that each allergen appears on? + What questions the data layout needs to answer easily: + 1. What are the lists that this item appears on? + 2. What allergens are on each list? + 3. What are the lists that each allergen appears on? */ Ingredient :: struct { @@ -18,7 +18,7 @@ Ingredient :: struct { } Allergen :: struct { - name : str = .{ null, 0 }; + name : str = .{ null, 0 }; appears_on : [..] u32 = .{ null, 0, 0, .{ null, null_proc } }; } @@ -31,9 +31,9 @@ ingredient_map : map.Map(str, Ingredient); allergen_map : map.Map(str, Allergen); main :: (args: [] cstr) { - contents := #file_contents "./input/day21.txt"; + contents := #file_contents "./input/day21.txt"; - file := contents; + file := contents; map.init(&ingredient_map); map.init(&allergen_map); @@ -46,12 +46,12 @@ main :: (args: [] cstr) { defer array.free(&foods); line_num := 0; - while !string.empty(file) { + while !string.empty(file) { food : Food; array.init(&food.ingredients, 16); array.init(&food.allergens); - while file[0] != #char "(" { + while file[0] != '(' { ingredient_name := string.read_alphanum(&file); string.advance(&file, 1); // ' ' @@ -69,9 +69,9 @@ main :: (args: [] cstr) { string.advance(&file, 10); // '(contains ' - while file[0] != #char ")" { + while file[0] != ')' { allergen_name := string.read_alphanum(&file); - if file[0] == #char "," do string.advance(&file, 2); // ', ' + if file[0] == ',' do string.advance(&file, 2); // ', ' array.push(&food.allergens, allergen_name); @@ -89,7 +89,7 @@ main :: (args: [] cstr) { string.advance_line(&file); line_num += 1; - } + } definitely_safe := array.make(str); defer array.free(&definitely_safe); diff --git a/tests/aoc-2020/day22.onyx b/tests/aoc-2020/day22.onyx index 5ecff2746..6054c571a 100644 --- a/tests/aoc-2020/day22.onyx +++ b/tests/aoc-2020/day22.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -110,7 +110,7 @@ main :: (args: [] cstr) { defer array.free(&player2); 
string.advance_line(&file); // 'Player 1:' - while file[0] != #char "\n" { + while file[0] != '\n' { card := cast(u32, conv.parse_int(&file)); array.push(&player1, card); diff --git a/tests/aoc-2020/day23.onyx b/tests/aoc-2020/day23.onyx index 71b3a4e79..2b0df480a 100644 --- a/tests/aoc-2020/day23.onyx +++ b/tests/aoc-2020/day23.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/aoc-2020/day24.onyx b/tests/aoc-2020/day24.onyx index e187343b9..14dd614ef 100644 --- a/tests/aoc-2020/day24.onyx +++ b/tests/aoc-2020/day24.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -41,24 +41,24 @@ main :: (args: [] cstr) { s := 0; for ch in line do switch s { case 0 do switch ch { - case #char "e" do loc.x += 1; - case #char "w" do loc.x -= 1; - case #char "n" do s = 1; - case #char "s" do s = 2; + case 'e' do loc.x += 1; + case 'w' do loc.x -= 1; + case 'n' do s = 1; + case 's' do s = 2; } case 1 { switch ch { - case #char "e" { loc.x += 1; loc.y -= 1; } - case #char "w" { loc.y -= 1; } + case 'e' { loc.x += 1; loc.y -= 1; } + case 'w' { loc.y -= 1; } } s = 0; } case 2 { switch ch { - case #char "e" { loc.y += 1; } - case #char "w" { loc.x -= 1; loc.y += 1; } + case 'e' { loc.y += 1; } + case 'w' { loc.x -= 1; loc.y += 1; } } s = 0; } diff --git a/tests/aoc-2020/day25.onyx b/tests/aoc-2020/day25.onyx index 047dce91e..178988684 100644 --- a/tests/aoc-2020/day25.onyx +++ b/tests/aoc-2020/day25.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/aoc-2020/day3.onyx b/tests/aoc-2020/day3.onyx index 5b46bbdab..33140f4da 100644 --- a/tests/aoc-2020/day3.onyx +++ b/tests/aoc-2020/day3.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -24,7 +24,7 @@ main :: (args: [] cstr) { width := 0; height := 0; while true { - line := string.read_until(&contents, #char "\n"); + line := string.read_until(&contents, '\n'); string.advance(&contents, 1); if line.count == 0 do break; @@ -53,7 +53,7 @@ main :: (args: [] cstr) { p.x %= 
width; - if forest[p.x + p.y * width] == #char "#" do tree_count += 1; + if forest[p.x + p.y * width] == '#' do tree_count += 1; } tree_prod *= tree_count; diff --git a/tests/aoc-2020/day4.onyx b/tests/aoc-2020/day4.onyx index aa1cee3c3..0d9650eeb 100644 --- a/tests/aoc-2020/day4.onyx +++ b/tests/aoc-2020/day4.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -7,15 +7,15 @@ process_passport :: (contents: &str) -> u32 { field_count := 0; while true { - line := string.read_until(contents, #char "\n"); + line := string.read_until(contents, '\n'); string.advance(contents, 1); if line.count == 0 do break; - fields := string.split(line, #char " "); + fields := string.split(line, ' '); defer cfree(fields.data); for field in fields { - data := string.split(field, #char ":"); + data := string.split(field, ':'); defer cfree(data.data); if !string.equal(data[0], "cid") { diff --git a/tests/aoc-2020/day5.onyx b/tests/aoc-2020/day5.onyx index d7e2b5390..95d5d33f7 100644 --- a/tests/aoc-2020/day5.onyx +++ b/tests/aoc-2020/day5.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -11,14 +11,14 @@ main :: (args: [] cstr) { max_val := 0; while true { - line := string.read_until(&contents, #char "\n"); + line := string.read_until(&contents, '\n'); string.advance(&contents); if line.count == 0 do break; val := 0; for ch in line { val *= 2; - if ch == #char "B" || ch == #char "R" do val += 1; + if ch == 'B' || ch == 'R' do val += 1; } max_val = math.max(max_val, val); diff --git a/tests/aoc-2020/day6.onyx b/tests/aoc-2020/day6.onyx index 3cbc5b173..9b5370d9f 100644 --- a/tests/aoc-2020/day6.onyx +++ b/tests/aoc-2020/day6.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -7,11 +7,11 @@ part_1 :: (contents: &str) -> u32 { for &ch in chars do *ch = false; while true { - line := string.read_until(contents, #char "\n"); + line := string.read_until(contents, '\n'); string.advance(contents, 1); if line.count == 0 do break; - for ch in line do chars[~~ch - cast(u32) 
#char "a"] = true; + for ch in line do chars[~~ch - cast(u32) 'a'] = true; } sum := 0; @@ -26,13 +26,13 @@ part_2 :: (contents: &str) -> u32 { person_count := 0; while true { - line := string.read_until(contents, #char "\n"); + line := string.read_until(contents, '\n'); string.advance(contents, 1); if line.count == 0 do break; person_count += 1; - for ch in line do chars[~~ch - cast(u32) #char "a"] += 1; + for ch in line do chars[~~ch - cast(u32) 'a'] += 1; } sum := 0; diff --git a/tests/aoc-2020/day7.onyx b/tests/aoc-2020/day7.onyx index d2dca4497..884d74372 100644 --- a/tests/aoc-2020/day7.onyx +++ b/tests/aoc-2020/day7.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -59,12 +59,12 @@ main :: (args: [] cstr) { defer bg_free(&graph); while true { - name := string.read_until(&file, #char " ", 1); + name := string.read_until(&file, ' ', 1); if name.count == 0 do break; container := bg_get_node(&graph, name); - string.read_until(&file, #char " ", 2); + string.read_until(&file, ' ', 2); while true { if string.starts_with(file, " no") do break; @@ -72,7 +72,7 @@ main :: (args: [] cstr) { count := cast(u32, conv.parse_int(&file)); string.advance(&file, 1); - contained_name := string.read_until(&file, #char " ", 1); + contained_name := string.read_until(&file, ' ', 1); contained := bg_get_node(&graph, contained_name); // Part 1 @@ -84,8 +84,8 @@ main :: (args: [] cstr) { // Part 2 array.push(&container.contain, .{ bag = contained, count = count }); - bag_word := string.read_until_any(&file, 1, #char " ", #char "\n"); - if bag_word[bag_word.count - 1] == #char "." do break; + bag_word := string.read_until_any(&file, 1, ' ', '\n'); + if bag_word[bag_word.count - 1] == '.' 
do break; } string.advance_line(&file); diff --git a/tests/aoc-2020/day8.onyx b/tests/aoc-2020/day8.onyx index c657512a3..428b1472d 100644 --- a/tests/aoc-2020/day8.onyx +++ b/tests/aoc-2020/day8.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -60,7 +60,7 @@ main :: (args: [] cstr) { string.advance_line(&file); - if sign == #char "-" do val *= -1; + if sign == '-' do val *= -1; opcode : OpCode; if string.equal(word, "nop") do opcode = OpCode.Nop; diff --git a/tests/aoc-2020/day9.onyx b/tests/aoc-2020/day9.onyx index cb857b32a..e5f2e03a9 100644 --- a/tests/aoc-2020/day9.onyx +++ b/tests/aoc-2020/day9.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/aoc-2021/day01.onyx b/tests/aoc-2021/day01.onyx index e408c798e..6f61b7d78 100644 --- a/tests/aoc-2021/day01.onyx +++ b/tests/aoc-2021/day01.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core.io use core.os diff --git a/tests/aoc-2021/day02.onyx b/tests/aoc-2021/day02.onyx index edb46ed5d..2138978de 100644 --- a/tests/aoc-2021/day02.onyx +++ b/tests/aoc-2021/day02.onyx @@ -3,45 +3,44 @@ PART :: 2 use core {*} main :: (args) => { - for file in os.with_file("tests/aoc-2021/input/day02.txt") { - reader := io.reader_make(file); - - #if PART == 1 { - horizontal, vertical := 0, 0; - while !io.reader_empty(&reader) { - parts := string.split(io.read_line(&reader, inplace=true), #char " "); - defer memory.free_slice(&parts); - - value := cast(i32) conv.str_to_i64(parts[1]); - switch parts[0] { - case "forward" do horizontal += value; - case "down" do vertical += value; - case "up" do vertical -= value; - } + use file := os.open("tests/aoc-2021/input/day02.txt")->unwrap(); + use reader := io.reader_make(&file); + + #if PART == 1 { + horizontal, vertical := 0, 0; + while !io.reader_empty(&reader) { + parts := string.split(io.read_line(&reader, inplace=true), ' '); + defer memory.free_slice(&parts); + + value := cast(i32) conv.str_to_i64(parts[1]); + switch parts[0] { + case "forward" do 
horizontal += value; + case "down" do vertical += value; + case "up" do vertical -= value; } - - printf("Part 1: {}\n", horizontal * vertical); } - #if PART == 2 { - horizontal, vertical, aim : i64; - horizontal, vertical, aim = 0, 0, 0; - while !io.reader_empty(&reader) { - parts := string.split(io.read_line(&reader, inplace=true), #char " "); - defer memory.free_slice(&parts); - - value := conv.str_to_i64(parts[1]); - switch parts[0] { - case "forward" { - horizontal += value; - vertical += aim * value; - } - case "down" do aim += value; - case "up" do aim -= value; + printf("Part 1: {}\n", horizontal * vertical); + } + + #if PART == 2 { + horizontal, vertical, aim : i64; + horizontal, vertical, aim = 0, 0, 0; + while !io.reader_empty(&reader) { + parts := string.split(io.read_line(&reader, inplace=true), ' '); + defer memory.free_slice(&parts); + + value := conv.str_to_i64(parts[1]); + switch parts[0] { + case "forward" { + horizontal += value; + vertical += aim * value; } + case "down" do aim += value; + case "up" do aim -= value; } - - printf("Part 2: {}\n", horizontal * vertical); } + + printf("Part 2: {}\n", horizontal * vertical); } -} \ No newline at end of file +} diff --git a/tests/aoc-2021/day03.onyx b/tests/aoc-2021/day03.onyx index 9228403ff..294975289 100644 --- a/tests/aoc-2021/day03.onyx +++ b/tests/aoc-2021/day03.onyx @@ -1,6 +1,6 @@ PART :: 2 -#load "core/module" + use core {*} @@ -11,9 +11,9 @@ read_binary :: (r: &io.Reader) -> i32 { curr, err := io.peek_byte(r); if err != .None do return n; - while curr == #char "0" || curr == #char "1" { + while curr == '0' || curr == '1' { n *= 2; - n += cast(u32) (curr - #char "0"); + n += cast(u32) (curr - '0'); r.start += 1; curr, err = io.peek_byte(r); @@ -26,57 +26,56 @@ read_binary :: (r: &io.Reader) -> i32 { main :: (args) => { BITS :: 12 - for os.with_file("./tests/aoc-2021/input/day03.txt") { - reader := io.reader_make(it); - - nums: [..] 
i32; - while !io.reader_empty(&reader) { - nums << read_binary(&reader); - } + use file := os.open("./tests/aoc-2021/input/day03.txt")->unwrap(); + use reader := io.reader_make(&file); - num1 := 0; - for BITS { - one_count := 0; - for num in nums { - if num & (1 << it) != 0 do one_count += 1; - } + nums: [..] i32; + while !io.reader_empty(&reader) { + nums << read_binary(&reader); + } - if one_count >= (nums.count / 2) do num1 |= (1 << it); + num1 := 0; + for BITS { + one_count := 0; + for num in nums { + if num & (1 << it) != 0 do one_count += 1; } - num2 := ((1 << BITS) - 1) & (~num1); + if one_count >= (nums.count / 2) do num1 |= (1 << it); + } - printf("Part 1: {}\n", num1 * num2); + num2 := ((1 << BITS) - 1) & (~num1); - oxygen_array := array.copy(&nums); - co2_array := array.copy(&nums); + printf("Part 1: {}\n", num1 * num2); - filter_array :: macro (arr: [..] i32, index: i32, comparison: Code) { - A := 0; - B := (arr.count + 1) / 2; + oxygen_array := array.copy(&nums); + co2_array := array.copy(&nums); - // Count the number of ones - for arr { - if (it & (1 << index)) != 0 do A += 1; - } + filter_array :: macro (arr: [..] 
i32, index: i32, comparison: Code) { + A := 0; + B := (arr.count + 1) / 2; - expected := (1 << index) if (#unquote comparison) else 0; + // Count the number of ones + for arr { + if (it & (1 << index)) != 0 do A += 1; + } - while i := 0; i < arr.count { - defer i += 1; + expected := (1 << index) if (#unquote comparison) else 0; - if (arr[i] & (1 << index)) != expected { - array.fast_delete(&arr, i); - i -= 1; - } - } - } + while i := 0; i < arr.count { + defer i += 1; - for iter.as_iter(range.{ BITS - 1, 0, -1 }) { - filter_array(oxygen_array, it, [](A >= B)); - filter_array(co2_array, it, [](A < B)); + if (arr[i] & (1 << index)) != expected { + array.fast_delete(&arr, i); + i -= 1; + } } + } - printf("Part 2: {}\n", oxygen_array[0] * co2_array[0]); + for iter.as_iter(range.{ BITS - 1, 0, -1 }) { + filter_array(oxygen_array, it, [](A >= B)); + filter_array(co2_array, it, [](A < B)); } + + printf("Part 2: {}\n", oxygen_array[0] * co2_array[0]); } diff --git a/tests/aoc-2021/day04.onyx b/tests/aoc-2021/day04.onyx index 831dff835..6ad3565c3 100644 --- a/tests/aoc-2021/day04.onyx +++ b/tests/aoc-2021/day04.onyx @@ -17,63 +17,62 @@ board_score :: (use b: &Board) => { } main :: (args) => { - for os.with_file("./tests/aoc-2021/input/day04.txt") { - reader := io.reader_make(it); - - numbers_line := io.read_line(&reader, inplace=true, consume_newline=true); - numbers_str := string.split(numbers_line, #char ","); - numbers := memory.make_slice(Cell, numbers_str.count); - for numbers_str.count do numbers[it] = ~~ conv.parse_int(numbers_str[it]); - - boards: [..] 
Board; - while !io.reader_empty(&reader) { - board := array.alloc_one(&boards); - board.has_won = false; - memory.set(&board.marked, 0, sizeof typeof board.marked); - - for 25 { - board.cells[it] = ~~ io.read_u32(&reader); - } - - io.skip_whitespace(&reader); + use file := os.open("./tests/aoc-2021/input/day04.txt")->unwrap(); + use reader := io.reader_make(&file); + + numbers_line := io.read_line(&reader, inplace=true, consume_newline=true); + numbers_str := string.split(numbers_line, ','); + numbers := memory.make_slice(Cell, numbers_str.count); + for numbers_str.count do numbers[it] = ~~ conv.parse_int(numbers_str[it]); + + boards: [..] Board; + while !io.reader_empty(&reader) { + board := array.alloc_one(&boards); + board.has_won = false; + memory.set(&board.marked, 0, sizeof typeof board.marked); + + for 25 { + board.cells[it] = ~~ io.read_u32(&reader); } - winning_board: &Board = null; - worst_board : &Board = null; + io.skip_whitespace(&reader); + } + + winning_board: &Board = null; + worst_board : &Board = null; - for called in numbers { - for & board in boards { - if board.has_won do continue; + for called in numbers { + for & board in boards { + if board.has_won do continue; - // Whatever the last board we touch is must be the worst one. - worst_board = board; + // Whatever the last board we touch is must be the worst one. 
+ worst_board = board; - for 25 { - if board.cells[it] == called { - board.marked[it] = true; + for 25 { + if board.cells[it] == called { + board.marked[it] = true; - x, y := it % 5, it / 5; - v_marked_count := 0; - h_marked_count := 0; - for 5 do if board.marked[it + y * 5] do h_marked_count += 1; - for 5 do if board.marked[x + it * 5] do v_marked_count += 1; + x, y := it % 5, it / 5; + v_marked_count := 0; + h_marked_count := 0; + for 5 do if board.marked[it + y * 5] do h_marked_count += 1; + for 5 do if board.marked[x + it * 5] do v_marked_count += 1; - if v_marked_count == 5 || h_marked_count == 5 { - board.won_on = called; - board.has_won = true; + if v_marked_count == 5 || h_marked_count == 5 { + board.won_on = called; + board.has_won = true; - if winning_board == null { - winning_board = board; - } + if winning_board == null { + winning_board = board; } } } } } - - winning_board_score := board_score(winning_board); - worst_board_score := board_score(worst_board); - printf("Part 1: {}\n", winning_board_score); - printf("Part 2: {}\n", worst_board_score); } + + winning_board_score := board_score(winning_board); + worst_board_score := board_score(worst_board); + printf("Part 1: {}\n", winning_board_score); + printf("Part 2: {}\n", worst_board_score); } diff --git a/tests/aoc-2021/day05.onyx b/tests/aoc-2021/day05.onyx index a9745fb15..4faec9dad 100644 --- a/tests/aoc-2021/day05.onyx +++ b/tests/aoc-2021/day05.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -17,7 +17,7 @@ line_points :: (l: Line) -> Iterator(Point) { if l.x1 != l.x2 && l.y1 != l.y2 { if math.abs(l.x1 - l.x2) != math.abs(l.y1 - l.y2) { - return .{ null, ((a:rawptr) => .{0,0}, false), null_proc }; + return iter.empty(Point); } } @@ -28,11 +28,11 @@ line_points :: (l: Line) -> Iterator(Point) { c.dx = 1 if l.x2 > l.x1 else -1 if l.x2 < l.x1 else 0; c.dy = 1 if l.y2 > l.y1 else -1 if l.y2 < l.y1 else 0; - next :: (use c: &Context) -> (Point, bool) { - if curr_t > max_t do return .{0,0}, 
false; + next :: (use c: &Context) -> ? Point { + if curr_t > max_t do return .None; defer curr_t += 1; - return .{ l.x1 + curr_t * dx, l.y1 + curr_t * dy }, true; + return Point.{ l.x1 + curr_t * dx, l.y1 + curr_t * dy }; } return .{c, next, cfree}; @@ -43,38 +43,37 @@ Point :: struct {x, y: u32;} #operator == (p1, p2: Point) => p1.x == p2.x && p1.y == p2.y; main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day05.txt") { - reader := io.reader_make(file); + use file := os.open("./tests/aoc-2021/input/day05.txt")->unwrap(); + use reader := io.reader_make(&file); - lines: [..] Line; + lines: [..] Line; - while !io.reader_empty(&reader) { - x1 := io.read_u32(&reader); - io.skip_bytes(&reader, 1); - y1 := io.read_u32(&reader); - io.skip_bytes(&reader, 4); + while !io.reader_empty(&reader) { + x1 := io.read_u32(&reader); + io.skip_bytes(&reader, 1); + y1 := io.read_u32(&reader); + io.skip_bytes(&reader, 4); - x2 := io.read_u32(&reader); - io.skip_bytes(&reader, 1); - y2 := io.read_u32(&reader); - io.skip_whitespace(&reader); + x2 := io.read_u32(&reader); + io.skip_bytes(&reader, 1); + y2 := io.read_u32(&reader); + io.skip_whitespace(&reader); - lines << .{x1, y1, x2, y2}; - } + lines << .{x1, y1, x2, y2}; + } - point_count: Map(Point, u32); + point_count: Map(Point, u32); - for &line in lines { - for p in line_points(*line) { - point_count[p] = (point_count[p] ?? 0) + 1; - } - } - - count := 0; - for & point_count.entries { - if it.value >= 2 do count += 1; + for &line in lines { + for p in line_points(*line) { + point_count[p] = (point_count[p] ?? 
0) + 1; } + } - printf("Part 2: {}\n", count); + count := 0; + for & point_count.entries { + if it.value >= 2 do count += 1; } + + printf("Part 2: {}\n", count); } diff --git a/tests/aoc-2021/day06.onyx b/tests/aoc-2021/day06.onyx index d5e9f6c60..773a42690 100644 --- a/tests/aoc-2021/day06.onyx +++ b/tests/aoc-2021/day06.onyx @@ -1,28 +1,27 @@ -#load "core/module" + use core {*} main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day06.txt") { - reader := io.reader_make(file); - start_str := io.read_all(&reader); - - start_list := string.split(start_str, #char ","); + use file := os.open("./tests/aoc-2021/input/day06.txt")->unwrap(); + use reader := io.reader_make(&file); + start_str := io.read_all(&reader); - fish: [9] i64; - for start_list { - value := cast(i32) conv.str_to_i64(it); - fish[value] += 1; - } + start_list := string.split(start_str, ','); - for day in 256 { - new_fish := fish[0]; - for 8 do fish[it] = fish[it + 1]; - fish[6] += new_fish; - fish[8] = new_fish; - } + fish: [9] i64; + for start_list { + value := cast(i32) conv.str_to_i64(it); + fish[value] += 1; + } - total := array.sum(fish); - printf("Part 2: {}\n", total); + for day in 256 { + new_fish := fish[0]; + for 8 do fish[it] = fish[it + 1]; + fish[6] += new_fish; + fish[8] = new_fish; } -} \ No newline at end of file + + total := array.sum(fish); + printf("Part 2: {}\n", total); +} diff --git a/tests/aoc-2021/day07.onyx b/tests/aoc-2021/day07.onyx index 029653d64..8f31f9617 100644 --- a/tests/aoc-2021/day07.onyx +++ b/tests/aoc-2021/day07.onyx @@ -1,34 +1,33 @@ -#load "core/module" + use core {*} main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day07.txt") { - reader := io.reader_make(file); - nums := io.read_all(&reader) - |> string.split(#char ",") - |> iter.as_iter() - |> iter.map((x) => cast(i32) conv.parse_int(x)) - |> iter.to_array(); - - min := array.fold(nums, nums[0], math.min); - max := array.fold(nums, nums[0], math.max); + use file := 
os.open("./tests/aoc-2021/input/day07.txt")->unwrap(); + use reader := io.reader_make(&file); + nums := io.read_all(&reader) + |> string.split(',') + |> iter.as_iter() + |> iter.map((x) => cast(i32) conv.parse_int(x)) + |> iter.to_array(); - min_cost := 0x7fffffff; - best_middle := 0; - for middle in min .. max { - total_cost := 0; - for nums { - dist := math.abs(it - middle); - total_cost += dist * (dist + 1) / 2; // math.choose(dist + 1, 2); - } + min := array.fold(nums, nums[0], math.min); + max := array.fold(nums, nums[0], math.max); - if total_cost < min_cost { - min_cost = total_cost; - best_middle = middle; - } + min_cost := 0x7fffffff; + best_middle := 0; + for middle in min .. max { + total_cost := 0; + for nums { + dist := math.abs(it - middle); + total_cost += dist * (dist + 1) / 2; // math.choose(dist + 1, 2); } - printf("Part 2: {} with fuel {}\n", best_middle, min_cost); + if total_cost < min_cost { + min_cost = total_cost; + best_middle = middle; + } } + + printf("Part 2: {} with fuel {}\n", best_middle, min_cost); } diff --git a/tests/aoc-2021/day08.onyx b/tests/aoc-2021/day08.onyx index e5f954bd3..cc17a1d1b 100644 --- a/tests/aoc-2021/day08.onyx +++ b/tests/aoc-2021/day08.onyx @@ -1,5 +1,5 @@ PART :: 2 -#load "core/module" + use core {*} @@ -28,7 +28,7 @@ segments := ([7] bool).[ decode_line :: (left, right: str) -> u32 { solved_segments: Map(u8, u32); - left_segments := string.split(left, #char " "); + left_segments := string.split(left, ' '); defer memory.free_slice(&left_segments); // Look for 1. 
@@ -120,7 +120,7 @@ decode_line :: (left, right: str) -> u32 { } sum := 0; - words := string.split(right, #char " "); + words := string.split(right, ' '); defer memory.free_slice(&words); for& words { string.strip_whitespace(it); @@ -158,35 +158,34 @@ decode_line :: (left, right: str) -> u32 { } main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day08.txt") { - reader := io.reader_make(file); - - answer := 0; - while !io.reader_empty(&reader) { - line := io.read_line(&reader, consume_newline=true, inplace=true); - left, right := do { - parts := string.split(line, #char "|"); - defer memory.free_slice(&parts); - return parts[0], parts[1]; - }; - string.strip_whitespace(&left); - string.strip_whitespace(&right); - - #if PART == 1 { - words := string.split(right, #char " "); - for& words { - string.strip_whitespace(it); - switch it.count { - case 2, 3, 4, 7 do answer += 1; - } + use file := os.open("./tests/aoc-2021/input/day08.txt")->unwrap(); + use reader := io.reader_make(&file); + + answer := 0; + while !io.reader_empty(&reader) { + line := io.read_line(&reader, consume_newline=true, inplace=true); + left, right := do { + parts := string.split(line, '|'); + defer memory.free_slice(&parts); + return parts[0], parts[1]; + }; + string.strip_whitespace(&left); + string.strip_whitespace(&right); + + #if PART == 1 { + words := string.split(right, ' '); + for& words { + string.strip_whitespace(it); + switch it.count { + case 2, 3, 4, 7 do answer += 1; } } - - #if PART == 2 { - answer += decode_line(left, right); - } } - printf("Part {}: {}\n", 1 if PART == 1 else 2, answer); + #if PART == 2 { + answer += decode_line(left, right); + } } + + printf("Part {}: {}\n", 1 if PART == 1 else 2, answer); } diff --git a/tests/aoc-2021/day09.onyx b/tests/aoc-2021/day09.onyx index 54ee6a3b9..a6b265281 100644 --- a/tests/aoc-2021/day09.onyx +++ b/tests/aoc-2021/day09.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -65,66 +65,65 @@ find_span :: macro 
(low: Pos) -> u32 { } main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day09.txt") { - reader := io.reader_make(file); - - heightmap: Map(Pos, Cell); + use file := os.open("./tests/aoc-2021/input/day09.txt")->unwrap(); + use reader := io.reader_make(&file); - height, width := 0, 0; - while !io.reader_empty(&reader) { - line := io.read_line(&reader, consume_newline=false, inplace=true); - io.skip_whitespace(&reader); + heightmap: Map(Pos, Cell); - for line.count { - heightmap[Pos.{it, height}] = Cell.{cast(u32) (line[it] - #char "0")}; - } + height, width := 0, 0; + while !io.reader_empty(&reader) { + line := io.read_line(&reader, consume_newline=false, inplace=true); + io.skip_whitespace(&reader); - width = line.count; - height += 1; + for line.count { + heightmap[Pos.{it, height}] = Cell.{cast(u32) (line[it] - '0')}; } - for y in height - 1 do for x in width { - map.update(&heightmap, .{x,y}) { - it.dy = it.height - heightmap[Pos.{x,y+1}]->unwrap().height; - } + width = line.count; + height += 1; + } + + for y in height - 1 do for x in width { + map.update(&heightmap, .{x,y}) { + it.dy = it.height - heightmap[Pos.{x,y+1}]->unwrap().height; } - for x in width - 1 do for y in height { - map.update(&heightmap, .{x,y}) { - it.dx = it.height - heightmap[Pos.{x+1,y}]->unwrap().height; - } + } + for x in width - 1 do for y in height { + map.update(&heightmap, .{x,y}) { + it.dx = it.height - heightmap[Pos.{x+1,y}]->unwrap().height; } + } - lowest: [..] Pos; - risk_sum := 0; - for y in height do for x in width { - h := &heightmap[Pos.{x,y}]; - if x < width - 1 && h.dx >= 0 do continue; - if y < height - 1 && h.dy >= 0 do continue; - - if x > 0 { - if heightmap[Pos.{x-1, y}]->unwrap().dx <= 0 do continue; - } + lowest: [..] 
Pos; + risk_sum := 0; + for y in height do for x in width { + h := &heightmap[Pos.{x,y}]; + if x < width - 1 && h.dx >= 0 do continue; + if y < height - 1 && h.dy >= 0 do continue; - if y > 0 { - if heightmap[Pos.{x, y-1}]->unwrap().dy <= 0 do continue; - } + if x > 0 { + if heightmap[Pos.{x-1, y}]->unwrap().dx <= 0 do continue; + } - lowest << .{x, y}; - risk_sum += h.height + 1; + if y > 0 { + if heightmap[Pos.{x, y-1}]->unwrap().dy <= 0 do continue; } - printf("Part 1: {}\n", risk_sum); + lowest << .{x, y}; + risk_sum += h.height + 1; + } + + printf("Part 1: {}\n", risk_sum); - lowest_count: Map(Pos, i32); - for low in lowest do lowest_count[low] = find_span(low); + lowest_count: Map(Pos, i32); + for low in lowest do lowest_count[low] = find_span(low); - array.quicksort(lowest_count.entries, (a, b) => b.value - a.value); + array.quicksort(lowest_count.entries, (a, b) => b.value - a.value); - answer := iter.as_iter(lowest_count.entries) - |> iter.map((x) => x.value) - |> iter.take(3) - |> iter.fold(1, (x, y) => x * y); + answer := iter.as_iter(lowest_count.entries) + |> iter.map((x) => x.value) + |> iter.take(3) + |> iter.fold(1, (x, y) => x * y); - printf("Part 2: {}\n", answer); - } + printf("Part 2: {}\n", answer); } diff --git a/tests/aoc-2021/day10.onyx b/tests/aoc-2021/day10.onyx index d8cbf8969..189999b5d 100644 --- a/tests/aoc-2021/day10.onyx +++ b/tests/aoc-2021/day10.onyx @@ -1,73 +1,72 @@ -#load "core/module" + use core {*} main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day10.txt") { - reader := io.reader_make(file); + use file := os.open("./tests/aoc-2021/input/day10.txt")->unwrap(); + use reader := io.reader_make(&file); - bracket_map: Map(u8, u8); - bracket_map[#char "("] = #char ")"; - bracket_map[#char "["] = #char "]"; - bracket_map[#char "{"] = #char "}"; - bracket_map[#char "<"] = #char ">"; + bracket_map: Map(u8, u8); + bracket_map['('] = ')'; + bracket_map['['] = ']'; + bracket_map['{'] = '}'; + bracket_map['<'] = '>'; 
- score_map: Map(u8, u32); - score_map[#char ")"] = 3; - score_map[#char "]"] = 57; - score_map[#char "}"] = 1197; - score_map[#char ">"] = 25137; + score_map: Map(u8, u32); + score_map[')'] = 3; + score_map[']'] = 57; + score_map['}'] = 1197; + score_map['>'] = 25137; - corrupted_score := 0; - completion_scores: [..] u64; - while !io.reader_empty(&reader) { - line := io.read_line(&reader, consume_newline=false, inplace=true); - io.skip_whitespace(&reader); + corrupted_score := 0; + completion_scores: [..] u64; + while !io.reader_empty(&reader) { + line := io.read_line(&reader, consume_newline=false, inplace=true); + io.skip_whitespace(&reader); - char_stack: [..] u8; - defer array.free(&char_stack); - for ch in line { - switch ch { - case #char "(", #char "[", #char "<", #char "{" { - char_stack << bracket_map[ch]->unwrap(); - } + char_stack: [..] u8; + defer array.free(&char_stack); + for ch in line { + switch ch { + case '(', '[', '<', '{' { + char_stack << bracket_map[ch]->unwrap(); + } - case #char ")", #char "]", #char ">", #char "}" { - x := array.pop(&char_stack); - if x != ch { - // printf("Expected '{}', found '{}' instead.\n", x, ch); - corrupted_score += score_map[ch]->unwrap(); - continue continue; - } + case ')', ']', '>', '}' { + x := array.pop(&char_stack); + if x != ch { + // printf("Expected '{}', found '{}' instead.\n", x, ch); + corrupted_score += score_map[ch]->unwrap(); + continue continue; } } } + } - assert(char_stack.count != 0, "Invalid input."); + assert(char_stack.count != 0, "Invalid input."); - complete_score: u64 = 0; - while char_stack.count != 0 { - complete_score *= 5; - switch array.pop(&char_stack) { - case #char ")" do complete_score += 1; - case #char "]" do complete_score += 2; - case #char "}" do complete_score += 3; - case #char ">" do complete_score += 4; - } + complete_score: u64 = 0; + while char_stack.count != 0 { + complete_score *= 5; + switch array.pop(&char_stack) { + case ')' do complete_score += 1; + case ']' do 
complete_score += 2; + case '}' do complete_score += 3; + case '>' do complete_score += 4; } - - completion_scores << complete_score; } - printf("Part 1: {}\n", corrupted_score); + completion_scores << complete_score; + } + + printf("Part 1: {}\n", corrupted_score); - array.quicksort(completion_scores, (x, y) => { - if x == y do return 0; - if x < y do return -1; - return 1; - }); + array.quicksort(completion_scores, (x, y) => { + if x == y do return 0; + if x < y do return -1; + return 1; + }); - println(completion_scores); - printf("Part 2: {}\n", completion_scores[completion_scores.count / 2]); - } + println(completion_scores); + printf("Part 2: {}\n", completion_scores[completion_scores.count / 2]); } diff --git a/tests/aoc-2021/day11.onyx b/tests/aoc-2021/day11.onyx index 9aebe9819..16c00429f 100644 --- a/tests/aoc-2021/day11.onyx +++ b/tests/aoc-2021/day11.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -7,73 +7,72 @@ Pos :: struct {x, y:i32;} #operator == (p1, p2: Pos) => p1.x == p2.x && p1.y == p2.y; main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day11.txt") { - reader := io.reader_make(file); + use file := os.open("./tests/aoc-2021/input/day11.txt")->unwrap(); + use reader := io.reader_make(&file); - octopuses: [..] u32; - while !io.reader_empty(&reader) { - line := io.read_line(&reader, consume_newline=false, inplace=true); - io.skip_whitespace(&reader); + octopuses: [..] 
u32; + while !io.reader_empty(&reader) { + line := io.read_line(&reader, consume_newline=false, inplace=true); + io.skip_whitespace(&reader); - for ch in line do octopuses << ~~(ch - #char "0"); - } + for ch in line do octopuses << ~~(ch - '0'); + } - get_octopus :: macro (x, y) => { - if x < 0 || y < 0 || x >= 10 || y >= 10 do return -1; - return octopuses[y * 10 + x]; - } + get_octopus :: macro (x, y) => { + if x < 0 || y < 0 || x >= 10 || y >= 10 do return -1; + return octopuses[y * 10 + x]; + } - set_octopus :: macro (x, y, v: i32) { - if !(x < 0 || y < 0 || x >= 10 || y >= 10) { - octopuses[y * 10 + x] = v; - } + set_octopus :: macro (x, y, v: i32) { + if !(x < 0 || y < 0 || x >= 10 || y >= 10) { + octopuses[y * 10 + x] = v; } + } - inc_octopus :: macro (x, y) => { - if x < 0 || y < 0 || x >= 10 || y >= 10 do return -1; - octopuses[y * 10 + x] += 1; - return octopuses[y * 10 + x]; - } + inc_octopus :: macro (x, y) => { + if x < 0 || y < 0 || x >= 10 || y >= 10 do return -1; + octopuses[y * 10 + x] += 1; + return octopuses[y * 10 + x]; + } - flash_count := 0; + flash_count := 0; - step := 0; - sync_step := 0; - while true { - step += 1; - for &o in octopuses do *o += 1; + step := 0; + sync_step := 0; + while true { + step += 1; + for &o in octopuses do *o += 1; - #persist to_flash: Set(Pos); - for y in 10 do for x in 10 { - if get_octopus(x, y) >= 10 { - to_flash << .{x, y}; - } + #persist to_flash: Set(Pos); + for y in 10 do for x in 10 { + if get_octopus(x, y) >= 10 { + to_flash << .{x, y}; } + } - for flash in iter.as_iter(&to_flash) { - for y in -1 .. 2 do for x in -1 .. 2 { - if y == 0 && x == 0 do continue; + for flash in iter.as_iter(&to_flash) { + for y in -1 .. 2 do for x in -1 .. 
2 { + if y == 0 && x == 0 do continue; - if inc_octopus(flash.x + x, flash.y + y) >= 10 { - to_flash << .{flash.x + x, flash.y + y}; - } + if inc_octopus(flash.x + x, flash.y + y) >= 10 { + to_flash << .{flash.x + x, flash.y + y}; } } + } - for flash in iter.as_iter(&to_flash) { - set_octopus(flash.x, flash.y, 0); - if step <= 100 do flash_count += 1; - } - - if to_flash.entries.count == 100 { - sync_step = step; - break; - } + for flash in iter.as_iter(&to_flash) { + set_octopus(flash.x, flash.y, 0); + if step <= 100 do flash_count += 1; + } - to_flash->clear(); + if to_flash.entries.count == 100 { + sync_step = step; + break; } - printf("Part 1: {}\n", flash_count); - printf("Part 2: {}\n", sync_step); + to_flash->clear(); } + + printf("Part 1: {}\n", flash_count); + printf("Part 2: {}\n", sync_step); } diff --git a/tests/aoc-2021/day12.onyx b/tests/aoc-2021/day12.onyx index b61ef3dab..29ed043ea 100644 --- a/tests/aoc-2021/day12.onyx +++ b/tests/aoc-2021/day12.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -17,84 +17,83 @@ Communative_Pair :: struct (T: type_expr) where hash.Hashable(T) { } main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day12.txt") { - reader := io.reader_make(file); - - verticies: Set(str); - edges: Set(Communative_Pair(str)); - while !io.reader_empty(&reader) { - line := io.read_line(&reader, consume_newline=true); - - left, right := do { - parts := string.split(line, #char "-"); - defer memory.free_slice(&parts); - return string.strip_whitespace(parts[0]), string.strip_whitespace(parts[1]); - }; - - edges << .{ left, right }; - verticies << left; - verticies << right; - } + use file := os.open("./tests/aoc-2021/input/day12.txt")->unwrap(); + use reader := io.reader_make(&file); + + verticies: Set(str); + edges: Set(Communative_Pair(str)); + while !io.reader_empty(&reader) { + line := io.read_line(&reader, consume_newline=true); + + left, right := do { + parts := string.split(line, '-'); + defer 
memory.free_slice(&parts); + return string.strip_whitespace(parts[0]), string.strip_whitespace(parts[1]); + }; + + edges << .{ left, right }; + verticies << left; + verticies << right; + } - Node :: struct { name: str; child_idx: u32; second_visit: bool; } - node_stack: [..] Node; - node_stack << .{ "start", 0, false }; + Node :: struct { name: str; child_idx: u32; second_visit: bool; } + node_stack: [..] Node; + node_stack << .{ "start", 0, false }; - children_of :: (edges: &$T, name: str) -> Iterator(str) { - return iter.concat( - iter.as_iter(edges) - ->filter((x, [name]) => x.a == name) - ->map(x => x.b), + children_of :: (edges: &$T, name: str) -> Iterator(str) { + return iter.concat( + iter.as_iter(edges) + ->filter((x) use (name) => x.a == name) + ->map(x => x.b), - iter.as_iter(edges) - ->filter((x, [name]) => x.b == name) - ->map(x => x.a) - ); - } + iter.as_iter(edges) + ->filter((x) use (name) => x.b == name) + ->map(x => x.a) + ); + } - cannot_visit_multiple :: (name) => { - c := name[0]; - return c >= #char "a" && c <= #char "z"; - } + cannot_visit_multiple :: (name) => { + c := name[0]; + return c >= 'a' && c <= 'z'; + } - edge_map: Map(str, [] str); - for v in iter.as_iter(&verticies) { - edge_map[*v] = children_of(&edges, *v) |> iter.to_array(); - } + edge_map: Map(str, [] str); + for v in iter.as_iter(&verticies) { + edge_map[*v] = children_of(&edges, *v) |> iter.to_array(); + } - paths_count := 0; - while node_stack.count != 0 { - node_idx := node_stack.count - 1; - defer node_stack[node_idx].child_idx += 1; + paths_count := 0; + while node_stack.count != 0 { + node_idx := node_stack.count - 1; + defer node_stack[node_idx].child_idx += 1; - children := edge_map[node_stack[node_idx].name] ?? .[]; - valid := node_stack[node_idx].child_idx < children.count; + children := edge_map[node_stack[node_idx].name] ?? 
.[]; + valid := node_stack[node_idx].child_idx < children.count; - if valid { - child := children[node_stack[node_idx].child_idx]; - second_visit := node_stack[node_idx].second_visit; + if valid { + child := children[node_stack[node_idx].child_idx]; + second_visit := node_stack[node_idx].second_visit; - if cannot_visit_multiple(child) { - visit_count := 0; - for& node_stack { - if it.name == child do visit_count += 1; - } + if cannot_visit_multiple(child) { + visit_count := 0; + for& node_stack { + if it.name == child do visit_count += 1; + } - if visit_count >= 1 { - if second_visit || child == "start" do continue; + if visit_count >= 1 { + if second_visit || child == "start" do continue; - second_visit = true; - } + second_visit = true; } + } - if child == "end" do paths_count += 1; - else do node_stack << .{ child, 0, second_visit }; + if child == "end" do paths_count += 1; + else do node_stack << .{ child, 0, second_visit }; - } else { - array.pop(&node_stack); - } + } else { + array.pop(&node_stack); } - - printf("Part 2: {}\n", paths_count); } + + printf("Part 2: {}\n", paths_count); } diff --git a/tests/aoc-2021/day13.onyx b/tests/aoc-2021/day13.onyx index e04916c0e..08d551123 100644 --- a/tests/aoc-2021/day13.onyx +++ b/tests/aoc-2021/day13.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -23,50 +23,49 @@ apply_fold :: (dots: &[] Point, axis_name: str, axis_value: i32) { } main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day13.txt") { - reader := io.reader_make(file); - - dots: [..] 
Point; - while true { - line := io.read_line(&reader, consume_newline=true, inplace=true); - string.strip_whitespace(&line); - if line.count == 0 do break; - - parts := string.split_iter(line, #char ",") - |> iter.map(x => cast(i32) conv.parse_int(x)); - - x, _ := iter.take_one(parts); - y, _ := iter.take_one(parts); - - dots << .{x,y}; - } + use file := os.open("./tests/aoc-2021/input/day13.txt")->unwrap(); + use reader := io.reader_make(&file); - part_1_answer := -1; - while !io.reader_empty(&reader) { - line := io.read_line(&reader, consume_newline=true, inplace=true); + dots: [..] Point; + while true { + line := io.read_line(&reader, consume_newline=true, inplace=true); + string.strip_whitespace(&line); + if line.count == 0 do break; - string.read_until(&line, #char " ", 1); - string.advance(&line, 1); + parts := string.split_iter(line, ',') + |> iter.map(x => cast(i32) conv.parse_int(x)); + + x := iter.next(parts)->unwrap(); + y := iter.next(parts)->unwrap(); - axis_name := string.read_until(&line, #char "="); - string.advance(&line, 1); - axis_value := cast(i32) conv.str_to_i64(line); + dots << .{x,y}; + } - apply_fold(~~ &dots, axis_name, axis_value); - if part_1_answer < 0 { - part_1_answer = dots.count; - } - } + part_1_answer := -1; + while !io.reader_empty(&reader) { + line := io.read_line(&reader, consume_newline=true, inplace=true); + + string.read_until(&line, ' ', 1); + string.advance(&line, 1); - printf("Part 1: {}\n", part_1_answer); + axis_name := string.read_until(&line, '='); + string.advance(&line, 1); + axis_value := cast(i32) conv.str_to_i64(line); - printf("Part 2:\n"); - for y in 7 { - for x in 50 { - print("X" if array.contains(dots, .{x, y}) else " "); - } + apply_fold(~~ &dots, axis_name, axis_value); + if part_1_answer < 0 { + part_1_answer = dots.count; + } + } + + printf("Part 1: {}\n", part_1_answer); - print("\n"); + printf("Part 2:\n"); + for y in 7 { + for x in 50 { + print("X" if array.contains(dots, .{x, y}) else " "); } + + 
print("\n"); } -} \ No newline at end of file +} diff --git a/tests/aoc-2021/day14.onyx b/tests/aoc-2021/day14.onyx index ee836e633..a51dbe718 100644 --- a/tests/aoc-2021/day14.onyx +++ b/tests/aoc-2021/day14.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -21,70 +21,69 @@ State :: struct { } main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day14.txt") { - reader := io.reader_make(file); + use file := os.open("./tests/aoc-2021/input/day14.txt")->unwrap(); + use reader := io.reader_make(&file); - start_polymer := io.read_line(&reader, consume_newline=false); - io.skip_whitespace(&reader); + start_polymer := io.read_line(&reader, consume_newline=false); + io.skip_whitespace(&reader); - rules: [..] Rule; - while !io.reader_empty(&reader) { - r: Rule; - io.read_bytes(&reader, .{cast([&] u8) &r.pair, 2}); - io.skip_bytes(&reader, 4); - r.insert = io.read_byte(&reader); + rules: [..] Rule; + while !io.reader_empty(&reader) { + r: Rule; + io.read_bytes(&reader, .{cast([&] u8) &r.pair, 2}); + io.skip_bytes(&reader, 4); + r.insert = io.read_byte(&reader); - rules << r; - io.skip_whitespace(&reader); - } - - polymer_state: Map(Pair(u8, u8), State); - add_to_state :: macro (pair: Pair(u8, u8), count: u64) { - if polymer_state->has(pair) { - (&polymer_state[pair]).next += ~~count; - } else { - polymer_state[pair] = .{ 0, ~~count }; - } - } + rules << r; + io.skip_whitespace(&reader); + } - step_state :: macro () { - for& polymer_state.entries { - it.value.now = it.value.next; - it.value.next = 0; - } + polymer_state: Map(Pair(u8, u8), State); + add_to_state :: macro (pair: Pair(u8, u8), count: u64) { + if polymer_state->has(pair) { + (&polymer_state[pair]).next += ~~count; + } else { + polymer_state[pair] = .{ 0, ~~count }; } + } - for start_polymer.count - 1 { - p := Pair(u8, u8).{ start_polymer[it], start_polymer[it + 1] }; - add_to_state(p, 1); + step_state :: macro () { + for& polymer_state.entries { + it.value.now = it.value.next; + 
it.value.next = 0; } + } - step_state(); + for start_polymer.count - 1 { + p := Pair(u8, u8).{ start_polymer[it], start_polymer[it + 1] }; + add_to_state(p, 1); + } - for 40 { - for& rule in rules { - pair_count := &polymer_state[rule.pair]; - if pair_count != null { - if pair_count.now > 0 { - pair1 := Pair(u8, u8).{ rule.pair.first, rule.insert }; - pair2 := Pair(u8, u8).{ rule.insert, rule.pair.second }; - add_to_state(pair1, pair_count.now); - add_to_state(pair2, pair_count.now); - } + step_state(); + + for 40 { + for& rule in rules { + pair_count := &polymer_state[rule.pair]; + if pair_count != null { + if pair_count.now > 0 { + pair1 := Pair(u8, u8).{ rule.pair.first, rule.insert }; + pair2 := Pair(u8, u8).{ rule.insert, rule.pair.second }; + add_to_state(pair1, pair_count.now); + add_to_state(pair2, pair_count.now); } } - step_state(); } + step_state(); + } - mode: Map(u8, u64); - for& polymer_state.entries { - mode[it.key.second] = (mode[it.key.second] ?? .{}) + it.value.now; - } + mode: Map(u8, u64); + for& polymer_state.entries { + mode[it.key.second] = (mode[it.key.second] ?? 
.{}) + it.value.now; + } - maximum := array.fold(mode.entries, cast(u64) 0, (x, y) => math.max(x.value, y)); - minimum := array.fold(mode.entries, maximum, (x, y) => math.min(x.value, y)); + maximum := array.fold(mode.entries, cast(u64) 0, (x, y) => math.max(x.value, y)); + minimum := array.fold(mode.entries, maximum, (x, y) => math.min(x.value, y)); - println(&mode); - printf("Part 2: {}\n", maximum - minimum - 1); - } + println(&mode); + printf("Part 2: {}\n", maximum - minimum - 1); } diff --git a/tests/aoc-2021/day15.onyx b/tests/aoc-2021/day15.onyx index cd431f07d..34911ea88 100644 --- a/tests/aoc-2021/day15.onyx +++ b/tests/aoc-2021/day15.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -10,59 +10,59 @@ pos :: struct { x, y: i32; } #operator == (p1, p2: pos) => p1.x == p2.x && p1.y == p2.y; main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day15.txt") { - reader := io.reader_make(file); + use file := os.open("./tests/aoc-2021/input/day15.txt")->unwrap(); + use reader := io.reader_make(&file); - cells: [..] u8; - height := 0; - while !io.reader_empty(&reader) { - line := io.read_line(&reader, consume_newline=false, inplace=true); - io.skip_whitespace(&reader); + cells: [..] 
u8; + height := 0; + while !io.reader_empty(&reader) { + line := io.read_line(&reader, consume_newline=false, inplace=true); + io.skip_whitespace(&reader); - for line do cells << it; - height += 1; - } + for line do cells << it; + height += 1; + } - width := cells.count / height; + width := cells.count / height; - to_try := heap.make(queued, (x, y) => x.cost - y.cost); - tried := set.make(pos); + to_try := heap.make(queued, (x, y) => x.cost - y.cost); + tried := set.make(pos); - to_try << .{ 0, 0, -cast(i32) (cells[0] - #char "0") }; + to_try << .{ 0, 0, -cast(i32) (cells[0] - '0') }; - minimum := 0; - while to_try.data.count != 0 { - try := heap.remove_top(&to_try); - tried << .{try.x, try.y}; + minimum := 0; + while !heap.empty(&to_try) { + try := heap.remove_top(&to_try)->unwrap(); + tried << .{try.x, try.y}; - cell_value := cast(u32) (cells[(try.y % height) * width + (try.x % width)] - #char "0"); - cell_value = ((cell_value - 1 + (try.y / height) + (try.x / width)) % 9) + 1; - cell_value += try.cost; + cell_value := cast(u32) (cells[(try.y % height) * width + (try.x % width)] - '0'); + cell_value = ((cell_value - 1 + (try.y / height) + (try.x / width)) % 9) + 1; + cell_value += try.cost; - if try.x == width * 5 - 1 && try.y == height * 5 - 1 { - minimum = cell_value; - break; - } + if try.x == width * 5 - 1 && try.y == height * 5 - 1 { + minimum = cell_value; + break; + } - attempt_add :: macro (cond: Code, dx, dy: i32) { - if #unquote cond { - if !(tried->has(.{try.x + dx, try.y + dy})) { - if found := array.find_ptr(to_try.data, .{try.x + dx, try.y + dy, 0}); found != null { - found.cost = math.min(cell_value, found.cost); - } else { - to_try << .{try.x + dx, try.y + dy, cell_value }; - } + attempt_add :: macro (cond: Code, dx, dy: i32) { + if #unquote cond { + if !(tried->has(.{try.x + dx, try.y + dy})) { + if found := array.find_ptr(to_try.data, .{try.x + dx, try.y + dy, 0}); found != null { + found.cost = math.min(cell_value, found.cost); + } else { + 
to_try << .{try.x + dx, try.y + dy, cell_value }; } } } - - attempt_add([](try.x > 0), -1, 0); - attempt_add([](try.x < width * 5 - 1), 1, 0); - attempt_add([](try.y > 0), 0, -1); - attempt_add([](try.y < height * 5 - 1), 0, 1); } - printf("Part 2: {}\n", minimum); + attempt_add([](try.x > 0), -1, 0); + attempt_add([](try.x < width * 5 - 1), 1, 0); + attempt_add([](try.y > 0), 0, -1); + attempt_add([](try.y < height * 5 - 1), 0, 1); + } + + printf("Part 2: {}\n", minimum); /* NAIVE SOLUTION min_paths := array.make(u32, capacity=cells.count*25); @@ -76,7 +76,7 @@ main :: (args) => { if y > 0 do a = min_paths[(y - 1) * width * 5 + x]; if x > 0 do b = min_paths[y * width * 5 + (x - 1)]; - cell_value := cast(u32) (cells[(y % height) * width + (x % width)] - #char "0"); + cell_value := cast(u32) (cells[(y % height) * width + (x % width)] - '0'); cell_value = ((cell_value - 1 + (y / height) + (x / width)) % 9) + 1; min_paths[y * width * 5 + x] = math.min(a, b) + cell_value; @@ -85,5 +85,4 @@ main :: (args) => { result := min_paths[width * height * 25 - 1]; printf("Part 2: {}\n", result); */ - } } diff --git a/tests/aoc-2021/day16.onyx b/tests/aoc-2021/day16.onyx index 484e6fc15..dd55a17f0 100644 --- a/tests/aoc-2021/day16.onyx +++ b/tests/aoc-2021/day16.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -7,10 +7,10 @@ base16_to_hex :: (s: str) -> u32 { for s { res *= 16; switch it { - case #char "0" .. #char "9" do res += ~~(it - #char "0"); - case #char "a" .. #char "f" do res += ~~(it - #char "a" + 10); - case #char "A" .. #char "F" do res += ~~(it - #char "A" + 10); - case #default do break break; + case '0' ..= '9' do res += ~~(it - '0'); + case 'a' ..= 'f' do res += ~~(it - 'a' + 10); + case 'A' ..= 'F' do res += ~~(it - 'A' + 10); + case _ do break break; } } @@ -26,8 +26,7 @@ BitIterator :: struct { bit_idx: i32; next :: (b: &BitIterator) -> u32 { - v, _ := iter.take_one(b.iter, no_close=true); - return v; + return iter.next(b.iter) ?? 
0; } } @@ -38,8 +37,8 @@ bit_iterator :: (values: [] u8) -> &BitIterator { c.value_idx = 0; c.bit_idx = 7; - next :: (use c: &BitIterator) -> (u32, bool) { - if value_idx >= values.count do return 0, false; + next :: (use c: &BitIterator) -> ? u32 { + if value_idx >= values.count do return .None; defer { bits_read += 1; @@ -52,7 +51,7 @@ bit_iterator :: (values: [] u8) -> &BitIterator { ret := 0; if (values[value_idx] & ~~(1 << bit_idx)) != 0 do ret = 1; - return ret, true; + return ret; } return c; @@ -108,7 +107,7 @@ parse_packet :: (bit_provider: &BitIterator) -> &Packet { return p; } - case #default { + case _ { packets: [..] &Packet; l_type := read_bits(bit_provider, 1); @@ -141,7 +140,7 @@ packet_sum_version :: (p: &Packet) -> u64 { switch p.type { case 4 do return ~~p.version; - case #default { + case _ { sum: u64; for (cast (&Packet_Operator) p).subpackets { sum += packet_sum_version(it); @@ -191,22 +190,21 @@ packet_reduce :: (p: &Packet) -> u64 { main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day16.txt") { - reader := io.reader_make(file); - line := io.read_line(&reader, consume_newline=false, inplace=true); + use file := os.open("./tests/aoc-2021/input/day16.txt")->unwrap(); + use reader := io.reader_make(&file); + line := io.read_line(&reader, consume_newline=false, inplace=true); - transmission: [..] u8; - for i in range.{ 0, line.count, 2 } { - transmission << ~~(base16_to_hex(line[i .. i + 2])); - } + transmission: [..] u8; + for i in range.{ 0, line.count, 2 } { + transmission << ~~(base16_to_hex(line[i .. 
i + 2])); + } - bit_provider := bit_iterator(transmission); - packet := parse_packet(bit_provider); + bit_provider := bit_iterator(transmission); + packet := parse_packet(bit_provider); - result := packet_sum_version(packet); - printf("Part 1: {}\n", result); + result := packet_sum_version(packet); + printf("Part 1: {}\n", result); - result = packet_reduce(packet); - printf("Part 2: {}\n", result); - } + result = packet_reduce(packet); + printf("Part 2: {}\n", result); } diff --git a/tests/aoc-2021/day17.onyx b/tests/aoc-2021/day17.onyx index 8c058e439..4b066f87e 100644 --- a/tests/aoc-2021/day17.onyx +++ b/tests/aoc-2021/day17.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -30,18 +30,17 @@ simulate :: (dx, dy: i32) -> (i32, bool) { main :: (args) => { - for file in os.with_file("./tests/aoc-2021/input/day17.txt") { - reader := io.reader_make(file); - - io.skip_bytes(&reader, 15); - tx0 = io.read_i32(&reader); - io.skip_bytes(&reader, 2); - tx1 = io.read_i32(&reader); - io.skip_bytes(&reader, 4); - ty0 = io.read_i32(&reader); - io.skip_bytes(&reader, 2); - ty1 = io.read_i32(&reader); - } + use file := os.open("./tests/aoc-2021/input/day17.txt")->unwrap(); + use reader := io.reader_make(&file); + + io.skip_bytes(&reader, 15); + tx0 = io.read_i32(&reader); + io.skip_bytes(&reader, 2); + tx1 = io.read_i32(&reader); + io.skip_bytes(&reader, 4); + ty0 = io.read_i32(&reader); + io.skip_bytes(&reader, 2); + ty1 = io.read_i32(&reader); max := 0; count := 0; diff --git a/tests/aoc-2021/day18.onyx b/tests/aoc-2021/day18.onyx index 643c95394..28c8079e7 100644 --- a/tests/aoc-2021/day18.onyx +++ b/tests/aoc-2021/day18.onyx @@ -1,4 +1,4 @@ -#load "core/module" + PART :: 2 @@ -15,213 +15,211 @@ SnailNum :: struct { } -#inject SnailNum { - allocator: Allocator; +SnailNum.allocator: Allocator; - make :: () => { - return new(SnailNum, SnailNum.allocator); - } +SnailNum.make :: () => { + return new(SnailNum, SnailNum.allocator); +} - make_pair :: (left, right: u32) => { 
- n := SnailNum.make(); - n.left_val = left; - n.right_val = right; - return n; - } +SnailNum.make_pair :: (left, right: u32) => { + n := SnailNum.make(); + n.left_val = left; + n.right_val = right; + return n; +} - clone :: (n: &SnailNum) -> &SnailNum { - if !n do return null; +SnailNum.clone :: (n: &SnailNum) -> &SnailNum { + if !n do return null; - new_num := SnailNum.make(); - new_num->set_left(SnailNum.clone(n.left)); - new_num->set_right(SnailNum.clone(n.right)); - new_num.left_val = n.left_val; - new_num.right_val = n.right_val; + new_num := SnailNum.make(); + new_num->set_left(SnailNum.clone(n.left)); + new_num->set_right(SnailNum.clone(n.right)); + new_num.left_val = n.left_val; + new_num.right_val = n.right_val; - return new_num; - } + return new_num; +} - add :: (a, b: &SnailNum) => { - if !a do return b; - if !b do return a; +SnailNum.add :: (a, b: &SnailNum) => { + if !a do return b; + if !b do return a; - new_root := SnailNum.make(); - new_root->set_left(a); - new_root->set_right(b); + new_root := SnailNum.make(); + new_root->set_left(a); + new_root->set_right(b); - while new_root->reduce() --- + while new_root->reduce() --- - return new_root; - } + return new_root; +} - reduce :: (use n: &SnailNum) -> (reduced_something: bool) { - if r, _ := n->reduce_explodes(); r do return true; - if r := n->reduce_splits(); r do return true; - return false; - } +SnailNum.reduce :: (use n: &SnailNum) -> bool { + if r, _ := n->reduce_explodes(); r do return true; + if r := n->reduce_splits(); r do return true; + return false; +} - reduce_explodes :: (use n: &SnailNum, depth := 0) -> (reduced_something: bool, zero_node: bool) { - if depth <= 3 { - if left != null { - if did_reduce, zero_node := left->reduce_explodes(depth + 1); zero_node { - left = null; - return true, false; +SnailNum.reduce_explodes :: (use n: &SnailNum, depth := 0) -> (bool, bool) { + if depth <= 3 { + if left != null { + if did_reduce, zero_node := left->reduce_explodes(depth + 1); zero_node { + 
left = null; + return true, false; - } elseif did_reduce { - return true, false; - } + } elseif did_reduce { + return true, false; } + } - if right != null { - if did_reduce, zero_node := right->reduce_explodes(depth + 1); zero_node { - right = null; - return true, false; + if right != null { + if did_reduce, zero_node := right->reduce_explodes(depth + 1); zero_node { + right = null; + return true, false; - } elseif did_reduce { - return true, false; - } + } elseif did_reduce { + return true, false; } - - return false, false; } - pleft := n->number_to_left(); - pright := n->number_to_right(); - if pleft do *pleft += left_val; - if pright do *pright += right_val; + return false, false; + } - left_val = 0; - right_val = 0; + pleft := n->number_to_left(); + pright := n->number_to_right(); + if pleft do *pleft += left_val; + if pright do *pright += right_val; - return true, true; - } + left_val = 0; + right_val = 0; - reduce_splits :: (use n: &SnailNum) -> (reduced_something: bool) { - if left { - if left->reduce_splits() { - return true; - } + return true, true; +} - } elseif left_val >= 10 { - l1, l2 := split_number(left_val); - n->set_left(SnailNum.make_pair(l1, l2)); - left_val = 0; +SnailNum.reduce_splits :: (use n: &SnailNum) -> (bool) { + if left { + if left->reduce_splits() { return true; } - if right { - if right->reduce_splits() { - return true; - } + } elseif left_val >= 10 { + l1, l2 := split_number(left_val); + n->set_left(SnailNum.make_pair(l1, l2)); + left_val = 0; + return true; + } - } elseif right_val >= 10 { - r1, r2 := split_number(right_val); - n->set_right(SnailNum.make_pair(r1, r2)); - right_val = 0; + if right { + if right->reduce_splits() { return true; } - - return false; - split_number :: (n: u32) -> (u32, u32) { - h := n / 2; - return h, h + (n % 2); - } + } elseif right_val >= 10 { + r1, r2 := split_number(right_val); + n->set_right(SnailNum.make_pair(r1, r2)); + right_val = 0; + return true; } + + return false; - set_left :: (parent, 
new_left: &SnailNum) { - parent.left_val = 0; - parent.left = new_left; - if new_left do new_left.parent = parent; + split_number :: (n: u32) -> (u32, u32) { + h := n / 2; + return h, h + (n % 2); } +} - set_right :: (parent, new_right: &SnailNum) { - parent.right_val = 0; - parent.right = new_right; - if new_right do new_right.parent = parent; - } +SnailNum.set_left :: (parent, new_left: &SnailNum) { + parent.left_val = 0; + parent.left = new_left; + if new_left do new_left.parent = parent; +} - number_to_left :: (n: &SnailNum) -> &u32 { - while n.parent && n.parent.left == n { - n = n.parent; - } +SnailNum.set_right :: (parent, new_right: &SnailNum) { + parent.right_val = 0; + parent.right = new_right; + if new_right do new_right.parent = parent; +} - if !n.parent do return null; +SnailNum.number_to_left :: (n: &SnailNum) -> &u32 { + while n.parent && n.parent.left == n { + n = n.parent; + } - if !n.parent.left do return &n.parent.left_val; + if !n.parent do return null; - n = n.parent.left; + if !n.parent.left do return &n.parent.left_val; - while n.right { - n = n.right; - } + n = n.parent.left; - return &n.right_val; + while n.right { + n = n.right; } - number_to_right :: (n: &SnailNum) -> &u32 { - while n.parent && n.parent.right == n { - n = n.parent; - } + return &n.right_val; +} - if !n.parent do return null; +SnailNum.number_to_right :: (n: &SnailNum) -> &u32 { + while n.parent && n.parent.right == n { + n = n.parent; + } - if !n.parent.right do return &n.parent.right_val; + if !n.parent do return null; - n = n.parent.right; + if !n.parent.right do return &n.parent.right_val; - while n.left { - n = n.left; - } + n = n.parent.right; - return &n.left_val; + while n.left { + n = n.left; } - magnitude :: (use n: &SnailNum) => { - if !n { - return 0; - } + return &n.left_val; +} - return 3 * (left_val + left->magnitude()) + - 2 * (right_val + right->magnitude()); +SnailNum.magnitude :: (use n: &SnailNum) => { + if !n { + return 0; } - parse :: (line: &str) -> 
&SnailNum { - string.advance(line); // [ - - root := SnailNum.make(); - if line.data[0] == #char "[" { - root->set_left(SnailNum.parse(line)); - } else { - root.left_val = ~~ conv.str_to_i64(line); - } + return 3 * (left_val + left->magnitude()) + + 2 * (right_val + right->magnitude()); +} - string.advance(line); // , +SnailNum.parse :: (line: &str) -> &SnailNum { + string.advance(line); // [ - if line.data[0] == #char "[" { - root->set_right(SnailNum.parse(line)); - } else { - root.right_val = ~~ conv.str_to_i64(line); - } + root := SnailNum.make(); + if line.data[0] == '[' { + root->set_left(SnailNum.parse(line)); + } else { + root.left_val = ~~ conv.str_to_i64(line); + } - string.advance(line); // ] + string.advance(line); // , - return root; + if line.data[0] == '[' { + root->set_right(SnailNum.parse(line)); + } else { + root.right_val = ~~ conv.str_to_i64(line); } - format :: (output: &conv.Format_Output, s: &conv.Format, use n: &SnailNum) { - if !left && !right { - conv.format(output, "[{},{}]", left_val, right_val); - } - elseif !left && right { - conv.format(output, "[{},{*}]", left_val, right); - } - elseif left && !right { - conv.format(output, "[{*},{}]", left, right_val); - } - elseif left && right { - conv.format(output, "[{*},{*}]", left, right); - } + string.advance(line); // ] + + return root; +} + +SnailNum.format :: (output: &conv.Format_Output, s: &conv.Format, use n: &SnailNum) { + if !left && !right { + conv.format(output, "[{},{}]", left_val, right_val); + } + elseif !left && right { + conv.format(output, "[{},{*}]", left_val, right); + } + elseif left && !right { + conv.format(output, "[{*},{}]", left, right_val); + } + elseif left && right { + conv.format(output, "[{*},{*}]", left, right); } } @@ -233,47 +231,46 @@ main :: () { conv.register_custom_formatter(SnailNum.format); - for file in os.with_file("./tests/aoc-2021/input/day18.txt") { - r := io.reader_make(file); - - #if PART == 1 { - s: &SnailNum = null; - - for line in r->lines() { - 
n := SnailNum.parse(&line); - s = s->add(n); - } + use file := os.open("./tests/aoc-2021/input/day18.txt")->unwrap(); + use r := io.reader_make(&file); - printf("{*}\n", s->magnitude()); + #if PART == 1 { + s: &SnailNum = null; + + for line in r->lines() { + n := SnailNum.parse(&line); + s = s->add(n); } - #if PART == 2 { - nums := make([..] &SnailNum); - for line in r->lines() { - nums << SnailNum.parse(&line); - } + printf("{*}\n", s->magnitude()); + } - maximum := 0; - max_i, max_j : i32; + #if PART == 2 { + nums := make([..] &SnailNum); + for line in r->lines() { + nums << SnailNum.parse(&line); + } + + maximum := 0; + max_i, max_j : i32; - for i in nums.count { - for j in nums.count { - if i == j do continue; + for i in nums.count { + for j in nums.count { + if i == j do continue; - n1 := nums[i]->clone(); - n2 := nums[j]->clone(); + n1 := nums[i]->clone(); + n2 := nums[j]->clone(); - mag := n1->add(n2)->magnitude(); - if mag >= maximum { - maximum = mag; - max_i, max_j = i, j; - } + mag := n1->add(n2)->magnitude(); + if mag >= maximum { + maximum = mag; + max_i, max_j = i, j; } } - - println(maximum); - printf("i: {*}\n", nums[max_i]); - printf("j: {*}\n", nums[max_j]); } + + println(maximum); + printf("i: {*}\n", nums[max_i]); + printf("j: {*}\n", nums[max_j]); } } diff --git a/tests/aoc-2021/day21.onyx b/tests/aoc-2021/day21.onyx index 8956121da..628f1d85f 100644 --- a/tests/aoc-2021/day21.onyx +++ b/tests/aoc-2021/day21.onyx @@ -9,16 +9,14 @@ Die :: struct { rolls: u32; } -#inject Die { - make :: () -> Die { - return .{ 1, 0 }; - } - - roll :: (d: &Die) -> u32 { - defer d.value += 1; - defer d.rolls += 1; - return d.value; - } +Die.make :: () -> Die { + return .{ 1, 0 }; +} + +Die.roll :: (d: &Die) -> u32 { + defer d.value += 1; + defer d.rolls += 1; + return d.value; } Player :: struct { @@ -26,26 +24,25 @@ Player :: struct { score: u32; } -#inject Player { - move :: (p: &Player, squares: u32) { - p.square += squares; - p.square %= 10; - - p.score += 
(p.square + 1); - } +Player.move :: (p: &Player, squares: u32) { + p.square += squares; + p.square %= 10; + + p.score += (p.square + 1); } main :: () { p1, p2: Player; - for os.with_file("./tests/aoc-2021/input/day21.txt") { - r := io.reader_make(it); + { + use file := os.open("./tests/aoc-2021/input/day21.txt")->unwrap(); + use r := io.reader_make(&file); l1 := r->read_line(consume_newline=true); l2 := r->read_line(consume_newline=true); - _, s1 := string.bisect(l1, #char ":"); - _, s2 := string.bisect(l2, #char ":"); + _, s1 := string.bisect(l1, ':'); + _, s2 := string.bisect(l2, ':'); string.strip_whitespace(&s1); string.strip_whitespace(&s2); @@ -88,26 +85,24 @@ Player :: struct { score: u64; } -#inject Player { - hash :: (p: Player) -> u32 { - h := 7; - h += h << 5 + hash.to_u32(p.square); - h += h << 5 + hash.to_u32(p.score); - return h; - } +Player.hash :: (p: Player) -> u32 { + h := 7; + h += h << 5 + hash.to_u32(p.square); + h += h << 5 + hash.to_u32(p.score); + return h; +} - __eq :: (p1, p2: Player) => { - return p1.square == p2.square && p1.score == p2.score; - } +Player.__eq :: (p1, p2: Player) => { + return p1.square == p2.square && p1.score == p2.score; +} - move :: (p: Player, m: u32) -> Player { - n := p; - n.square += m; - n.square %= 10; - - n.score += ~~(n.square + 1); - return n; - } +Player.move :: (p: Player, m: u32) -> Player { + n := p; + n.square += m; + n.square %= 10; + + n.score += ~~(n.square + 1); + return n; } @@ -192,14 +187,15 @@ calc_wins :: (p1, p2: Player, player_1_turn := true) -> (u64, u64) { main :: () { p1, p2: Player; - for os.with_file("./tests/aoc-2021/input/day21.txt") { - r := io.reader_make(it); + { + use file := os.open("./tests/aoc-2021/input/day21.txt")->unwrap(); + use r := io.reader_make(&file); l1 := r->read_line(consume_newline=true); l2 := r->read_line(consume_newline=true); - _, s1 := string.bisect(l1, #char ":"); - _, s2 := string.bisect(l2, #char ":"); + _, s1 := string.bisect(l1, ':'); + _, s2 := 
string.bisect(l2, ':'); string.strip_whitespace(&s1); string.strip_whitespace(&s2); diff --git a/tests/array_accessors.onyx b/tests/array_accessors.onyx index 671bc49b1..5ead7ac07 100644 --- a/tests/array_accessors.onyx +++ b/tests/array_accessors.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/array_programming.onyx b/tests/array_programming.onyx index c4f8e8599..c1827802e 100644 --- a/tests/array_programming.onyx +++ b/tests/array_programming.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/array_struct_robustness.onyx b/tests/array_struct_robustness.onyx index 959e7fd82..3d0af9776 100644 --- a/tests/array_struct_robustness.onyx +++ b/tests/array_struct_robustness.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/arrays_passed_by_value b/tests/arrays_passed_by_value new file mode 100644 index 000000000..f49eeacd9 --- /dev/null +++ b/tests/arrays_passed_by_value @@ -0,0 +1,2 @@ +[ 4.0000, 7.0000 ] [ 8.0000, 14.0000 ] +[ 4, 7 ] [ 8, 14 ] diff --git a/tests/arrays_passed_by_value.onyx b/tests/arrays_passed_by_value.onyx new file mode 100644 index 000000000..b362b7864 --- /dev/null +++ b/tests/arrays_passed_by_value.onyx @@ -0,0 +1,27 @@ +use core {*} + +Vec2 :: [2] f32 +Vec2i :: [2] i32 + +foo :: (a: Vec2) -> void { + printf("{} {}\n", a, a + a) +} + +f2 :: () -> Vec2 { + return .[4, 7] +} + +ioo :: (a: Vec2i) -> void { + printf("{} {}\n", a, a + a) +} + +i2 :: () -> Vec2i { + return .[4, 7] +} + +main :: () { + foo(f2()) + ioo(i2()) +} + + diff --git a/tests/arrays_with_methods b/tests/arrays_with_methods new file mode 100644 index 000000000..a1d8fd0d5 --- /dev/null +++ b/tests/arrays_with_methods @@ -0,0 +1,6 @@ +true +[ 123, 123, 123, 123456, 123456, 456, 456, 456, 123, 123 ] +[ [ 123, 123, 123 ], [ 123456, 123456 ], [ 456, 456, 456 ], [ 123, 123 ] ] +hello World! 
+4 +7 diff --git a/tests/arrays_with_methods.onyx b/tests/arrays_with_methods.onyx new file mode 100644 index 000000000..a89cdb702 --- /dev/null +++ b/tests/arrays_with_methods.onyx @@ -0,0 +1,35 @@ +//+optional-semicolons + +use core {println} + +main :: () { + arr := make([..] i32) + + for i in 10 { + arr->push(i) + } + + arr->fill(123) + arr->fill_range(3 .. 8, 456) + Array.fill_range(arr, 3 .. 5, 123456) + + arr->some([x](x == 123)) |> println() + + sli: [] i32 = arr + + println(arr) + + groups := sli->group_by([x, y](x == y)) + println(groups) + + + // String methdos + + hello := "Hello"->copy()->to_lowercase()->to_dyn_str() + hello->append(" World!") + println(hello) + + hello_str: str = hello + hello_str->index_of('o') |> println() + hello_str->last_index_of('o') |> println() +} diff --git a/tests/arrow_notation.onyx b/tests/arrow_notation.onyx index c68613725..9f7b970fe 100644 --- a/tests/arrow_notation.onyx +++ b/tests/arrow_notation.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core use core.conv diff --git a/tests/atomics.onyx b/tests/atomics.onyx index a506be3fc..95674bb9f 100644 --- a/tests/atomics.onyx +++ b/tests/atomics.onyx @@ -1,5 +1,5 @@ -#load "core/module" -#load "core/intrinsics/atomics" + +#load "core:intrinsics/atomics" use core {*} use core.intrinsics.atomics {*} diff --git a/tests/auto_poly.onyx b/tests/auto_poly.onyx index a744e61fa..a10c25fe8 100644 --- a/tests/auto_poly.onyx +++ b/tests/auto_poly.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/avl_test.onyx b/tests/avl_test.onyx index edaf538ad..2feddabaf 100644 --- a/tests/avl_test.onyx +++ b/tests/avl_test.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/baked_parameters.onyx b/tests/baked_parameters.onyx index 47d952642..0fc2ed478 100644 --- a/tests/baked_parameters.onyx +++ b/tests/baked_parameters.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/better_field_accesses.onyx 
b/tests/better_field_accesses.onyx index 8d6e4d89e..ebb08c598 100644 --- a/tests/better_field_accesses.onyx +++ b/tests/better_field_accesses.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/bucket_array.onyx b/tests/bucket_array.onyx index eb3dc865b..77db6e177 100644 --- a/tests/bucket_array.onyx +++ b/tests/bucket_array.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/bugs/anonymous_struct_defaults.onyx b/tests/bugs/anonymous_struct_defaults.onyx index 5b6dc99b7..08d9539c4 100644 --- a/tests/bugs/anonymous_struct_defaults.onyx +++ b/tests/bugs/anonymous_struct_defaults.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/bugs/array_lengths.onyx b/tests/bugs/array_lengths.onyx index 4a5e2ada7..e11e58ccd 100644 --- a/tests/bugs/array_lengths.onyx +++ b/tests/bugs/array_lengths.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core { * } @@ -22,8 +22,8 @@ main :: () { arr2: [cast(i32)An_Enum.Count]A_Struct; println(arr1.count == (3 * 4 - 5)); - println(sizeof(typeof(arr1)) == (3 * 4 - 5) * sizeof(f32)); + println((sizeof typeof (arr1)) == (3 * 4 - 5) * sizeof f32); println(arr2.count == ~~An_Enum.Count); - println(sizeof(typeof(arr2)) == ~~An_Enum.Count * sizeof(A_Struct)); + println((sizeof typeof (arr2)) == ~~An_Enum.Count * sizeof A_Struct); } diff --git a/tests/bugs/defer_block_in_macro.onyx b/tests/bugs/defer_block_in_macro.onyx index ce5e62358..1e86afd10 100644 --- a/tests/bugs/defer_block_in_macro.onyx +++ b/tests/bugs/defer_block_in_macro.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/bugs/fallthrough_defer_interaction.onyx b/tests/bugs/fallthrough_defer_interaction.onyx index 8653e96f7..0974fe4d9 100644 --- a/tests/bugs/fallthrough_defer_interaction.onyx +++ b/tests/bugs/fallthrough_defer_interaction.onyx @@ -1,11 +1,11 @@ -#load "core/module" + use core {*} custom_iterator :: () -> Iterator(i32) { - next :: (data: rawptr) -> (i32, bool) { - return 1234, 
true; + next :: (data: rawptr) -> ? i32 { + return 1234; } close :: (data: rawptr) { @@ -37,8 +37,8 @@ main :: (args: [] cstr) { } } - case #default { + case _ { println("Default case"); } } -} \ No newline at end of file +} diff --git a/tests/bugs/gh_issue_157 b/tests/bugs/gh_issue_157 new file mode 100644 index 000000000..bc856dafa --- /dev/null +++ b/tests/bugs/gh_issue_157 @@ -0,0 +1,4 @@ +0 +1 +2 +3 diff --git a/tests/bugs/gh_issue_157.onyx b/tests/bugs/gh_issue_157.onyx new file mode 100644 index 000000000..cbc628d0a --- /dev/null +++ b/tests/bugs/gh_issue_157.onyx @@ -0,0 +1,21 @@ +use core {*} + +main :: () { + iter.counter() + + // comment + // comment + // comment + |> iter.take(4) + + // comment + |> iter.each([n] { + printf("{}\n", n) + }) +} + +iter.each :: macro (i: Iterator($T), body: Code) { + for __it in i { + #unquote body(__it) + } +} \ No newline at end of file diff --git a/tests/bugs/injecting_global_symbol_order.onyx b/tests/bugs/injecting_global_symbol_order.onyx index 60c0fcf20..9e672ace7 100644 --- a/tests/bugs/injecting_global_symbol_order.onyx +++ b/tests/bugs/injecting_global_symbol_order.onyx @@ -19,10 +19,8 @@ isPrime :: (n: i64) => { prime_generator :: () => iter.counter(2, i64)->filter(isPrime) -#inject iter { - range :: (min: $T, max: T) => - iter.counter(min, T)->take_while((x, [max]) => x < max) -} +iter.range :: (min: $T, max: T) => + iter.counter(min, T)->take_while((x) use (max) => x < max) main :: () { for prime in prime_generator() |> iter.take(10) { diff --git a/tests/bugs/macro_auto_return_not_resolved.onyx b/tests/bugs/macro_auto_return_not_resolved.onyx index 6b6485c85..ba2853b48 100644 --- a/tests/bugs/macro_auto_return_not_resolved.onyx +++ b/tests/bugs/macro_auto_return_not_resolved.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/bugs/namespace_aliasing.onyx b/tests/bugs/namespace_aliasing.onyx index aaa91b3be..a6d483fe4 100644 --- a/tests/bugs/namespace_aliasing.onyx +++ 
b/tests/bugs/namespace_aliasing.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} SomeNamespace :: struct { diff --git a/tests/bugs/nested_function_type_crash b/tests/bugs/nested_function_type_crash new file mode 100644 index 000000000..a28db645f --- /dev/null +++ b/tests/bugs/nested_function_type_crash @@ -0,0 +1,2 @@ +Worked +false diff --git a/tests/bugs/nested_function_type_crash.onyx b/tests/bugs/nested_function_type_crash.onyx new file mode 100644 index 000000000..3fb54178c --- /dev/null +++ b/tests/bugs/nested_function_type_crash.onyx @@ -0,0 +1,16 @@ +use core {*} + +foo :: (x: [] Pair(i32, #type () -> bool)) { + for x { + it.second() |> println() + } +} + +main :: () { + foo(.[ + .{0, () => { + println("Worked") + return false + }} + ]) +} diff --git a/tests/bugs/weird_overload_order b/tests/bugs/weird_overload_order new file mode 100644 index 000000000..1bdad883f --- /dev/null +++ b/tests/bugs/weird_overload_order @@ -0,0 +1,2 @@ +Some(0) +Some("Working") diff --git a/tests/bugs/weird_overload_order.onyx b/tests/bugs/weird_overload_order.onyx new file mode 100644 index 000000000..bb50a7349 --- /dev/null +++ b/tests/bugs/weird_overload_order.onyx @@ -0,0 +1,26 @@ +// This test case isn't *strictly* working... +// You should be able to rearrange these overloads and have it still work. + +use core {*} + +Foo :: struct { _: i32 } + +Foo.overload :: #match #local {} + +#overload +Foo.overload :: (f: Foo, a: str, $b: type_expr) -> ? b { + return .{ Some = .{} } +} + +#overload +Foo.overload :: (f: Foo, a: str) -> ? 
str { + return "Working" +} + + +main :: () { + f := Foo.{} + + Foo.overload(f, "test", u32) |> println() + Foo.overload(f, "test") |> println() +} diff --git a/tests/caller_location.onyx b/tests/caller_location.onyx index 2939defe9..75d4982f3 100644 --- a/tests/caller_location.onyx +++ b/tests/caller_location.onyx @@ -1,10 +1,10 @@ -#load "core/module" + use core {*} using_callsite :: (value: $T, site := #callsite) { println(value); - path := string.split(site.file, ~~(#char "\\" if runtime.compiler_os == .Windows else #char "/")); + path := string.split(site.file, ~~('\\' if runtime.compiler_os == .Windows else '/')); printf("I was called from {}:{}:{}\n", path[path.count - 1], site.line, site.column); } diff --git a/tests/code_block_captures.onyx b/tests/code_block_captures.onyx index 3a317b5bc..34a34276b 100644 --- a/tests/code_block_captures.onyx +++ b/tests/code_block_captures.onyx @@ -20,11 +20,9 @@ main :: () { }); } -#inject Map { - each :: macro (m: $M, body: Code) { - for& m.entries { - #unquote body(it.key, it.value); - } +Map.each :: macro (m: $M, body: Code) { + for& m.entries { + #unquote body(it.key, it.value); } } diff --git a/tests/compile_time_procedures.onyx b/tests/compile_time_procedures.onyx index 652b4a231..828d208a7 100644 --- a/tests/compile_time_procedures.onyx +++ b/tests/compile_time_procedures.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/complicated_polymorph.onyx b/tests/complicated_polymorph.onyx index db0833fc0..f6c60f8c7 100644 --- a/tests/complicated_polymorph.onyx +++ b/tests/complicated_polymorph.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/compound_decl_with_types b/tests/compound_decl_with_types new file mode 100644 index 000000000..a0ab8eb7f --- /dev/null +++ b/tests/compound_decl_with_types @@ -0,0 +1,4 @@ +0 +1 +str1 +str2 diff --git a/tests/compound_decl_with_types.onyx b/tests/compound_decl_with_types.onyx new file mode 100644 index 000000000..aaafa9866 --- 
/dev/null +++ b/tests/compound_decl_with_types.onyx @@ -0,0 +1,15 @@ +use core {*} + +main :: () { + x, y: u32 = 0, 1; + println(x); + println(y); + + a, b: str = two_strs(); + println(a); + println(b); +} + +two_strs :: () -> (str, str) { + return "str1", "str2"; +} diff --git a/tests/custom_section b/tests/custom_section new file mode 100644 index 000000000..746da39b4 --- /dev/null +++ b/tests/custom_section @@ -0,0 +1 @@ +Worked. diff --git a/tests/custom_section.onyx b/tests/custom_section.onyx new file mode 100644 index 000000000..c26efdd60 --- /dev/null +++ b/tests/custom_section.onyx @@ -0,0 +1,8 @@ +use core {*} + +#wasm_section "test-section" "This is the contents of the test section" +#wasm_section "test-section-2" #file "./custom_section.onyx" + +main :: () { + println("Worked."); +} diff --git a/tests/defer_with_continue.onyx b/tests/defer_with_continue.onyx index 10d27418a..a1d55ab86 100644 --- a/tests/defer_with_continue.onyx +++ b/tests/defer_with_continue.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -44,7 +44,7 @@ main :: (args: [] cstr) { println("Case 4!"); } - case #default { + case _ { println("Default!"); } } diff --git a/tests/defined_test.onyx b/tests/defined_test.onyx index b9962d38f..2c589efc1 100644 --- a/tests/defined_test.onyx +++ b/tests/defined_test.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/float_parsing.onyx b/tests/float_parsing.onyx index e6fc82323..9540a5a79 100644 --- a/tests/float_parsing.onyx +++ b/tests/float_parsing.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -49,4 +49,4 @@ main :: (args: [] cstr) { value := conv.str_to_f64(s); println(value); } -} \ No newline at end of file +} diff --git a/tests/float_parsing_compiler b/tests/float_parsing_compiler new file mode 100644 index 000000000..a5de9b7b1 --- /dev/null +++ b/tests/float_parsing_compiler @@ -0,0 +1,4 @@ +1000000 +100.1234 +123455.9999 +0.1234 diff --git a/tests/float_parsing_compiler.onyx 
b/tests/float_parsing_compiler.onyx new file mode 100644 index 000000000..ea461fc25 --- /dev/null +++ b/tests/float_parsing_compiler.onyx @@ -0,0 +1,17 @@ +//+optional-semicolons + +use core {*} + +main :: () { + x := 1_000_000 + println(x) + + y := 100.12_34_56 + println(y) + + z := 1_23.4_56e3 + println(z) + + w := 1234.567e-4 + println(w) +} diff --git a/tests/hello_world.onyx b/tests/hello_world.onyx index dff9ba359..7fa970cbc 100644 --- a/tests/hello_world.onyx +++ b/tests/hello_world.onyx @@ -1,6 +1,6 @@ package main -#load "core/module" + use core {*} diff --git a/tests/i32map.onyx b/tests/i32map.onyx index dfa46e761..5d8fcc49b 100644 --- a/tests/i32map.onyx +++ b/tests/i32map.onyx @@ -1,6 +1,6 @@ package main -#load "core/module" + use core {*} diff --git a/tests/if_expressions.onyx b/tests/if_expressions.onyx index 1de27b55d..1a16ad4d4 100644 --- a/tests/if_expressions.onyx +++ b/tests/if_expressions.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {package, println, printf} diff --git a/tests/ignored_return_values.onyx b/tests/ignored_return_values.onyx index d27f529a5..86863a951 100644 --- a/tests/ignored_return_values.onyx +++ b/tests/ignored_return_values.onyx @@ -13,10 +13,8 @@ Foo :: struct { m: str; } -#inject Foo { - method :: (f: ^Foo) -> (i32, i32, str) { - return f.x, f.y, f.m; - } +Foo.method :: (f: ^Foo) -> (i32, i32, str) { + return f.x, f.y, f.m; } main :: () { diff --git a/tests/implicit_initialize_locals.onyx b/tests/implicit_initialize_locals.onyx index 33c14b11c..5fbfba30f 100644 --- a/tests/implicit_initialize_locals.onyx +++ b/tests/implicit_initialize_locals.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/init_procedures.onyx b/tests/init_procedures.onyx index 17414000b..626ec2f23 100644 --- a/tests/init_procedures.onyx +++ b/tests/init_procedures.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/interface_scopes.onyx b/tests/interface_scopes.onyx index eeec1c166..69152f6b3 
100644 --- a/tests/interface_scopes.onyx +++ b/tests/interface_scopes.onyx @@ -9,9 +9,8 @@ Speak :: interface (T: type_expr) { speak :: #match {} } -#inject Speak { - yell :: #match {} -} +Speak.yell :: #match {} + Dog :: struct {_: i32} diff --git a/tests/interfaces.onyx b/tests/interfaces.onyx index cd3f236ec..c430a8ed8 100644 --- a/tests/interfaces.onyx +++ b/tests/interfaces.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core use core.hash diff --git a/tests/lazy_iterators.onyx b/tests/lazy_iterators.onyx index 3fd0b2b4f..09b8a78a3 100644 --- a/tests/lazy_iterators.onyx +++ b/tests/lazy_iterators.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -9,10 +9,10 @@ count_iterator :: (lo: $T, hi: T, step: T = 1) -> Iterator(T) { (ctx) => { if ctx.current <= ctx.high { defer ctx.current += ctx.step; - return ctx.current, true; + return Optional.make(ctx.current); } - return 0, false; + return .None; }, (ctx) => { @@ -32,7 +32,7 @@ main :: (args: [] cstr) { count_iterator(1.0f, 20.0f) |> iter.map(x => x * 2) |> iter.filter(lower_bound, (x, l) => x > ~~l) - |> iter.map((x, [addition]) => x + ~~addition) + |> iter.map((x) use (addition) => x + ~~addition) |> iter.take(5); for v in quick_iterator { diff --git a/tests/multiple_returns_robustness.onyx b/tests/multiple_returns_robustness.onyx index a75713fe1..96e7b4507 100644 --- a/tests/multiple_returns_robustness.onyx +++ b/tests/multiple_returns_robustness.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {package, *} diff --git a/tests/named_arguments_test.onyx b/tests/named_arguments_test.onyx index 63cd648ab..3a1bfc40c 100644 --- a/tests/named_arguments_test.onyx +++ b/tests/named_arguments_test.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/named_return_values b/tests/named_return_values new file mode 100644 index 000000000..951ae12da --- /dev/null +++ b/tests/named_return_values @@ -0,0 +1,4 @@ +10 20 30 +40.0000 +15 +15.0000 diff --git a/tests/named_return_values.onyx 
b/tests/named_return_values.onyx new file mode 100644 index 000000000..eb3c5971e --- /dev/null +++ b/tests/named_return_values.onyx @@ -0,0 +1,31 @@ +//+optional-semicolons +use core {*} + +f :: () -> (x: i32, y: i32, z: i32) { + y = 20 + z = 30 + x = 10 + + return +} + +g :: (a: i32) -> (f: f32) { + f = ~~(a * 2) + return +} + +sum :: macro (a: [] $T) -> (total: T) { + for a do total += it + return +} + +main :: () { + x, y, z := f() + printf("{} {} {}\n", x, y, z) + + asdf := g(20) + println(asdf) + + sum(.[1, 2, 3, 4, 5]) |> println() + sum(f32.[1, 2, 3, 4, 5]) |> println() +} diff --git a/tests/new_auto_types b/tests/new_auto_types new file mode 100644 index 000000000..0f32d304e --- /dev/null +++ b/tests/new_auto_types @@ -0,0 +1,6 @@ +100 +i32 +123 +[] u8 +Allocator +f64 diff --git a/tests/new_auto_types.onyx b/tests/new_auto_types.onyx new file mode 100644 index 000000000..7b61588dc --- /dev/null +++ b/tests/new_auto_types.onyx @@ -0,0 +1,24 @@ +use core {*} + +main :: () { + x: _ + x = 100 + println(x) + println(typeof x) + + func_with_auto_return() |> println() + + a, b, c: _ + a = "Test" + b = context.allocator + c = 45.56 + println(typeof a) + println(typeof b) + println(typeof c) +} + +func_with_auto_return :: () -> _ { + return 123 +} + + diff --git a/tests/new_captures.onyx b/tests/new_captures.onyx new file mode 100644 index 000000000..d298d94d7 --- /dev/null +++ b/tests/new_captures.onyx @@ -0,0 +1,27 @@ +use core {*} + +main :: () { + message := "A test message" + arr := .[ 1, 2, 3, 6 ] + + do_something :: (arr: [] $T, f: (T) -> void) { + for arr { + f(it) + } + } + + do_something(arr, (elem: i32) use (message) { + printf("{}: {}\n", message, elem) + }) + + i := Iterator.from(arr) + |> .map((x: i32) use (message) => Pair.make(x, message)) + |> .filter((x) use (message) => { + return message[x.first] == ' ' + }) + + for i { + printf("{}\n", it) + } +} + diff --git a/tests/new_printf.onyx b/tests/new_printf.onyx index e1d46a19b..d6d81fa84 100644 --- 
a/tests/new_printf.onyx +++ b/tests/new_printf.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {printf, map} diff --git a/tests/new_struct_behaviour.onyx b/tests/new_struct_behaviour.onyx index 5e94952b3..efdc8ac55 100644 --- a/tests/new_struct_behaviour.onyx +++ b/tests/new_struct_behaviour.onyx @@ -1,6 +1,6 @@ // This is a needlessly complicated test of some of the newer features with structs. -#load "core/module" + use core {*} diff --git a/tests/operator_overload.onyx b/tests/operator_overload.onyx index 0352a9954..ee235b91f 100644 --- a/tests/operator_overload.onyx +++ b/tests/operator_overload.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/operators_as_methods.onyx b/tests/operators_as_methods.onyx index ff9858158..7903a63c7 100644 --- a/tests/operators_as_methods.onyx +++ b/tests/operators_as_methods.onyx @@ -1,7 +1,7 @@ use core {*} use runtime -#inject runtime.vars.Onyx_Enable_Operator_Methods :: true +runtime.vars.Onyx_Enable_Operator_Methods :: true main :: () { Point :: struct { diff --git a/tests/overload_precedence.onyx b/tests/overload_precedence.onyx index fc40f131a..395d91054 100644 --- a/tests/overload_precedence.onyx +++ b/tests/overload_precedence.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/overload_with_autocast.onyx b/tests/overload_with_autocast.onyx index 0323ebc76..4203b6a27 100644 --- a/tests/overload_with_autocast.onyx +++ b/tests/overload_with_autocast.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core use core {println} diff --git a/tests/persist_locals.onyx b/tests/persist_locals.onyx index c209911a8..ffb13aade 100644 --- a/tests/persist_locals.onyx +++ b/tests/persist_locals.onyx @@ -1,6 +1,6 @@ // This test does not make a whole ton of sense, but it does thoroughly test the #persist local capability. 
-#load "core/module" + use core {*} diff --git a/tests/pipe_into_method_call b/tests/pipe_into_method_call new file mode 100644 index 000000000..b2ee448f9 --- /dev/null +++ b/tests/pipe_into_method_call @@ -0,0 +1 @@ +Test: 123 diff --git a/tests/pipe_into_method_call.onyx b/tests/pipe_into_method_call.onyx new file mode 100644 index 000000000..0819e1825 --- /dev/null +++ b/tests/pipe_into_method_call.onyx @@ -0,0 +1,17 @@ +use core {*} + +Foo :: struct { + name: str +} + +Foo.print :: (self: &#Self, y: str, x: i32) { + printf("{}: {}\n", self.name, x) +} + +main :: () { + f := Foo.{"Test"} + + x := 123 + + x |> f->print("asdf", _) +} \ No newline at end of file diff --git a/tests/pipe_into_try b/tests/pipe_into_try new file mode 100644 index 000000000..ddb9dd08a --- /dev/null +++ b/tests/pipe_into_try @@ -0,0 +1,11 @@ +0xFFFC0 +Foo +Foo { x = 123, y = 456, z = 678 } +1: i32 +Str: [] u8 +other: [] u8 +12.3399: f64 +Foo { x = 123, y = 456, z = 678 }: Foo +ASDF +true +14 diff --git a/tests/pipe_into_try.onyx b/tests/pipe_into_try.onyx new file mode 100644 index 000000000..c024d8b7e --- /dev/null +++ b/tests/pipe_into_try.onyx @@ -0,0 +1,59 @@ +use core {*} + +f :: (x: Pair([5] any, #type () -> bool)) { + for x.first { + printf("{a}: {}\n", it, it.type) + } + + x.second() |> println() +} + + +Foo :: struct { x, y, z: i32 } + +main :: () { + v := new(Foo.{ + 123 + 456 + 678 + }) + x := misc.as_any(v) + + printf("{}\n", &x) + printf("{}\n", x.type) + printf("{a}\n", x) + + f(.{ + any.[ + temp_any(1) + temp_any("Str") + temp_any("other") + temp_any(12.34) + x + ] + () -> _ { + println("ASDF") + return true + } + }) + + do_it() +} + +asdf :: (x: i32) => Optional.make(x + 1) + +do_it :: () { + 10 + |> asdf()? + |> asdf()? + |> asdf()? + |> asdf()? 
+ |> println() +} + +temp_any :: macro (x: $T) => any.{ + new_temp(x) + T +} + + diff --git a/tests/pipe_placeholder b/tests/pipe_placeholder new file mode 100644 index 000000000..b4544c36e --- /dev/null +++ b/tests/pipe_placeholder @@ -0,0 +1,2 @@ +The value is: Some(45) +The value is: 10 diff --git a/tests/pipe_placeholder.onyx b/tests/pipe_placeholder.onyx new file mode 100644 index 000000000..76abe9be6 --- /dev/null +++ b/tests/pipe_placeholder.onyx @@ -0,0 +1,9 @@ +use core {*} + +main :: () { + iter.as_iter(1 .. 10) + |> iter.fold1((x, y) => x + y) + |> printf("The value is: {}\n", _); + + 10 |> printf("The value is: {}\n", _); +} \ No newline at end of file diff --git a/tests/poly_struct_in_type_info.onyx b/tests/poly_struct_in_type_info.onyx index 8281ad549..989883ee8 100644 --- a/tests/poly_struct_in_type_info.onyx +++ b/tests/poly_struct_in_type_info.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} use runtime.info diff --git a/tests/poly_structs_with_values.onyx b/tests/poly_structs_with_values.onyx index 156f5d1d2..414088782 100644 --- a/tests/poly_structs_with_values.onyx +++ b/tests/poly_structs_with_values.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/polymorphic_array_lengths.onyx b/tests/polymorphic_array_lengths.onyx index bea44e83b..09728d8a9 100644 --- a/tests/polymorphic_array_lengths.onyx +++ b/tests/polymorphic_array_lengths.onyx @@ -1,22 +1,18 @@ -#load "core/module" + use core {*} main :: (args: [] cstr) { arr := u32.[ 1, 2, 3, 4, 5 ]; - for elem in array_to_slice(arr) do printf("{} ", elem); + for elem in cast([] u32) arr do printf("{} ", elem); roots : [20] f32; compute_roots(roots); for root in roots do println(root); - array_to_slice :: (arr: [$N] $T) -> [] T { - return (#type [] T).{ ~~arr, N }; - } - - compute_roots :: (arr: [$N] f32) { - for i in 0 .. N { + compute_roots :: (arr: [] f32) { + for i in 0 .. 
arr.count { arr[i] = math.sqrt(cast(f32) i); } } diff --git a/tests/range64 b/tests/range64 new file mode 100644 index 000000000..b59e86384 --- /dev/null +++ b/tests/range64 @@ -0,0 +1,13 @@ +range +range64 +i64: 0 +i64: 1 +i64: 2 +i64: 3 +i64: 4 +10 +11 +12 +13 +14 +15 diff --git a/tests/range64.onyx b/tests/range64.onyx new file mode 100644 index 000000000..76c0ec785 --- /dev/null +++ b/tests/range64.onyx @@ -0,0 +1,19 @@ +//+optional-semicolons +use core {*} + +main :: () { + println(typeof (0 .. 10)) + + x: u64 = 10 + println(typeof (x .. 10)) + + y: i32 + + for iter.as_iter(0 .. x) |> iter.filter(x => x < 5) { + printf("{}: {}\n", typeof it, it) + } + + for x ..= 15 { + println(it) + } +} diff --git a/tests/remove_test.onyx b/tests/remove_test.onyx index 753f2ae33..14f70c0cc 100644 --- a/tests/remove_test.onyx +++ b/tests/remove_test.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/sets.onyx b/tests/sets.onyx index eacb0a7eb..757481ddc 100644 --- a/tests/sets.onyx +++ b/tests/sets.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/short_circuit_bools b/tests/short_circuit_bools new file mode 100644 index 000000000..2f4ec9b11 --- /dev/null +++ b/tests/short_circuit_bools @@ -0,0 +1,4 @@ +OP2 +false +OP1 +true diff --git a/tests/short_circuit_bools.onyx b/tests/short_circuit_bools.onyx new file mode 100644 index 000000000..2803a966b --- /dev/null +++ b/tests/short_circuit_bools.onyx @@ -0,0 +1,18 @@ +package main + +use core {*} + +op1 :: () -> bool { + println("OP1") + return true +} + +op2 :: () -> bool { + println("OP2") + return false +} + +main :: () { + (op2() && op1()) |> println() + (op1() || op2()) |> println() +} diff --git a/tests/stdlib/base64 b/tests/stdlib/base64 index f5577a394..ccbffc7c3 100644 --- a/tests/stdlib/base64 +++ b/tests/stdlib/base64 @@ -2,6 +2,15 @@ Many hands make light work. 
light w light wo light wor +light work +light work +light work a +light work an +light work and +light work and l +light work and le +light work and les +light work and less TWFueSBoYW5kcyBtYWtlIGxpZ2h0IHdvcmsu @@ -9,3 +18,39 @@ bGlnaHQgdw== bGlnaHQgd28= bGlnaHQgd29y bGlnaHQgd29yaw== +bGlnaHQgd29yayA= +bGlnaHQgd29yayBh +bGlnaHQgd29yayBhbg== +bGlnaHQgd29yayBhbmQ= +bGlnaHQgd29yayBhbmQgbA== +bGlnaHQgd29yayBhbmQgbGU= +bGlnaHQgd29yayBhbmQgbGVz +bGlnaHQgd29yayBhbmQgbGVzcw== +Many hands make light work. +light w +light wo +light wor +light work +light work +light work a +light work an +light work and +light work and l +light work and le +light work and les +light work and less + + +TWFueSBoYW5kcyBtYWtlIGxpZ2h0IHdvcmsu +bGlnaHQgdw +bGlnaHQgd28 +bGlnaHQgd29y +bGlnaHQgd29yaw +bGlnaHQgd29yayA +bGlnaHQgd29yayBh +bGlnaHQgd29yayBhbg +bGlnaHQgd29yayBhbmQ +bGlnaHQgd29yayBhbmQgbA +bGlnaHQgd29yayBhbmQgbGU +bGlnaHQgd29yayBhbmQgbGVz +bGlnaHQgd29yayBhbmQgbGVzcw diff --git a/tests/stdlib/base64.onyx b/tests/stdlib/base64.onyx index 9af3134d7..63062d98c 100644 --- a/tests/stdlib/base64.onyx +++ b/tests/stdlib/base64.onyx @@ -7,6 +7,15 @@ decode_test :: () { "bGlnaHQgdw==", "bGlnaHQgd28=", "bGlnaHQgd29y", + "bGlnaHQgd29yaw==", + "bGlnaHQgd29yayA=", + "bGlnaHQgd29yayBh", + "bGlnaHQgd29yayBhbg==", + "bGlnaHQgd29yayBhbmQ=", + "bGlnaHQgd29yayBhbmQgbA==", + "bGlnaHQgd29yayBhbmQgbGU=", + "bGlnaHQgd29yayBhbmQgbGVz", + "bGlnaHQgd29yayBhbmQgbGVzcw==", ] { core.println(base64.decode(it)); } @@ -19,15 +28,68 @@ encode_test :: () { "light wo", "light wor", "light work", + "light work ", + "light work a", + "light work an", + "light work and", + "light work and l", + "light work and le", + "light work and les", + "light work and less", ] { encoded := base64.encode(it); core.println(encoded); } } +decode_url_test :: () { + for .[ + "TWFueSBoYW5kcyBtYWtlIGxpZ2h0IHdvcmsu", + "bGlnaHQgdw", + "bGlnaHQgd28", + "bGlnaHQgd29y", + "bGlnaHQgd29yaw", + "bGlnaHQgd29yayA", + "bGlnaHQgd29yayBh", + 
"bGlnaHQgd29yayBhbg", + "bGlnaHQgd29yayBhbmQ", + "bGlnaHQgd29yayBhbmQgbA", + "bGlnaHQgd29yayBhbmQgbGU", + "bGlnaHQgd29yayBhbmQgbGVz", + "bGlnaHQgd29yayBhbmQgbGVzcw", + ] { + core.println(base64.decode_url(it)) + } +} + +encode_url_test :: () { + for .[ + "Many hands make light work.", + "light w", + "light wo", + "light wor", + "light work", + "light work ", + "light work a", + "light work an", + "light work and", + "light work and l", + "light work and le", + "light work and les", + "light work and less", + ] { + encoded := base64.encode_url(it) + core.println(encoded) + } +} + main :: () { decode_test(); core.println("\n"); encode_test(); + + decode_url_test() + core.println("\n") + encode_url_test() } diff --git a/tests/stdlib/hmac_sha256 b/tests/stdlib/hmac_sha256 new file mode 100644 index 000000000..996040e55 --- /dev/null +++ b/tests/stdlib/hmac_sha256 @@ -0,0 +1,2 @@ +D2CFFA238749BA729CB43AD31B0551AFBA6FCD7DDE7AD33F6F7C6886202A9848 +7D4F10FF6889B8EA47E02AF5809A21CC7EB12389F885D6846D502E2E8CE8A13C diff --git a/tests/stdlib/hmac_sha256.onyx b/tests/stdlib/hmac_sha256.onyx new file mode 100644 index 000000000..630dd9e14 --- /dev/null +++ b/tests/stdlib/hmac_sha256.onyx @@ -0,0 +1,29 @@ +use core.crypto {hmac} +use core.hash {sha256} +use core.encoding {base64, hex} +use core {printf, print} + +MESSAGE :: """Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed auctor tortor enim. Nam consectetur nec leo at viverra. Nunc venenatis mauris dui, et interdum velit egestas in. Nulla efficitur odio nulla, sed aliquet metus aliquam eu. Aliquam egestas interdum mi, sed mattis nibh interdum in. Proin vulputate sapien eget ipsum consequat fringilla. Duis vestibulum bibendum placerat. + +Proin non lacinia arcu, sit amet vestibulum nisl. Vestibulum sed enim feugiat, dignissim massa ac, pellentesque mi. Pellentesque nec orci sem. Sed consectetur nunc a iaculis imperdiet. Integer id quam pellentesque, ultricies nulla quis, pellentesque eros. Nulla facilisi. 
Aenean ornare ut nisl id pretium. In hac habitasse platea dictumst. Phasellus mattis libero vitae venenatis aliquet. Phasellus vitae eros vitae tortor tincidunt suscipit vitae nec lacus. Aliquam nibh lacus, volutpat non neque vitae, rhoncus ornare ex. Vestibulum in diam nibh. Cras elementum egestas mattis. Maecenas condimentum ligula in malesuada egestas. Nam a enim in mi volutpat mollis. Curabitur ac tristique sapien. + +Quisque ligula felis, placerat sit amet magna quis, consectetur sollicitudin eros. Nulla ac neque lobortis, ultrices ex sed, aliquam neque. Quisque sed egestas ipsum, eu placerat lacus. Ut et consectetur mauris, in aliquam nunc. Aenean consectetur nunc vel felis laoreet bibendum. Proin tempus magna ut orci consectetur hendrerit. Quisque ut gravida dolor. Duis pellentesque sed quam sed egestas. Integer porttitor erat in imperdiet elementum. Suspendisse sagittis sem ac neque feugiat feugiat. Integer mollis fringilla nibh at aliquet. Aliquam lorem ipsum, auctor vitae aliquam et, venenatis et sem. Cras efficitur malesuada purus, sed molestie diam. + +Aenean bibendum nisi nunc, mollis facilisis est vulputate sed. Aenean at elit sollicitudin, pulvinar nunc at, scelerisque lorem. Fusce at augue laoreet, dictum elit finibus, ultrices lorem. Praesent in odio vitae ante posuere convallis nec in lorem. Aenean ac egestas ante. In vehicula justo nisl. Praesent non ornare nibh. Cras gravida velit elit, eget aliquam quam vulputate vel. Nunc dapibus varius finibus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Donec a risus nec nisi vulputate finibus. Phasellus luctus quam quis ligula congue rutrum. Proin lobortis lectus nisl, eu ornare eros tincidunt nec. Vivamus dictum risus vel efficitur pellentesque. Vivamus est sapien, pharetra et vestibulum sed, posuere vitae tortor. Nullam eleifend, magna sit amet pharetra auctor, magna lacus ultrices ligula, vel iaculis magna dolor ut diam. + +Nulla cursus nunc eget nisl efficitur euismod. 
Morbi nibh ipsum, scelerisque eget consequat quis, elementum at turpis. Nulla luctus eleifend arcu sed venenatis. Proin ornare consectetur blandit. Nunc dignissim leo sed luctus mollis. Nam fringilla mattis ex id lacinia. Quisque tincidunt eros ligula. Mauris quis neque at enim aliquam luctus eget volutpat tellus. Aenean sit amet sapien vitae dui venenatis porta. Sed vehicula mauris viverra pharetra lobortis. Maecenas nisl dui, fermentum ac laoreet ac, rhoncus quis ante. Phasellus consequat pharetra mi, ac varius ligula. Nam mollis sapien at felis egestas, in scelerisque massa sodales. Donec et felis sed ligula facilisis tincidunt.""" + +KEY :: "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + +print_str :: (s: [] u8) { + for x in s { + printf("{w2b16}", cast(u32) x) + } + print("\n") +} + +main :: () { + sha256.hash(MESSAGE) |> str.copy() |> print_str() + hmac(MESSAGE, KEY, .SHA256) |> print_str() +} \ No newline at end of file diff --git a/tests/stdlib/iter_group_by b/tests/stdlib/iter_group_by new file mode 100644 index 000000000..a9bf1af1a --- /dev/null +++ b/tests/stdlib/iter_group_by @@ -0,0 +1,40 @@ +People of age 1: + BPondh7nsm +People of age 2: + IX1nY6odRy + zlz2VLJBBg +People of age 3: + iwTNLszdqY + eQg782XXT6 + SBYQ6R8NoN +People of age 4: + e9tI1Jo5WT + YAc7lWSvGk + esqYiCp53f + NINb8JZOiu +People of age 5: + 9BHoH29xG7 + a0rmO45tSN + DHg3j2qTJQ +People of age 6: + bAqb0pSNa5 + 9DOx8GOrUF + OlgfBEU2qe +People of age 7: + DPfpvZjNNT + p6GrLt4ha4 + p1H5lNiWmY + 5ZznJ1qteo +People of age 8: + UvalPVf31Q + wgTU9NJVbI + uXxkRnrhXZ +People of age 9: + 3uEJ2Rphi3 + HRzZbatcFp + QP7amKnANF + t3Ctcc03f2 + mQvp4Xvi1F + 8A54ZXjzNX +People of age 10: + jHQJQP86AO diff --git a/tests/stdlib/iter_group_by.onyx b/tests/stdlib/iter_group_by.onyx new file mode 100644 index 000000000..4466bea75 --- /dev/null +++ b/tests/stdlib/iter_group_by.onyx @@ -0,0 +1,25 @@ +use core {*} + +Person :: struct { age: i32; name: str } + +main :: () { + random.set_seed(1234) + + people := iter.comp(1 
..= 30, [](Person.{ + random.between(1, 10) + random.string(10, alpha_numeric=true) + })) + + people->sort((a, b) => a.age - b.age) + + group_iter := iter.as_iter(people) + |> iter.group_by((a, b) => a.age == b.age) + + for group in group_iter { + printf("People of age {}:\n", group.first.age) + + for v in group.second { + printf(" {}\n", v.name) + } + } +} \ No newline at end of file diff --git a/tests/stdlib/iter_prod b/tests/stdlib/iter_prod new file mode 100644 index 000000000..892d8e8ca --- /dev/null +++ b/tests/stdlib/iter_prod @@ -0,0 +1,8 @@ +(1, test) +(2, test) +(3, test) +(4, test) +(1, hello) +(2, hello) +(3, hello) +(4, hello) diff --git a/tests/stdlib/iter_prod.onyx b/tests/stdlib/iter_prod.onyx new file mode 100644 index 000000000..6023f9165 --- /dev/null +++ b/tests/stdlib/iter_prod.onyx @@ -0,0 +1,12 @@ +use core {*} + +main :: () { + for v in iter.prod(1 .. 5, .["test", "hello"]) { + println(v) + } + + // Iterating over empty array doesn't break things + for v in iter.prod(1 .. 
100, str.[]) { + println(v) + } +} \ No newline at end of file diff --git a/tests/stdlib/jwt b/tests/stdlib/jwt new file mode 100644 index 000000000..519da3840 --- /dev/null +++ b/tests/stdlib/jwt @@ -0,0 +1,7 @@ +eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJ0ZXN0IiwiaWF0IjoxMjM0NTY3ODkwfQ.4VapgNAjlcllVk-LvapP6mkPCz_qI3Q9SyQE3bdh2y0 +{ + "alg" => "HS256" + "typ" => "JWT" +} +Key: iss +Key: iat diff --git a/tests/stdlib/jwt.onyx b/tests/stdlib/jwt.onyx new file mode 100644 index 000000000..17b691aa5 --- /dev/null +++ b/tests/stdlib/jwt.onyx @@ -0,0 +1,28 @@ +use core.crypto.keys +use core.encoding.json +use core.time +use core.alloc +use core {printf} + +main :: () { + alloc.report_leaks_in_scope() + + use claims := Map.literal(str, json.Value, .[ + .{ "iss", json.from_any("test") } + .{ "iat", json.from_any(1234567890) } + ]) + + use tkn := keys.JWT.make_with_claims(.HS256, claims) + + KEY :: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + KEY2 :: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAABC" + + use token := tkn->signed_string(KEY).Ok! + printf("{p}\n", token) + + use parsed_token := keys.JWT.parse(token, KEY)! + printf("{p}\n", parsed_token.headers) + for Iterator.from(parsed_token.claims) { + printf("Key: {}\n", it.key) + } +} diff --git a/tests/stdlib/md5_test b/tests/stdlib/md5_test new file mode 100644 index 000000000..6656ff891 --- /dev/null +++ b/tests/stdlib/md5_test @@ -0,0 +1 @@ +79D19109FBDFE4EBE8469C99472C7219 diff --git a/tests/stdlib/md5_test.onyx b/tests/stdlib/md5_test.onyx new file mode 100644 index 000000000..5d28b3724 --- /dev/null +++ b/tests/stdlib/md5_test.onyx @@ -0,0 +1,6 @@ +use core.hash.md5 +use core{println} + +main :: () { + println(md5.digest("This is a really long string to test the MD5 hashing capabilities of the Onyx standard library. 
There should not be any bugs in something as simple and well-defined like the MD5 implementation")->as_hex_str()); +} diff --git a/tests/stdlib/new_value b/tests/stdlib/new_value new file mode 100644 index 000000000..a74560f93 --- /dev/null +++ b/tests/stdlib/new_value @@ -0,0 +1,2 @@ +Value 1: Foo { x = 123, y = 456, z = 678 } +Value 2: Foo { x = 10, y = 20, z = 30 } diff --git a/tests/stdlib/new_value.onyx b/tests/stdlib/new_value.onyx new file mode 100644 index 000000000..b2309e5e8 --- /dev/null +++ b/tests/stdlib/new_value.onyx @@ -0,0 +1,23 @@ +use core {*} + +Foo :: struct { + x: i32 = 10 + y: i32 = 20 + z: i32 = 30 +} + +main :: () { + v1 := new(Foo.{ + 123 + 456 + 678 + }) + defer cfree(v1) + + v2 := new(Foo) + defer cfree(v2) + + printf("Value 1: {*}\n", v1) + printf("Value 2: {*}\n", v2) +} + diff --git a/tests/stdlib/os_args b/tests/stdlib/os_args new file mode 100644 index 000000000..98df4fa74 --- /dev/null +++ b/tests/stdlib/os_args @@ -0,0 +1,2 @@ +New Args: [ ] +Old Args: [ ] diff --git a/tests/stdlib/os_args.onyx b/tests/stdlib/os_args.onyx new file mode 100644 index 000000000..1ff6b72ed --- /dev/null +++ b/tests/stdlib/os_args.onyx @@ -0,0 +1,6 @@ +use core {*} + +main :: (args: [] cstr) { + os.args() |> printf("New Args: {}\n", _) + args |> printf("Old Args: {}\n", _) +} diff --git a/tests/stdlib/sha1 b/tests/stdlib/sha1 new file mode 100644 index 000000000..191f9ef85 --- /dev/null +++ b/tests/stdlib/sha1 @@ -0,0 +1 @@ +[Info] 761c457bf73b14d27e9e9265c46f4b4dda11f940 diff --git a/tests/stdlib/sha1.onyx b/tests/stdlib/sha1.onyx new file mode 100644 index 000000000..8956c9c9c --- /dev/null +++ b/tests/stdlib/sha1.onyx @@ -0,0 +1,10 @@ +use core.hash.sha1 +use core.encoding.hex + +main :: () { + msg := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + // msg := "The quick brown fox jumps over the lazy cog"; + + hash := sha1.hash(msg) |> hex.encode(); + logf(.Info, "{}", hash); +} diff --git a/tests/stdlib/xml b/tests/stdlib/xml new 
file mode 100644 index 000000000..e17b6e893 --- /dev/null +++ b/tests/stdlib/xml @@ -0,0 +1,66 @@ +Child: Element { + name = "Datapoint", + parent = 0, + attributes = [ + ], + children = [ + Element(2), + Element(3) + ] +} +Name (value): 'Name 1' (123.45) +Child: Element { + name = "Datapoint", + parent = 0, + attributes = [ + ], + children = [ + Element(5), + Element(6) + ] +} +Name (value): 'Name 2' (123.45) +Child: Element { + name = "Datapoint", + parent = 0, + attributes = [ + ], + children = [ + Element(8), + Element(9) + ] +} +Name (value): 'Name 3' (123.45) +Child: Element { + name = "Datapoint", + parent = 0, + attributes = [ + ], + children = [ + Element(11), + Element(12) + ] +} +Name (value): 'Name 4' (123.45) +Child: Element { + name = "Datapoint", + parent = 0, + attributes = [ + ], + children = [ + Element(14), + Element(15) + ] +} +Name (value): 'Name 5' (123.45) +Child: Element { + name = "Datapoint", + parent = 0, + attributes = [ + ], + children = [ + Element(17), + Element(18) + ] +} +Name (value): 'Name 6' (123.45) diff --git a/tests/stdlib/xml.onyx b/tests/stdlib/xml.onyx new file mode 100644 index 000000000..df52987fe --- /dev/null +++ b/tests/stdlib/xml.onyx @@ -0,0 +1,46 @@ +package main + +use core.encoding.xml +use core {printf} + +test_doc :: """ + + + + Name 1 + 123.45 + + + Name 2 + 123.45 + + + Name 3 + 123.45 + + + Name 4 + 123.45 + + + Name 5 + 123.45 + + + Name 6 + 123.45 + + +""" + +main :: () { + use doc := xml.parse(test_doc)! + + for ch in doc->children(0) { + printf("Child: {*p}\n", &doc.elements[ch]) + + name := doc.elements[doc->child_with_name(ch, "Name")!].children[0].Text! + value := doc.elements[doc->child_with_name(ch, "Value")!].children[0].Text! 
+ printf("Name (value): {'} ({})\n", name, value) + } +} diff --git a/tests/string_stream_test.onyx b/tests/string_stream_test.onyx index 116b9e70f..92ed6c9d2 100644 --- a/tests/string_stream_test.onyx +++ b/tests/string_stream_test.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core use core.io diff --git a/tests/struct_literal_base_values b/tests/struct_literal_base_values new file mode 100644 index 000000000..cdffcd2be --- /dev/null +++ b/tests/struct_literal_base_values @@ -0,0 +1,3 @@ +Foo { name = "Phil", age = 34 } +Foo { name = "Joe", age = 34 } +Foo { name = "Joe", age = 10 } diff --git a/tests/struct_literal_base_values.onyx b/tests/struct_literal_base_values.onyx new file mode 100644 index 000000000..e0471dd65 --- /dev/null +++ b/tests/struct_literal_base_values.onyx @@ -0,0 +1,19 @@ +use core {*} + +Foo :: struct { + name: str + age: u32 +} + +main :: () { + joe := Foo.{"Joe", 34} + + phil := .{ ..joe, name = "Phil" } + + young_joe := .{ ..joe, age = 10 } + + println(phil) + println(joe) + println(young_joe) +} + diff --git a/tests/struct_robustness.onyx b/tests/struct_robustness.onyx index a940dc8bc..85998ee82 100644 --- a/tests/struct_robustness.onyx +++ b/tests/struct_robustness.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} diff --git a/tests/struct_use_pointer_member.onyx b/tests/struct_use_pointer_member.onyx index 0fce1d1cf..bd020a43e 100644 --- a/tests/struct_use_pointer_member.onyx +++ b/tests/struct_use_pointer_member.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} Person_Vtable :: struct { diff --git a/tests/switch_expressions.onyx b/tests/switch_expressions.onyx index efc5d458b..b4c8018bf 100644 --- a/tests/switch_expressions.onyx +++ b/tests/switch_expressions.onyx @@ -24,7 +24,7 @@ quick_map_test :: () { case 4 => .Val1; case 5 => .Val2; case 6 => .Val3; - case #default => .Val1; + case _ => .Val1; }; } } @@ -52,4 +52,4 @@ main :: () { assignment_test(); quick_map_test(); quick_union_map(); -} \ No newline at end of 
file +} diff --git a/tests/switch_using_equals.onyx b/tests/switch_using_equals.onyx index 1f18f55fc..473af599c 100644 --- a/tests/switch_using_equals.onyx +++ b/tests/switch_using_equals.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*} @@ -6,7 +6,7 @@ Vector2 :: struct { x, y: i32; } #operator == macro (v1: Vector2, v2: Vector2) => v1.x == v2.x && v1.y == v2.y; none_of_the_above :: [] { - case #default { + case _ { println("Got default!"); } } diff --git a/tests/sync_channel b/tests/sync_channel new file mode 100644 index 000000000..0d6d73082 --- /dev/null +++ b/tests/sync_channel @@ -0,0 +1,41 @@ +Int(0) +Int(5) +Int(10) +Int(15) +Int(20) +Int(25) +Int(30) +Int(35) +Int(40) +Int(45) +Int(50) +Int(55) +Int(60) +Int(65) +Int(70) +Int(75) +Int(80) +Int(85) +Int(90) +Int(95) +Int(100) +Int(105) +Int(110) +Int(115) +Int(120) +Int(125) +Int(130) +Int(135) +Int(140) +Int(145) +Int(150) +Int(155) +Int(160) +Int(165) +Int(170) +Int(175) +Int(180) +Int(185) +Int(190) +Int(195) +Str("Done") diff --git a/tests/sync_channel.onyx b/tests/sync_channel.onyx new file mode 100644 index 000000000..ead437fe9 --- /dev/null +++ b/tests/sync_channel.onyx @@ -0,0 +1,34 @@ +//+optional-semicolons + +use core {*} + +main :: () { + chan := sync.Channel.make(Msg) + + consumer_thread: thread.Thread + thread.spawn(&consumer_thread, &chan, consumer) + + for 20 { + os.sleep(150) + chan->send(.{ Int = it * 10 }) + chan->send(.{ Int = it * 10 + 5 }) + } + + chan->send(.{Str = "Done"}) + chan->close() + + thread.join(&consumer_thread) +} + +Msg :: union { + Str: str + Int: i32 +} + +consumer :: (channel: &sync.Channel(Msg)) { + for msg in channel->as_iter() { + println(msg) + } +} + + diff --git a/tests/unwrap_operator b/tests/unwrap_operator new file mode 100644 index 000000000..aa0aeeb75 --- /dev/null +++ b/tests/unwrap_operator @@ -0,0 +1,2 @@ +4 +asdf diff --git a/tests/unwrap_operator.onyx b/tests/unwrap_operator.onyx new file mode 100644 index 000000000..717a13354 --- /dev/null +++ 
b/tests/unwrap_operator.onyx @@ -0,0 +1,15 @@ +package main + +use core {*} + +main :: () { + m := Map.literal(i32, str, .[ + .{ 10, "asdf" } + .{ 20, "asdf" } + .{ 30, "asdf" } + ]) + + println(m[10] |> Optional.transform(x => x.length)!) + + m[30]->into_result("Expected 30 to exist!")! |> println() +} \ No newline at end of file diff --git a/tests/use_auto_dispose b/tests/use_auto_dispose new file mode 100644 index 000000000..75a83ba5f --- /dev/null +++ b/tests/use_auto_dispose @@ -0,0 +1,4 @@ +[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ] +In main! +From writer +[Info] Destroying foo... testing diff --git a/tests/use_auto_dispose.onyx b/tests/use_auto_dispose.onyx new file mode 100644 index 000000000..495ae10c1 --- /dev/null +++ b/tests/use_auto_dispose.onyx @@ -0,0 +1,33 @@ +use core {*} + +Foo :: struct { + data: str; +} + +#inject Foo { + make :: (s: str) => Foo.{s} + + dispose :: (f: &Foo) { + logf(.Info, "Destroying foo... {}", f.data); + } +} + +#overload +__dispose_used_local :: Foo.dispose + +main :: () { + // 'used' locals automatically have the following code inserted. + // defer __dispose_used_local(&local_name); + use f := Foo.make("testing"); + + // You are able to 'use' anything that has a `delete` for it. + use w := io.writer_make(&stdio.stream); + io.write(&w, "From writer\n"); + + use arr := make([..] 
u32); + for 10 do arr << it; + println(arr); + + println("In main!"); +} + diff --git a/tests/utf8_test.onyx b/tests/utf8_test.onyx index 941679cfa..be5a1c1a7 100644 --- a/tests/utf8_test.onyx +++ b/tests/utf8_test.onyx @@ -4,7 +4,7 @@ use runtime use core {print, println, printf} -#inject runtime.vars.Enable_Heap_Debug :: true +runtime.vars.Enable_Heap_Debug :: true main :: () { output := make(dyn_str); diff --git a/tests/vararg_test.onyx b/tests/vararg_test.onyx index 878a7c03d..0ea9e6ddc 100644 --- a/tests/vararg_test.onyx +++ b/tests/vararg_test.onyx @@ -1,6 +1,6 @@ package main -#load "core/module" + use core {*}; diff --git a/tests/variants_of_func_syntax b/tests/variants_of_func_syntax new file mode 100644 index 000000000..f8d302ae9 --- /dev/null +++ b/tests/variants_of_func_syntax @@ -0,0 +1,25 @@ +() {} works + +() use (...) {} works +closure size: 12 + +() -> _ {} works + +() use (...) -> _ {} works +closure size: 12 + +() => {} works + +() use (...) => {} works +closure size: 12 + +(x) => {} works + +(x) use (...) => {} works +closure size: 12 + +(x: i32) => {} works + +(x: i32) use (...) => {} works +closure size: 12 + diff --git a/tests/variants_of_func_syntax.onyx b/tests/variants_of_func_syntax.onyx new file mode 100644 index 000000000..80588eb19 --- /dev/null +++ b/tests/variants_of_func_syntax.onyx @@ -0,0 +1,46 @@ +use core {*} + +main :: () { + capture_me := 1234 + + f1 := () { println("() {} works") } + f2 := () use (capture_me) { println("() use (...) {} works") } + f3 := () -> _ { println("() -> _ {} works") } + f4 := () use (capture_me) -> _ { println("() use (...) -> _ {} works") } + f5 := () => { println("() => {} works") } + f6 := () use (capture_me) => { println("() use (...) => {} works") } + f7 : (i32) -> void = (x) => { println("(x) => {} works") } + f8 : (i32) -> void = (x) use (capture_me) => { println("(x) use (...) 
=> {} works") } + f9 := (x: i32) => { println("(x: i32) => {} works") } + f10 := (x: i32) use (capture_me) => { println("(x: i32) use (...) => {} works") } + + call_func :: #match { + (f: () -> void) { + f() + if f.closure { + printf("closure size: {}\n", *cast(&u32) f.closure) + } + print("\n") + } + + (f: (i32) -> void) { + f(0) + if f.closure { + printf("closure size: {}\n", *cast(&u32) f.closure) + } + print("\n") + } + } + + + call_func(f1) + call_func(f2) + call_func(f3) + call_func(f4) + call_func(f5) + call_func(f6) + call_func(f7) + call_func(f8) + call_func(f9) + call_func(f10) +} diff --git a/tests/where_clauses.onyx b/tests/where_clauses.onyx index be3655f75..627519d0e 100644 --- a/tests/where_clauses.onyx +++ b/tests/where_clauses.onyx @@ -1,4 +1,4 @@ -#load "core/module" + use core {*};