wip -- lsp mappings to various types

rust_compiler/src/ffi/mod.rs (new file)
@@ -0,0 +1,103 @@
+use compiler::Compiler;
+use parser::Parser;
+use safer_ffi::prelude::*;
+use std::io::BufWriter;
+use tokenizer::Tokenizer;
+
+#[derive_ReprC]
+#[repr(C)]
+pub struct FfiToken {
+    pub tooltip: safer_ffi::String,
+    pub error: safer_ffi::String,
+    pub column: i32,
+    pub length: i32,
+    pub token_kind: u32,
+}
+
+#[derive_ReprC]
+#[repr(C)]
+pub struct FfiRange {
+    start_col: u32,
+    end_col: u32,
+    start_line: u32,
+    end_line: u32,
+}
+
+impl From<lsp_types::Range> for FfiRange {
+    fn from(value: lsp_types::Range) -> Self {
+        Self {
+            start_col: value.start.character,
+            end_col: value.end.character,
+            start_line: value.start.line,
+            end_line: value.end.line,
+        }
+    }
+}
+
+#[derive_ReprC]
+#[repr(C)]
+pub struct FfiDiagnostic {
+    message: safer_ffi::String,
+    severity: i32,
+    range: FfiRange,
+}
+
+impl From<lsp_types::Diagnostic> for FfiDiagnostic {
+    fn from(value: lsp_types::Diagnostic) -> Self {
+        use lsp_types::*;
+        Self {
+            message: value.message.into(),
+            severity: match value.severity.unwrap_or(DiagnosticSeverity::ERROR) {
+                DiagnosticSeverity::WARNING => 2,
+                DiagnosticSeverity::INFORMATION => 3,
+                DiagnosticSeverity::HINT => 4,
+                _ => 1,
+            },
+            range: value.range.into(),
+        }
+    }
+}
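
A quick sketch of how this conversion behaves, written as an in-module test since the struct fields are private; the diagnostic contents here are made up:

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn severity_and_range_mapping() {
        // Made-up diagnostic: a warning spanning columns 0..7 on line 3.
        let diag = lsp_types::Diagnostic {
            range: lsp_types::Range {
                start: lsp_types::Position { line: 3, character: 0 },
                end: lsp_types::Position { line: 3, character: 7 },
            },
            severity: Some(lsp_types::DiagnosticSeverity::WARNING),
            message: "unused variable".to_string(),
            ..Default::default()
        };

        let ffi: FfiDiagnostic = diag.into();
        // 1 = error (also the fallback when severity is None),
        // 2 = warning, 3 = information, 4 = hint.
        assert_eq!(ffi.severity, 2);
        assert_eq!(ffi.range.start_line, 3);
        assert_eq!(ffi.range.end_col, 7);
    }
}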
+
+#[ffi_export]
+pub fn free_ffi_token_vec(v: safer_ffi::Vec<FfiToken>) {
+    drop(v)
+}
+
+#[ffi_export]
+pub fn free_ffi_diagnostic_vec(v: safer_ffi::Vec<FfiDiagnostic>) {
+    drop(v)
+}
+
+#[ffi_export]
+pub fn free_string(s: safer_ffi::String) {
+    drop(s)
+}
+
+/// C# handles strings as UTF-16. We do NOT want to allocate that memory in C# because
+/// we want to avoid the GC, so we pass the string to Rust and let Rust handle all the
+/// memory allocations. This should allow compiling many times without triggering frame
+/// drops from the GC after a `GetBytes()` call on a string in C#.
+#[ffi_export]
+pub fn compile_from_string(input: safer_ffi::slice::Ref<'_, u16>) -> safer_ffi::String {
+    let mut writer = BufWriter::new(Vec::new());
+
+    let tokenizer = Tokenizer::from(String::from_utf16_lossy(input.as_slice()));
+    let parser = Parser::new(tokenizer);
+    let compiler = Compiler::new(parser, &mut writer, None);
+
+    if !compiler.compile().is_empty() {
+        return safer_ffi::String::EMPTY;
+    }
+
+    let Ok(compiled_vec) = writer.into_inner() else {
+        return safer_ffi::String::EMPTY;
+    };
+
+    // Safety: the compiler only ever outputs valid UTF-8.
+    safer_ffi::String::from(unsafe { String::from_utf8_unchecked(compiled_vec) })
+}
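
For orientation, a Rust-side sketch of what the C# caller effectively does, assuming safer_ffi's `From<&[u16]>` conversion for slice refs; the source snippet is made up:

fn compile_example() -> safer_ffi::String {
    // C# would hand over its UTF-16 buffer; encode_utf16 produces
    // the same shape of data on the Rust side.
    let source = "print(1 + 2)"; // made-up source snippet
    let utf16: Vec<u16> = source.encode_utf16().collect();
    // An empty string signals a failed compile; the caller is expected
    // to hand the result back to free_string once it is done.
    compile_from_string(utf16.as_slice().into())
}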
+
+#[ffi_export]
+pub fn diagnose_source() -> safer_ffi::Vec<FfiDiagnostic> {
+    vec![].into()
+}

@@ -1,107 +1,5 @@
-use compiler::Compiler;
-use parser::Parser;
-use safer_ffi::prelude::*;
-use std::io::BufWriter;
-use tokenizer::{token::TokenType, Error as TokenizerError, Tokenizer};
-
-#[derive_ReprC]
-#[repr(C)]
-pub struct FfiToken {
-    pub tooltip: safer_ffi::String,
-    pub error: safer_ffi::String,
-    pub column: i32,
-    pub length: i32,
-    pub token_kind: u32,
-}
-
-fn map_token_kind(t: &TokenType) -> u32 {
-    use TokenType::*;
-    match t {
-        Keyword(_) => 1,
-        Identifier(_) => 2,
-        Number(_) => 3,
-        String(_) => 4,
-        Boolean(_) => 5,
-        Symbol(_) => 6,
-        _ => 0,
-    }
-}
-
-/// C# handles strings as UTF-16. We do NOT want to allocate that memory in C# because
-/// we want to avoid the GC, so we pass the string to Rust and let Rust handle all the
-/// memory allocations. This should allow compiling many times without triggering frame
-/// drops from the GC after a `GetBytes()` call on a string in C#.
-#[ffi_export]
-pub fn compile_from_string(input: safer_ffi::slice::Ref<'_, u16>) -> safer_ffi::String {
-    let mut writer = BufWriter::new(Vec::new());
-
-    let tokenizer = Tokenizer::from(String::from_utf16_lossy(input.as_slice()));
-    let parser = Parser::new(tokenizer);
-    let compiler = Compiler::new(parser, &mut writer, None);
-
-    if compiler.compile().is_err() {
-        return safer_ffi::String::EMPTY;
-    }
-
-    let Ok(compiled_vec) = writer.into_inner() else {
-        return safer_ffi::String::EMPTY;
-    };
-
-    // Safety: the compiler only ever outputs valid UTF-8.
-    safer_ffi::String::from(unsafe { String::from_utf8_unchecked(compiled_vec) })
-}
-/// C# handles strings as UTF-16. We do NOT want to allocate that memory in C# because
-/// we want to avoid the GC, so we pass the string to Rust and let Rust handle all the
-/// memory allocations. This should allow tokenizing many times without triggering frame
-/// drops from the GC after a `GetBytes()` call on a string in C#.
-#[ffi_export]
-pub fn tokenize_line(input: safer_ffi::slice::Ref<'_, u16>) -> safer_ffi::Vec<FfiToken> {
-    let tokenizer = Tokenizer::from(String::from_utf16_lossy(input.as_slice()));
-
-    let mut tokens = Vec::<FfiToken>::new();
-
-    for token in tokenizer {
-        match token {
-            Err(TokenizerError::NumberParseError(_, _, col, ref str))
-            | Err(TokenizerError::UnknownSymbolError(_, _, col, ref str))
-            | Err(TokenizerError::DecimalParseError(_, _, col, ref str))
-            | Err(TokenizerError::UnknownKeywordOrIdentifierError(_, _, col, ref str)) => {
-                tokens.push(FfiToken {
-                    column: col as i32 - 1,
-                    tooltip: "".into(),
-                    length: str.len() as i32,
-                    token_kind: 0,
-                    // Unwrapping is fine here because we matched on the `Err` variant.
-                    error: token.unwrap_err().to_string().into(),
-                });
-            }
-            Err(_) => return safer_ffi::Vec::EMPTY,
-            Ok(token) if !matches!(token.token_type, TokenType::EOF) => tokens.push(FfiToken {
-                tooltip: "".into(),
-                error: "".into(),
-                length: token
-                    .original_string
-                    .map(|s| s.len() as i32)
-                    .unwrap_or_default(),
-                token_kind: map_token_kind(&token.token_type),
-                column: token.column as i32 - 1,
-            }),
-            _ => {}
-        }
-    }
-
-    tokens.into()
-}
-
-#[ffi_export]
-pub fn free_ffi_token_vec(v: safer_ffi::Vec<FfiToken>) {
-    drop(v)
-}
-
-#[ffi_export]
-pub fn free_string(s: safer_ffi::String) {
-    drop(s)
-}
+mod ffi;
+pub(crate) mod lsp;
 
 #[cfg(feature = "headers")]
 pub fn generate_headers() -> std::io::Result<()> {

rust_compiler/src/lsp/mod.rs
@@ -8,7 +8,7 @@ use compiler::Compiler;
 use parser::Parser as ASTParser;
 use std::{
     fs::File,
-    io::{BufWriter, Read, Write},
+    io::{stderr, BufWriter, Read, Write},
     path::PathBuf,
 };
 use tokenizer::{self, Tokenizer};
@@ -75,7 +75,22 @@ fn run_logic() -> Result<(), StationlangError> {
 
     let compiler = Compiler::new(parser, &mut writer, None);
 
-    compiler.compile()?;
+    let mut errors = compiler.compile();
+
+    if !errors.is_empty() {
+        let mut std_error = stderr();
+        let last = errors.pop();
+        let errors = errors.into_iter().map(StationlangError::from);
+
+        std_error.write_all(b"Compilation error:\n")?;
+
+        for err in errors {
+            std_error.write_all(format!("{}\n", err).as_bytes())?;
+        }
+
+        return Err(StationlangError::from(last.unwrap()));
+    }
 
     writer.flush()?;
 
     Ok(())
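
The shape of this error handling, as a standalone sketch with the error type swapped for `String` to keep it self-contained: print every error to stderr, then propagate only the last one.

use std::io::{stderr, Write};

// Same shape as run_logic above: report all errors, return the last as Err.
fn report_errors(mut errors: Vec<String>) -> Result<(), String> {
    if errors.is_empty() {
        return Ok(());
    }
    let last = errors.pop();
    let mut std_error = stderr();
    std_error.write_all(b"Compilation error:\n").ok();
    for err in errors {
        std_error.write_all(format!("{}\n", err).as_bytes()).ok();
    }
    // The vec was non-empty, so pop() returned Some.
    Err(last.unwrap())
}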