Kalleby Santos
Kalleby Santos
DDeno
Created by Kalleby Santos on 1/24/2025 in #help
How to extend `serde_v8` to handle custom logic?
I already did the following from_v8 impl:
impl<'a> FromV8<'a> for JsTensor {
type Error = StdAnyError;

/// Two-phase decode of a JS tensor object `{ type, data, dims }` into a `JsTensor`.
/// Phase 1 deserializes "type" and "dims" but keeps "data" as an opaque v8 value;
/// phase 2 picks the concrete decoder for "data" based on the element type.
fn from_v8(
scope: &mut deno_core::v8::HandleScope<'a>,
value: deno_core::v8::Local<'a, deno_core::v8::Value>,
) -> Result<Self, Self::Error> {
// Deserialize the raw `v8::Value` into a proxy struct to inspect "type" and data
// without committing to a data representation yet.
#[derive(Deserialize)]
struct RawJsTensor<'a> {
// JS property is named "type"; `with = "JsTensorType"` delegates the
// string <-> TensorElementType mapping (defined elsewhere in this crate).
#[serde(rename = "type", with = "JsTensorType")]
data_type: TensorElementType,
data: serde_v8::Value<'a>, // Keep data as raw `serde_v8::Value` for now
dims: Vec<i64>,
}

let js_tensor = serde_v8::from_v8::<RawJsTensor>(scope, value).map_err(AnyError::from)?;

// Apply the correct `JsTensorData`: string tensors are decoded as an array of
// strings, everything else as a typed-array buffer.
// NOTE(review): `data.v8_value` is re-deserialized here with the scope — this is
// the second pass over the same JS object's "data" property.
let data = match js_tensor.data_type {
TensorElementType::String => JsTensorData::StringArray(
serde_v8::from_v8(scope, js_tensor.data.v8_value).map_err(AnyError::from)?,
),
_ => JsTensorData::TypedArrayBuffer(
serde_v8::from_v8(scope, js_tensor.data.v8_value).map_err(AnyError::from)?,
),
};

Ok(JsTensor {
data,
data_type: js_tensor.data_type,
dims: js_tensor.dims,
})
}
}
impl<'a> FromV8<'a> for JsTensor {
    type Error = StdAnyError;

    /// Decodes a JS tensor object `{ type, data, dims }` into a `JsTensor`.
    ///
    /// Decoding happens in two steps: first the "type" and "dims" properties are
    /// deserialized while "data" is held as a raw v8 value; then "data" is decoded
    /// with the representation implied by the element type.
    fn from_v8(
        scope: &mut deno_core::v8::HandleScope<'a>,
        value: deno_core::v8::Local<'a, deno_core::v8::Value>,
    ) -> Result<Self, Self::Error> {
        // Proxy struct: eagerly decode the cheap fields, defer "data".
        #[derive(Deserialize)]
        struct RawJsTensor<'a> {
            // JS property "type"; conversion delegated to `JsTensorType`.
            #[serde(rename = "type", with = "JsTensorType")]
            data_type: TensorElementType,
            // Held as an opaque `serde_v8::Value` until the element type is known.
            data: serde_v8::Value<'a>,
            dims: Vec<i64>,
        }

        let RawJsTensor { data_type, data, dims } =
            serde_v8::from_v8::<RawJsTensor>(scope, value).map_err(AnyError::from)?;

        // String tensors arrive as a JS string array; every other element type
        // is backed by a typed-array buffer.
        let data = if matches!(data_type, TensorElementType::String) {
            JsTensorData::StringArray(
                serde_v8::from_v8(scope, data.v8_value).map_err(AnyError::from)?,
            )
        } else {
            JsTensorData::TypedArrayBuffer(
                serde_v8::from_v8(scope, data.v8_value).map_err(AnyError::from)?,
            )
        };

        Ok(JsTensor { data, data_type, dims })
    }
}
But I need to handle it like this from my op function:
// Deno op: runs an ONNX Runtime session identified by `model_id` against the
// given named input tensors. The `#[serde]` attributes route both the input map
// and the returned map through serde_v8, which is what invokes the custom
// `FromV8` impl for `JsTensor` above.
#[op2(async)]
#[serde]
pub async fn op_sb_ai_ort_run_session(
state: Rc<RefCell<OpState>>,
#[string] model_id: String,
#[serde] input_values: HashMap<String, JsTensor>,
) -> Result<HashMap<String, ToJsTensor>> {
// Body elided in the original post.
...
}
// Async op entry point: `#[serde]` on `input_values` means deno_core deserializes
// the JS argument via serde_v8 — NOT via the hand-written `FromV8` impl — which
// is the mismatch the original question is about.
#[op2(async)]
#[serde]
pub async fn op_sb_ai_ort_run_session(
state: Rc<RefCell<OpState>>,
#[string] model_id: String,
#[serde] input_values: HashMap<String, JsTensor>,
) -> Result<HashMap<String, ToJsTensor>> {
// Body elided in the original post.
...
}
2 replies
DDeno
Created by amir3683 on 9/27/2024 in #help
Embedding Deno in a Rust App to evaluate dynamic javascript/typescript inputs/files?
You can check the Supabase Edge Runtime source code to get a strong example of embedding deno.
10 replies
DDeno
Created by amir3683 on 9/27/2024 in #help
Embedding Deno in a Rust App to evaluate dynamic javascript/typescript inputs/files?
You can also use the v8 crate directly to evaluate JS code from Rust, and combine it with the serde_v8 crate for I/O between Rust and JS. All of these packages are re-exported by the deno_core crate, as well as by deno_runtime.
10 replies
DDeno
Created by amir3683 on 9/27/2024 in #help
Embedding Deno in a Rust App to evaluate dynamic javascript/typescript inputs/files?
You can use the deno_runtime crate instead of raw deno_core; it comes with standard Deno ops like fetch, the Deno APIs, etc. You still need to hand-craft the TypeScript parser and the external import system (npm, esm, etc.), but it's a good starting point since it already implements a lot of JS APIs.
10 replies
DDeno
Created by Kalleby Santos on 9/24/2024 in #help
RustyV8: Example of creating a TypedArray?
EDIT: I got it, I need to compute the ArrayBuffer length as a multiple of the current element type's size:
// Wrap an existing tensor allocation in a zero-copy v8 ArrayBuffer backing store.
// The length argument is in BYTES, so the element count must be scaled by the
// element size (here hard-coded to f32 — a type-generic version would use the
// actual element width).
// SAFETY (review note): this requires `tensor_ptr` to stay valid for the life of
// the backing store; `drop_tensor`/`tensor_rc` presumably manage that — the
// definitions are outside this snippet, so confirm against the full source.
let buf_store = unsafe {
v8::ArrayBuffer::new_backing_store_from_ptr(
tensor_ptr as _,
tensor_len * size_of::<f32>(), // Here I need to multiply based on type size
drop_tensor,
tensor_rc as _,
)
}
.make_shared();
// Duplicate of the snippet above: creates a shared v8 backing store over the raw
// tensor pointer. `new_backing_store_from_ptr` takes a BYTE length, hence the
// `tensor_len * size_of::<f32>()` scaling; the deleter callback (`drop_tensor`)
// receives `tensor_rc` as its data pointer when v8 releases the buffer.
let buf_store = unsafe {
v8::ArrayBuffer::new_backing_store_from_ptr(
tensor_ptr as _,
tensor_len * size_of::<f32>(), // Here I need to multiply based on type size
drop_tensor,
tensor_rc as _,
)
}
.make_shared();
3 replies
DDeno
Created by Kalleby Santos on 9/24/2024 in #help
RustyV8: Example of creating a TypedArray?
/** HERE is my problem: **/
// I don't know how to pass: `byte_offset` and `length`
// (Context: `Float32Array::new(scope, buffer, byte_offset, length)` — `length`
// is an ELEMENT count, not a byte count, which explains the results below.)

/** TEST 1: Trying a length of 1: **/
let tensor_data = v8::Float32Array::new(scope, array_buffer , 0, 1).unwrap().into()
// JS output: { Float32Array(1) [ 0.22348394989967346 ] } -> But I was expecting an array of 3456 like the raw tensor above.

/** TEST 2: Trying the same length of original buffer [3456]: **/
let length = array_buffer.byte_length(); // $ 3456
let tensor_data = v8::Float32Array::new(scope, array_buffer , 0, length).unwrap().into()
// $ Runtime Error: "Check failed: byte_length <= buffer->GetByteLength()"

/** TEST 3: Trying buffer length -1: **/
let length = array_buffer.byte_length() -1; // $ 3455
let tensor_data = v8::Float32Array::new(scope, array_buffer , 0, length).unwrap().into()
// $ Runtime Error: "Check failed: byte_length <= buffer->GetByteLength()"

/** TEST 4: Trying to divide based on size: **/
let length = array_buffer.byte_length() / size_of::<f32>(); // $ 864
let tensor_data = v8::Float32Array::new(scope, array_buffer , 0, length).unwrap().into()
// JS output: { Float32Array(864) [ 0.22348395, 0.13147816, ... ] } -> But I was expecting an array of 3456 like the raw tensor above.
// (864 elements * 4 bytes = 3456 bytes — so here the BUFFER was sized in
// elements rather than bytes; see the EDIT above about scaling by type size.)

/** TEST 5: Trying to divide and offset based on size: **/
let byte_offset = size_of::<f32>(); // 4
let length = array_buffer.byte_length() / size_of::<f32>(); // $ 864
let tensor_data = v8::Float32Array::new(scope, array_buffer , byte_offset, length).unwrap().into()
// $ Runtime Error: Check failed: byte_offset + byte_length <= buffer->GetByteLength()
/** HERE is my problem: **/
// I don't know how to pass: `byte_offset` and `length`
// (Duplicate of the experiment log above. Takeaway: `Float32Array::new` wants a
// length in elements and an offset in bytes, both bounded by the buffer's byte
// length.)

/** TEST 1: Trying a length of 1: **/
let tensor_data = v8::Float32Array::new(scope, array_buffer , 0, 1).unwrap().into()
// JS output: { Float32Array(1) [ 0.22348394989967346 ] } -> But I was expecting an array of 3456 like the raw tensor above.

/** TEST 2: Trying the same length of original buffer [3456]: **/
let length = array_buffer.byte_length(); // $ 3456
let tensor_data = v8::Float32Array::new(scope, array_buffer , 0, length).unwrap().into()
// $ Runtime Error: "Check failed: byte_length <= buffer->GetByteLength()"

/** TEST 3: Trying buffer length -1: **/
let length = array_buffer.byte_length() -1; // $ 3455
let tensor_data = v8::Float32Array::new(scope, array_buffer , 0, length).unwrap().into()
// $ Runtime Error: "Check failed: byte_length <= buffer->GetByteLength()"

/** TEST 4: Trying to divide based on size: **/
let length = array_buffer.byte_length() / size_of::<f32>(); // $ 864
let tensor_data = v8::Float32Array::new(scope, array_buffer , 0, length).unwrap().into()
// JS output: { Float32Array(864) [ 0.22348395, 0.13147816, ... ] } -> But I was expecting an array of 3456 like the raw tensor above.

/** TEST 5: Trying to divide and offset based on size: **/
let byte_offset = size_of::<f32>(); // 4
let length = array_buffer.byte_length() / size_of::<f32>(); // $ 864
let tensor_data = v8::Float32Array::new(scope, array_buffer , byte_offset, length).unwrap().into()
// $ Runtime Error: Check failed: byte_offset + byte_length <= buffer->GetByteLength()
3 replies