diff --git a/CMakeLists.txt b/CMakeLists.txt
index 220ce4b67d28e..5042b17ad49da 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -741,6 +741,7 @@ add_library(ggml OBJECT
             print.hpp
             plugin_python.cpp
             plugin_nodejs.cpp
+            plugin_nodejs_metacall.cpp
             ggml-internal.hpp
             llama-internal.hpp
             ggml-alloc.cpp
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 3bf13477462e0..2d5a36811a3ef 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -33,8 +33,12 @@
 
 #include "print.hpp"
 //#include "plugin_python.hpp"
-#include "plugin_nodejs.hpp"
-#define process_output_plugin process_output_plugin_node
+//#include "plugin_nodejs.hpp"
+#include "plugin_nodejs_metacall.hpp"
+#define process_output_plugin process_output_plugin_metacall
+#define process_output_plugin_destroy process_output_plugin_metacall_destroy
+#define process_output_plugin_init process_output_plugin_metacall_init
+
 
 static llama_context ** g_ctx;
 static llama_model ** g_model;
@@ -138,7 +142,7 @@ int main(int argc, char ** argv) {
     // save choice to use color for later
     // (note for later: this is a slightly awkward choice)
     console::init(params.simple_io, params.use_color);
-    process_output_plugin_node_init();
+    process_output_plugin_init();
     atexit([]() { console::cleanup(); });
 
     if (params.logits_all) {
@@ -911,6 +915,6 @@ int main(int argc, char ** argv) {
     LOG_TEE("Log end\n");
 #endif // LOG_DISABLE_LOGS
 
-    process_output_plugin_node_destroy();
+    process_output_plugin_destroy();
     return 0;
 }
diff --git a/script.js b/script.js
new file mode 100644
index 0000000000000..2b5d6ed11cb9f
--- /dev/null
+++ b/script.js
@@ -0,0 +1,20 @@
+#!/usr/bin/env node
+
+'use strict';
+
+function sum(a, b) {
+    return a + b;
+}
+
+function timeout(ms, cb) {
+    return new Promise(resolve => setTimeout(() => resolve(cb()), ms));
+}
+
+async function async_sum(a, b) {
+    return await timeout(2000, () => sum(a, b));
+}
+
+module.exports = {
+    sum,
+    async_sum,
+};
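
Note: the new plugin_nodejs_metacall.cpp / plugin_nodejs_metacall.hpp that this patch adds to the build and includes from main.cpp are not part of the diff itself. As a rough orientation only, the three entry points that main.cpp now expects (process_output_plugin_metacall_init, process_output_plugin_metacall, process_output_plugin_metacall_destroy) could sit on top of the MetaCall C API roughly as sketched below. This is a minimal sketch, not the code from this branch: the process_output_plugin_metacall signature, the hard-coded "script.js" path, and the use of metacallv with explicitly created values are assumptions.

// Hypothetical sketch of plugin_nodejs_metacall.cpp -- NOT the file from this patch.
// Assumes the MetaCall C API and that the plugin hook receives the generated
// text as a std::string and returns it (possibly transformed).
#include <metacall/metacall.h>

#include <cstdio>
#include <string>

void process_output_plugin_metacall_init() {
    if (metacall_initialize() != 0) {
        fprintf(stderr, "metacall: initialization failed\n");
        return;
    }

    // Load script.js through MetaCall's NodeJS loader ("node" tag).
    const char * scripts[] = { "script.js" };
    if (metacall_load_from_file("node", scripts, 1, NULL) != 0) {
        fprintf(stderr, "metacall: failed to load script.js\n");
    }
}

std::string process_output_plugin_metacall(const std::string & output) {
    // Illustration only: call the exported sum() from script.js with two
    // numbers, log the result, and pass the model output through unchanged.
    void * args[] = {
        metacall_value_create_double((double) output.size()),
        metacall_value_create_double(1.0),
    };

    void * ret = metacallv("sum", args);
    if (ret != NULL) {
        fprintf(stderr, "script.js sum() -> %f\n", metacall_value_to_double(ret));
        metacall_value_destroy(ret);
    }

    metacall_value_destroy(args[0]);
    metacall_value_destroy(args[1]);

    return output;
}

void process_output_plugin_metacall_destroy() {
    metacall_destroy();
}

With the plugin hooks behind the generic process_output_plugin* macros, switching back to the direct Node.js embedding (or to the Python plugin) only requires flipping the #include and the three #define lines at the top of main.cpp.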